Merge branch 'master' into nixos-nixpkgs-pkgs-use-overlays
commit 03fc1167e8

.github/CODEOWNERS (vendored): 24 changed lines
@@ -12,7 +12,7 @@

# Libraries
/lib @edolstra @nbp
-/lib/systems @nbp @ericson2314
+/lib/systems @nbp @ericson2314 @matthewbauer
/lib/generators.nix @edolstra @nbp @Profpatsch
/lib/debug.nix @edolstra @nbp @Profpatsch

@@ -20,9 +20,11 @@
/default.nix @nbp
/pkgs/top-level/default.nix @nbp @Ericson2314
/pkgs/top-level/impure.nix @nbp @Ericson2314
-/pkgs/top-level/stage.nix @nbp @Ericson2314
-/pkgs/stdenv/generic @Ericson2314
-/pkgs/stdenv/cross @Ericson2314
+/pkgs/top-level/stage.nix @nbp @Ericson2314 @matthewbauer
+/pkgs/top-level/splice.nix @Ericson2314 @matthewbauer
+/pkgs/top-level/release-cross.nix @Ericson2314 @matthewbauer
+/pkgs/stdenv/generic @Ericson2314 @matthewbauer
+/pkgs/stdenv/cross @Ericson2314 @matthewbauer
/pkgs/build-support/cc-wrapper @Ericson2314 @orivej
/pkgs/build-support/bintools-wrapper @Ericson2314 @orivej
/pkgs/build-support/setup-hooks @Ericson2314

@@ -74,6 +76,14 @@
/pkgs/stdenv/darwin @NixOS/darwin-maintainers
/pkgs/os-specific/darwin @NixOS/darwin-maintainers

+# C compilers
+/pkgs/development/compilers/gcc @matthewbauer
+/pkgs/development/compilers/llvm @matthewbauer
+
+# Compatibility stuff
+/pkgs/top-level/unix-tools.nix @matthewbauer
+/pkgs/development/tools/xcbuild @matthewbauer
+
# Beam-related (Erlang, Elixir, LFE, etc)
/pkgs/development/beam-modules @gleber
/pkgs/development/interpreters/erlang @gleber

@@ -97,3 +107,9 @@
/pkgs/desktops/plasma-5 @ttuegel
/pkgs/development/libraries/kde-frameworks @ttuegel
/pkgs/development/libraries/qt-5 @ttuegel
+
+# PostgreSQL and related stuff
+/pkgs/servers/sql/postgresql @thoughtpolice
+/nixos/modules/services/databases/postgresql.xml @thoughtpolice
+/nixos/modules/services/databases/postgresql.nix @thoughtpolice
+/nixos/tests/postgresql.nix @thoughtpolice

@@ -842,9 +842,12 @@ src = fetchFromGitHub {
owner = "NixOS";
repo = "nix";
rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
-sha256 = "04yri911rj9j19qqqn6m82266fl05pz98inasni0vxr1cf1gdgv9";
+sha256 = "1i2yxndxb6yc9l6c99pypbd92lfq5aac4klq7y2v93c9qvx2cgpc";
}
</programlisting>
+Find the value to put as <literal>sha256</literal> by running
+<literal>nix run -f '<nixpkgs>' nix-prefetch-github -c nix-prefetch-github --rev 1f795f9f44607cc5bec70d1300150bfefcef2aae NixOS nix</literal>
+or <literal>nix-prefetch-url --unpack https://github.com/NixOS/nix/archive/1f795f9f44607cc5bec70d1300150bfefcef2aae.tar.gz</literal>.
</para>
</listitem>
</itemizedlist>

@@ -670,7 +670,7 @@ python3Packages.buildPythonApplication rec {
sha256 = "035w8gqql36zlan0xjrzz9j4lh9hs0qrsgnbyw07qs7lnkvbdv9x";
};

-propagatedBuildInputs = with python3Packages; [ tornado_4 pythondaemon ];
+propagatedBuildInputs = with python3Packages; [ tornado_4 python-daemon ];

meta = with lib; {
...

@@ -23,27 +23,54 @@ rec {

# -- TRACING --

-/* Trace msg, but only if pred is true.
+/* Conditionally trace the supplied message, based on a predicate.
+
+Type: traceIf :: bool -> string -> a -> a

Example:
traceIf true "hello" 3
trace: hello
=> 3
*/
-traceIf = pred: msg: x: if pred then trace msg x else x;
+traceIf =
+# Predicate to check
+pred:
+# Message that should be traced
+msg:
+# Value to return
+x: if pred then trace msg x else x;

-/* Trace the value and also return it.
+/* Trace the supplied value after applying a function to it, and
+return the original value.
+
+Type: traceValFn :: (a -> b) -> a -> a

Example:
traceValFn (v: "mystring ${v}") "foo"
trace: mystring foo
=> "foo"
*/
-traceValFn = f: x: trace (f x) x;
+traceValFn =
+# Function to apply
+f:
+# Value to trace and return
+x: trace (f x) x;
+
+/* Trace the supplied value and return it.
+
+Type: traceVal :: a -> a
+
+Example:
+traceVal 42
+# trace: 42
+=> 42
+*/
traceVal = traceValFn id;

/* `builtins.trace`, but the value is `builtins.deepSeq`ed first.
+
+Type: traceSeq :: a -> b -> b

Example:
trace { a.b.c = 3; } null
trace: { a = <CODE>; }

@@ -52,7 +79,11 @@ rec {
trace: { a = { b = { c = 3; }; }; }
=> null
*/
-traceSeq = x: y: trace (builtins.deepSeq x x) y;
+traceSeq =
+# The value to trace
+x:
+# The value to return
+y: trace (builtins.deepSeq x x) y;

/* Like `traceSeq`, but only evaluate down to depth n.
This is very useful because lots of `traceSeq` usages

@@ -76,27 +107,49 @@ rec {
in trace (generators.toPretty { allowPrettyValues = true; }
(modify depth snip x)) y;

-/* A combination of `traceVal` and `traceSeq` */
-traceValSeqFn = f: v: traceValFn f (builtins.deepSeq v v);
+/* A combination of `traceVal` and `traceSeq` that applies a
+provided function to the value to be traced after `deepSeq`ing
+it.
+*/
+traceValSeqFn =
+# Function to apply
+f:
+# Value to trace
+v: traceValFn f (builtins.deepSeq v v);
+
+/* A combination of `traceVal` and `traceSeq`. */
traceValSeq = traceValSeqFn id;

+/* A combination of `traceVal` and `traceSeqN` that applies a
+provided function to the value to be traced. */
+traceValSeqNFn =
+# Function to apply
+f:
+depth:
+# Value to trace
+v: traceSeqN depth (f v) v;
+
/* A combination of `traceVal` and `traceSeqN`. */
-traceValSeqNFn = f: depth: v: traceSeqN depth (f v) v;
traceValSeqN = traceValSeqNFn id;


# -- TESTING --

-/* Evaluate a set of tests. A test is an attribute set {expr,
-expected}, denoting an expression and its expected result. The
-result is a list of failed tests, each represented as {name,
-expected, actual}, denoting the attribute name of the failing
-test and its expected and actual results. Used for regression
-testing of the functions in lib; see tests.nix for an example.
-Only tests having names starting with "test" are run.
-Add attr { tests = ["testName"]; } to run these test only
+/* Evaluate a set of tests. A test is an attribute set `{expr,
+expected}`, denoting an expression and its expected result. The
+result is a list of failed tests, each represented as `{name,
+expected, actual}`, denoting the attribute name of the failing
+test and its expected and actual results.
+
+Used for regression testing of the functions in lib; see
+tests.nix for an example. Only tests having names starting with
+"test" are run.
+
+Add attr { tests = ["testName"]; } to run these tests only.
*/
-runTests = tests: lib.concatLists (lib.attrValues (lib.mapAttrs (name: test:
+runTests =
+# Tests to run
+tests: lib.concatLists (lib.attrValues (lib.mapAttrs (name: test:
let testsToRun = if tests ? tests then tests.tests else [];
in if (substring 0 4 name == "test" || elem name testsToRun)
&& ((testsToRun == []) || elem name tests.tests)

@@ -105,8 +158,11 @@ rec {
then [ { inherit name; expected = test.expected; result = test.expr; } ]
else [] ) tests));

-# create a test assuming that list elements are true
-# usage: { testX = allTrue [ true ]; }
+/* Create a test assuming that list elements are `true`.
+
+Example:
+{ testX = allTrue [ true ]; }
+*/
testAllTrue = expr: { inherit expr; expected = map (x: true) expr; };


@@ -309,6 +309,12 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "GNU General Public License v2.0 only";
};

+gpl2Classpath = {
+spdxId = "GPL-2.0-with-classpath-exception";
+fullName = "GNU General Public License v2.0 only (with Classpath exception)";
+url = https://fedoraproject.org/wiki/Licensing/GPL_Classpath_Exception;
+};
+
gpl2ClasspathPlus = {
fullName = "GNU General Public License v2.0 or later (with Classpath exception)";
url = https://fedoraproject.org/wiki/Licensing/GPL_Classpath_Exception;

@@ -394,6 +400,10 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
free = false;
};

+jasper = spdx {
+spdxId = "JasPer-2.0";
+fullName = "JasPer License";
+};
+
lgpl2 = spdx {
spdxId = "LGPL-2.0";

lib/lists.nix: 184 changed lines
@@ -1,4 +1,5 @@
# General list operations.
+
{ lib }:
with lib.trivial;
let

@@ -8,21 +9,23 @@ rec {

inherit (builtins) head tail length isList elemAt concatLists filter elem genList;

-/* Create a list consisting of a single element. `singleton x' is
-sometimes more convenient with respect to indentation than `[x]'
+/* Create a list consisting of a single element. `singleton x` is
+sometimes more convenient with respect to indentation than `[x]`
when x spans multiple lines.

+Type: singleton :: a -> [a]
+
Example:
singleton "foo"
=> [ "foo" ]
*/
singleton = x: [x];

-/* “right fold” a binary function `op' between successive elements of
-`list' with `nul' as the starting value, i.e.,
-`foldr op nul [x_1 x_2 ... x_n] == op x_1 (op x_2 ... (op x_n nul))'.
-Type:
-foldr :: (a -> b -> b) -> b -> [a] -> b
+/* “right fold” a binary function `op` between successive elements of
+`list` with `nul' as the starting value, i.e.,
+`foldr op nul [x_1 x_2 ... x_n] == op x_1 (op x_2 ... (op x_n nul))`.
+
+Type: foldr :: (a -> b -> b) -> b -> [a] -> b

Example:
concat = foldr (a: b: a + b) "z"

@@ -42,16 +45,15 @@ rec {
else op (elemAt list n) (fold' (n + 1));
in fold' 0;

-/* `fold' is an alias of `foldr' for historic reasons */
+/* `fold` is an alias of `foldr` for historic reasons */
# FIXME(Profpatsch): deprecate?
fold = foldr;


-/* “left fold”, like `foldr', but from the left:
+/* “left fold”, like `foldr`, but from the left:
`foldl op nul [x_1 x_2 ... x_n] == op (... (op (op nul x_1) x_2) ... x_n)`.

-Type:
-foldl :: (b -> a -> b) -> b -> [a] -> b
+Type: foldl :: (b -> a -> b) -> b -> [a] -> b

Example:
lconcat = foldl (a: b: a + b) "z"

@@ -70,16 +72,20 @@ rec {
else op (foldl' (n - 1)) (elemAt list n);
in foldl' (length list - 1);

-/* Strict version of `foldl'.
+/* Strict version of `foldl`.

The difference is that evaluation is forced upon access. Usually used
with small whole results (in contract with lazily-generated list or large
lists where only a part is consumed.)
+
+Type: foldl' :: (b -> a -> b) -> b -> [a] -> b
*/
foldl' = builtins.foldl' or foldl;

/* Map with index starting from 0

+Type: imap0 :: (int -> a -> b) -> [a] -> [b]
+
Example:
imap0 (i: v: "${v}-${toString i}") ["a" "b"]
=> [ "a-0" "b-1" ]

@@ -88,6 +94,8 @@ rec {

/* Map with index starting from 1

+Type: imap1 :: (int -> a -> b) -> [a] -> [b]
+
Example:
imap1 (i: v: "${v}-${toString i}") ["a" "b"]
=> [ "a-1" "b-2" ]

@@ -96,6 +104,8 @@ rec {

/* Map and concatenate the result.

+Type: concatMap :: (a -> [b]) -> [a] -> [b]
+
Example:
concatMap (x: [x] ++ ["z"]) ["a" "b"]
=> [ "a" "z" "b" "z" ]

@@ -118,15 +128,21 @@ rec {

/* Remove elements equal to 'e' from a list. Useful for buildInputs.

+Type: remove :: a -> [a] -> [a]
+
Example:
remove 3 [ 1 3 4 3 ]
=> [ 1 4 ]
*/
-remove = e: filter (x: x != e);
+remove =
+# Element to remove from the list
+e: filter (x: x != e);

/* Find the sole element in the list matching the specified
-predicate, returns `default' if no such element exists, or
-`multiple' if there are multiple matching elements.
+predicate, returns `default` if no such element exists, or
+`multiple` if there are multiple matching elements.

+Type: findSingle :: (a -> bool) -> a -> a -> [a] -> a
+
Example:
findSingle (x: x == 3) "none" "multiple" [ 1 3 3 ]

@@ -136,14 +152,24 @@ rec {
findSingle (x: x == 3) "none" "multiple" [ 1 9 ]
=> "none"
*/
-findSingle = pred: default: multiple: list:
+findSingle =
+# Predicate
+pred:
+# Default value to return if element was not found.
+default:
+# Default value to return if more than one element was found
+multiple:
+# Input list
+list:
let found = filter pred list; len = length found;
in if len == 0 then default
else if len != 1 then multiple
else head found;

/* Find the first element in the list matching the specified
-predicate or returns `default' if no such element exists.
+predicate or return `default` if no such element exists.

+Type: findFirst :: (a -> bool) -> a -> [a] -> a
+
Example:
findFirst (x: x > 3) 7 [ 1 6 4 ]

@@ -151,12 +177,20 @@ rec {
findFirst (x: x > 9) 7 [ 1 6 4 ]
=> 7
*/
-findFirst = pred: default: list:
+findFirst =
+# Predicate
+pred:
+# Default value to return
+default:
+# Input list
+list:
let found = filter pred list;
in if found == [] then default else head found;

-/* Return true iff function `pred' returns true for at least element
-of `list'.
+/* Return true if function `pred` returns true for at least one
+element of `list`.

+Type: any :: (a -> bool) -> [a] -> bool
+
Example:
any isString [ 1 "a" { } ]

@@ -166,8 +200,10 @@ rec {
*/
any = builtins.any or (pred: foldr (x: y: if pred x then true else y) false);

-/* Return true iff function `pred' returns true for all elements of
-`list'.
+/* Return true if function `pred` returns true for all elements of
+`list`.

+Type: all :: (a -> bool) -> [a] -> bool
+
Example:
all (x: x < 3) [ 1 2 ]

@@ -177,19 +213,25 @@ rec {
*/
all = builtins.all or (pred: foldr (x: y: if pred x then y else false) true);

-/* Count how many times function `pred' returns true for the elements
-of `list'.
+/* Count how many elements of `list` match the supplied predicate
+function.

+Type: count :: (a -> bool) -> [a] -> int
+
Example:
count (x: x == 3) [ 3 2 3 4 6 ]
=> 2
*/
-count = pred: foldl' (c: x: if pred x then c + 1 else c) 0;
+count =
+# Predicate
+pred: foldl' (c: x: if pred x then c + 1 else c) 0;

/* Return a singleton list or an empty list, depending on a boolean
value. Useful when building lists with optional elements
(e.g. `++ optional (system == "i686-linux") flashplayer').

+Type: optional :: bool -> a -> [a]
+
Example:
optional true "foo"
=> [ "foo" ]

@@ -200,13 +242,19 @@ rec {

/* Return a list or an empty list, depending on a boolean value.

+Type: optionals :: bool -> [a] -> [a]
+
Example:
optionals true [ 2 3 ]
=> [ 2 3 ]
optionals false [ 2 3 ]
=> [ ]
*/
-optionals = cond: elems: if cond then elems else [];
+optionals =
+# Condition
+cond:
+# List to return if condition is true
+elems: if cond then elems else [];


/* If argument is a list, return it; else, wrap it in a singleton

@@ -223,20 +271,28 @@ rec {

/* Return a list of integers from `first' up to and including `last'.

+Type: range :: int -> int -> [int]
+
Example:
range 2 4
=> [ 2 3 4 ]
range 3 2
=> [ ]
*/
-range = first: last:
+range =
+# First integer in the range
+first:
+# Last integer in the range
+last:
if first > last then
[]
else
genList (n: first + n) (last - first + 1);

-/* Splits the elements of a list in two lists, `right' and
-`wrong', depending on the evaluation of a predicate.
+/* Splits the elements of a list in two lists, `right` and
+`wrong`, depending on the evaluation of a predicate.

+Type: (a -> bool) -> [a] -> { right :: [a], wrong :: [a] }
+
Example:
partition (x: x > 2) [ 5 1 2 3 4 ]

@@ -252,7 +308,7 @@ rec {
/* Splits the elements of a list into many lists, using the return value of a predicate.
Predicate should return a string which becomes keys of attrset `groupBy' returns.

-`groupBy'' allows to customise the combining function and initial value
+`groupBy'` allows to customise the combining function and initial value

Example:
groupBy (x: boolToString (x > 2)) [ 5 1 2 3 4 ]

@@ -268,10 +324,6 @@ rec {
xfce = [ { name = "xfce"; script = "xfce4-session &"; } ];
}

-
-groupBy' allows to customise the combining function and initial value
-
-Example:
groupBy' builtins.add 0 (x: boolToString (x > 2)) [ 5 1 2 3 4 ]
=> { true = 12; false = 3; }
*/

@@ -289,17 +341,27 @@ rec {
the merging stops at the shortest. How both lists are merged is defined
by the first argument.

+Type: zipListsWith :: (a -> b -> c) -> [a] -> [b] -> [c]
+
Example:
zipListsWith (a: b: a + b) ["h" "l"] ["e" "o"]
=> ["he" "lo"]
*/
-zipListsWith = f: fst: snd:
+zipListsWith =
+# Function to zip elements of both lists
+f:
+# First list
+fst:
+# Second list
+snd:
genList
(n: f (elemAt fst n) (elemAt snd n)) (min (length fst) (length snd));

/* Merges two lists of the same size together. If the sizes aren't the same
the merging stops at the shortest.

+Type: zipLists :: [a] -> [b] -> [{ fst :: a, snd :: b}]
+
Example:
zipLists [ 1 2 ] [ "a" "b" ]
=> [ { fst = 1; snd = "a"; } { fst = 2; snd = "b"; } ]

@@ -308,6 +370,8 @@ rec {

/* Reverse the order of the elements of a list.

+Type: reverseList :: [a] -> [a]
+
Example:

reverseList [ "b" "o" "j" ]

@@ -321,8 +385,7 @@ rec {
`before a b == true` means that `b` depends on `a` (there's an
edge from `b` to `a`).

-Examples:
-
+Example:
listDfs true hasPrefix [ "/home/user" "other" "/" "/home" ]
== { minimal = "/"; # minimal element
visited = [ "/home/user" ]; # seen elements (in reverse order)

@@ -336,7 +399,6 @@ rec {
rest = [ "/home" "other" ]; # everything else
-
*/

listDfs = stopOnCycles: before: list:
let
dfs' = us: visited: rest:

@@ -361,7 +423,7 @@ rec {
`before a b == true` means that `b` should be after `a`
in the result.

-Examples:
+Example:

toposort hasPrefix [ "/home/user" "other" "/" "/home" ]
== { result = [ "/" "/home" "/home/user" "other" ]; }

@@ -376,7 +438,6 @@ rec {
toposort (a: b: a < b) [ 3 2 1 ] == { result = [ 1 2 3 ]; }
-
*/

toposort = before: list:
let
dfsthis = listDfs true before list;

@@ -467,26 +528,38 @@ rec {

/* Return the first (at most) N elements of a list.

+Type: take :: int -> [a] -> [a]
+
Example:
take 2 [ "a" "b" "c" "d" ]
=> [ "a" "b" ]
take 2 [ ]
=> [ ]
*/
-take = count: sublist 0 count;
+take =
+# Number of elements to take
+count: sublist 0 count;

/* Remove the first (at most) N elements of a list.

+Type: drop :: int -> [a] -> [a]
+
Example:
drop 2 [ "a" "b" "c" "d" ]
=> [ "c" "d" ]
drop 2 [ ]
=> [ ]
*/
-drop = count: list: sublist count (length list) list;
+drop =
+# Number of elements to drop
+count:
+# Input list
+list: sublist count (length list) list;

-/* Return a list consisting of at most ‘count’ elements of ‘list’,
-starting at index ‘start’.
+/* Return a list consisting of at most `count` elements of `list`,
+starting at index `start`.

+Type: sublist :: int -> int -> [a] -> [a]
+
Example:
sublist 1 3 [ "a" "b" "c" "d" "e" ]

@@ -494,7 +567,13 @@ rec {
sublist 1 3 [ ]
=> [ ]
*/
-sublist = start: count: list:
+sublist =
+# Index at which to start the sublist
+start:
+# Number of elements to take
+count:
+# Input list
+list:
let len = length list; in
genList
(n: elemAt list (n + start))

@@ -504,6 +583,10 @@ rec {

/* Return the last element of a list.

+This function throws an error if the list is empty.
+
+Type: last :: [a] -> a
+
Example:
last [ 1 2 3 ]
=> 3

@@ -512,7 +595,11 @@ rec {
assert lib.assertMsg (list != []) "lists.last: list must not be empty!";
elemAt list (length list - 1);

-/* Return all elements but the last
+/* Return all elements but the last.

+This function throws an error if the list is empty.
+
+Type: init :: [a] -> [a]
+
Example:
init [ 1 2 3 ]

@@ -523,7 +610,7 @@ rec {
take (length list - 1) list;


-/* return the image of the cross product of some lists by a function
+/* Return the image of the cross product of some lists by a function.

Example:
crossLists (x:y: "${toString x}${toString y}") [[1 2] [3 4]]

@@ -534,8 +621,9 @@ rec {

/* Remove duplicate elements from the list. O(n^2) complexity.

-Example:
+Type: unique :: [a] -> [a]

+Example:
unique [ 3 2 3 4 ]
=> [ 3 2 4 ]
*/

lib/options.nix: 153 changed lines
@@ -8,61 +8,72 @@ with lib.strings;

rec {

-# Returns true when the given argument is an option
-#
-# Examples:
-# isOption 1 // => false
-# isOption (mkOption {}) // => true
+/* Returns true when the given argument is an option
+
+Type: isOption :: a -> bool
+
+Example:
+isOption 1 // => false
+isOption (mkOption {}) // => true
+*/
isOption = lib.isType "option";

-# Creates an Option attribute set. mkOption accepts an attribute set with the following keys:
-#
-# default: Default value used when no definition is given in the configuration.
-# defaultText: Textual representation of the default, for in the manual.
-# example: Example value used in the manual.
-# description: String describing the option.
-# type: Option type, providing type-checking and value merging.
-# apply: Function that converts the option value to something else.
-# internal: Whether the option is for NixOS developers only.
-# visible: Whether the option shows up in the manual.
-# readOnly: Whether the option can be set only once
-# options: Obsolete, used by types.optionSet.
-#
-# All keys default to `null` when not given.
-#
-# Examples:
-# mkOption { } // => { _type = "option"; }
-# mkOption { defaultText = "foo"; } // => { _type = "option"; defaultText = "foo"; }
+/* Creates an Option attribute set. mkOption accepts an attribute set with the following keys:
+
+All keys default to `null` when not given.
+
+Example:
+mkOption { } // => { _type = "option"; }
+mkOption { defaultText = "foo"; } // => { _type = "option"; defaultText = "foo"; }
+*/
mkOption =
-{ default ? null # Default value used when no definition is given in the configuration.
-, defaultText ? null # Textual representation of the default, for in the manual.
-, example ? null # Example value used in the manual.
-, description ? null # String describing the option.
-, relatedPackages ? null # Related packages used in the manual (see `genRelatedPackages` in ../nixos/doc/manual/default.nix).
-, type ? null # Option type, providing type-checking and value merging.
-, apply ? null # Function that converts the option value to something else.
-, internal ? null # Whether the option is for NixOS developers only.
-, visible ? null # Whether the option shows up in the manual.
-, readOnly ? null # Whether the option can be set only once
-, options ? null # Obsolete, used by types.optionSet.
+{
+# Default value used when no definition is given in the configuration.
+default ? null,
+# Textual representation of the default, for the manual.
+defaultText ? null,
+# Example value used in the manual.
+example ? null,
+# String describing the option.
+description ? null,
+# Related packages used in the manual (see `genRelatedPackages` in ../nixos/doc/manual/default.nix).
+relatedPackages ? null,
+# Option type, providing type-checking and value merging.
+type ? null,
+# Function that converts the option value to something else.
+apply ? null,
+# Whether the option is for NixOS developers only.
+internal ? null,
+# Whether the option shows up in the manual.
+visible ? null,
+# Whether the option can be set only once
+readOnly ? null,
+# Obsolete, used by types.optionSet.
+options ? null
} @ attrs:
attrs // { _type = "option"; };

-# Creates a Option attribute set for a boolean value option i.e an option to be toggled on or off:
-#
-# Examples:
-# mkEnableOption "foo" // => { _type = "option"; default = false; description = "Whether to enable foo."; example = true; type = { ... }; }
-mkEnableOption = name: mkOption {
+/* Creates an Option attribute set for a boolean value option i.e an
+option to be toggled on or off:
+
+Example:
+mkEnableOption "foo"
+=> { _type = "option"; default = false; description = "Whether to enable foo."; example = true; type = { ... }; }
+*/
+mkEnableOption =
+# Name for the created option
+name: mkOption {
default = false;
example = true;
description = "Whether to enable ${name}.";
type = lib.types.bool;
};

-# This option accept anything, but it does not produce any result. This
-# is useful for sharing a module across different module sets without
-# having to implement similar features as long as the value of the options
-# are not expected.
+/* This option accepts anything, but it does not produce any result.
+
+This is useful for sharing a module across different module sets
+without having to implement similar features as long as the
+values of the options are not accessed. */
mkSinkUndeclaredOptions = attrs: mkOption ({
internal = true;
visible = false;

@@ -102,18 +113,24 @@ rec {
else
val) (head defs).value defs;

-# Extracts values of all "value" keys of the given list
-#
-# Examples:
-# getValues [ { value = 1; } { value = 2; } ] // => [ 1 2 ]
-# getValues [ ] // => [ ]
+/* Extracts values of all "value" keys of the given list.
+
+Type: getValues :: [ { value :: a } ] -> [a]
+
+Example:
+getValues [ { value = 1; } { value = 2; } ] // => [ 1 2 ]
+getValues [ ] // => [ ]
+*/
getValues = map (x: x.value);

-# Extracts values of all "file" keys of the given list
-#
-# Examples:
-# getFiles [ { file = "file1"; } { file = "file2"; } ] // => [ "file1" "file2" ]
-# getFiles [ ] // => [ ]
+/* Extracts values of all "file" keys of the given list
+
+Type: getFiles :: [ { file :: a } ] -> [a]
+
+Example:
+getFiles [ { file = "file1"; } { file = "file2"; } ] // => [ "file1" "file2" ]
+getFiles [ ] // => [ ]
+*/
getFiles = map (x: x.file);

# Generate documentation template from the list of option declaration like

@@ -146,10 +163,13 @@ rec {


/* This function recursively removes all derivation attributes from
-`x' except for the `name' attribute. This is to make the
-generation of `options.xml' much more efficient: the XML
-representation of derivations is very large (on the order of
-megabytes) and is not actually used by the manual generator. */
+`x` except for the `name` attribute.
+
+This is to make the generation of `options.xml` much more
+efficient: the XML representation of derivations is very large
+(on the order of megabytes) and is not actually used by the
+manual generator.
+*/
scrubOptionValue = x:
if isDerivation x then
{ type = "derivation"; drvPath = x.name; outPath = x.name; name = x.name; }

@@ -158,20 +178,21 @@ rec {
else x;


-/* For use in the ‘example’ option attribute. It causes the given
+/* For use in the `example` option attribute. It causes the given
text to be included verbatim in documentation. This is necessary
-for example values that are not simple values, e.g.,
-functions. */
+for example values that are not simple values, e.g., functions.
+*/
literalExample = text: { _type = "literalExample"; inherit text; };

+# Helper functions.

-/* Helper functions. */
-# Convert an option, described as a list of the option parts in to a
-# safe, human readable version. ie:
-#
-# (showOption ["foo" "bar" "baz"]) == "foo.bar.baz"
-# (showOption ["foo" "bar.baz" "tux"]) == "foo.\"bar.baz\".tux"
+/* Convert an option, described as a list of the option parts in to a
+safe, human readable version.
+
+Example:
+(showOption ["foo" "bar" "baz"]) == "foo.bar.baz"
+(showOption ["foo" "bar.baz" "tux"]) == "foo.\"bar.baz\".tux"
+*/
showOption = parts: let
escapeOptionPart = part:
let

lib/strings.nix: 213 changed lines
@ -12,6 +12,8 @@ rec {
|
|||||||
|
|
||||||
/* Concatenate a list of strings.
|
/* Concatenate a list of strings.
|
||||||
|
|
||||||
|
Type: concatStrings :: [string] -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
concatStrings ["foo" "bar"]
|
concatStrings ["foo" "bar"]
|
||||||
=> "foobar"
|
=> "foobar"
|
||||||
@ -20,15 +22,19 @@ rec {
|
|||||||
|
|
||||||
/* Map a function over a list and concatenate the resulting strings.
|
/* Map a function over a list and concatenate the resulting strings.
|
||||||
|
|
||||||
|
Type: concatMapStrings :: (a -> string) -> [a] -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
concatMapStrings (x: "a" + x) ["foo" "bar"]
|
concatMapStrings (x: "a" + x) ["foo" "bar"]
|
||||||
=> "afooabar"
|
=> "afooabar"
|
||||||
*/
|
*/
|
||||||
concatMapStrings = f: list: concatStrings (map f list);
|
concatMapStrings = f: list: concatStrings (map f list);
|
||||||
|
|
||||||
/* Like `concatMapStrings' except that the f functions also gets the
|
/* Like `concatMapStrings` except that the f functions also gets the
|
||||||
position as a parameter.
|
position as a parameter.
|
||||||
|
|
||||||
|
Type: concatImapStrings :: (int -> a -> string) -> [a] -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
concatImapStrings (pos: x: "${toString pos}-${x}") ["foo" "bar"]
|
concatImapStrings (pos: x: "${toString pos}-${x}") ["foo" "bar"]
|
||||||
=> "1-foo2-bar"
|
=> "1-foo2-bar"
|
||||||
@ -37,17 +43,25 @@ rec {
|
|||||||
|
|
||||||
/* Place an element between each element of a list
|
/* Place an element between each element of a list
|
||||||
|
|
||||||
|
Type: intersperse :: a -> [a] -> [a]
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
intersperse "/" ["usr" "local" "bin"]
|
intersperse "/" ["usr" "local" "bin"]
|
||||||
=> ["usr" "/" "local" "/" "bin"].
|
=> ["usr" "/" "local" "/" "bin"].
|
||||||
*/
|
*/
|
||||||
intersperse = separator: list:
|
intersperse =
|
||||||
|
# Separator to add between elements
|
||||||
|
separator:
|
||||||
|
# Input list
|
||||||
|
list:
|
||||||
if list == [] || length list == 1
|
if list == [] || length list == 1
|
||||||
then list
|
then list
|
||||||
else tail (lib.concatMap (x: [separator x]) list);
|
else tail (lib.concatMap (x: [separator x]) list);
|
||||||
|
|
||||||
/* Concatenate a list of strings with a separator between each element
|
/* Concatenate a list of strings with a separator between each element
|
||||||
|
|
||||||
|
Type: concatStringsSep :: string -> [string] -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
concatStringsSep "/" ["usr" "local" "bin"]
|
concatStringsSep "/" ["usr" "local" "bin"]
|
||||||
=> "usr/local/bin"
|
=> "usr/local/bin"
|
||||||
@ -55,43 +69,77 @@ rec {
|
|||||||
concatStringsSep = builtins.concatStringsSep or (separator: list:
|
concatStringsSep = builtins.concatStringsSep or (separator: list:
|
||||||
concatStrings (intersperse separator list));
|
concatStrings (intersperse separator list));
|
||||||
|
|
||||||
/* First maps over the list and then concatenates it.
|
/* Maps a function over a list of strings and then concatenates the
|
||||||
|
result with the specified separator interspersed between
|
||||||
|
elements.
|
||||||
|
|
||||||
|
Type: concatMapStringsSep :: string -> (string -> string) -> [string] -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
concatMapStringsSep "-" (x: toUpper x) ["foo" "bar" "baz"]
|
concatMapStringsSep "-" (x: toUpper x) ["foo" "bar" "baz"]
|
||||||
=> "FOO-BAR-BAZ"
|
=> "FOO-BAR-BAZ"
|
||||||
*/
|
*/
|
||||||
concatMapStringsSep = sep: f: list: concatStringsSep sep (map f list);
|
concatMapStringsSep =
|
||||||
|
# Separator to add between elements
|
||||||
|
sep:
|
||||||
|
# Function to map over the list
|
||||||
|
f:
|
||||||
|
# List of input strings
|
||||||
|
list: concatStringsSep sep (map f list);
|
||||||
|
|
||||||
/* First imaps over the list and then concatenates it.
|
/* Same as `concatMapStringsSep`, but the mapping function
|
||||||
|
additionally receives the position of its argument.
|
||||||
|
|
||||||
|
Type: concatMapStringsSep :: string -> (int -> string -> string) -> [string] -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
concatImapStringsSep "-" (pos: x: toString (x / pos)) [ 6 6 6 ]
|
concatImapStringsSep "-" (pos: x: toString (x / pos)) [ 6 6 6 ]
|
||||||
=> "6-3-2"
|
=> "6-3-2"
|
||||||
*/
|
*/
|
||||||
concatImapStringsSep = sep: f: list: concatStringsSep sep (lib.imap1 f list);
|
concatImapStringsSep =
|
||||||
|
# Separator to add between elements
|
||||||
|
sep:
|
||||||
|
# Function that receives elements and their positions
|
||||||
|
f:
|
||||||
|
# List of input strings
|
||||||
|
list: concatStringsSep sep (lib.imap1 f list);
|
||||||
|
|
||||||
/* Construct a Unix-style search path consisting of each `subDir"
|
/* Construct a Unix-style, colon-separated search path consisting of
|
||||||
directory of the given list of packages.
|
the given `subDir` appended to each of the given paths.
|
||||||
|
|
||||||
|
Type: makeSearchPath :: string -> [string] -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
makeSearchPath "bin" ["/root" "/usr" "/usr/local"]
|
makeSearchPath "bin" ["/root" "/usr" "/usr/local"]
|
||||||
=> "/root/bin:/usr/bin:/usr/local/bin"
|
=> "/root/bin:/usr/bin:/usr/local/bin"
|
||||||
makeSearchPath "bin" ["/"]
|
makeSearchPath "bin" [""]
|
||||||
=> "//bin"
|
=> "/bin"
|
||||||
*/
|
*/
|
||||||
makeSearchPath = subDir: packages:
|
makeSearchPath =
|
||||||
concatStringsSep ":" (map (path: path + "/" + subDir) (builtins.filter (x: x != null) packages));
|
# Directory name to append
|
||||||
|
subDir:
|
||||||
|
# List of base paths
|
||||||
|
paths:
|
||||||
|
concatStringsSep ":" (map (path: path + "/" + subDir) (builtins.filter (x: x != null) paths));
|
||||||
|
|
||||||
/* Construct a Unix-style search path, using given package output.
|
/* Construct a Unix-style search path by appending the given
|
||||||
If no output is found, fallback to `.out` and then to the default.
|
`subDir` to the specified `output` of each of the packages. If no
|
||||||
|
output by the given name is found, fallback to `.out` and then to
|
||||||
|
the default.
|
||||||
|
|
||||||
|
Type: string -> string -> [package] -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
makeSearchPathOutput "dev" "bin" [ pkgs.openssl pkgs.zlib ]
|
makeSearchPathOutput "dev" "bin" [ pkgs.openssl pkgs.zlib ]
|
||||||
=> "/nix/store/9rz8gxhzf8sw4kf2j2f1grr49w8zx5vj-openssl-1.0.1r-dev/bin:/nix/store/wwh7mhwh269sfjkm6k5665b5kgp7jrk2-zlib-1.2.8/bin"
|
=> "/nix/store/9rz8gxhzf8sw4kf2j2f1grr49w8zx5vj-openssl-1.0.1r-dev/bin:/nix/store/wwh7mhwh269sfjkm6k5665b5kgp7jrk2-zlib-1.2.8/bin"
|
||||||
*/
|
*/
|
||||||
makeSearchPathOutput = output: subDir: pkgs: makeSearchPath subDir (map (lib.getOutput output) pkgs);
|
makeSearchPathOutput =
|
||||||
|
# Package output to use
|
||||||
|
output:
|
||||||
|
# Directory name to append
|
||||||
|
subDir:
|
||||||
|
# List of packages
|
||||||
|
pkgs: makeSearchPath subDir (map (lib.getOutput output) pkgs);
|
||||||
|
|
||||||
/* Construct a library search path (such as RPATH) containing the
|
/* Construct a library search path (such as RPATH) containing the
|
||||||
libraries for a set of packages
|
libraries for a set of packages
|
||||||
@ -117,13 +165,12 @@ rec {
|
|||||||
|
|
||||||
/* Construct a perl search path (such as $PERL5LIB)
|
/* Construct a perl search path (such as $PERL5LIB)
|
||||||
|
|
||||||
FIXME(zimbatm): this should be moved in perl-specific code
|
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
pkgs = import <nixpkgs> { }
|
pkgs = import <nixpkgs> { }
|
||||||
makePerlPath [ pkgs.perlPackages.libnet ]
|
makePerlPath [ pkgs.perlPackages.libnet ]
|
||||||
=> "/nix/store/n0m1fk9c960d8wlrs62sncnadygqqc6y-perl-Net-SMTP-1.25/lib/perl5/site_perl"
|
=> "/nix/store/n0m1fk9c960d8wlrs62sncnadygqqc6y-perl-Net-SMTP-1.25/lib/perl5/site_perl"
|
||||||
*/
|
*/
|
||||||
|
# FIXME(zimbatm): this should be moved in perl-specific code
|
||||||
makePerlPath = makeSearchPathOutput "lib" "lib/perl5/site_perl";
|
makePerlPath = makeSearchPathOutput "lib" "lib/perl5/site_perl";
|
||||||
|
|
||||||
/* Construct a perl search path recursively including all dependencies (such as $PERL5LIB)
|
/* Construct a perl search path recursively including all dependencies (such as $PERL5LIB)
|
||||||
@ -138,34 +185,51 @@ rec {
|
|||||||
/* Depending on the boolean `cond', return either the given string
|
/* Depending on the boolean `cond', return either the given string
|
||||||
or the empty string. Useful to concatenate against a bigger string.
|
or the empty string. Useful to concatenate against a bigger string.
|
||||||
|
|
||||||
|
Type: optionalString :: bool -> string -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
optionalString true "some-string"
|
optionalString true "some-string"
|
||||||
=> "some-string"
|
=> "some-string"
|
||||||
optionalString false "some-string"
|
optionalString false "some-string"
|
||||||
=> ""
|
=> ""
|
||||||
*/
|
*/
|
||||||
optionalString = cond: string: if cond then string else "";
|
optionalString =
|
||||||
|
# Condition
|
||||||
|
cond:
|
||||||
|
# String to return if condition is true
|
||||||
|
string: if cond then string else "";
|
||||||
|
|
||||||
/* Determine whether a string has given prefix.
|
/* Determine whether a string has given prefix.
|
||||||
|
|
||||||
|
Type: hasPrefix :: string -> string -> bool
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
hasPrefix "foo" "foobar"
|
hasPrefix "foo" "foobar"
|
||||||
=> true
|
=> true
|
||||||
hasPrefix "foo" "barfoo"
|
hasPrefix "foo" "barfoo"
|
||||||
=> false
|
=> false
|
||||||
*/
|
*/
|
||||||
hasPrefix = pref: str:
|
hasPrefix =
|
||||||
substring 0 (stringLength pref) str == pref;
|
# Prefix to check for
|
||||||
|
pref:
|
||||||
|
# Input string
|
||||||
|
str: substring 0 (stringLength pref) str == pref;
|
||||||
|
|
||||||
/* Determine whether a string has given suffix.
|
/* Determine whether a string has given suffix.
|
||||||
|
|
||||||
|
Type: hasSuffix :: string -> string -> bool
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
hasSuffix "foo" "foobar"
|
hasSuffix "foo" "foobar"
|
||||||
=> false
|
=> false
|
||||||
hasSuffix "foo" "barfoo"
|
hasSuffix "foo" "barfoo"
|
||||||
=> true
|
=> true
|
||||||
*/
|
*/
|
||||||
hasSuffix = suffix: content:
|
hasSuffix =
|
||||||
|
# Suffix to check for
|
||||||
|
suffix:
|
||||||
|
# Input string
|
||||||
|
content:
|
||||||
let
|
let
|
||||||
lenContent = stringLength content;
|
lenContent = stringLength content;
|
||||||
lenSuffix = stringLength suffix;
|
lenSuffix = stringLength suffix;
|
||||||
@ -180,6 +244,8 @@ rec {
|
|||||||
Also note that Nix treats strings as a list of bytes and thus doesn't
|
Also note that Nix treats strings as a list of bytes and thus doesn't
|
||||||
handle unicode.
|
handle unicode.
|
||||||
|
|
||||||
|
Type: stringtoCharacters :: string -> [string]
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
stringToCharacters ""
|
stringToCharacters ""
|
||||||
=> [ ]
|
=> [ ]
|
||||||
@ -194,18 +260,25 @@ rec {
|
|||||||
/* Manipulate a string character by character and replace them by
|
/* Manipulate a string character by character and replace them by
|
||||||
strings before concatenating the results.
|
strings before concatenating the results.
|
||||||
|
|
||||||
|
Type: stringAsChars :: (string -> string) -> string -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
stringAsChars (x: if x == "a" then "i" else x) "nax"
|
stringAsChars (x: if x == "a" then "i" else x) "nax"
|
||||||
=> "nix"
|
=> "nix"
|
||||||
*/
|
*/
|
||||||
stringAsChars = f: s:
|
stringAsChars =
|
||||||
concatStrings (
|
# Function to map over each individual character
|
||||||
|
f:
|
||||||
|
# Input string
|
||||||
|
s: concatStrings (
|
||||||
map f (stringToCharacters s)
|
map f (stringToCharacters s)
|
||||||
);
|
);
|
||||||
|
|
||||||
/* Escape occurrence of the elements of ‘list’ in ‘string’ by
|
/* Escape occurrence of the elements of `list` in `string` by
|
||||||
prefixing it with a backslash.
|
prefixing it with a backslash.
|
||||||
|
|
||||||
|
Type: escape :: [string] -> string -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
escape ["(" ")"] "(foo)"
|
escape ["(" ")"] "(foo)"
|
||||||
=> "\\(foo\\)"
|
=> "\\(foo\\)"
|
||||||
@ -214,6 +287,8 @@ rec {
|
|||||||
|
|
||||||
/* Quote string to be used safely within the Bourne shell.
|
/* Quote string to be used safely within the Bourne shell.
|
||||||
|
|
||||||
|
Type: escapeShellArg :: string -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
escapeShellArg "esc'ape\nme"
|
escapeShellArg "esc'ape\nme"
|
||||||
=> "'esc'\\''ape\nme'"
|
=> "'esc'\\''ape\nme'"
|
||||||
@ -222,6 +297,8 @@ rec {
|
|||||||
|
|
||||||
/* Quote all arguments to be safely passed to the Bourne shell.
|
/* Quote all arguments to be safely passed to the Bourne shell.
|
||||||
|
|
||||||
|
Type: escapeShellArgs :: [string] -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
escapeShellArgs ["one" "two three" "four'five"]
|
escapeShellArgs ["one" "two three" "four'five"]
|
||||||
=> "'one' 'two three' 'four'\\''five'"
|
=> "'one' 'two three' 'four'\\''five'"
|
||||||
@ -230,13 +307,15 @@ rec {
|
|||||||
|
|
||||||
/* Turn a string into a Nix expression representing that string
|
/* Turn a string into a Nix expression representing that string
|
||||||
|
|
||||||
|
Type: string -> string
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
escapeNixString "hello\${}\n"
|
escapeNixString "hello\${}\n"
|
||||||
=> "\"hello\\\${}\\n\""
|
=> "\"hello\\\${}\\n\""
|
||||||
*/
|
*/
|
||||||
escapeNixString = s: escape ["$"] (builtins.toJSON s);
|
escapeNixString = s: escape ["$"] (builtins.toJSON s);
|
||||||
|
|
||||||
/* Obsolete - use replaceStrings instead. */
|
# Obsolete - use replaceStrings instead.
|
||||||
replaceChars = builtins.replaceStrings or (
|
replaceChars = builtins.replaceStrings or (
|
||||||
del: new: s:
|
del: new: s:
|
||||||
let
|
let
|
||||||
@@ -256,6 +335,8 @@ rec {
   /* Converts an ASCII string to lower-case.

+     Type: toLower :: string -> string
+
      Example:
        toLower "HOME"
        => "home"

@@ -264,6 +345,8 @@ rec {
   /* Converts an ASCII string to upper-case.

+     Type: toUpper :: string -> string
+
      Example:
        toUpper "home"
        => "HOME"

@@ -273,7 +356,7 @@ rec {
   /* Appends string context from another string. This is an implementation
      detail of Nix.

-     Strings in Nix carry an invisible `context' which is a list of strings
+     Strings in Nix carry an invisible `context` which is a list of strings
      representing store paths. If the string is later used in a derivation
      attribute, the derivation will properly populate the inputDrvs and
      inputSrcs.

@@ -319,8 +402,9 @@ rec {
     in
       recurse 0 0;

-  /* Return the suffix of the second argument if the first argument matches
-     its prefix.
+  /* Return a string without the specified prefix, if the prefix matches.
+
+     Type: string -> string -> string

      Example:
        removePrefix "foo." "foo.bar.baz"

@@ -328,18 +412,23 @@ rec {
        removePrefix "xxx" "foo.bar.baz"
        => "foo.bar.baz"
   */
-  removePrefix = pre: s:
+  removePrefix =
+    # Prefix to remove if it matches
+    prefix:
+    # Input string
+    str:
     let
-      preLen = stringLength pre;
-      sLen = stringLength s;
+      preLen = stringLength prefix;
+      sLen = stringLength str;
     in
-      if hasPrefix pre s then
-        substring preLen (sLen - preLen) s
+      if hasPrefix prefix str then
+        substring preLen (sLen - preLen) str
       else
-        s;
+        str;
-  /* Return the prefix of the second argument if the first argument matches
-     its suffix.
+  /* Return a string without the specified suffix, if the suffix matches.
+
+     Type: string -> string -> string

      Example:
        removeSuffix "front" "homefront"

@@ -347,17 +436,21 @@ rec {
        removeSuffix "xxx" "homefront"
        => "homefront"
   */
-  removeSuffix = suf: s:
+  removeSuffix =
+    # Suffix to remove if it matches
+    suffix:
+    # Input string
+    str:
     let
-      sufLen = stringLength suf;
-      sLen = stringLength s;
+      sufLen = stringLength suffix;
+      sLen = stringLength str;
     in
-      if sufLen <= sLen && suf == substring (sLen - sufLen) sufLen s then
-        substring 0 (sLen - sufLen) s
+      if sufLen <= sLen && suffix == substring (sLen - sufLen) sufLen str then
+        substring 0 (sLen - sufLen) str
       else
-        s;
+        str;

-  /* Return true iff string v1 denotes a version older than v2.
+  /* Return true if string v1 denotes a version older than v2.

      Example:
        versionOlder "1.1" "1.2"

@@ -367,7 +460,7 @@ rec {
   */
   versionOlder = v1: v2: builtins.compareVersions v2 v1 == 1;

-  /* Return true iff string v1 denotes a version equal to or newer than v2.
+  /* Return true if string v1 denotes a version equal to or newer than v2.

      Example:
        versionAtLeast "1.1" "1.0"

@@ -459,6 +552,11 @@ rec {
   /* Create a fixed width string with additional prefix to match
      required width.

+     This function will fail if the input string is longer than the
+     requested length.
+
+     Type: fixedWidthString :: int -> string -> string
+
      Example:
        fixedWidthString 5 "0" (toString 15)
        => "00015"

@@ -502,12 +600,16 @@ rec {
        => false
   */
   isStorePath = x:
-    isCoercibleToString x
-    && builtins.substring 0 1 (toString x) == "/"
-    && dirOf x == builtins.storeDir;
+    if isCoercibleToString x then
+      let str = toString x; in
+      builtins.substring 0 1 str == "/"
+      && dirOf str == builtins.storeDir
+    else
+      false;

-  /* Convert string to int
-     Obviously, it is a bit hacky to use fromJSON that way.
+  /* Parse a string as an int.
+
+     Type: string -> int

      Example:
        toInt "1337"
@@ -517,17 +619,18 @@ rec {
        toInt "3.14"
        => error: floating point JSON numbers are not supported
   */
+  # Obviously, it is a bit hacky to use fromJSON this way.
   toInt = str:
     let may_be_int = builtins.fromJSON str; in
     if builtins.isInt may_be_int
     then may_be_int
     else throw "Could not convert ${str} to int.";

-  /* Read a list of paths from `file', relative to the `rootPath'. Lines
-     beginning with `#' are treated as comments and ignored. Whitespace
-     is significant.
+  /* Read a list of paths from `file`, relative to the `rootPath`.
+     Lines beginning with `#` are treated as comments and ignored.
+     Whitespace is significant.

-     NOTE: this function is not performant and should be avoided
+     NOTE: This function is not performant and should be avoided.

      Example:
        readPathsFromFile /prefix

@@ -549,6 +652,8 @@ rec {

   /* Read the contents of a file removing the trailing \n

+     Type: fileContents :: path -> string
+
      Example:
        $ echo "1.0" > ./version
@@ -32,6 +32,7 @@ rec {
     else if final.isUClibc then "uclibc"
     else if final.isAndroid then "bionic"
     else if final.isLinux /* default */ then "glibc"
+    else if final.isAvr then "avrlibc"
     # TODO(@Ericson2314) think more about other operating systems
     else "native/impure";
   extensions = {

@@ -99,6 +99,34 @@ rec {
   riscv64 = riscv "64";
   riscv32 = riscv "32";

+  avr = {
+    config = "avr";
+  };
+
+  arm-embedded = {
+    config = "arm-none-eabi";
+    libc = "newlib";
+  };
+
+  aarch64-embedded = {
+    config = "aarch64-none-elf";
+    libc = "newlib";
+  };
+
+  ppc-embedded = {
+    config = "powerpc-none-eabi";
+    libc = "newlib";
+  };
+
+  i686-embedded = {
+    config = "i686-elf";
+    libc = "newlib";
+  };
+
+  x86_64-embedded = {
+    config = "x86_64-elf";
+    libc = "newlib";
+  };
+
   #
   # Darwin

@@ -19,6 +19,7 @@ rec {
   isRiscV = { cpu = { family = "riscv"; }; };
   isSparc = { cpu = { family = "sparc"; }; };
   isWasm = { cpu = { family = "wasm"; }; };
+  isAvr = { cpu = { family = "avr"; }; };

   is32bit = { cpu = { bits = 32; }; };
   is64bit = { cpu = { bits = 64; }; };

@@ -101,6 +101,8 @@ rec {

     wasm32 = { bits = 32; significantByte = littleEndian; family = "wasm"; };
     wasm64 = { bits = 64; significantByte = littleEndian; family = "wasm"; };
+
+    avr = { bits = 8; family = "avr"; };
   };

   ################################################################################

@@ -117,6 +119,7 @@ rec {
     apple = {};
     pc = {};

+    none = {};
     unknown = {};
   };

@@ -200,6 +203,7 @@ rec {
     cygnus = {};
     msvc = {};
     eabi = {};
+    elf = {};

     androideabi = {};
     android = {

@@ -255,9 +259,16 @@ rec {
     setType "system" components;

   mkSkeletonFromList = l: {
+    "1" = if elemAt l 0 == "avr"
+      then { cpu = elemAt l 0; kernel = "none"; abi = "unknown"; }
+      else throw "Target specification with 1 components is ambiguous";
     "2" = # We only do 2-part hacks for things Nix already supports
       if elemAt l 1 == "cygwin"
       then { cpu = elemAt l 0; kernel = "windows"; abi = "cygnus"; }
+      else if (elemAt l 1 == "eabi")
+      then { cpu = elemAt l 0; vendor = "none"; kernel = "none"; abi = elemAt l 1; }
+      else if (elemAt l 1 == "elf")
+      then { cpu = elemAt l 0; vendor = "none"; kernel = "none"; abi = elemAt l 1; }
       else { cpu = elemAt l 0; kernel = elemAt l 1; };
     "3" = # Awkwards hacks, beware!
       if elemAt l 1 == "apple"

@@ -268,6 +279,10 @@ rec {
       then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "windows"; abi = "gnu"; }
       else if hasPrefix "netbsd" (elemAt l 2)
       then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = elemAt l 2; }
+      else if (elemAt l 2 == "eabi")
+      then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "none"; abi = elemAt l 2; }
+      else if (elemAt l 2 == "elf")
+      then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "none"; abi = elemAt l 2; }
       else throw "Target specification with 3 components is ambiguous";
     "4" = { cpu = elemAt l 0; vendor = elemAt l 1; kernel = elemAt l 2; abi = elemAt l 3; };
   }.${toString (length l)}

@@ -471,6 +471,7 @@ rec {
     "x86_64-linux" = pc64;
     "armv5tel-linux" = sheevaplug;
     "armv6l-linux" = raspberrypi;
+    "armv7a-linux" = armv7l-hf-multiplatform;
     "armv7l-linux" = armv7l-hf-multiplatform;
     "aarch64-linux" = aarch64-multiplatform;
     "mipsel-linux" = fuloong2f_n32;

@@ -112,7 +112,7 @@ runTests {
   storePathAppendix = isStorePath
     "${goodPath}/bin/python";
   nonAbsolute = isStorePath (concatStrings (tail (stringToCharacters goodPath)));
-  asPath = isStorePath goodPath;
+  asPath = isStorePath (/. + goodPath);
   otherPath = isStorePath "/something/else";
   otherVals = {
     attrset = isStorePath {};
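For illustration only, and not part of this diff: the new example platforms above are meant to be fed to nixpkgs as a crossSystem value. A minimal sketch, assuming a nixpkgs checkout is on NIX_PATH (the attribute systems.examples.arm-embedded comes from the hunk above; the rest is illustrative):

    # Sketch: obtain a C cross toolchain for the bare-metal ARM example
    # (arm-none-eabi with newlib) added in lib/systems/examples.nix.
    let
      lib  = import <nixpkgs/lib>;
      pkgs = import <nixpkgs> {
        crossSystem = lib.systems.examples.arm-embedded;
      };
    in pkgs.stdenv.cc   # a compiler wrapper targeting arm-none-eabi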
117 lib/trivial.nix
@@ -9,23 +9,37 @@ rec {

      Type: id :: a -> a
   */
-  id = x: x;
+  id =
+    # The value to return
+    x: x;

   /* The constant function
-     Ignores the second argument.
-     Or: Construct a function that always returns a static value.
+
+     Ignores the second argument. If called with only one argument,
+     constructs a function that always returns a static value.

      Type: const :: a -> b -> a
      Example:
        let f = const 5; in f 10
        => 5
   */
-  const = x: y: x;
+  const =
+    # Value to return
+    x:
+    # Value to ignore
+    y: x;


   ## Named versions corresponding to some builtin operators.

-  /* Concatenate two lists */
+  /* Concatenate two lists
+
+     Type: concat :: [a] -> [a] -> [a]
+
+     Example:
+       concat [ 1 2 ] [ 3 4 ]
+       => [ 1 2 3 4 ]
+  */
   concat = x: y: x ++ y;

   /* boolean “or” */

@@ -53,27 +67,40 @@ rec {
   bitNot = builtins.sub (-1);

   /* Convert a boolean to a string.
-     Note that toString on a bool returns "1" and "".
+
+     This function uses the strings "true" and "false" to represent
+     boolean values. Calling `toString` on a bool instead returns "1"
+     and "" (sic!).
+
+     Type: boolToString :: bool -> string
   */
   boolToString = b: if b then "true" else "false";

   /* Merge two attribute sets shallowly, right side trumps left

+     mergeAttrs :: attrs -> attrs -> attrs
+
      Example:
        mergeAttrs { a = 1; b = 2; } { b = 3; c = 4; }
        => { a = 1; b = 3; c = 4; }
   */
-  mergeAttrs = x: y: x // y;
+  mergeAttrs =
+    # Left attribute set
+    x:
+    # Right attribute set (higher precedence for equal keys)
+    y: x // y;

   /* Flip the order of the arguments of a binary function.

+     Type: flip :: (a -> b -> c) -> (b -> a -> c)
+
      Example:
        flip concat [1] [2]
        => [ 2 1 ]
   */
   flip = f: a: b: f b a;

-  /* Apply function if argument is non-null.
+  /* Apply function if the supplied argument is non-null.

      Example:
        mapNullable (x: x+1) null

@@ -81,7 +108,11 @@ rec {
        mapNullable (x: x+1) 22
        => 23
   */
-  mapNullable = f: a: if isNull a then a else f a;
+  mapNullable =
+    # Function to call
+    f:
+    # Argument to check for null before passing it to `f`
+    a: if isNull a then a else f a;

   # Pull in some builtins not included elsewhere.
   inherit (builtins)

@@ -92,21 +123,27 @@ rec {

   ## nixpks version strings

-  # The current full nixpkgs version number.
+  /* Returns the current full nixpkgs version number. */
   version = release + versionSuffix;

-  # The current nixpkgs version number as string.
+  /* Returns the current nixpkgs release number as string. */
   release = lib.strings.fileContents ../.version;

-  # The current nixpkgs version suffix as string.
+  /* Returns the current nixpkgs version suffix as string. */
   versionSuffix =
     let suffixFile = ../.version-suffix;
     in if pathExists suffixFile
     then lib.strings.fileContents suffixFile
     else "pre-git";

-  # Attempt to get the revision nixpkgs is from
-  revisionWithDefault = default:
+  /* Attempts to return the current revision of nixpkgs and
+     returns the supplied default value otherwise.
+
+     Type: revisionWithDefault :: string -> string
+  */
+  revisionWithDefault =
+    # Default value to return if revision can not be determined
+    default:
     let
       revisionFile = "${toString ./..}/.git-revision";
       gitRepo = "${toString ./..}/.git";

@@ -117,14 +154,20 @@ rec {

   nixpkgsVersion = builtins.trace "`lib.nixpkgsVersion` is deprecated, use `lib.version` instead!" version;

-  # Whether we're being called by nix-shell.
+  /* Determine whether the function is being called from inside a Nix
+     shell.
+
+     Type: inNixShell :: bool
+  */
   inNixShell = builtins.getEnv "IN_NIX_SHELL" != "";


   ## Integer operations

-  # Return minimum/maximum of two numbers.
+  /* Return minimum of two numbers. */
   min = x: y: if x < y then x else y;
+
+  /* Return maximum of two numbers. */
   max = x: y: if x > y then x else y;

   /* Integer modulus

@@ -158,8 +201,9 @@ rec {
     second subtype, compare elements of a single subtype with `yes`
     and `no` respectively.

-    Example:
+    Type: (a -> bool) -> (a -> a -> int) -> (a -> a -> int) -> (a -> a -> int)
+
+    Example:
      let cmp = splitByAndCompare (hasPrefix "foo") compare compare; in

      cmp "a" "z" => -1

@@ -170,31 +214,44 @@ rec {
     # while
     compare "fooa" "a" => 1
   */
-  splitByAndCompare = p: yes: no: a: b:
+  splitByAndCompare =
+    # Predicate
+    p:
+    # Comparison function if predicate holds for both values
+    yes:
+    # Comparison function if predicate holds for neither value
+    no:
+    # First value to compare
+    a:
+    # Second value to compare
+    b:
     if p a
     then if p b then yes a b else -1
     else if p b then 1 else no a b;


-  /* Reads a JSON file. */
+  /* Reads a JSON file.
+
+     Type :: path -> any
+  */
   importJSON = path:
     builtins.fromJSON (builtins.readFile path);


   ## Warnings

-  /* See https://github.com/NixOS/nix/issues/749. Eventually we'd like these
-     to expand to Nix builtins that carry metadata so that Nix can filter out
-     the INFO messages without parsing the message string.
-
-     Usage:
-     {
-       foo = lib.warn "foo is deprecated" oldFoo;
-     }
-
-     TODO: figure out a clever way to integrate location information from
-     something like __unsafeGetAttrPos.
-  */
+  # See https://github.com/NixOS/nix/issues/749. Eventually we'd like these
+  # to expand to Nix builtins that carry metadata so that Nix can filter out
+  # the INFO messages without parsing the message string.
+  #
+  # Usage:
+  # {
+  #   foo = lib.warn "foo is deprecated" oldFoo;
+  # }
+  #
+  # TODO: figure out a clever way to integrate location information from
+  # something like __unsafeGetAttrPos.
   warn = msg: builtins.trace "WARNING: ${msg}";
   info = msg: builtins.trace "INFO: ${msg}";
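As a quick usage sketch (illustrative only, not part of the commit), the helpers documented above can be exercised from nix repl or a small expression:

    let lib = import <nixpkgs/lib>; in
    {
      five   = lib.const 5 10;                               # => 5
      merged = lib.mergeAttrs { a = 1; } { a = 2; b = 3; };  # => { a = 2; b = 3; }
      rev    = lib.revisionWithDefault "unknown";            # git revision of nixpkgs, or "unknown"
      shell  = lib.inNixShell;                               # true only when evaluated by nix-shell
    }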
@@ -143,6 +143,11 @@
     github = "ahmedtd";
     name = "Taahir Ahmed";
   };
+  ahuzik = {
+    email = "ales.guzik@gmail.com";
+    github = "alesguzik";
+    name = "Ales Huzik";
+  };
   aij = {
     email = "aij+git@mrph.org";
     github = "aij";

@@ -401,6 +406,11 @@
     github = "AveryLychee";
     name = "Avery Lychee";
   };
+  averelld = {
+    email = "averell+nixos@rxd4.com";
+    github = "averelld";
+    name = "averelld";
+  };
   avnik = {
     email = "avn@avnik.info";
     github = "avnik";

@@ -495,6 +505,11 @@
     github = "bennofs";
     name = "Benno Fünfstück";
   };
+  benpye = {
+    email = "ben@curlybracket.co.uk";
+    github = "benpye";
+    name = "Ben Pye";
+  };
   benwbooth = {
     email = "benwbooth@gmail.com";
     github = "benwbooth";

@@ -2967,6 +2982,11 @@
     github = "nequissimus";
     name = "Tim Steinbach";
   };
+  nikitavoloboev = {
+    email = "nikita.voloboev@gmail.com";
+    github = "nikitavoloboev";
+    name = "Nikita Voloboev";
+  };
   nfjinjing = {
     email = "nfjinjing@gmail.com";
     github = "nfjinjing";

@@ -3895,6 +3915,11 @@
     github = "sjagoe";
     name = "Simon Jagoe";
   };
+  sjau = {
+    email = "nixos@sjau.ch";
+    github = "sjau";
+    name = "Stephan Jau";
+  };
   sjmackenzie = {
     email = "setori88@gmail.com";
     github = "sjmackenzie";

@@ -4143,6 +4168,11 @@
     github = "taku0";
     name = "Takuo Yonezawa";
   };
+  talyz = {
+    email = "kim.lindberger@gmail.com";
+    github = "talyz";
+    name = "Kim Lindberger";
+  };
   tari = {
     email = "peter@taricorp.net";
     github = "tari";

@@ -4618,6 +4648,11 @@
     github = "wucke13";
     name = "Wucke";
   };
+  wykurz = {
+    email = "wykurz@gmail.com";
+    github = "wykurz";
+    name = "Mateusz Wykurz";
+  };
   wyvie = {
     email = "elijahrum@gmail.com";
     github = "wyvie";
@@ -15,7 +15,7 @@ containers.database =
   { config =
       { config, pkgs, ... }:
       { <xref linkend="opt-services.postgresql.enable"/> = true;
-        <xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql96;
+        <xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql_9_6;
       };
   };
 </programlisting>

@@ -197,10 +197,10 @@ swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];
   pkgs.emacs
 ];

-<xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql90;
+<xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql_10;
 </programlisting>
 The latter option definition changes the default PostgreSQL package used
-by NixOS’s PostgreSQL service to 9.0. For more information on packages,
+by NixOS’s PostgreSQL service to 10.x. For more information on packages,
 including how to add new ones, see <xref linkend="sec-custom-packages"/>.
 </para>
 </listitem>

@@ -34,13 +34,4 @@
   Similarly, UDP port ranges can be opened through
   <xref linkend="opt-networking.firewall.allowedUDPPortRanges"/>.
 </para>
-
-<para>
-  Also of interest is
-<programlisting>
-<xref linkend="opt-networking.firewall.allowPing"/> = true;
-</programlisting>
-  to allow the machine to respond to ping requests. (ICMPv6 pings are always
-  allowed.)
-</para>
 </section>
@@ -637,6 +637,11 @@ $ nix-instantiate -E '(import <nixpkgsunstable> {}).gitFull'
     anyways for clarity.
    </para>
   </listitem>
+  <listitem>
+   <para>
+    Groups <literal>kvm</literal> and <literal>render</literal> are introduced now, as systemd requires them.
+   </para>
+  </listitem>
  </itemizedlist>
 </section>

@@ -97,6 +97,16 @@
     start org.nixos.nix-daemon</command>.
    </para>
   </listitem>
+  <listitem>
+   <para>
+    The Syncthing state and configuration data has been moved from
+    <varname>services.syncthing.dataDir</varname> to the newly defined
+    <varname>services.syncthing.configDir</varname>, which defaults to
+    <literal>/var/lib/syncthing/.config/syncthing</literal>.
+    This change makes it possible to share synced directories using ACLs
+    without Syncthing resetting the permission on every start.
+   </para>
+  </listitem>
  </itemizedlist>
 </listitem>
 <listitem>

@@ -145,6 +155,56 @@
     lib.mkForce [];</literal>.
    </para>
   </listitem>
+  <listitem>
+   <para>
+    OpenSMTPD has been upgraded to version 6.4.0p1. This release makes
+    backwards-incompatible changes to the configuration file format. See
+    <command>man smtpd.conf</command> for more information on the new file
+    format.
+   </para>
+  </listitem>
+  <listitem>
+   <para>
+    The versioned <varname>postgresql</varname> packages have been renamed to use
+    underscore number separators. For example, <varname>postgresql96</varname>
+    has been renamed to <varname>postgresql_9_6</varname>.
+   </para>
+  </listitem>
+  <listitem>
+   <para>
+    Package <literal>consul-ui</literal> and passthrough <literal>consul.ui</literal> have been removed.
+    The package <literal>consul</literal> now uses upstream releases that vendor the UI into the binary.
+    See <link xlink:href="https://github.com/NixOS/nixpkgs/pull/48714#issuecomment-433454834">#48714</link>
+    for details.
+   </para>
+  </listitem>
+  <listitem>
+   <para>
+    Slurm introduces the new option
+    <literal>services.slurm.stateSaveLocation</literal>,
+    which is now set to <literal>/var/spool/slurm</literal> by default
+    (instead of <literal>/var/spool</literal>).
+    Make sure to move all files to the new directory or to set the option accordingly.
+   </para>
+   <para>
+    The slurmctld now runs as user <literal>slurm</literal> instead of <literal>root</literal>.
+    If you want to keep slurmctld running as <literal>root</literal>, set
+    <literal>services.slurm.user = root</literal>.
+   </para>
+   <para>
+    The options <literal>services.slurm.nodeName</literal> and
+    <literal>services.slurm.partitionName</literal> are now sets of
+    strings to correctly reflect the fact that each of these
+    options can occur more than once in the configuration.
+   </para>
+  </listitem>
+  <listitem>
+   <para>
+    The <literal>solr</literal> package has been upgraded from 4.10.3 to 7.5.0 and has undergone
+    some major changes. The <literal>services.solr</literal> module has been updated to reflect
+    these changes. Please review http://lucene.apache.org/solr/ carefully before upgrading.
+   </para>
+  </listitem>
  </itemizedlist>
 </section>

@@ -163,6 +223,15 @@
     Matomo version.
    </para>
   </listitem>
+  <listitem>
+   <para>
+    The deprecated <literal>truecrypt</literal> package has been removed
+    and the <literal>truecrypt</literal> attribute is now an alias for
+    <literal>veracrypt</literal>. VeraCrypt is backward-compatible with
+    TrueCrypt volumes. Note that <literal>cryptsetup</literal> also
+    supports loading TrueCrypt volumes.
+   </para>
+  </listitem>
  </itemizedlist>
 </section>
 </section>
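These release notes translate directly into configuration changes. A hedged sketch of a configuration.nix migration (the option names are the ones named above; the node and partition values are invented):

    { pkgs, ... }:
    {
      # before: services.postgresql.package = pkgs.postgresql96;
      services.postgresql.package = pkgs.postgresql_9_6;

      # before: services.slurm.nodeName = "linux[1-32] CPUs=1 State=UNKNOWN";
      services.slurm.nodeName = [ "linux[1-32] CPUs=1 State=UNKNOWN" ];
      services.slurm.stateSaveLocation = "/var/spool/slurm";
    }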
@@ -53,7 +53,8 @@ in rec {
     inherit prefix check;
     modules = modules ++ extraModules ++ baseModules ++ [ pkgsModule ];
     args = extraArgs;
-    specialArgs = { modulesPath = ../modules; } // specialArgs;
+    specialArgs =
+      { modulesPath = builtins.toString ../modules; } // specialArgs;
   }) config options;

   # These are the extra arguments passed to every module. In

@@ -250,7 +250,8 @@ sub connect {
     $self->start;

     local $SIG{ALRM} = sub { die "timed out waiting for the VM to connect\n"; };
-    alarm 300;
+    # 50 minutes -- increased as a test, see #49441
+    alarm 3000;
     readline $self->{socket} or die "the VM quit before connecting\n";
     alarm 0;
@@ -16,6 +16,13 @@ let
   resolvconfOptions = cfg.resolvconfOptions
     ++ optional cfg.dnsSingleRequest "single-request"
     ++ optional cfg.dnsExtensionMechanism "edns0";
+
+
+  localhostMapped4 = cfg.hosts ? "127.0.0.1" && elem "localhost" cfg.hosts."127.0.0.1";
+  localhostMapped6 = cfg.hosts ? "::1" && elem "localhost" cfg.hosts."::1";
+
+  localhostMultiple = any (elem "localhost") (attrValues (removeAttrs cfg.hosts [ "127.0.0.1" "::1" ]));
+
 in

 {

@@ -24,7 +31,6 @@ in

     networking.hosts = lib.mkOption {
       type = types.attrsOf (types.listOf types.str);
-      default = {};
       example = literalExample ''
         {
           "127.0.0.1" = [ "foo.bar.baz" ];

@@ -192,6 +198,29 @@ in

   config = {

+    assertions = [{
+      assertion = localhostMapped4;
+      message = ''`networking.hosts` doesn't map "127.0.0.1" to "localhost"'';
+    } {
+      assertion = !cfg.enableIPv6 || localhostMapped6;
+      message = ''`networking.hosts` doesn't map "::1" to "localhost"'';
+    } {
+      assertion = !localhostMultiple;
+      message = ''
+        `networking.hosts` maps "localhost" to something other than "127.0.0.1"
+        or "::1". This will break some applications. Please use
+        `networking.extraHosts` if you really want to add such a mapping.
+      '';
+    }];
+
+    networking.hosts = {
+      "127.0.0.1" = [ "localhost" ];
+    } // optionalAttrs (cfg.hostName != "") {
+      "127.0.1.1" = [ cfg.hostName ];
+    } // optionalAttrs cfg.enableIPv6 {
+      "::1" = [ "localhost" ];
+    };
+
     environment.etc =
       { # /etc/services: TCP/UDP port assignments.
         "services".source = pkgs.iana-etc + "/etc/services";

@@ -199,27 +228,12 @@ in
         # /etc/protocols: IP protocol numbers.
         "protocols".source = pkgs.iana-etc + "/etc/protocols";

-        # /etc/rpc: RPC program numbers.
-        "rpc".source = pkgs.glibc.out + "/etc/rpc";
-
         # /etc/hosts: Hostname-to-IP mappings.
-        "hosts".text =
-          let oneToString = set : ip : ip + " " + concatStringsSep " " ( getAttr ip set );
+        "hosts".text = let
+          oneToString = set: ip: ip + " " + concatStringsSep " " set.${ip};
           allToString = set: concatMapStringsSep "\n" (oneToString set) (attrNames set);
-              userLocalHosts = optionalString
-                ( builtins.hasAttr "127.0.0.1" cfg.hosts )
-                ( concatStringsSep " " ( remove "localhost" cfg.hosts."127.0.0.1" ));
-              userLocalHosts6 = optionalString
-                ( builtins.hasAttr "::1" cfg.hosts )
-                ( concatStringsSep " " ( remove "localhost" cfg.hosts."::1" ));
-              otherHosts = allToString ( removeAttrs cfg.hosts [ "127.0.0.1" "::1" ]);
-          in
-          ''
-            127.0.0.1 ${userLocalHosts} localhost
-            ${optionalString cfg.enableIPv6 ''
-              ::1 ${userLocalHosts6} localhost
-            ''}
-            ${otherHosts}
+        in ''
+          ${allToString cfg.hosts}
           ${cfg.extraHosts}
         '';

@@ -251,6 +265,9 @@ in
         "resolv.conf".source = "${pkgs.systemd}/lib/systemd/resolv.conf";
       } // optionalAttrs (config.services.resolved.enable && dnsmasqResolve) {
         "dnsmasq-resolv.conf".source = "/run/systemd/resolve/resolv.conf";
+      } // optionalAttrs (pkgs.stdenv.hostPlatform.libc == "glibc") {
+        # /etc/rpc: RPC program numbers.
+        "rpc".source = pkgs.glibc.out + "/etc/rpc";
       };

     networking.proxy.envVars =
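For illustration (host names invented), a networking.hosts definition that keeps the new assertions happy adds extra names without remapping "localhost", which the module itself now pins to 127.0.0.1 and ::1:

    {
      networking.hosts = {
        "127.0.0.1"    = [ "foo.example.org" ];   # merged with the module's own "localhost" entry
        "192.168.1.10" = [ "printer.local" ];
      };
    }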
@@ -19,7 +19,9 @@ let
     pkgs.diffutils
     pkgs.findutils
     pkgs.gawk
-    pkgs.glibc # for ldd, getent
+    pkgs.stdenv.cc.libc
+    pkgs.getent
+    pkgs.getconf
     pkgs.gnugrep
     pkgs.gnupatch
     pkgs.gnused

@@ -7,4 +7,6 @@
   imports =
     [ ./installation-cd-base.nix
     ];
+
+  fonts.fontconfig.enable = false;
 }
@@ -22,4 +22,42 @@ with lib;

   powerManagement.enable = false;
   system.stateVersion = mkDefault "18.03";
+
+  installer.cloneConfigExtra = ''
+  # Let demo build as a trusted user.
+  # nix.trustedUsers = [ "demo" ];
+
+  # Mount a VirtualBox shared folder.
+  # This is configurable in the VirtualBox menu at
+  # Machine / Settings / Shared Folders.
+  # fileSystems."/mnt" = {
+  #   fsType = "vboxsf";
+  #   device = "nameofdevicetomount";
+  #   options = [ "rw" ];
+  # };
+
+  # By default, the NixOS VirtualBox demo image includes SDDM and Plasma.
+  # If you prefer another desktop manager or display manager, you may want
+  # to disable the default.
+  # services.xserver.desktopManager.plasma5.enable = lib.mkForce false;
+  # services.xserver.displayManager.sddm.enable = lib.mkForce false;
+
+  # Enable GDM/GNOME by uncommenting above two lines and two lines below.
+  # services.xserver.displayManager.gdm.enable = true;
+  # services.xserver.desktopManager.gnome3.enable = true;
+
+  # Set your time zone.
+  # time.timeZone = "Europe/Amsterdam";
+
+  # List packages installed in system profile. To search, run:
+  # \$ nix search wget
+  # environment.systemPackages = with pkgs; [
+  #   wget vim
+  # ];
+
+  # Enable the OpenSSH daemon.
+  # services.openssh.enable = true;
+
+  system.stateVersion = mkDefault "18.03";
+  '';
 }
@@ -331,6 +331,9 @@
   zeronet = 304;
   lirc = 305;
   lidarr = 306;
+  slurm = 307;
+  kapacitor = 308;
+  solr = 309;

   # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!

@@ -622,6 +625,9 @@
   zeronet = 304;
   lirc = 305;
   lidarr = 306;
+  slurm = 307;
+  kapacitor = 308;
+  solr = 309;

   # When adding a gid, make sure it doesn't match an existing
   # uid. Users and groups with the same name should have equal
@@ -126,6 +126,7 @@
   ./programs/udevil.nix
   ./programs/venus.nix
   ./programs/vim.nix
+  ./programs/wavemon.nix
   ./programs/way-cooler.nix
   ./programs/wireshark.nix
   ./programs/xfs_quota.nix

@@ -432,6 +433,7 @@
   ./services/monitoring/hdaps.nix
   ./services/monitoring/heapster.nix
   ./services/monitoring/incron.nix
+  ./services/monitoring/kapacitor.nix
   ./services/monitoring/longview.nix
   ./services/monitoring/monit.nix
   ./services/monitoring/munin.nix
@@ -7,7 +7,7 @@
   # Include some utilities that are useful for installing or repairing
   # the system.
   environment.systemPackages = [
-    pkgs.w3m-nox # needed for the manual anyway
+    pkgs.w3m-nographics # needed for the manual anyway
     pkgs.testdisk # useful for repairing boot problems
     pkgs.ms-sys # for writing Microsoft boot sectors / MBRs
     pkgs.efibootmgr

@@ -19,6 +19,9 @@
     pkgs.cryptsetup # needed for dm-crypt volumes
     pkgs.mkpasswd # for generating password files

+    # Some text editors.
+    pkgs.vim
+
     # Some networking tools.
     pkgs.fuse
     pkgs.fuse3
@@ -48,6 +48,8 @@ let

     {
       imports = [ ${toString config.installer.cloneConfigIncludes} ];
+
+      ${config.installer.cloneConfigExtra}
     }
   '';

@@ -73,6 +75,13 @@ in
       '';
     };

+    installer.cloneConfigExtra = mkOption {
+      default = "";
+      description = ''
+        Extra text to include in the cloned configuration.nix included in this
+        installer.
+      '';
+    };
   };

   config = {
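A minimal sketch of how an installer profile could use the new option (the appended text is invented):

    {
      installer.cloneConfigExtra = ''
        # appended verbatim to the generated /etc/nixos/configuration.nix
        time.timeZone = "UTC";
      '';
    }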
@@ -63,7 +63,7 @@ with lib;
   # Tell the Nix evaluator to garbage collect more aggressively.
   # This is desirable in memory-constrained environments that don't
   # (yet) have swap set up.
-  environment.variables.GC_INITIAL_HEAP_SIZE = "100000";
+  environment.variables.GC_INITIAL_HEAP_SIZE = "1M";

   # Make the installer more likely to succeed in low memory
   # environments. The kernel's overcommit heustistics bite us

@@ -87,9 +87,6 @@ with lib;
   # console less cumbersome if the machine has a public IP.
   networking.firewall.logRefusedConnections = mkDefault false;

-  environment.systemPackages = [ pkgs.vim ];
-
   # Allow the user to log in as root without a password.
   users.users.root.initialHashedPassword = "";
 };
@@ -16,7 +16,7 @@ let
   # programmable completion. If we do, enable all modules installed in
   # the system and user profile in obsolete /etc/bash_completion.d/
   # directories. Bash loads completions in all
-  # $XDG_DATA_DIRS/share/bash-completion/completions/
+  # $XDG_DATA_DIRS/bash-completion/completions/
   # on demand, so they do not need to be sourced here.
   if shopt -q progcomp &>/dev/null; then
     . "${pkgs.bash-completion}/etc/profile.d/bash_completion.sh"

@@ -13,7 +13,7 @@ with lib;
   # Set up the per-user profile.
   mkdir -m 0755 -p "$NIX_USER_PROFILE_DIR"
   if [ "$(stat --printf '%u' "$NIX_USER_PROFILE_DIR")" != "$(id -u)" ]; then
-    echo "WARNING: bad ownership on $NIX_USER_PROFILE_DIR, should be $(id -u)" >&2
+    echo "WARNING: the per-user profile dir $NIX_USER_PROFILE_DIR should belong to user id $(id -u)" >&2
   fi

   if [ -w "$HOME" ]; then

@@ -35,7 +35,7 @@ with lib;
   NIX_USER_GCROOTS_DIR="/nix/var/nix/gcroots/per-user/$USER"
   mkdir -m 0755 -p "$NIX_USER_GCROOTS_DIR"
   if [ "$(stat --printf '%u' "$NIX_USER_GCROOTS_DIR")" != "$(id -u)" ]; then
-    echo "WARNING: bad ownership on $NIX_USER_GCROOTS_DIR, should be $(id -u)" >&2
+    echo "WARNING: the per-user gcroots dir $NIX_USER_GCROOTS_DIR should belong to user id $(id -u)" >&2
   fi

   # Set up a default Nix expression from which to install stuff.
@@ -5,6 +5,15 @@ with lib;
 let
   cfg = config.programs.sway-beta;
   swayPackage = cfg.package;
+
+  swayWrapped = pkgs.writeShellScriptBin "sway" ''
+    ${cfg.extraSessionCommands}
+    exec ${pkgs.dbus.dbus-launch} --exit-with-session ${swayPackage}/bin/sway
+  '';
+  swayJoined = pkgs.symlinkJoin {
+    name = "sway-joined";
+    paths = [ swayWrapped swayPackage ];
+  };
 in {
   options.programs.sway-beta = {
     enable = mkEnableOption ''

@@ -20,13 +29,30 @@ in {
       '';
     };

+    extraSessionCommands = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        export SDL_VIDEODRIVER=wayland
+        # needs qt5.qtwayland in systemPackages
+        export QT_QPA_PLATFORM=wayland
+        export QT_WAYLAND_DISABLE_WINDOWDECORATION="1"
+        # Fix for some Java AWT applications (e.g. Android Studio),
+        # use this if they aren't displayed properly:
+        export _JAVA_AWT_WM_NONREPARENTING=1
+      '';
+      description = ''
+        Shell commands executed just before Sway is started.
+      '';
+    };
+
     extraPackages = mkOption {
       type = with types; listOf package;
       default = with pkgs; [
-        xwayland dmenu
+        xwayland rxvt_unicode dmenu
       ];
       defaultText = literalExample ''
-        with pkgs; [ xwayland dmenu ];
+        with pkgs; [ xwayland rxvt_unicode dmenu ];
       '';
       example = literalExample ''
         with pkgs; [

@@ -42,7 +68,7 @@ in {
   };

   config = mkIf cfg.enable {
-    environment.systemPackages = [ swayPackage ] ++ cfg.extraPackages;
+    environment.systemPackages = [ swayJoined ] ++ cfg.extraPackages;
     security.pam.services.swaylock = {};
     hardware.opengl.enable = mkDefault true;
     fonts.enableDefaultFonts = mkDefault true;

@@ -51,4 +77,3 @@ in {

   meta.maintainers = with lib.maintainers; [ gnidorah primeos colemickens ];
 }
-
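A minimal sketch of enabling the updated module (the exported variable is only an example, as in the option's own example text):

    {
      programs.sway-beta = {
        enable = true;
        extraSessionCommands = ''
          export QT_QPA_PLATFORM=wayland
        '';
        # extraPackages defaults to [ xwayland rxvt_unicode dmenu ]
      };
    }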
28 nixos/modules/programs/wavemon.nix Normal file
@@ -0,0 +1,28 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.wavemon;
+in {
+  options = {
+    programs.wavemon = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to add wavemon to the global environment and configure a
+          setcap wrapper for it.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ wavemon ];
+    security.wrappers.wavemon = {
+      source = "${pkgs.wavemon}/bin/wavemon";
+      capabilities = "cap_net_admin+ep";
+    };
+  };
+}
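Usage of the new module is a single option; a sketch of a configuration enabling it:

    {
      # installs wavemon and a setcap wrapper granting cap_net_admin+ep
      programs.wavemon.enable = true;
    }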
@@ -28,7 +28,10 @@ with lib;
     (config:
       let enabled = getAttrFromPath [ "services" "printing" "gutenprint" ] config;
       in if enabled then [ pkgs.gutenprint ] else [ ]))
-  (mkRenamedOptionModule [ "services" "ddclient" "domain" ] [ "services" "ddclient" "domains" ])
+  (mkChangedOptionModule [ "services" "ddclient" "domain" ] [ "services" "ddclient" "domains" ]
+    (config:
+      let value = getAttrFromPath [ "services" "ddclient" "domain" ] config;
+      in if value != "" then [ value ] else []))
   (mkRemovedOptionModule [ "services" "ddclient" "homeDir" ] "")
   (mkRenamedOptionModule [ "services" "elasticsearch" "host" ] [ "services" "elasticsearch" "listenAddress" ])
   (mkRenamedOptionModule [ "services" "graphite" "api" "host" ] [ "services" "graphite" "api" "listenAddress" ])

@@ -28,7 +28,7 @@ with lib;
   capability setuid,
   network inet raw,

-  ${pkgs.glibc.out}/lib/*.so mr,
+  ${pkgs.stdenv.cc.libc.out}/lib/*.so mr,
   ${pkgs.libcap.lib}/lib/libcap.so* mr,
   ${pkgs.attr.out}/lib/libattr.so* mr,
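The ddclient change means a single domain string now becomes a list under domains; a sketch of the before and after in configuration.nix (the domain is invented):

    {
      # old form, still accepted and rewritten by mkChangedOptionModule:
      # services.ddclient.domain = "example.org";

      # equivalent new form:
      services.ddclient.domains = [ "example.org" ];
    }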
@@ -170,4 +170,6 @@ in {
       '';
     }) cfg.params;
   };
+
+  meta.maintainers = with lib.maintainers; [ ekleog ];
 }

@@ -20,7 +20,6 @@ with lib;
     KERNEL=="random", TAG+="systemd"
     SUBSYSTEM=="cpu", ENV{MODALIAS}=="cpu:type:x86,*feature:*009E*", TAG+="systemd", ENV{SYSTEMD_WANTS}+="rngd.service"
     KERNEL=="hw_random", TAG+="systemd", ENV{SYSTEMD_WANTS}+="rngd.service"
-    ${if config.services.tcsd.enable then "" else ''KERNEL=="tpm0", TAG+="systemd", ENV{SYSTEMD_WANTS}+="rngd.service"''}
   '';

   systemd.services.rngd = {

@@ -30,8 +29,7 @@ with lib;

     description = "Hardware RNG Entropy Gatherer Daemon";

-    serviceConfig.ExecStart = "${pkgs.rng-tools}/sbin/rngd -f -v" +
-      (if config.services.tcsd.enable then " --no-tpm=1" else "");
+    serviceConfig.ExecStart = "${pkgs.rng-tools}/sbin/rngd -f -v";
   };
 };
 }
@@ -53,6 +53,9 @@ in
         Type = "notify";
         NotifyAccess = "all";
       };
+      restartTriggers = [
+        config.environment.etc."salt/master".source
+      ];
     };
   };

@@ -15,7 +15,6 @@ let
     # Default is in /etc/salt/pki/minion
     pki_dir = "/var/lib/salt/pki/minion";
   } cfg.configuration;
-  configDir = pkgs.writeTextDir "minion" (builtins.toJSON fullConfig);

 in

@@ -36,7 +35,16 @@ in
   };

   config = mkIf cfg.enable {
-    environment.systemPackages = with pkgs; [ salt ];
+    environment = {
+      # Set this up in /etc/salt/minion so `salt-call`, etc. work.
+      # The alternatives are
+      # - passing --config-dir to all salt commands, not just the minion unit,
+      # - setting a global environment variable.
+      etc."salt/minion".source = pkgs.writeText "minion" (
+        builtins.toJSON fullConfig
+      );
+      systemPackages = with pkgs; [ salt ];
+    };
     systemd.services.salt-minion = {
       description = "Salt Minion";
       wantedBy = [ "multi-user.target" ];

@@ -45,11 +53,14 @@ in
         utillinux
       ];
       serviceConfig = {
-        ExecStart = "${pkgs.salt}/bin/salt-minion --config-dir=${configDir}";
+        ExecStart = "${pkgs.salt}/bin/salt-minion";
         LimitNOFILE = 8192;
         Type = "notify";
         NotifyAccess = "all";
       };
+      restartTriggers = [
+        config.environment.etc."salt/minion".source
+      ];
     };
   };
 }
@ -6,13 +6,18 @@ let
|
|||||||
|
|
||||||
cfg = config.services.slurm;
|
cfg = config.services.slurm;
|
||||||
# configuration file can be generated by http://slurm.schedmd.com/configurator.html
|
# configuration file can be generated by http://slurm.schedmd.com/configurator.html
|
||||||
|
|
||||||
|
defaultUser = "slurm";
|
||||||
|
|
||||||
configFile = pkgs.writeTextDir "slurm.conf"
|
configFile = pkgs.writeTextDir "slurm.conf"
|
||||||
''
|
''
|
||||||
ClusterName=${cfg.clusterName}
|
ClusterName=${cfg.clusterName}
|
||||||
|
StateSaveLocation=${cfg.stateSaveLocation}
|
||||||
|
SlurmUser=${cfg.user}
|
||||||
${optionalString (cfg.controlMachine != null) ''controlMachine=${cfg.controlMachine}''}
|
${optionalString (cfg.controlMachine != null) ''controlMachine=${cfg.controlMachine}''}
|
||||||
${optionalString (cfg.controlAddr != null) ''controlAddr=${cfg.controlAddr}''}
|
${optionalString (cfg.controlAddr != null) ''controlAddr=${cfg.controlAddr}''}
|
||||||
${optionalString (cfg.nodeName != null) ''nodeName=${cfg.nodeName}''}
|
${toString (map (x: "NodeName=${x}\n") cfg.nodeName)}
|
||||||
${optionalString (cfg.partitionName != null) ''partitionName=${cfg.partitionName}''}
|
${toString (map (x: "PartitionName=${x}\n") cfg.partitionName)}
|
||||||
PlugStackConfig=${plugStackConfig}
|
PlugStackConfig=${plugStackConfig}
|
||||||
ProctrackType=${cfg.procTrackType}
|
ProctrackType=${cfg.procTrackType}
|
||||||
${cfg.extraConfig}
|
${cfg.extraConfig}
|
||||||
@ -24,12 +29,19 @@ let
|
|||||||
${cfg.extraPlugstackConfig}
|
${cfg.extraPlugstackConfig}
|
||||||
'';
|
'';
|
||||||
|
|
||||||
|
|
||||||
cgroupConfig = pkgs.writeTextDir "cgroup.conf"
|
cgroupConfig = pkgs.writeTextDir "cgroup.conf"
|
||||||
''
|
''
|
||||||
${cfg.extraCgroupConfig}
|
${cfg.extraCgroupConfig}
|
||||||
'';
|
'';
|
||||||
|
|
||||||
|
slurmdbdConf = pkgs.writeTextDir "slurmdbd.conf"
|
||||||
|
''
|
||||||
|
DbdHost=${cfg.dbdserver.dbdHost}
|
||||||
|
SlurmUser=${cfg.user}
|
||||||
|
StorageType=accounting_storage/mysql
|
||||||
|
${cfg.dbdserver.extraConfig}
|
||||||
|
'';
|
||||||
|
|
||||||
# slurm expects some additional config files to be
|
# slurm expects some additional config files to be
|
||||||
# in the same directory as slurm.conf
|
# in the same directory as slurm.conf
|
||||||
etcSlurm = pkgs.symlinkJoin {
|
etcSlurm = pkgs.symlinkJoin {
|
||||||
@ -43,6 +55,8 @@ in
|
|||||||
|
|
||||||
###### interface
|
###### interface
|
||||||
|
|
||||||
|
meta.maintainers = [ maintainers.markuskowa ];
|
||||||
|
|
||||||
options = {
|
options = {
|
||||||
|
|
||||||
services.slurm = {
|
services.slurm = {
|
||||||
@ -60,6 +74,27 @@ in
|
|||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
dbdserver = {
|
||||||
|
enable = mkEnableOption "SlurmDBD service";
|
||||||
|
|
||||||
|
dbdHost = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = config.networking.hostName;
|
||||||
|
description = ''
|
||||||
|
Hostname of the machine where <literal>slurmdbd</literal>
|
||||||
|
is running (i.e. name returned by <literal>hostname -s</literal>).
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
extraConfig = mkOption {
|
||||||
|
type = types.lines;
|
||||||
|
default = "";
|
||||||
|
description = ''
|
||||||
|
Extra configuration for <literal>slurmdbd.conf</literal>
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
client = {
|
client = {
|
||||||
enable = mkEnableOption "slurm client daemon";
|
enable = mkEnableOption "slurm client daemon";
|
||||||
};
|
};
|
||||||
@ -116,9 +151,9 @@ in
|
|||||||
};
|
};
|
||||||
|
|
||||||
nodeName = mkOption {
|
nodeName = mkOption {
|
||||||
type = types.nullOr types.str;
|
type = types.listOf types.str;
|
||||||
default = null;
|
default = [];
|
||||||
example = "linux[1-32] CPUs=1 State=UNKNOWN";
|
example = literalExample ''[ "linux[1-32] CPUs=1 State=UNKNOWN" ];'';
|
||||||
description = ''
|
description = ''
|
||||||
Name that SLURM uses to refer to a node (or base partition for BlueGene
|
Name that SLURM uses to refer to a node (or base partition for BlueGene
|
||||||
systems). Typically this would be the string that "/bin/hostname -s"
|
systems). Typically this would be the string that "/bin/hostname -s"
|
||||||
@ -127,9 +162,9 @@ in
|
|||||||
};
|
};
|
||||||
|
|
||||||
partitionName = mkOption {
|
partitionName = mkOption {
|
||||||
type = types.nullOr types.str;
|
type = types.listOf types.str;
|
||||||
default = null;
|
default = [];
|
||||||
example = "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP";
|
example = literalExample ''[ "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP" ];'';
|
||||||
description = ''
|
description = ''
|
||||||
Name by which the partition may be referenced. Note that now you have
|
Name by which the partition may be referenced. Note that now you have
|
||||||
to write the partition's parameters after the name.
|
to write the partition's parameters after the name.
|
||||||
@ -150,7 +185,7 @@ in
|
|||||||
};
|
};
|
||||||
|
|
||||||
procTrackType = mkOption {
|
procTrackType = mkOption {
|
||||||
type = types.string;
|
type = types.str;
|
||||||
default = "proctrack/linuxproc";
|
default = "proctrack/linuxproc";
|
||||||
description = ''
|
description = ''
|
||||||
Plugin to be used for process tracking on a job step basis.
|
Plugin to be used for process tracking on a job step basis.
|
||||||
@ -159,6 +194,25 @@ in
|
|||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
|
stateSaveLocation = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "/var/spool/slurmctld";
|
||||||
|
description = ''
|
||||||
|
Directory into which the Slurm controller, slurmctld, saves its state.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
user = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = defaultUser;
|
||||||
|
description = ''
|
||||||
|
Set this option when you want to run the slurmctld daemon
|
||||||
|
as something else than the default slurm user "slurm".
|
||||||
|
Note that the UID of this user needs to be the same
|
||||||
|
on all nodes.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
extraConfig = mkOption {
|
extraConfig = mkOption {
|
||||||
default = "";
|
default = "";
|
||||||
type = types.lines;
|
type = types.lines;
|
||||||
@ -184,6 +238,8 @@ in
|
|||||||
used when <literal>procTrackType=proctrack/cgroup</literal>.
|
used when <literal>procTrackType=proctrack/cgroup</literal>.
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
};
|
};
|
||||||
@ -220,12 +276,24 @@ in
|
|||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
in mkIf (cfg.enableStools || cfg.client.enable || cfg.server.enable) {
|
in mkIf ( cfg.enableStools ||
|
||||||
|
cfg.client.enable ||
|
||||||
|
cfg.server.enable ||
|
||||||
|
cfg.dbdserver.enable ) {
|
||||||
|
|
||||||
environment.systemPackages = [ wrappedSlurm ];
|
environment.systemPackages = [ wrappedSlurm ];
|
||||||
|
|
||||||
services.munge.enable = mkDefault true;
|
services.munge.enable = mkDefault true;
|
||||||
|
|
||||||
|
# use a static uid as default to ensure it is the same on all nodes
|
||||||
|
users.users.slurm = mkIf (cfg.user == defaultUser) {
|
||||||
|
name = defaultUser;
|
||||||
|
group = "slurm";
|
||||||
|
uid = config.ids.uids.slurm;
|
||||||
|
};
|
||||||
|
|
||||||
|
users.groups.slurm.gid = config.ids.uids.slurm;
|
||||||
|
|
||||||
systemd.services.slurmd = mkIf (cfg.client.enable) {
|
systemd.services.slurmd = mkIf (cfg.client.enable) {
|
||||||
path = with pkgs; [ wrappedSlurm coreutils ]
|
path = with pkgs; [ wrappedSlurm coreutils ]
|
||||||
++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
|
++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
|
||||||
@ -261,6 +329,29 @@ in
|
|||||||
PIDFile = "/run/slurmctld.pid";
|
PIDFile = "/run/slurmctld.pid";
|
||||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||||
};
|
};
|
||||||
|
|
||||||
|
preStart = ''
|
||||||
|
mkdir -p ${cfg.stateSaveLocation}
|
||||||
|
chown -R ${cfg.user}:slurm ${cfg.stateSaveLocation}
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
systemd.services.slurmdbd = mkIf (cfg.dbdserver.enable) {
|
||||||
|
path = with pkgs; [ wrappedSlurm munge coreutils ];
|
||||||
|
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
after = [ "network.target" "munged.service" "mysql.service" ];
|
||||||
|
requires = [ "munged.service" "mysql.service" ];
|
||||||
|
|
||||||
|
# slurm strips the last component off the path
|
||||||
|
environment.SLURM_CONF = "${slurmdbdConf}/slurm.conf";
|
||||||
|
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "forking";
|
||||||
|
ExecStart = "${cfg.package}/bin/slurmdbd";
|
||||||
|
PIDFile = "/run/slurmdbd.pid";
|
||||||
|
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
};
|
};
|
||||||
|
@ -55,7 +55,7 @@ in
|
|||||||
|
|
||||||
package = mkOption {
|
package = mkOption {
|
||||||
type = types.package;
|
type = types.package;
|
||||||
example = literalExample "pkgs.postgresql96";
|
example = literalExample "pkgs.postgresql_9_6";
|
||||||
description = ''
|
description = ''
|
||||||
PostgreSQL package to use.
|
PostgreSQL package to use.
|
||||||
'';
|
'';
|
||||||
@ -118,7 +118,7 @@ in
|
|||||||
extraPlugins = mkOption {
|
extraPlugins = mkOption {
|
||||||
type = types.listOf types.path;
|
type = types.listOf types.path;
|
||||||
default = [];
|
default = [];
|
||||||
example = literalExample "[ (pkgs.postgis.override { postgresql = pkgs.postgresql94; }) ]";
|
example = literalExample "[ (pkgs.postgis.override { postgresql = pkgs.postgresql_9_4; }) ]";
|
||||||
description = ''
|
description = ''
|
||||||
When this list contains elements a new store path is created.
|
When this list contains elements a new store path is created.
|
||||||
PostgreSQL and the elements are symlinked into it. Then pg_config,
|
PostgreSQL and the elements are symlinked into it. Then pg_config,
|
||||||
@ -167,9 +167,9 @@ in
|
|||||||
# Note: when changing the default, make it conditional on
|
# Note: when changing the default, make it conditional on
|
||||||
# ‘system.stateVersion’ to maintain compatibility with existing
|
# ‘system.stateVersion’ to maintain compatibility with existing
|
||||||
# systems!
|
# systems!
|
||||||
mkDefault (if versionAtLeast config.system.stateVersion "17.09" then pkgs.postgresql96
|
mkDefault (if versionAtLeast config.system.stateVersion "17.09" then pkgs.postgresql_9_6
|
||||||
else if versionAtLeast config.system.stateVersion "16.03" then pkgs.postgresql95
|
else if versionAtLeast config.system.stateVersion "16.03" then pkgs.postgresql_9_5
|
||||||
else pkgs.postgresql94);
|
else pkgs.postgresql_9_4);
|
||||||
|
|
||||||
services.postgresql.dataDir =
|
services.postgresql.dataDir =
|
||||||
mkDefault (if versionAtLeast config.system.stateVersion "17.09" then "/var/lib/postgresql/${config.services.postgresql.package.psqlSchema}"
|
mkDefault (if versionAtLeast config.system.stateVersion "17.09" then "/var/lib/postgresql/${config.services.postgresql.package.psqlSchema}"
|
||||||
@ -271,5 +271,5 @@ in
|
|||||||
};
|
};
|
||||||
|
|
||||||
meta.doc = ./postgresql.xml;
|
meta.doc = ./postgresql.xml;
|
||||||
|
meta.maintainers = with lib.maintainers; [ thoughtpolice ];
|
||||||
}
|
}
|
||||||
|
@ -27,12 +27,12 @@
|
|||||||
<filename>configuration.nix</filename>:
|
<filename>configuration.nix</filename>:
|
||||||
<programlisting>
|
<programlisting>
|
||||||
<xref linkend="opt-services.postgresql.enable"/> = true;
|
<xref linkend="opt-services.postgresql.enable"/> = true;
|
||||||
<xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql94;
|
<xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql_9_4;
|
||||||
</programlisting>
|
</programlisting>
|
||||||
Note that you are required to specify the desired version of PostgreSQL
|
Note that you are required to specify the desired version of PostgreSQL
|
||||||
(e.g. <literal>pkgs.postgresql94</literal>). Since upgrading your PostgreSQL
|
(e.g. <literal>pkgs.postgresql_9_4</literal>). Since upgrading your
|
||||||
version requires a database dump and reload (see below), NixOS cannot
|
PostgreSQL version requires a database dump and reload (see below), NixOS
|
||||||
provide a default value for
|
cannot provide a default value for
|
||||||
<xref linkend="opt-services.postgresql.package"/> such as the most recent
|
<xref linkend="opt-services.postgresql.package"/> such as the most recent
|
||||||
release of PostgreSQL.
|
release of PostgreSQL.
|
||||||
</para>
|
</para>
|
||||||
|
@ -56,6 +56,32 @@ in
|
|||||||
{ Type = "dbus";
|
{ Type = "dbus";
|
||||||
BusName = "org.freedesktop.UPower";
|
BusName = "org.freedesktop.UPower";
|
||||||
ExecStart = "@${cfg.package}/libexec/upowerd upowerd";
|
ExecStart = "@${cfg.package}/libexec/upowerd upowerd";
|
||||||
|
Restart = "on-failure";
|
||||||
|
# Upstream lockdown:
|
||||||
|
# Filesystem lockdown
|
||||||
|
ProtectSystem = "strict";
|
||||||
|
# Needed by keyboard backlight support
|
||||||
|
ProtectKernelTunables = false;
|
||||||
|
ProtectControlGroups = true;
|
||||||
|
ReadWritePaths = "/var/lib/upower";
|
||||||
|
ProtectHome = true;
|
||||||
|
PrivateTmp = true;
|
||||||
|
|
||||||
|
# Network
|
||||||
|
# PrivateNetwork=true would block udev's netlink socket
|
||||||
|
RestrictAddressFamilies = "AF_UNIX AF_NETLINK";
|
||||||
|
|
||||||
|
# Execute Mappings
|
||||||
|
MemoryDenyWriteExecute = true;
|
||||||
|
|
||||||
|
# Modules
|
||||||
|
ProtectKernelModules = true;
|
||||||
|
|
||||||
|
# Real-time
|
||||||
|
RestrictRealtime = true;
|
||||||
|
|
||||||
|
# Privilege escalation
|
||||||
|
NoNewPrivileges = true;
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -176,4 +176,6 @@ in
|
|||||||
}
|
}
|
||||||
) cfg.instances);
|
) cfg.instances);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
meta.maintainers = with lib.maintainers; [ ekleog ];
|
||||||
}
|
}
|
||||||
|
@ -115,4 +115,6 @@ in
|
|||||||
};
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
meta.maintainers = with lib.maintainers; [ ekleog ];
|
||||||
}
|
}
|
||||||
|
@ -127,11 +127,15 @@ let
|
|||||||
options {
|
options {
|
||||||
pidfile = "$RUNDIR/rspamd.pid";
|
pidfile = "$RUNDIR/rspamd.pid";
|
||||||
.include "$CONFDIR/options.inc"
|
.include "$CONFDIR/options.inc"
|
||||||
|
.include(try=true; priority=1,duplicate=merge) "$LOCAL_CONFDIR/local.d/options.inc"
|
||||||
|
.include(try=true; priority=10) "$LOCAL_CONFDIR/override.d/options.inc"
|
||||||
}
|
}
|
||||||
|
|
||||||
logging {
|
logging {
|
||||||
type = "syslog";
|
type = "syslog";
|
||||||
.include "$CONFDIR/logging.inc"
|
.include "$CONFDIR/logging.inc"
|
||||||
|
.include(try=true; priority=1,duplicate=merge) "$LOCAL_CONFDIR/local.d/logging.inc"
|
||||||
|
.include(try=true; priority=10) "$LOCAL_CONFDIR/override.d/logging.inc"
|
||||||
}
|
}
|
||||||
|
|
||||||
${concatStringsSep "\n" (mapAttrsToList (name: value: ''
|
${concatStringsSep "\n" (mapAttrsToList (name: value: ''
|
||||||
@ -149,6 +153,41 @@ let
|
|||||||
${cfg.extraConfig}
|
${cfg.extraConfig}
|
||||||
'';
|
'';
|
||||||
|
|
||||||
|
rspamdDir = pkgs.linkFarm "etc-rspamd-dir" (
|
||||||
|
(mapAttrsToList (name: file: { name = "local.d/${name}"; path = file.source; }) cfg.locals) ++
|
||||||
|
(mapAttrsToList (name: file: { name = "override.d/${name}"; path = file.source; }) cfg.overrides) ++
|
||||||
|
(optional (cfg.localLuaRules != null) { name = "rspamd.local.lua"; path = cfg.localLuaRules; }) ++
|
||||||
|
[ { name = "rspamd.conf"; path = rspamdConfFile; } ]
|
||||||
|
);
|
||||||
|
|
||||||
|
configFileModule = prefix: { name, config, ... }: {
|
||||||
|
options = {
|
||||||
|
enable = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = true;
|
||||||
|
description = ''
|
||||||
|
Whether this file ${prefix} should be generated. This
|
||||||
|
option allows specific ${prefix} files to be disabled.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
text = mkOption {
|
||||||
|
default = null;
|
||||||
|
type = types.nullOr types.lines;
|
||||||
|
description = "Text of the file.";
|
||||||
|
};
|
||||||
|
|
||||||
|
source = mkOption {
|
||||||
|
type = types.path;
|
||||||
|
description = "Path of the source file.";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
config = {
|
||||||
|
source = mkIf (config.text != null) (
|
||||||
|
let name' = "rspamd-${prefix}-" + baseNameOf name;
|
||||||
|
in mkDefault (pkgs.writeText name' config.text));
|
||||||
|
};
|
||||||
|
};
|
||||||
in
|
in
|
||||||
|
|
||||||
{
|
{
|
||||||
@ -167,6 +206,41 @@ in
|
|||||||
description = "Whether to run the rspamd daemon in debug mode.";
|
description = "Whether to run the rspamd daemon in debug mode.";
|
||||||
};
|
};
|
||||||
|
|
||||||
|
locals = mkOption {
|
||||||
|
type = with types; loaOf (submodule (configFileModule "locals"));
|
||||||
|
default = {};
|
||||||
|
description = ''
|
||||||
|
Local configuration files, written into <filename>/etc/rspamd/local.d/{name}</filename>.
|
||||||
|
'';
|
||||||
|
example = literalExample ''
|
||||||
|
{ "redis.conf".source = "/nix/store/.../etc/dir/redis.conf";
|
||||||
|
"arc.conf".text = "allow_envfrom_empty = true;";
|
||||||
|
}
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
overrides = mkOption {
|
||||||
|
type = with types; loaOf (submodule (configFileModule "overrides"));
|
||||||
|
default = {};
|
||||||
|
description = ''
|
||||||
|
Overridden configuration files, written into <filename>/etc/rspamd/override.d/{name}</filename>.
|
||||||
|
'';
|
||||||
|
example = literalExample ''
|
||||||
|
{ "redis.conf".source = "/nix/store/.../etc/dir/redis.conf";
|
||||||
|
"arc.conf".text = "allow_envfrom_empty = true;";
|
||||||
|
}
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
localLuaRules = mkOption {
|
||||||
|
default = null;
|
||||||
|
type = types.nullOr types.path;
|
||||||
|
description = ''
|
||||||
|
Path of file to link to <filename>/etc/rspamd/rspamd.local.lua</filename> for local
|
||||||
|
rules written in Lua
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
workers = mkOption {
|
workers = mkOption {
|
||||||
type = with types; attrsOf (submodule workerOpts);
|
type = with types; attrsOf (submodule workerOpts);
|
||||||
description = ''
|
description = ''
|
||||||
@ -242,16 +316,17 @@ in
|
|||||||
gid = config.ids.gids.rspamd;
|
gid = config.ids.gids.rspamd;
|
||||||
};
|
};
|
||||||
|
|
||||||
environment.etc."rspamd.conf".source = rspamdConfFile;
|
environment.etc."rspamd".source = rspamdDir;
|
||||||
|
|
||||||
systemd.services.rspamd = {
|
systemd.services.rspamd = {
|
||||||
description = "Rspamd Service";
|
description = "Rspamd Service";
|
||||||
|
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
after = [ "network.target" ];
|
after = [ "network.target" ];
|
||||||
|
restartTriggers = [ rspamdDir ];
|
||||||
|
|
||||||
serviceConfig = {
|
serviceConfig = {
|
||||||
ExecStart = "${pkgs.rspamd}/bin/rspamd ${optionalString cfg.debug "-d"} --user=${cfg.user} --group=${cfg.group} --pid=/run/rspamd.pid -c ${rspamdConfFile} -f";
|
ExecStart = "${pkgs.rspamd}/bin/rspamd ${optionalString cfg.debug "-d"} --user=${cfg.user} --group=${cfg.group} --pid=/run/rspamd.pid -c /etc/rspamd/rspamd.conf -f";
|
||||||
Restart = "always";
|
Restart = "always";
|
||||||
RuntimeDirectory = "rspamd";
|
RuntimeDirectory = "rspamd";
|
||||||
PrivateTmp = true;
|
PrivateTmp = true;
|
||||||
|
@ -14,15 +14,16 @@ let
|
|||||||
pathUrlQuote = url: replaceStrings ["/"] ["%2F"] url;
|
pathUrlQuote = url: replaceStrings ["/"] ["%2F"] url;
|
||||||
pgSuperUser = config.services.postgresql.superUser;
|
pgSuperUser = config.services.postgresql.superUser;
|
||||||
|
|
||||||
databaseYml = ''
|
databaseConfig = {
|
||||||
production:
|
production = {
|
||||||
adapter: postgresql
|
adapter = "postgresql";
|
||||||
database: ${cfg.databaseName}
|
database = cfg.databaseName;
|
||||||
host: ${cfg.databaseHost}
|
host = cfg.databaseHost;
|
||||||
password: ${cfg.databasePassword}
|
password = cfg.databasePassword;
|
||||||
username: ${cfg.databaseUsername}
|
username = cfg.databaseUsername;
|
||||||
encoding: utf8
|
encoding = "utf8";
|
||||||
'';
|
};
|
||||||
|
};
|
||||||
|
|
||||||
gitalyToml = pkgs.writeText "gitaly.toml" ''
|
gitalyToml = pkgs.writeText "gitaly.toml" ''
|
||||||
socket_path = "${lib.escape ["\""] gitalySocket}"
|
socket_path = "${lib.escape ["\""] gitalySocket}"
|
||||||
@ -45,35 +46,31 @@ let
|
|||||||
'') gitlabConfig.production.repositories.storages))}
|
'') gitlabConfig.production.repositories.storages))}
|
||||||
'';
|
'';
|
||||||
|
|
||||||
gitlabShellYml = ''
|
gitlabShellConfig = {
|
||||||
user: ${cfg.user}
|
user = cfg.user;
|
||||||
gitlab_url: "http+unix://${pathUrlQuote gitlabSocket}"
|
gitlab_url = "http+unix://${pathUrlQuote gitlabSocket}";
|
||||||
http_settings:
|
http_settings.self_signed_cert = false;
|
||||||
self_signed_cert: false
|
repos_path = "${cfg.statePath}/repositories";
|
||||||
repos_path: "${cfg.statePath}/repositories"
|
secret_file = "${cfg.statePath}/config/gitlab_shell_secret";
|
||||||
secret_file: "${cfg.statePath}/config/gitlab_shell_secret"
|
log_file = "${cfg.statePath}/log/gitlab-shell.log";
|
||||||
log_file: "${cfg.statePath}/log/gitlab-shell.log"
|
custom_hooks_dir = "${cfg.statePath}/custom_hooks";
|
||||||
custom_hooks_dir: "${cfg.statePath}/custom_hooks"
|
redis = {
|
||||||
redis:
|
bin = "${pkgs.redis}/bin/redis-cli";
|
||||||
bin: ${pkgs.redis}/bin/redis-cli
|
host = "127.0.0.1";
|
||||||
host: 127.0.0.1
|
port = 6379;
|
||||||
port: 6379
|
database = 0;
|
||||||
database: 0
|
namespace = "resque:gitlab";
|
||||||
namespace: resque:gitlab
|
};
|
||||||
'';
|
};
|
||||||
|
|
||||||
redisYml = ''
|
redisConfig.production.url = "redis://localhost:6379/";
|
||||||
production:
|
|
||||||
url: redis://localhost:6379/
|
|
||||||
'';
|
|
||||||
|
|
||||||
secretsYml = ''
|
secretsConfig.production = {
|
||||||
production:
|
secret_key_base = cfg.secrets.secret;
|
||||||
secret_key_base: ${cfg.secrets.secret}
|
otp_key_base = cfg.secrets.otp;
|
||||||
otp_key_base: ${cfg.secrets.otp}
|
db_key_base = cfg.secrets.db;
|
||||||
db_key_base: ${cfg.secrets.db}
|
openid_connect_signing_key = cfg.secrets.jws;
|
||||||
openid_connect_signing_key: ${builtins.toJSON cfg.secrets.jws}
|
};
|
||||||
'';
|
|
||||||
|
|
||||||
gitlabConfig = {
|
gitlabConfig = {
|
||||||
# These are the default settings from config/gitlab.example.yml
|
# These are the default settings from config/gitlab.example.yml
|
||||||
@ -115,12 +112,8 @@ let
|
|||||||
upload_pack = true;
|
upload_pack = true;
|
||||||
receive_pack = true;
|
receive_pack = true;
|
||||||
};
|
};
|
||||||
workhorse = {
|
workhorse.secret_file = "${cfg.statePath}/.gitlab_workhorse_secret";
|
||||||
secret_file = "${cfg.statePath}/.gitlab_workhorse_secret";
|
git.bin_path = "git";
|
||||||
};
|
|
||||||
git = {
|
|
||||||
bin_path = "git";
|
|
||||||
};
|
|
||||||
monitoring = {
|
monitoring = {
|
||||||
ip_whitelist = [ "127.0.0.0/8" "::1/128" ];
|
ip_whitelist = [ "127.0.0.0/8" "::1/128" ];
|
||||||
sidekiq_exporter = {
|
sidekiq_exporter = {
|
||||||
@ -138,7 +131,7 @@ let
|
|||||||
HOME = "${cfg.statePath}/home";
|
HOME = "${cfg.statePath}/home";
|
||||||
UNICORN_PATH = "${cfg.statePath}/";
|
UNICORN_PATH = "${cfg.statePath}/";
|
||||||
GITLAB_PATH = "${cfg.packages.gitlab}/share/gitlab/";
|
GITLAB_PATH = "${cfg.packages.gitlab}/share/gitlab/";
|
||||||
GITLAB_STATE_PATH = "${cfg.statePath}";
|
GITLAB_STATE_PATH = cfg.statePath;
|
||||||
GITLAB_UPLOADS_PATH = "${cfg.statePath}/uploads";
|
GITLAB_UPLOADS_PATH = "${cfg.statePath}/uploads";
|
||||||
SCHEMA = "${cfg.statePath}/db/schema.rb";
|
SCHEMA = "${cfg.statePath}/db/schema.rb";
|
||||||
GITLAB_LOG_PATH = "${cfg.statePath}/log";
|
GITLAB_LOG_PATH = "${cfg.statePath}/log";
|
||||||
@ -146,13 +139,11 @@ let
|
|||||||
GITLAB_SHELL_CONFIG_PATH = "${cfg.statePath}/shell/config.yml";
|
GITLAB_SHELL_CONFIG_PATH = "${cfg.statePath}/shell/config.yml";
|
||||||
GITLAB_SHELL_SECRET_PATH = "${cfg.statePath}/config/gitlab_shell_secret";
|
GITLAB_SHELL_SECRET_PATH = "${cfg.statePath}/config/gitlab_shell_secret";
|
||||||
GITLAB_SHELL_HOOKS_PATH = "${cfg.statePath}/shell/hooks";
|
GITLAB_SHELL_HOOKS_PATH = "${cfg.statePath}/shell/hooks";
|
||||||
GITLAB_REDIS_CONFIG_FILE = pkgs.writeText "gitlab-redis.yml" redisYml;
|
GITLAB_REDIS_CONFIG_FILE = pkgs.writeText "redis.yml" (builtins.toJSON redisConfig);
|
||||||
prometheus_multiproc_dir = "/run/gitlab";
|
prometheus_multiproc_dir = "/run/gitlab";
|
||||||
RAILS_ENV = "production";
|
RAILS_ENV = "production";
|
||||||
};
|
};
|
||||||
|
|
||||||
unicornConfig = builtins.readFile ./defaultUnicornConfig.rb;
|
|
||||||
|
|
||||||
gitlab-rake = pkgs.stdenv.mkDerivation rec {
|
gitlab-rake = pkgs.stdenv.mkDerivation rec {
|
||||||
name = "gitlab-rake";
|
name = "gitlab-rake";
|
||||||
buildInputs = [ pkgs.makeWrapper ];
|
buildInputs = [ pkgs.makeWrapper ];
|
||||||
@ -162,7 +153,6 @@ let
|
|||||||
mkdir -p $out/bin
|
mkdir -p $out/bin
|
||||||
makeWrapper ${cfg.packages.gitlab.rubyEnv}/bin/rake $out/bin/gitlab-rake \
|
makeWrapper ${cfg.packages.gitlab.rubyEnv}/bin/rake $out/bin/gitlab-rake \
|
||||||
${concatStrings (mapAttrsToList (name: value: "--set ${name} '${value}' ") gitlabEnv)} \
|
${concatStrings (mapAttrsToList (name: value: "--set ${name} '${value}' ") gitlabEnv)} \
|
||||||
--set GITLAB_CONFIG_PATH '${cfg.statePath}/config' \
|
|
||||||
--set PATH '${lib.makeBinPath [ pkgs.nodejs pkgs.gzip pkgs.git pkgs.gnutar config.services.postgresql.package pkgs.coreutils pkgs.procps ]}:$PATH' \
|
--set PATH '${lib.makeBinPath [ pkgs.nodejs pkgs.gzip pkgs.git pkgs.gnutar config.services.postgresql.package pkgs.coreutils pkgs.procps ]}:$PATH' \
|
||||||
--set RAKEOPT '-f ${cfg.packages.gitlab}/share/gitlab/Rakefile' \
|
--set RAKEOPT '-f ${cfg.packages.gitlab}/share/gitlab/Rakefile' \
|
||||||
--run 'cd ${cfg.packages.gitlab}/share/gitlab'
|
--run 'cd ${cfg.packages.gitlab}/share/gitlab'
|
||||||
@ -306,7 +296,6 @@ in {
|
|||||||
|
|
||||||
initialRootPassword = mkOption {
|
initialRootPassword = mkOption {
|
||||||
type = types.str;
|
type = types.str;
|
||||||
default = "UseNixOS!";
|
|
||||||
description = ''
|
description = ''
|
||||||
Initial password of the root account if this is a new install.
|
Initial password of the root account if this is a new install.
|
||||||
'';
|
'';
|
||||||
@ -461,10 +450,30 @@ in {
|
|||||||
}
|
}
|
||||||
];
|
];
|
||||||
|
|
||||||
|
systemd.tmpfiles.rules = [
|
||||||
|
"d /run/gitlab 0755 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${gitlabEnv.HOME} 0750 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${cfg.backupPath} 0750 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${cfg.statePath}/builds 0750 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${cfg.statePath}/config 0750 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${cfg.statePath}/db 0750 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${cfg.statePath}/log 0750 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${cfg.statePath}/repositories 2770 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${cfg.statePath}/shell 0750 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${cfg.statePath}/tmp/pids 0750 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${cfg.statePath}/tmp/sockets 0750 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${cfg.statePath}/uploads 0700 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${cfg.statePath}/custom_hooks/pre-receive.d 0700 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${cfg.statePath}/custom_hooks/post-receive.d 0700 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${cfg.statePath}/custom_hooks/update.d 0700 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${gitlabConfig.production.shared.path}/artifacts 0750 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${gitlabConfig.production.shared.path}/lfs-objects 0750 ${cfg.user} ${cfg.group} -"
|
||||||
|
"d ${gitlabConfig.production.shared.path}/pages 0750 ${cfg.user} ${cfg.group} -"
|
||||||
|
];
|
||||||
|
|
||||||
systemd.services.gitlab-sidekiq = {
|
systemd.services.gitlab-sidekiq = {
|
||||||
after = [ "network.target" "redis.service" ];
|
after = [ "network.target" "redis.service" "gitlab.service" ];
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
partOf = [ "gitlab.service" ];
|
|
||||||
environment = gitlabEnv;
|
environment = gitlabEnv;
|
||||||
path = with pkgs; [
|
path = with pkgs; [
|
||||||
config.services.postgresql.package
|
config.services.postgresql.package
|
||||||
@ -486,10 +495,8 @@ in {
|
|||||||
};
|
};
|
||||||
|
|
||||||
systemd.services.gitaly = {
|
systemd.services.gitaly = {
|
||||||
after = [ "network.target" "gitlab.service" ];
|
after = [ "network.target" ];
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
environment.HOME = gitlabEnv.HOME;
|
|
||||||
environment.GITLAB_SHELL_CONFIG_PATH = gitlabEnv.GITLAB_SHELL_CONFIG_PATH;
|
|
||||||
path = with pkgs; [ gitAndTools.git cfg.packages.gitaly.rubyEnv cfg.packages.gitaly.rubyEnv.wrappedRuby ];
|
path = with pkgs; [ gitAndTools.git cfg.packages.gitaly.rubyEnv cfg.packages.gitaly.rubyEnv.wrappedRuby ];
|
||||||
serviceConfig = {
|
serviceConfig = {
|
||||||
Type = "simple";
|
Type = "simple";
|
||||||
@ -505,8 +512,6 @@ in {
|
|||||||
systemd.services.gitlab-workhorse = {
|
systemd.services.gitlab-workhorse = {
|
||||||
after = [ "network.target" ];
|
after = [ "network.target" ];
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
environment.HOME = gitlabEnv.HOME;
|
|
||||||
environment.GITLAB_SHELL_CONFIG_PATH = gitlabEnv.GITLAB_SHELL_CONFIG_PATH;
|
|
||||||
path = with pkgs; [
|
path = with pkgs; [
|
||||||
gitAndTools.git
|
gitAndTools.git
|
||||||
gnutar
|
gnutar
|
||||||
@ -514,10 +519,6 @@ in {
|
|||||||
openssh
|
openssh
|
||||||
gitlab-workhorse
|
gitlab-workhorse
|
||||||
];
|
];
|
||||||
preStart = ''
|
|
||||||
mkdir -p /run/gitlab
|
|
||||||
chown ${cfg.user}:${cfg.group} /run/gitlab
|
|
||||||
'';
|
|
||||||
serviceConfig = {
|
serviceConfig = {
|
||||||
PermissionsStartOnly = true; # preStart must be run as root
|
PermissionsStartOnly = true; # preStart must be run as root
|
||||||
Type = "simple";
|
Type = "simple";
|
||||||
@ -538,7 +539,7 @@ in {
|
|||||||
};
|
};
|
||||||
|
|
||||||
systemd.services.gitlab = {
|
systemd.services.gitlab = {
|
||||||
after = [ "network.target" "postgresql.service" "redis.service" ];
|
after = [ "gitlab-workhorse.service" "gitaly.service" "network.target" "postgresql.service" "redis.service" ];
|
||||||
requires = [ "gitlab-sidekiq.service" ];
|
requires = [ "gitlab-sidekiq.service" ];
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
environment = gitlabEnv;
|
environment = gitlabEnv;
|
||||||
@ -551,102 +552,76 @@ in {
|
|||||||
gnupg
|
gnupg
|
||||||
];
|
];
|
||||||
preStart = ''
|
preStart = ''
|
||||||
mkdir -p ${cfg.backupPath}
|
|
||||||
mkdir -p ${cfg.statePath}/builds
|
|
||||||
mkdir -p ${cfg.statePath}/repositories
|
|
||||||
mkdir -p ${gitlabConfig.production.shared.path}/artifacts
|
|
||||||
mkdir -p ${gitlabConfig.production.shared.path}/lfs-objects
|
|
||||||
mkdir -p ${gitlabConfig.production.shared.path}/pages
|
|
||||||
mkdir -p ${cfg.statePath}/log
|
|
||||||
mkdir -p ${cfg.statePath}/tmp/pids
|
|
||||||
mkdir -p ${cfg.statePath}/tmp/sockets
|
|
||||||
mkdir -p ${cfg.statePath}/shell
|
|
||||||
mkdir -p ${cfg.statePath}/db
|
|
||||||
mkdir -p ${cfg.statePath}/uploads
|
|
||||||
mkdir -p ${cfg.statePath}/custom_hooks/pre-receive.d
|
|
||||||
mkdir -p ${cfg.statePath}/custom_hooks/post-receive.d
|
|
||||||
mkdir -p ${cfg.statePath}/custom_hooks/update.d
|
|
||||||
|
|
||||||
rm -rf ${cfg.statePath}/config ${cfg.statePath}/shell/hooks
|
|
||||||
mkdir -p ${cfg.statePath}/config
|
|
||||||
|
|
||||||
${pkgs.openssl}/bin/openssl rand -hex 32 > ${cfg.statePath}/config/gitlab_shell_secret
|
|
||||||
|
|
||||||
mkdir -p /run/gitlab
|
|
||||||
mkdir -p ${cfg.statePath}/log
|
|
||||||
[ -d /run/gitlab/log ] || ln -sf ${cfg.statePath}/log /run/gitlab/log
|
|
||||||
[ -d /run/gitlab/tmp ] || ln -sf ${cfg.statePath}/tmp /run/gitlab/tmp
|
|
||||||
[ -d /run/gitlab/uploads ] || ln -sf ${cfg.statePath}/uploads /run/gitlab/uploads
|
|
||||||
ln -sf $GITLAB_SHELL_CONFIG_PATH /run/gitlab/shell-config.yml
|
|
||||||
chown -R ${cfg.user}:${cfg.group} /run/gitlab
|
|
||||||
|
|
||||||
# Prepare home directory
|
|
||||||
mkdir -p ${gitlabEnv.HOME}/.ssh
|
|
||||||
touch ${gitlabEnv.HOME}/.ssh/authorized_keys
|
|
||||||
chown -R ${cfg.user}:${cfg.group} ${gitlabEnv.HOME}/
|
|
||||||
|
|
||||||
cp -rf ${cfg.packages.gitlab}/share/gitlab/db/* ${cfg.statePath}/db
|
cp -rf ${cfg.packages.gitlab}/share/gitlab/db/* ${cfg.statePath}/db
|
||||||
cp -rf ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
|
rm -rf ${cfg.statePath}/config
|
||||||
${optionalString cfg.smtp.enable ''
|
mkdir ${cfg.statePath}/config
|
||||||
ln -sf ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb
|
|
||||||
''}
|
|
||||||
ln -sf ${cfg.statePath}/config /run/gitlab/config
|
|
||||||
if [ -e ${cfg.statePath}/lib ]; then
|
if [ -e ${cfg.statePath}/lib ]; then
|
||||||
rm ${cfg.statePath}/lib
|
rm ${cfg.statePath}/lib
|
||||||
fi
|
fi
|
||||||
ln -sf ${pkgs.gitlab}/share/gitlab/lib ${cfg.statePath}/lib
|
|
||||||
|
ln -sf ${cfg.packages.gitlab}/share/gitlab/lib ${cfg.statePath}/lib
|
||||||
|
[ -L /run/gitlab/config ] || ln -sf ${cfg.statePath}/config /run/gitlab/config
|
||||||
|
[ -L /run/gitlab/log ] || ln -sf ${cfg.statePath}/log /run/gitlab/log
|
||||||
|
[ -L /run/gitlab/tmp ] || ln -sf ${cfg.statePath}/tmp /run/gitlab/tmp
|
||||||
|
[ -L /run/gitlab/uploads ] || ln -sf ${cfg.statePath}/uploads /run/gitlab/uploads
|
||||||
|
${optionalString cfg.smtp.enable ''
|
||||||
|
ln -sf ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb
|
||||||
|
''}
|
||||||
cp ${cfg.packages.gitlab}/share/gitlab/VERSION ${cfg.statePath}/VERSION
|
cp ${cfg.packages.gitlab}/share/gitlab/VERSION ${cfg.statePath}/VERSION
|
||||||
|
cp -rf ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
|
||||||
|
${pkgs.openssl}/bin/openssl rand -hex 32 > ${cfg.statePath}/config/gitlab_shell_secret
|
||||||
|
|
||||||
# JSON is a subset of YAML
|
# JSON is a subset of YAML
|
||||||
ln -fs ${pkgs.writeText "gitlab.yml" (builtins.toJSON gitlabConfig)} ${cfg.statePath}/config/gitlab.yml
|
ln -sf ${pkgs.writeText "gitlab.yml" (builtins.toJSON gitlabConfig)} ${cfg.statePath}/config/gitlab.yml
|
||||||
ln -fs ${pkgs.writeText "database.yml" databaseYml} ${cfg.statePath}/config/database.yml
|
ln -sf ${pkgs.writeText "database.yml" (builtins.toJSON databaseConfig)} ${cfg.statePath}/config/database.yml
|
||||||
ln -fs ${pkgs.writeText "secrets.yml" secretsYml} ${cfg.statePath}/config/secrets.yml
|
ln -sf ${pkgs.writeText "secrets.yml" (builtins.toJSON secretsConfig)} ${cfg.statePath}/config/secrets.yml
|
||||||
ln -fs ${pkgs.writeText "unicorn.rb" unicornConfig} ${cfg.statePath}/config/unicorn.rb
|
ln -sf ${./defaultUnicornConfig.rb} ${cfg.statePath}/config/unicorn.rb
|
||||||
|
|
||||||
|
# Install the shell required to push repositories
|
||||||
|
ln -sf ${pkgs.writeText "config.yml" (builtins.toJSON gitlabShellConfig)} /run/gitlab/shell-config.yml
|
||||||
|
[ -L ${cfg.statePath}/shell/hooks ] || ln -sf ${cfg.packages.gitlab-shell}/hooks ${cfg.statePath}/shell/hooks
|
||||||
|
${cfg.packages.gitlab-shell}/bin/install
|
||||||
|
|
||||||
chown -R ${cfg.user}:${cfg.group} ${cfg.statePath}/
|
chown -R ${cfg.user}:${cfg.group} ${cfg.statePath}/
|
||||||
chmod -R ug+rwX,o-rwx+X ${cfg.statePath}/
|
chmod -R ug+rwX,o-rwx+X ${cfg.statePath}/
|
||||||
|
chown -R ${cfg.user}:${cfg.group} /run/gitlab
|
||||||
|
|
||||||
# Install the shell required to push repositories
|
|
||||||
ln -fs ${pkgs.writeText "config.yml" gitlabShellYml} "$GITLAB_SHELL_CONFIG_PATH"
|
|
||||||
ln -fs ${cfg.packages.gitlab-shell}/hooks "$GITLAB_SHELL_HOOKS_PATH"
|
|
||||||
${cfg.packages.gitlab-shell}/bin/install
|
|
||||||
|
|
||||||
if [ "${cfg.databaseHost}" = "127.0.0.1" ]; then
|
|
||||||
if ! test -e "${cfg.statePath}/db-created"; then
|
if ! test -e "${cfg.statePath}/db-created"; then
|
||||||
|
if [ "${cfg.databaseHost}" = "127.0.0.1" ]; then
|
||||||
${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql postgres -c "CREATE ROLE ${cfg.databaseUsername} WITH LOGIN NOCREATEDB NOCREATEROLE ENCRYPTED PASSWORD '${cfg.databasePassword}'"
|
${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql postgres -c "CREATE ROLE ${cfg.databaseUsername} WITH LOGIN NOCREATEDB NOCREATEROLE ENCRYPTED PASSWORD '${cfg.databasePassword}'"
|
||||||
${pkgs.sudo}/bin/sudo -u ${pgSuperUser} ${config.services.postgresql.package}/bin/createdb --owner ${cfg.databaseUsername} ${cfg.databaseName}
|
${pkgs.sudo}/bin/sudo -u ${pgSuperUser} ${config.services.postgresql.package}/bin/createdb --owner ${cfg.databaseUsername} ${cfg.databaseName}
|
||||||
touch "${cfg.statePath}/db-created"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# enable required pg_trgm extension for gitlab
|
# enable required pg_trgm extension for gitlab
|
||||||
${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql ${cfg.databaseName} -c "CREATE EXTENSION IF NOT EXISTS pg_trgm"
|
${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql ${cfg.databaseName} -c "CREATE EXTENSION IF NOT EXISTS pg_trgm"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Always do the db migrations just to be sure the database is up-to-date
|
${pkgs.sudo}/bin/sudo -u ${cfg.user} -H ${gitlab-rake}/bin/gitlab-rake db:schema:load
|
||||||
${gitlab-rake}/bin/gitlab-rake db:migrate RAILS_ENV=production
|
|
||||||
|
touch "${cfg.statePath}/db-created"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Always do the db migrations just to be sure the database is up-to-date
|
||||||
|
${pkgs.sudo}/bin/sudo -u ${cfg.user} -H ${gitlab-rake}/bin/gitlab-rake db:migrate
|
||||||
|
|
||||||
# The gitlab:setup task is horribly broken somehow, the db:migrate
|
|
||||||
# task above and the db:seed_fu below will do the same for setting
|
|
||||||
# up the initial database
|
|
||||||
if ! test -e "${cfg.statePath}/db-seeded"; then
|
if ! test -e "${cfg.statePath}/db-seeded"; then
|
||||||
${gitlab-rake}/bin/gitlab-rake db:seed_fu RAILS_ENV=production \
|
${pkgs.sudo}/bin/sudo -u ${cfg.user} ${gitlab-rake}/bin/gitlab-rake db:seed_fu \
|
||||||
GITLAB_ROOT_PASSWORD='${cfg.initialRootPassword}' GITLAB_ROOT_EMAIL='${cfg.initialRootEmail}'
|
GITLAB_ROOT_PASSWORD='${cfg.initialRootPassword}' GITLAB_ROOT_EMAIL='${cfg.initialRootEmail}'
|
||||||
touch "${cfg.statePath}/db-seeded"
|
touch "${cfg.statePath}/db-seeded"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# The gitlab:shell:setup regenerates the authorized_keys file so that
|
# The gitlab:shell:setup regenerates the authorized_keys file so that
|
||||||
# the store path to the gitlab-shell in it gets updated
|
# the store path to the gitlab-shell in it gets updated
|
||||||
${pkgs.sudo}/bin/sudo -u ${cfg.user} force=yes ${gitlab-rake}/bin/gitlab-rake gitlab:shell:setup RAILS_ENV=production
|
${pkgs.sudo}/bin/sudo -u ${cfg.user} -H force=yes ${gitlab-rake}/bin/gitlab-rake gitlab:shell:setup
|
||||||
|
|
||||||
# The gitlab:shell:create_hooks task seems broken for fixing links
|
# The gitlab:shell:create_hooks task seems broken for fixing links
|
||||||
# so we instead delete all the hooks and create them anew
|
# so we instead delete all the hooks and create them anew
|
||||||
rm -f ${cfg.statePath}/repositories/**/*.git/hooks
|
rm -f ${cfg.statePath}/repositories/**/*.git/hooks
|
||||||
${gitlab-rake}/bin/gitlab-rake gitlab:shell:create_hooks RAILS_ENV=production
|
${pkgs.sudo}/bin/sudo -u ${cfg.user} -H ${gitlab-rake}/bin/gitlab-rake gitlab:shell:create_hooks
|
||||||
|
|
||||||
|
${pkgs.sudo}/bin/sudo -u ${cfg.user} -H ${pkgs.git}/bin/git config --global core.autocrlf "input"
|
||||||
|
|
||||||
# Change permissions in the last step because some of the
|
# Change permissions in the last step because some of the
|
||||||
# intermediary scripts like to create directories as root.
|
# intermediary scripts like to create directories as root.
|
||||||
chown -R ${cfg.user}:${cfg.group} ${cfg.statePath}
|
|
||||||
chmod -R ug+rwX,o-rwx+X ${cfg.statePath}
|
|
||||||
chmod -R u+rwX,go-rwx+X ${gitlabEnv.HOME}
|
chmod -R u+rwX,go-rwx+X ${gitlabEnv.HOME}
|
||||||
chmod -R ug+rwX,o-rwx ${cfg.statePath}/repositories
|
chmod -R ug+rwX,o-rwx ${cfg.statePath}/repositories
|
||||||
chmod -R ug-s ${cfg.statePath}/repositories
|
chmod -R ug-s ${cfg.statePath}/repositories
|
||||||
|
@ -157,6 +157,7 @@ in {
|
|||||||
Restart = "on-failure";
|
Restart = "on-failure";
|
||||||
ProtectSystem = "strict";
|
ProtectSystem = "strict";
|
||||||
ReadWritePaths = "${cfg.configDir}";
|
ReadWritePaths = "${cfg.configDir}";
|
||||||
|
KillSignal = "SIGINT";
|
||||||
PrivateTmp = true;
|
PrivateTmp = true;
|
||||||
RemoveIPC = true;
|
RemoveIPC = true;
|
||||||
};
|
};
|
||||||
|
154
nixos/modules/services/monitoring/kapacitor.nix
Normal file
154
nixos/modules/services/monitoring/kapacitor.nix
Normal file
@ -0,0 +1,154 @@
|
|||||||
|
{ options, config, lib, pkgs, ... }:
|
||||||
|
|
||||||
|
with lib;
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.services.kapacitor;
|
||||||
|
|
||||||
|
kapacitorConf = pkgs.writeTextFile {
|
||||||
|
name = "kapacitord.conf";
|
||||||
|
text = ''
|
||||||
|
hostname="${config.networking.hostName}"
|
||||||
|
data_dir="${cfg.dataDir}"
|
||||||
|
|
||||||
|
[http]
|
||||||
|
bind-address = "${cfg.bind}:${toString cfg.port}"
|
||||||
|
log-enabled = false
|
||||||
|
auth-enabled = false
|
||||||
|
|
||||||
|
[task]
|
||||||
|
dir = "${cfg.dataDir}/tasks"
|
||||||
|
snapshot-interval = "${cfg.taskSnapshotInterval}"
|
||||||
|
|
||||||
|
[replay]
|
||||||
|
dir = "${cfg.dataDir}/replay"
|
||||||
|
|
||||||
|
[storage]
|
||||||
|
boltdb = "${cfg.dataDir}/kapacitor.db"
|
||||||
|
|
||||||
|
${optionalString (cfg.loadDirectory != null) ''
|
||||||
|
[load]
|
||||||
|
enabled = true
|
||||||
|
dir = "${cfg.loadDirectory}"
|
||||||
|
''}
|
||||||
|
|
||||||
|
${optionalString (cfg.defaultDatabase.enable) ''
|
||||||
|
[[influxdb]]
|
||||||
|
name = "default"
|
||||||
|
enabled = true
|
||||||
|
default = true
|
||||||
|
urls = [ "${cfg.defaultDatabase.url}" ]
|
||||||
|
username = "${cfg.defaultDatabase.username}"
|
||||||
|
password = "${cfg.defaultDatabase.password}"
|
||||||
|
''}
|
||||||
|
|
||||||
|
${cfg.extraConfig}
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options.services.kapacitor = {
|
||||||
|
enable = mkEnableOption "kapacitor";
|
||||||
|
|
||||||
|
dataDir = mkOption {
|
||||||
|
type = types.path;
|
||||||
|
example = "/var/lib/kapacitor";
|
||||||
|
default = "/var/lib/kapacitor";
|
||||||
|
description = "Location where Kapacitor stores its state";
|
||||||
|
};
|
||||||
|
|
||||||
|
port = mkOption {
|
||||||
|
type = types.int;
|
||||||
|
default = 9092;
|
||||||
|
description = "Port of Kapacitor";
|
||||||
|
};
|
||||||
|
|
||||||
|
bind = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "";
|
||||||
|
example = literalExample "0.0.0.0";
|
||||||
|
description = "Address to bind to. The default is to bind to all addresses";
|
||||||
|
};
|
||||||
|
|
||||||
|
extraConfig = mkOption {
|
||||||
|
description = "These lines go into kapacitord.conf verbatim.";
|
||||||
|
default = "";
|
||||||
|
type = types.lines;
|
||||||
|
};
|
||||||
|
|
||||||
|
user = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "kapacitor";
|
||||||
|
description = "User account under which Kapacitor runs";
|
||||||
|
};
|
||||||
|
|
||||||
|
group = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "kapacitor";
|
||||||
|
description = "Group under which Kapacitor runs";
|
||||||
|
};
|
||||||
|
|
||||||
|
taskSnapshotInterval = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
description = "Specifies how often to snapshot the task state (in InfluxDB time units)";
|
||||||
|
default = "1m0s";
|
||||||
|
example = "1m0s";
|
||||||
|
};
|
||||||
|
|
||||||
|
loadDirectory = mkOption {
|
||||||
|
type = types.nullOr types.path;
|
||||||
|
description = "Directory where to load services from, such as tasks, templates and handlers (or null to disable service loading on startup)";
|
||||||
|
default = null;
|
||||||
|
};
|
||||||
|
|
||||||
|
defaultDatabase = {
|
||||||
|
enable = mkEnableOption "kapacitor.defaultDatabase";
|
||||||
|
|
||||||
|
url = mkOption {
|
||||||
|
description = "The URL to an InfluxDB server that serves as the default database";
|
||||||
|
example = "http://localhost:8086";
|
||||||
|
type = types.string;
|
||||||
|
};
|
||||||
|
|
||||||
|
username = mkOption {
|
||||||
|
description = "The username to connect to the remote InfluxDB server";
|
||||||
|
type = types.string;
|
||||||
|
};
|
||||||
|
|
||||||
|
password = mkOption {
|
||||||
|
description = "The password to connect to the remote InfluxDB server";
|
||||||
|
type = types.string;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = mkIf cfg.enable {
|
||||||
|
environment.systemPackages = [ pkgs.kapacitor ];
|
||||||
|
|
||||||
|
systemd.services.kapacitor = {
|
||||||
|
description = "Kapacitor Real-Time Stream Processing Engine";
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
after = [ "networking.target" ];
|
||||||
|
serviceConfig = {
|
||||||
|
ExecStart = "${pkgs.kapacitor}/bin/kapacitord -config ${kapacitorConf}";
|
||||||
|
User = "kapacitor";
|
||||||
|
Group = "kapacitor";
|
||||||
|
PermissionsStartOnly = true;
|
||||||
|
};
|
||||||
|
preStart = ''
|
||||||
|
mkdir -p ${cfg.dataDir}
|
||||||
|
chown ${cfg.user}:${cfg.group} ${cfg.dataDir}
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
users.users.kapacitor = {
|
||||||
|
uid = config.ids.uids.kapacitor;
|
||||||
|
description = "Kapacitor user";
|
||||||
|
home = cfg.dataDir;
|
||||||
|
};
|
||||||
|
|
||||||
|
users.groups.kapacitor = {
|
||||||
|
gid = config.ids.gids.kapacitor;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
@ -33,7 +33,7 @@ let
|
|||||||
|
|
||||||
purple_plugin_path =
|
purple_plugin_path =
|
||||||
lib.concatMapStringsSep ":"
|
lib.concatMapStringsSep ":"
|
||||||
(plugin: "${plugin}/lib/pidgin/")
|
(plugin: "${plugin}/lib/pidgin/:${plugin}/lib/purple-2/")
|
||||||
cfg.libpurple_plugins
|
cfg.libpurple_plugins
|
||||||
;
|
;
|
||||||
|
|
||||||
|
@ -93,6 +93,8 @@ in
|
|||||||
|
|
||||||
services.timesyncd.enable = mkForce false;
|
services.timesyncd.enable = mkForce false;
|
||||||
|
|
||||||
|
systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "chronyd.service"; };
|
||||||
|
|
||||||
systemd.services.chronyd =
|
systemd.services.chronyd =
|
||||||
{ description = "chrony NTP daemon";
|
{ description = "chrony NTP daemon";
|
||||||
|
|
||||||
|
@ -6,9 +6,10 @@ let
|
|||||||
dataDir = "/var/lib/consul";
|
dataDir = "/var/lib/consul";
|
||||||
cfg = config.services.consul;
|
cfg = config.services.consul;
|
||||||
|
|
||||||
configOptions = { data_dir = dataDir; } //
|
configOptions = {
|
||||||
(if cfg.webUi then { ui_dir = "${cfg.package.ui}"; } else { }) //
|
data_dir = dataDir;
|
||||||
cfg.extraConfig;
|
ui = cfg.webUi;
|
||||||
|
} // cfg.extraConfig;
|
||||||
|
|
||||||
configFiles = [ "/etc/consul.json" "/etc/consul-addrs.json" ]
|
configFiles = [ "/etc/consul.json" "/etc/consul-addrs.json" ]
|
||||||
++ cfg.extraConfigFiles;
|
++ cfg.extraConfigFiles;
|
||||||
|
@ -67,6 +67,8 @@ in
|
|||||||
environment.systemPackages = [ pkgs.ntp ];
|
environment.systemPackages = [ pkgs.ntp ];
|
||||||
services.timesyncd.enable = mkForce false;
|
services.timesyncd.enable = mkForce false;
|
||||||
|
|
||||||
|
systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "ntpd.service"; };
|
||||||
|
|
||||||
users.users = singleton
|
users.users = singleton
|
||||||
{ name = ntpUser;
|
{ name = ntpUser;
|
||||||
uid = config.ids.uids.ntp;
|
uid = config.ids.uids.ntp;
|
||||||
|
@ -267,4 +267,6 @@ in
|
|||||||
"ip46tables -t nat -D OUTPUT -p tcp ${redCond block} -j ${chain} 2>/dev/null || true"
|
"ip46tables -t nat -D OUTPUT -p tcp ${redCond block} -j ${chain} 2>/dev/null || true"
|
||||||
) cfg.redsocks;
|
) cfg.redsocks;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
meta.maintainers = with lib.maintainers; [ ekleog ];
|
||||||
}
|
}
|
||||||
|
@ -62,9 +62,21 @@ in {
|
|||||||
dataDir = mkOption {
|
dataDir = mkOption {
|
||||||
type = types.path;
|
type = types.path;
|
||||||
default = "/var/lib/syncthing";
|
default = "/var/lib/syncthing";
|
||||||
|
description = ''
|
||||||
|
Path where synced directories will exist.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
configDir = mkOption {
|
||||||
|
type = types.path;
|
||||||
description = ''
|
description = ''
|
||||||
Path where the settings and keys will exist.
|
Path where the settings and keys will exist.
|
||||||
'';
|
'';
|
||||||
|
default =
|
||||||
|
let
|
||||||
|
nixos = config.system.stateVersion;
|
||||||
|
cond = versionAtLeast nixos "19.03";
|
||||||
|
in cfg.dataDir + (optionalString cond "/.config/syncthing");
|
||||||
};
|
};
|
||||||
|
|
||||||
openDefaultPorts = mkOption {
|
openDefaultPorts = mkOption {
|
||||||
@ -144,7 +156,7 @@ in {
|
|||||||
${cfg.package}/bin/syncthing \
|
${cfg.package}/bin/syncthing \
|
||||||
-no-browser \
|
-no-browser \
|
||||||
-gui-address=${cfg.guiAddress} \
|
-gui-address=${cfg.guiAddress} \
|
||||||
-home=${cfg.dataDir}
|
-home=${cfg.configDir}
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
@ -39,7 +39,8 @@ in
|
|||||||
systemd.services.zerotierone = {
|
systemd.services.zerotierone = {
|
||||||
description = "ZeroTierOne";
|
description = "ZeroTierOne";
|
||||||
path = [ cfg.package ];
|
path = [ cfg.package ];
|
||||||
after = [ "network.target" ];
|
bindsTo = [ "network-online.target" ];
|
||||||
|
after = [ "network-online.target" ];
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
preStart = ''
|
preStart = ''
|
||||||
mkdir -p /var/lib/zerotier-one/networks.d
|
mkdir -p /var/lib/zerotier-one/networks.d
|
||||||
|
@ -6,141 +6,104 @@ let
|
|||||||
|
|
||||||
cfg = config.services.solr;
|
cfg = config.services.solr;
|
||||||
|
|
||||||
# Assemble all jars needed for solr
|
in
|
||||||
solrJars = pkgs.stdenv.mkDerivation {
|
|
||||||
name = "solr-jars";
|
|
||||||
|
|
||||||
src = pkgs.fetchurl {
|
|
||||||
url = http://archive.apache.org/dist/tomcat/tomcat-5/v5.5.36/bin/apache-tomcat-5.5.36.tar.gz;
|
|
||||||
sha256 = "01mzvh53wrs1p2ym765jwd00gl6kn8f9k3nhdrnhdqr8dhimfb2p";
|
|
||||||
};
|
|
||||||
|
|
||||||
installPhase = ''
|
|
||||||
mkdir -p $out/lib
|
|
||||||
cp common/lib/*.jar $out/lib/
|
|
||||||
ln -s ${pkgs.ant}/lib/ant/lib/ant.jar $out/lib/
|
|
||||||
ln -s ${cfg.solrPackage}/lib/ext/* $out/lib/
|
|
||||||
ln -s ${pkgs.jdk.home}/lib/tools.jar $out/lib/
|
|
||||||
'' + optionalString (cfg.extraJars != []) ''
|
|
||||||
for f in ${concatStringsSep " " cfg.extraJars}; do
|
|
||||||
cp $f $out/lib
|
|
||||||
done
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
in {
|
|
||||||
|
|
||||||
|
{
|
||||||
options = {
|
options = {
|
||||||
services.solr = {
|
services.solr = {
|
||||||
enable = mkOption {
|
enable = mkEnableOption "Enables the solr service.";
|
||||||
type = types.bool;
|
|
||||||
default = false;
|
|
||||||
description = ''
|
|
||||||
Enables the solr service.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
javaPackage = mkOption {
|
package = mkOption {
|
||||||
type = types.package;
|
|
||||||
default = pkgs.jre;
|
|
||||||
defaultText = "pkgs.jre";
|
|
||||||
description = ''
|
|
||||||
Which Java derivation to use for running solr.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
solrPackage = mkOption {
|
|
||||||
type = types.package;
|
type = types.package;
|
||||||
default = pkgs.solr;
|
default = pkgs.solr;
|
||||||
defaultText = "pkgs.solr";
|
defaultText = "pkgs.solr";
|
||||||
description = ''
|
description = "Which Solr package to use.";
|
||||||
Which solr derivation to use for running solr.
|
|
||||||
'';
|
|
||||||
};
|
};
|
||||||
|
|
||||||
extraJars = mkOption {
|
port = mkOption {
|
||||||
type = types.listOf types.path;
|
type = types.int;
|
||||||
default = [];
|
default = 8983;
|
||||||
description = ''
|
description = "Port on which Solr is ran.";
|
||||||
List of paths pointing to jars. Jars are copied to commonLibFolder to be available to java/solr.
|
|
||||||
'';
|
|
||||||
};
|
};
|
||||||
|
|
||||||
log4jConfiguration = mkOption {
|
stateDir = mkOption {
|
||||||
type = types.lines;
|
type = types.path;
|
||||||
default = ''
|
default = "/var/lib/solr";
|
||||||
log4j.rootLogger=INFO, stdout
|
description = "The solr home directory containing config, data, and logging files.";
|
||||||
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
|
||||||
log4j.appender.stdout.Target=System.out
|
|
||||||
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
|
||||||
log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
-      '';
-      description = ''
-        Contents of the <literal>log4j.properties</literal> used. By default,
-        everything is logged to stdout (picked up by systemd) with level INFO.
-      '';
-    };
-
-    user = mkOption {
-      type = types.str;
-      description = ''
-        The user that should run the solr process and.
-        the working directories.
-      '';
-    };
-
-    group = mkOption {
-      type = types.str;
-      description = ''
-        The group that will own the working directory.
-      '';
-    };
-
-    solrHome = mkOption {
-      type = types.str;
-      description = ''
-        The solr home directory. It is your own responsibility to
-        make sure this directory contains a working solr configuration,
-        and is writeable by the the user running the solr service.
-        Failing to do so, the solr will not start properly.
-      '';
-    };
     };

     extraJavaOptions = mkOption {
       type = types.listOf types.str;
       default = [];
-      description = ''
-        Extra command line options given to the java process running
-        solr.
-      '';
+      description = "Extra command line options given to the java process running Solr.";
     };

-    extraWinstoneOptions = mkOption {
-      type = types.listOf types.str;
-      default = [];
-      description = ''
-        Extra command line options given to the Winstone, which is
-        the servlet container hosting solr.
-      '';
+    user = mkOption {
+      type = types.str;
+      default = "solr";
+      description = "User under which Solr is ran.";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "solr";
+      description = "Group under which Solr is ran.";
     };
   };
 };

 config = mkIf cfg.enable {

-    services.winstone.solr = {
-      serviceName = "solr";
-      inherit (cfg) user group javaPackage;
-      warFile = "${cfg.solrPackage}/lib/solr.war";
-      extraOptions = [
-        "--commonLibFolder=${solrJars}/lib"
-        "--useJasper"
-      ] ++ cfg.extraWinstoneOptions;
-      extraJavaOptions = [
-        "-Dsolr.solr.home=${cfg.solrHome}"
-        "-Dlog4j.configuration=file://${pkgs.writeText "log4j.properties" cfg.log4jConfiguration}"
-      ] ++ cfg.extraJavaOptions;
-    };
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.solr = {
+      after = [ "network.target" "remote-fs.target" "nss-lookup.target" "systemd-journald-dev-log.socket" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        SOLR_HOME = "${cfg.stateDir}/data";
+        LOG4J_PROPS = "${cfg.stateDir}/log4j2.xml";
+        SOLR_LOGS_DIR = "${cfg.stateDir}/logs";
+        SOLR_PORT = "${toString cfg.port}";
+      };
+
+      path = with pkgs; [
+        gawk
+        procps
+      ];
+
+      preStart = ''
+        mkdir -p "${cfg.stateDir}/data";
+        mkdir -p "${cfg.stateDir}/logs";
+
+        if ! test -e "${cfg.stateDir}/data/solr.xml"; then
+          install -D -m0640 ${cfg.package}/server/solr/solr.xml "${cfg.stateDir}/data/solr.xml"
+          install -D -m0640 ${cfg.package}/server/solr/zoo.cfg "${cfg.stateDir}/data/zoo.cfg"
+        fi
+
+        if ! test -e "${cfg.stateDir}/log4j2.xml"; then
+          install -D -m0640 ${cfg.package}/server/resources/log4j2.xml "${cfg.stateDir}/log4j2.xml"
+        fi
+      '';
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart="${cfg.package}/bin/solr start -f -a \"${concatStringsSep " " cfg.extraJavaOptions}\"";
+        ExecStop="${cfg.package}/bin/solr stop";
+      };
+    };
+
+    users.users = optionalAttrs (cfg.user == "solr") (singleton
+      { name = "solr";
+        group = cfg.group;
+        home = cfg.stateDir;
+        createHome = true;
+        uid = config.ids.uids.solr;
+      });
+
+    users.groups = optionalAttrs (cfg.group == "solr") (singleton
+      { name = "solr";
+        gid = config.ids.gids.solr;
+      });

   };
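For orientation, the reworked Solr module is driven by the options visible in this hunk (enable, package, port, stateDir, user, group, extraJavaOptions). A minimal configuration against it would look roughly like the sketch below; it is not part of the change itself and simply keeps the defaults shown above.

    # Sketch only, assuming the option set introduced in this hunk.
    services.solr.enable = true;
    # port, stateDir and extraJavaOptions keep their module defaults;
    # user and group both default to "solr".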
@ -46,7 +46,7 @@ let
   configFile = pkgs.writeText "nginx.conf" ''
     user ${cfg.user} ${cfg.group};
-    error_log stderr;
+    error_log ${cfg.logError};
     daemon off;

     ${cfg.config}
@ -341,6 +341,35 @@ in
       ";
     };

+    logError = mkOption {
+      default = "stderr";
+      description = "
+        Configures logging.
+        The first parameter defines a file that will store the log. The
+        special value stderr selects the standard error file. Logging to
+        syslog can be configured by specifying the “syslog:” prefix.
+        The second parameter determines the level of logging, and can be
+        one of the following: debug, info, notice, warn, error, crit,
+        alert, or emerg. Log levels above are listed in the order of
+        increasing severity. Setting a certain log level will cause all
+        messages of the specified and more severe log levels to be logged.
+        If this parameter is omitted then error is used.
+      ";
+    };
+
+    preStart = mkOption {
+      type = types.lines;
+      default = ''
+        test -d ${cfg.stateDir}/logs || mkdir -m 750 -p ${cfg.stateDir}/logs
+        test `stat -c %a ${cfg.stateDir}` = "750" || chmod 750 ${cfg.stateDir}
+        test `stat -c %a ${cfg.stateDir}/logs` = "750" || chmod 750 ${cfg.stateDir}/logs
+        chown -R ${cfg.user}:${cfg.group} ${cfg.stateDir}
+      '';
+      description = "
+        Shell commands executed before the service's nginx is started.
+      ";
+    };
+
     config = mkOption {
       default = "";
       description = "
@ -608,9 +637,7 @@ in
       stopIfChanged = false;
       preStart =
         ''
-          mkdir -p ${cfg.stateDir}/logs
-          chmod 700 ${cfg.stateDir}
-          chown -R ${cfg.user}:${cfg.group} ${cfg.stateDir}
+          ${cfg.preStart}
           ${cfg.package}/bin/nginx -c ${configFile} -p ${cfg.stateDir} -t
         '';
       serviceConfig = {
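Taken together, the new logError and preStart options let a deployment redirect nginx's error log and override the pre-start shell snippet. The value below is an illustrative assumption, not a default introduced by this change.

    # Sketch only: send nginx errors to the local syslog socket at debug level.
    services.nginx.enable = true;
    services.nginx.logError = "syslog:server=unix:/dev/log debug";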
@ -22,7 +22,7 @@ let
     # This wrapper ensures that we actually get themes
     makeWrapper ${pkgs.lightdm_gtk_greeter}/sbin/lightdm-gtk-greeter \
       $out/greeter \
-      --prefix PATH : "${pkgs.glibc.bin}/bin" \
+      --prefix PATH : "${lib.getBin pkgs.stdenv.cc.libc}/bin" \
       --set GDK_PIXBUF_MODULE_FILE "${pkgs.librsvg.out}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache" \
       --set GTK_PATH "${theme}:${pkgs.gtk3.out}" \
       --set GTK_EXE_PREFIX "${theme}" \
@ -21,7 +21,8 @@ let
     [ coreutils
       gnugrep
       findutils
-      glibc # needed for getent
+      getent
+      stdenv.cc.libc # nscd in update-users-groups.pl
       shadow
       nettools # needed for hostname
       utillinux # needed for mount and mountpoint
@ -147,7 +147,7 @@ let
       ${config.boot.initrd.extraUtilsCommands}

       # Copy ld manually since it isn't detected correctly
-      cp -pv ${pkgs.glibc.out}/lib/ld*.so.? $out/lib
+      cp -pv ${pkgs.stdenv.cc.libc.out}/lib/ld*.so.? $out/lib

       # Copy all of the needed libraries
       find $out/bin $out/lib -type f | while read BIN; do
@ -112,6 +112,7 @@ in {

     environment.etc."systemd/nspawn".source = generateUnits "nspawn" units [] [];

+    systemd.targets."multi-user".wants = [ "machines.target "];
   };

 }
@ -387,7 +387,7 @@ let

   logindHandlerType = types.enum [
     "ignore" "poweroff" "reboot" "halt" "kexec" "suspend"
-    "hibernate" "hybrid-sleep" "lock"
+    "hibernate" "hybrid-sleep" "suspend-then-hibernate" "lock"
   ];

 in
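With the enum widened, logind-backed options typed with logindHandlerType can select the new behaviour. A plausible (hypothetical) use on a laptop would be:

    # Sketch only; assumes services.logind.lidSwitch uses logindHandlerType.
    services.logind.lidSwitch = "suspend-then-hibernate";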
@ -587,6 +587,15 @@ in
|
|||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
|
services.journald.forwardToSyslog = mkOption {
|
||||||
|
default = config.services.rsyslogd.enable || config.services.syslog-ng.enable;
|
||||||
|
defaultText = "config.services.rsyslogd.enable || config.services.syslog-ng.enable";
|
||||||
|
type = types.bool;
|
||||||
|
description = ''
|
||||||
|
Whether to forward log messages to syslog.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
services.logind.extraConfig = mkOption {
|
services.logind.extraConfig = mkOption {
|
||||||
default = "";
|
default = "";
|
||||||
type = types.lines;
|
type = types.lines;
|
||||||
@ -754,6 +763,9 @@ in
|
|||||||
ForwardToConsole=yes
|
ForwardToConsole=yes
|
||||||
TTYPath=${config.services.journald.console}
|
TTYPath=${config.services.journald.console}
|
||||||
''}
|
''}
|
||||||
|
${optionalString (config.services.journald.forwardToSyslog) ''
|
||||||
|
ForwardToSyslog=yes
|
||||||
|
''}
|
||||||
${config.services.journald.extraConfig}
|
${config.services.journald.extraConfig}
|
||||||
'';
|
'';
|
||||||
|
|
||||||
|
@ -53,7 +53,7 @@ let cfg = config.ec2; in
|
|||||||
# Mount all formatted ephemeral disks and activate all swap devices.
|
# Mount all formatted ephemeral disks and activate all swap devices.
|
||||||
# We cannot do this with the ‘fileSystems’ and ‘swapDevices’ options
|
# We cannot do this with the ‘fileSystems’ and ‘swapDevices’ options
|
||||||
# because the set of devices is dependent on the instance type
|
# because the set of devices is dependent on the instance type
|
||||||
# (e.g. "m1.large" has one ephemeral filesystem and one swap device,
|
# (e.g. "m1.small" has one ephemeral filesystem and one swap device,
|
||||||
# while "m1.large" has two ephemeral filesystems and no swap
|
# while "m1.large" has two ephemeral filesystems and no swap
|
||||||
# devices). Also, put /tmp and /var on /disk0, since it has a lot
|
# devices). Also, put /tmp and /var on /disk0, since it has a lot
|
||||||
# more space than the root device. Similarly, "move" /nix to /disk0
|
# more space than the root device. Similarly, "move" /nix to /disk0
|
||||||
|
@ -243,6 +243,9 @@ let
|
|||||||
|
|
||||||
Restart = "on-failure";
|
Restart = "on-failure";
|
||||||
|
|
||||||
|
Slice = "machine.slice";
|
||||||
|
Delegate = true;
|
||||||
|
|
||||||
# Hack: we don't want to kill systemd-nspawn, since we call
|
# Hack: we don't want to kill systemd-nspawn, since we call
|
||||||
# "machinectl poweroff" in preStop to shut down the
|
# "machinectl poweroff" in preStop to shut down the
|
||||||
# container cleanly. But systemd requires sending a signal
|
# container cleanly. But systemd requires sending a signal
|
||||||
@ -606,7 +609,7 @@ in
|
|||||||
{ config =
|
{ config =
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{ services.postgresql.enable = true;
|
{ services.postgresql.enable = true;
|
||||||
services.postgresql.package = pkgs.postgresql96;
|
services.postgresql.package = pkgs.postgresql_9_6;
|
||||||
|
|
||||||
system.stateVersion = "17.03";
|
system.stateVersion = "17.03";
|
||||||
};
|
};
|
||||||
@ -657,6 +660,8 @@ in
|
|||||||
serviceConfig = serviceDirectives dummyConfig;
|
serviceConfig = serviceDirectives dummyConfig;
|
||||||
};
|
};
|
||||||
in {
|
in {
|
||||||
|
systemd.targets."multi-user".wants = [ "machines.target" ];
|
||||||
|
|
||||||
systemd.services = listToAttrs (filter (x: x.value != null) (
|
systemd.services = listToAttrs (filter (x: x.value != null) (
|
||||||
# The generic container template used by imperative containers
|
# The generic container template used by imperative containers
|
||||||
[{ name = "container@"; value = unit; }]
|
[{ name = "container@"; value = unit; }]
|
||||||
@ -680,7 +685,7 @@ in
|
|||||||
} // (
|
} // (
|
||||||
if config.autoStart then
|
if config.autoStart then
|
||||||
{
|
{
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "machines.target" ];
|
||||||
wants = [ "network.target" ];
|
wants = [ "network.target" ];
|
||||||
after = [ "network.target" ];
|
after = [ "network.target" ];
|
||||||
restartTriggers = [ config.path ];
|
restartTriggers = [ config.path ];
|
||||||
|
135
nixos/modules/virtualisation/docker-preloader.nix
Normal file
135
nixos/modules/virtualisation/docker-preloader.nix
Normal file
@ -0,0 +1,135 @@
|
|||||||
|
{ config, lib, pkgs, ... }:
|
||||||
|
|
||||||
|
with lib;
|
||||||
|
with builtins;
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.virtualisation;
|
||||||
|
|
||||||
|
sanitizeImageName = image: replaceStrings ["/"] ["-"] image.imageName;
|
||||||
|
hash = drv: head (split "-" (baseNameOf drv.outPath));
|
||||||
|
# The label of an ext4 FS is limited to 16 bytes
|
||||||
|
labelFromImage = image: substring 0 16 (hash image);
|
||||||
|
|
||||||
|
# The Docker image is loaded and some files from /var/lib/docker/
|
||||||
|
# are written into a qcow image.
|
||||||
|
preload = image: pkgs.vmTools.runInLinuxVM (
|
||||||
|
pkgs.runCommand "docker-preload-image-${sanitizeImageName image}" {
|
||||||
|
buildInputs = with pkgs; [ docker e2fsprogs utillinux curl kmod ];
|
||||||
|
preVM = pkgs.vmTools.createEmptyImage {
|
||||||
|
size = cfg.dockerPreloader.qcowSize;
|
||||||
|
fullName = "docker-deamon-image.qcow2";
|
||||||
|
};
|
||||||
|
}
|
||||||
|
''
|
||||||
|
mkfs.ext4 /dev/vda
|
||||||
|
e2label /dev/vda ${labelFromImage image}
|
||||||
|
mkdir -p /var/lib/docker
|
||||||
|
mount -t ext4 /dev/vda /var/lib/docker
|
||||||
|
|
||||||
|
modprobe overlay
|
||||||
|
|
||||||
|
# from https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
|
||||||
|
mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
|
||||||
|
cd /sys/fs/cgroup
|
||||||
|
for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
|
||||||
|
mkdir -p $sys
|
||||||
|
if ! mountpoint -q $sys; then
|
||||||
|
if ! mount -n -t cgroup -o $sys cgroup $sys; then
|
||||||
|
rmdir $sys || true
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
dockerd -H tcp://127.0.0.1:5555 -H unix:///var/run/docker.sock &
|
||||||
|
|
||||||
|
until $(curl --output /dev/null --silent --connect-timeout 2 http://127.0.0.1:5555); do
|
||||||
|
printf '.'
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
docker load -i ${image}
|
||||||
|
|
||||||
|
kill %1
|
||||||
|
find /var/lib/docker/ -maxdepth 1 -mindepth 1 -not -name "image" -not -name "overlay2" | xargs rm -rf
|
||||||
|
'');
|
||||||
|
|
||||||
|
preloadedImages = map preload cfg.dockerPreloader.images;
|
||||||
|
|
||||||
|
in
|
||||||
|
|
||||||
|
{
|
||||||
|
options.virtualisation.dockerPreloader = {
|
||||||
|
images = mkOption {
|
||||||
|
default = [ ];
|
||||||
|
type = types.listOf types.package;
|
||||||
|
description =
|
||||||
|
''
|
||||||
|
A list of Docker images to preload (in the /var/lib/docker directory).
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
qcowSize = mkOption {
|
||||||
|
default = 1024;
|
||||||
|
type = types.int;
|
||||||
|
description =
|
||||||
|
''
|
||||||
|
The size (MB) of qcow files.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = {
|
||||||
|
assertions = [{
|
||||||
|
# If docker.storageDriver is null, Docker choose the storage
|
||||||
|
# driver. So, in this case, we cannot be sure overlay2 is used.
|
||||||
|
assertion = cfg.dockerPreloader.images == []
|
||||||
|
|| cfg.docker.storageDriver == "overlay2"
|
||||||
|
|| cfg.docker.storageDriver == "overlay"
|
||||||
|
|| cfg.docker.storageDriver == null;
|
||||||
|
message = "The Docker image Preloader only works with overlay2 storage driver!";
|
||||||
|
}];
|
||||||
|
|
||||||
|
virtualisation.qemu.options =
|
||||||
|
map (path: "-drive if=virtio,file=${path}/disk-image.qcow2,readonly,media=cdrom,format=qcow2")
|
||||||
|
preloadedImages;
|
||||||
|
|
||||||
|
|
||||||
|
# All attached QCOW files are mounted and their contents are linked
|
||||||
|
# to /var/lib/docker/ in order to make image available.
|
||||||
|
systemd.services.docker-preloader = {
|
||||||
|
description = "Preloaded Docker images";
|
||||||
|
wantedBy = ["docker.service"];
|
||||||
|
after = ["network.target"];
|
||||||
|
path = with pkgs; [ mount rsync jq ];
|
||||||
|
script = ''
|
||||||
|
mkdir -p /var/lib/docker/overlay2/l /var/lib/docker/image/overlay2
|
||||||
|
echo '{}' > /tmp/repositories.json
|
||||||
|
|
||||||
|
for i in ${concatStringsSep " " (map labelFromImage cfg.dockerPreloader.images)}; do
|
||||||
|
mkdir -p /mnt/docker-images/$i
|
||||||
|
|
||||||
|
# The ext4 label is limited to 16 bytes
|
||||||
|
mount /dev/disk/by-label/$(echo $i | cut -c1-16) -o ro,noload /mnt/docker-images/$i
|
||||||
|
|
||||||
|
find /mnt/docker-images/$i/overlay2/ -maxdepth 1 -mindepth 1 -not -name l\
|
||||||
|
-exec ln -s '{}' /var/lib/docker/overlay2/ \;
|
||||||
|
cp -P /mnt/docker-images/$i/overlay2/l/* /var/lib/docker/overlay2/l/
|
||||||
|
|
||||||
|
rsync -a /mnt/docker-images/$i/image/ /var/lib/docker/image/
|
||||||
|
|
||||||
|
# Accumulate image definitions
|
||||||
|
cp /tmp/repositories.json /tmp/repositories.json.tmp
|
||||||
|
jq -s '.[0] * .[1]' \
|
||||||
|
/tmp/repositories.json.tmp \
|
||||||
|
/mnt/docker-images/$i/image/overlay2/repositories.json \
|
||||||
|
> /tmp/repositories.json
|
||||||
|
done
|
||||||
|
|
||||||
|
mv /tmp/repositories.json /var/lib/docker/image/overlay2/repositories.json
|
||||||
|
'';
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
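In practice the module is driven entirely by its two options. The sketch below mirrors the usage in the accompanying nixos/tests/docker-preloader.nix test added by this commit; the example images come from dockerTools.

    # Sketch, matching the test added further down.
    virtualisation.docker.enable = true;
    virtualisation.dockerPreloader.images = [
      pkgs.dockerTools.examples.nix
      pkgs.dockerTools.examples.bash
    ];
    # virtualisation.dockerPreloader.qcowSize defaults to 1024 (MB per image).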
@ -144,7 +144,6 @@ in
       path = with pkgs; [ iproute ];
       serviceConfig = {
         ExecStart = "${gce}/bin/google_network_daemon --debug";
-        Type = "oneshot";
       };
     };

@ -196,6 +196,8 @@ in {
       wantedBy = [ "multi-user.target" ];
       path = with pkgs; [ coreutils libvirt gawk ];
       restartIfChanged = false;
+
+      environment.ON_SHUTDOWN = "${cfg.onShutdown}";
     };

     systemd.sockets.virtlogd = {
@ -185,7 +185,10 @@ let
 in

 {
-  imports = [ ../profiles/qemu-guest.nix ];
+  imports = [
+    ../profiles/qemu-guest.nix
+    ./docker-preloader.nix
+  ];

   options = {

@ -12,7 +12,7 @@ in {
   virtualbox = {
     baseImageSize = mkOption {
       type = types.int;
-      default = 10 * 1024;
+      default = 50 * 1024;
       description = ''
         The size of the VirtualBox base image in MiB.
       '';
@ -61,7 +61,7 @@ in {
           export HOME=$PWD
           export PATH=${pkgs.virtualbox}/bin:$PATH

-          echo "creating VirtualBox pass-through disk wrapper (no copying invovled)..."
+          echo "creating VirtualBox pass-through disk wrapper (no copying involved)..."
           VBoxManage internalcommands createrawvmdk -filename disk.vmdk -rawdisk $diskImage

           echo "creating VirtualBox VM..."
@ -72,9 +72,9 @@ in {
             --memory ${toString cfg.memorySize} --acpi on --vram 32 \
             ${optionalString (pkgs.stdenv.hostPlatform.system == "i686-linux") "--pae on"} \
             --nictype1 virtio --nic1 nat \
-            --audiocontroller ac97 --audio alsa \
+            --audiocontroller ac97 --audio alsa --audioout on \
             --rtcuseutc on \
-            --usb on --mouse usbtablet
+            --usb on --usbehci on --mouse usbtablet
           VBoxManage storagectl "$vmName" --name SATA --add sata --portcount 4 --bootable on --hostiocache on
           VBoxManage storageattach "$vmName" --storagectl SATA --port 0 --device 0 --type hdd \
             --medium disk.vmdk
@ -82,7 +82,7 @@ in {
           echo "exporting VirtualBox VM..."
           mkdir -p $out
           fn="$out/${cfg.vmFileName}"
-          VBoxManage export "$vmName" --output "$fn"
+          VBoxManage export "$vmName" --output "$fn" --options manifest

           rm -v $diskImage
@ -283,6 +283,7 @@ in rec {
   tests.docker-tools = callTestOnMatchingSystems ["x86_64-linux"] tests/docker-tools.nix {};
   tests.docker-tools-overlay = callTestOnMatchingSystems ["x86_64-linux"] tests/docker-tools-overlay.nix {};
   tests.docker-edge = callTestOnMatchingSystems ["x86_64-linux"] tests/docker-edge.nix {};
+  tests.docker-preloader = callTestOnMatchingSystems ["x86_64-linux"] tests/docker-preloader.nix {};
   tests.docker-registry = callTest tests/docker-registry.nix {};
   tests.dovecot = callTest tests/dovecot.nix {};
   tests.dnscrypt-proxy = callTestOnMatchingSystems ["x86_64-linux"] tests/dnscrypt-proxy.nix {};
@ -300,7 +301,7 @@ in rec {
   tests.fsck = callTest tests/fsck.nix {};
   tests.fwupd = callTest tests/fwupd.nix {};
   tests.gdk-pixbuf = callTest tests/gdk-pixbuf.nix {};
-  #tests.gitlab = callTest tests/gitlab.nix {};
+  tests.gitlab = callTest tests/gitlab.nix {};
   tests.gitolite = callTest tests/gitolite.nix {};
   tests.gjs = callTest tests/gjs.nix {};
   tests.gocd-agent = callTest tests/gocd-agent.nix {};
@ -399,6 +400,7 @@ in rec {
   tests.radicale = callTest tests/radicale.nix {};
   tests.redmine = callTest tests/redmine.nix {};
   tests.rspamd = callSubTests tests/rspamd.nix {};
+  tests.rsyslogd = callSubTests tests/rsyslogd.nix {};
   tests.runInMachine = callTest tests/run-in-machine.nix {};
   tests.rxe = callTest tests/rxe.nix {};
   tests.samba = callTest tests/samba.nix {};
@ -408,6 +410,7 @@ in rec {
   tests.slurm = callTest tests/slurm.nix {};
   tests.smokeping = callTest tests/smokeping.nix {};
   tests.snapper = callTest tests/snapper.nix {};
+  tests.solr = callTest tests/solr.nix {};
   #tests.statsd = callTest tests/statsd.nix {}; # statsd is broken: #45946
   tests.strongswan-swanctl = callTest tests/strongswan-swanctl.nix {};
   tests.sudo = callTest tests/sudo.nix {};
@ -467,7 +470,7 @@ in rec {
     { services.httpd.enable = true;
       services.httpd.adminAddr = "foo@example.org";
       services.postgresql.enable = true;
-      services.postgresql.package = pkgs.postgresql93;
+      services.postgresql.package = pkgs.postgresql_9_3;
       environment.systemPackages = [ pkgs.php ];
     });
   };
@ -12,7 +12,6 @@ import ./make-test.nix ({pkgs, ...}: rec {
   };

   networking = {
-    firewall.allowPing = true;
     useDHCP = false;
     interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
       { address = "192.168.1.1"; prefixLength = 24; }
@ -12,8 +12,10 @@ with pkgs.lib;

 mapAttrs (channel: chromiumPkg: makeTest rec {
   name = "chromium-${channel}";
-  meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ aszlig ];
+  meta = {
+    maintainers = with maintainers; [ aszlig ];
+    # https://github.com/NixOS/hydra/issues/591#issuecomment-435125621
+    inherit (chromiumPkg.meta) timeout;
   };

   enableOCR = true;
@ -166,7 +168,7 @@ mapAttrs (channel: chromiumPkg: makeTest rec {

     my $clipboard = $machine->succeed(ru "${pkgs.xclip}/bin/xclip -o");
     die "sandbox not working properly: $clipboard"
-      unless $clipboard =~ /namespace sandbox.*yes/mi
+      unless $clipboard =~ /layer 1 sandbox.*namespace/mi
          && $clipboard =~ /pid namespaces.*yes/mi
          && $clipboard =~ /network namespaces.*yes/mi
          && $clipboard =~ /seccomp.*sandbox.*yes/mi
@ -184,7 +186,7 @@ mapAttrs (channel: chromiumPkg: makeTest rec {

     my $clipboard = $machine->succeed(ru "${pkgs.xclip}/bin/xclip -o");
     die "copying twice in a row does not work properly: $clipboard"
-      unless $clipboard =~ /namespace sandbox.*yes/mi
+      unless $clipboard =~ /layer 1 sandbox.*namespace/mi
          && $clipboard =~ /pid namespaces.*yes/mi
          && $clipboard =~ /network namespaces.*yes/mi
          && $clipboard =~ /seccomp.*sandbox.*yes/mi
@ -13,9 +13,6 @@ let

       # CJDNS output is incompatible with the XML log.
       systemd.services.cjdns.serviceConfig.StandardOutput = "null";
-      #networking.firewall.enable = true;
-      networking.firewall.allowPing = true;
-      #networking.firewall.rejectPackets = true;
     };

 in
@ -42,7 +42,6 @@ import ./make-test.nix ({ pkgs, ...} : {
       { services.httpd.enable = true;
         services.httpd.adminAddr = "foo@example.org";
         networking.firewall.allowedTCPPorts = [ 80 ];
-        networking.firewall.allowPing = true;
       };
   };

@ -43,7+43,6 @@ import ./make-test.nix ({ pkgs, ...} : {
     config =
       {
         networking.firewall.allowedTCPPorts = [ 80 ];
-        networking.firewall.allowPing = true;
       };
   };

@ -20,7 +20,6 @@ import ./make-test.nix ({ pkgs, ...} : {
       { services.httpd.enable = true;
         services.httpd.adminAddr = "foo@example.org";
         networking.firewall.allowedTCPPorts = [ 80 ];
-        networking.firewall.allowPing = true;
         system.stateVersion = "18.03";
       };
   };
@ -25,7 +25,6 @@ import ./make-test.nix ({ pkgs, ...} : {
       { services.httpd.enable = true;
         services.httpd.adminAddr = "foo@example.org";
         networking.firewall.allowedTCPPorts = [ 80 ];
-        networking.firewall.allowPing = true;
       };
   };

@ -28,7 +28,6 @@ import ./make-test.nix ({ pkgs, ...} : {
       { services.httpd.enable = true;
         services.httpd.adminAddr = "foo@example.org";
         networking.firewall.allowedTCPPorts = [ 80 ];
-        networking.firewall.allowPing = true;
       };
   };

@ -10,7 +10,6 @@ let
       hostBridge = "br0";
       config = {
         networking.firewall.enable = false;
-        networking.firewall.allowPing = true;
         networking.interfaces.eth0.ipv4.addresses = [
           { address = "192.168.1.122"; prefixLength = 24; }
         ];
nixos/tests/docker-preloader.nix (new file, 27 lines)
@ -0,0 +1,27 @@
import ./make-test.nix ({ pkgs, ...} : {
  name = "docker-preloader";
  meta = with pkgs.stdenv.lib.maintainers; {
    maintainers = [ lewo ];
  };

  nodes = {
    docker =
      { pkgs, ... }:
        {
          virtualisation.docker.enable = true;
          virtualisation.dockerPreloader.images = [ pkgs.dockerTools.examples.nix pkgs.dockerTools.examples.bash ];

          services.openssh.enable = true;
          services.openssh.permitRootLogin = "yes";
          services.openssh.extraConfig = "PermitEmptyPasswords yes";
          users.extraUsers.root.password = "";
        };
  };
  testScript = ''
    startAll;

    $docker->waitForUnit("sockets.target");
    $docker->succeed("docker run nix nix-store --version");
    $docker->succeed("docker run bash bash --version");
  '';
})
@ -1,14 +1,18 @@
 # This test runs gitlab and checks if it works

-import ./make-test.nix ({ pkgs, ...} : {
+import ./make-test.nix ({ pkgs, lib, ...} : with lib; {
   name = "gitlab";
   meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ domenkozar offline ];
+    maintainers = [ globin ];
   };

   nodes = {
     gitlab = { ... }: {
-      virtualisation.memorySize = 768;
+      virtualisation.memorySize = 4096;
+      systemd.services.gitlab.serviceConfig.Restart = mkForce "no";
+      systemd.services.gitlab-workhorse.serviceConfig.Restart = mkForce "no";
+      systemd.services.gitaly.serviceConfig.Restart = mkForce "no";
+      systemd.services.gitlab-sidekiq.serviceConfig.Restart = mkForce "no";
+
       services.nginx = {
         enable = true;
@ -19,10 +23,10 @@ import ./make-test.nix ({ pkgs, ...} : {
         };
       };

-      systemd.services.gitlab.serviceConfig.TimeoutStartSec = "10min";
       services.gitlab = {
         enable = true;
         databasePassword = "dbPassword";
+        initialRootPassword = "notproduction";
         secrets = {
           secret = "secret";
           otp = "otpsecret";
@ -65,8 +69,12 @@ import ./make-test.nix ({ pkgs, ...} : {

   testScript = ''
     $gitlab->start();
+    $gitlab->waitForUnit("gitaly.service");
+    $gitlab->waitForUnit("gitlab-workhorse.service");
     $gitlab->waitForUnit("gitlab.service");
     $gitlab->waitForUnit("gitlab-sidekiq.service");
-    $gitlab->waitUntilSucceeds("curl http://localhost:80/users/sign_in");
+    $gitlab->waitForFile("/var/gitlab/state/tmp/sockets/gitlab.socket");
+    $gitlab->waitUntilSucceeds("curl -sSf http://localhost/users/sign_in");
+    $gitlab->succeed("${pkgs.sudo}/bin/sudo -u gitlab -H gitlab-rake gitlab:check 1>&2")
   '';
 })
@ -74,7 +74,6 @@ in {
       print "$log\n";

       # Check that no errors were logged
-      # The timer can get out of sync due to Hydra's load, so this error is ignored
-      $hass->fail("cat ${configDir}/home-assistant.log | grep -vF 'Timer got out of sync' | grep -qF ERROR");
+      $hass->fail("cat ${configDir}/home-assistant.log | grep -qF ERROR");
     '';
   })
@ -11,7 +11,6 @@ import ./make-test.nix ({ pkgs, lib, withFirewall, withConntrackHelpers ? false,
       lib.mkMerge [
         { virtualisation.vlans = [ 2 1 ];
           networking.firewall.enable = withFirewall;
-          networking.firewall.allowPing = true;
           networking.nat.internalIPs = [ "192.168.1.0/24" ];
           networking.nat.externalInterface = "eth1";
         }
@ -33,7 +32,6 @@ import ./make-test.nix ({ pkgs, lib, withFirewall, withConntrackHelpers ? false,
       { pkgs, nodes, ... }:
       lib.mkMerge [
         { virtualisation.vlans = [ 1 ];
-          networking.firewall.allowPing = true;
           networking.defaultGateway =
             (pkgs.lib.head nodes.router.config.networking.interfaces.eth2.ipv4.addresses).address;
         }
@ -17,7 +17,6 @@ let
       networking = {
         useDHCP = false;
         useNetworkd = networkd;
-        firewall.allowPing = true;
         firewall.checkReversePath = true;
         firewall.allowedUDPPorts = [ 547 ];
         interfaces = mkOverride 0 (listToAttrs (flip map vlanIfs (n:
@ -86,7 +85,6 @@ let
       virtualisation.vlans = [ 1 2 ];
       networking = {
         useNetworkd = networkd;
-        firewall.allowPing = true;
         useDHCP = false;
         defaultGateway = "192.168.1.1";
         interfaces.eth1.ipv4.addresses = mkOverride 0 [
@ -139,7 +137,6 @@ let
       virtualisation.vlans = [ 1 2 ];
       networking = {
         useNetworkd = networkd;
-        firewall.allowPing = true;
         useDHCP = true;
         interfaces.eth1 = {
           ipv4.addresses = mkOverride 0 [ ];
@ -194,7 +191,6 @@ let
       virtualisation.vlans = [ 1 2 ];
       networking = {
         useNetworkd = networkd;
-        firewall.allowPing = true;
         useDHCP = false;
         interfaces.eth1 = {
           ipv4.addresses = mkOverride 0 [ ];
@ -234,7 +230,6 @@ let
       virtualisation.vlans = [ 1 2 ];
       networking = {
         useNetworkd = networkd;
-        firewall.allowPing = true;
         useDHCP = false;
         bonds.bond = {
           interfaces = [ "eth1" "eth2" ];
@ -271,7 +266,6 @@ let
       virtualisation.vlans = [ vlan ];
       networking = {
         useNetworkd = networkd;
-        firewall.allowPing = true;
         useDHCP = false;
         interfaces.eth1.ipv4.addresses = mkOverride 0
           [ { inherit address; prefixLength = 24; } ];
@ -285,7 +279,6 @@ let
       virtualisation.vlans = [ 1 2 ];
       networking = {
         useNetworkd = networkd;
-        firewall.allowPing = true;
         useDHCP = false;
         bridges.bridge.interfaces = [ "eth1" "eth2" ];
         interfaces.eth1.ipv4.addresses = mkOverride 0 [ ];
@ -329,7 +322,6 @@ let
         # reverse path filtering rules for the macvlan interface seem
         # to be incorrect, causing the test to fail. Disable temporarily.
         firewall.checkReversePath = false;
-        firewall.allowPing = true;
         useDHCP = true;
         macvlans.macvlan.interface = "eth1";
         interfaces.eth1.ipv4.addresses = mkOverride 0 [ ];
@ -415,7 +407,6 @@ let
       #virtualisation.vlans = [ 1 ];
       networking = {
         useNetworkd = networkd;
-        firewall.allowPing = true;
         useDHCP = false;
         vlans.vlan = {
           id = 1;
@ -17,11 +17,12 @@ import ./make-test.nix {
         extraServerArgs = [ "-v" ];
         serverConfiguration = ''
           listen on 0.0.0.0
+          action do_relay relay
           # DO NOT DO THIS IN PRODUCTION!
           # Setting up authentication requires a certificate which is painful in
           # a test environment, but THIS WOULD BE DANGEROUS OUTSIDE OF A
           # WELL-CONTROLLED ENVIRONMENT!
-          accept from any for any relay
+          match from any for any action do_relay
         '';
       };
     };
@ -41,8 +42,9 @@ import ./make-test.nix {
         extraServerArgs = [ "-v" ];
         serverConfiguration = ''
           listen on 0.0.0.0
-          accept from any for local deliver to mda \
+          action dovecot_deliver mda \
             "${pkgs.dovecot}/libexec/dovecot/deliver -d %{user.username}"
+          match from any for local action dovecot_deliver
         '';
       };
       services.dovecot2 = {
@ -26,31 +26,20 @@ import ./make-test.nix ({ pkgs, ...} :
       services.xserver.displayManager.sddm.theme = "breeze-ocr-theme";
       services.xserver.desktopManager.plasma5.enable = true;
       services.xserver.desktopManager.default = "plasma5";
+      services.xserver.displayManager.sddm.autoLogin = {
+        enable = true;
+        user = "alice";
+      };
       virtualisation.memorySize = 1024;
       environment.systemPackages = [ sddm_theme ];

-      # fontconfig-penultimate-0.3.3 -> 0.3.4 broke OCR apparently, but no idea why.
-      nixpkgs.config.packageOverrides = superPkgs: {
-        fontconfig-penultimate = superPkgs.fontconfig-penultimate.override {
-          version = "0.3.3";
-          sha256 = "1z76jbkb0nhf4w7fy647yyayqr4q02fgk6w58k0yi700p0m3h4c9";
-        };
       };
-    };
-
-  enableOCR = true;

   testScript = { nodes, ... }: let
     user = nodes.machine.config.users.users.alice;
     xdo = "${pkgs.xdotool}/bin/xdotool";
   in ''
     startAll;
-    # Wait for display manager to start
-    $machine->waitForText(qr/${user.description}/);
-    $machine->screenshot("sddm");
-
-    # Log in
-    $machine->sendChars("${user.password}\n");
+    # wait for log in
     $machine->waitForFile("/home/alice/.Xauthority");
     $machine->succeed("xauth merge ~alice/.Xauthority");

@ -9,7 +9,7 @@ import ./make-test.nix ({ pkgs, ...} : {
|
|||||||
{ pkgs, ... }:
|
{ pkgs, ... }:
|
||||||
|
|
||||||
{
|
{
|
||||||
services.postgresql = let mypg = pkgs.postgresql100; in {
|
services.postgresql = let mypg = pkgs.postgresql_11; in {
|
||||||
enable = true;
|
enable = true;
|
||||||
package = mypg;
|
package = mypg;
|
||||||
extraPlugins = [ (pkgs.postgis.override { postgresql = mypg; }) ];
|
extraPlugins = [ (pkgs.postgis.override { postgresql = mypg; }) ];
|
||||||
|
@ -66,7 +66,6 @@ import ./make-test.nix ({ pkgs, ... }:
|
|||||||
virtualisation.vlans = [ 3 ];
|
virtualisation.vlans = [ 3 ];
|
||||||
networking.defaultGateway = ifAddr nodes.router2 "eth1";
|
networking.defaultGateway = ifAddr nodes.router2 "eth1";
|
||||||
networking.firewall.allowedTCPPorts = [ 80 ];
|
networking.firewall.allowedTCPPorts = [ 80 ];
|
||||||
networking.firewall.allowPing = true;
|
|
||||||
services.httpd.enable = true;
|
services.httpd.enable = true;
|
||||||
services.httpd.adminAddr = "foo@example.com";
|
services.httpd.adminAddr = "foo@example.com";
|
||||||
};
|
};
|
||||||
@ -27,7 +27,7 @@ let
       $machine->succeed("id \"rspamd\" >/dev/null");
       ${checkSocket "/run/rspamd/rspamd.sock" "rspamd" "rspamd" "660" }
       sleep 10;
-      $machine->log($machine->succeed("cat /etc/rspamd.conf"));
+      $machine->log($machine->succeed("cat /etc/rspamd/rspamd.conf"));
       $machine->log($machine->succeed("systemctl cat rspamd.service"));
       $machine->log($machine->succeed("curl http://localhost:11334/auth"));
       $machine->log($machine->succeed("curl http://127.0.0.1:11334/auth"));
@ -55,7 +55,7 @@ in
       $machine->waitForFile("/run/rspamd.sock");
       ${checkSocket "/run/rspamd.sock" "root" "root" "600" }
       ${checkSocket "/run/rspamd-worker.sock" "root" "root" "666" }
-      $machine->log($machine->succeed("cat /etc/rspamd.conf"));
+      $machine->log($machine->succeed("cat /etc/rspamd/rspamd.conf"));
       $machine->log($machine->succeed("rspamc -h /run/rspamd-worker.sock stat"));
       $machine->log($machine->succeed("curl --unix-socket /run/rspamd-worker.sock http://localhost/ping"));
     '';
@ -86,9 +86,80 @@ in
       $machine->waitForFile("/run/rspamd.sock");
       ${checkSocket "/run/rspamd.sock" "root" "root" "600" }
       ${checkSocket "/run/rspamd-worker.sock" "root" "root" "666" }
-      $machine->log($machine->succeed("cat /etc/rspamd.conf"));
+      $machine->log($machine->succeed("cat /etc/rspamd/rspamd.conf"));
       $machine->log($machine->succeed("rspamc -h /run/rspamd-worker.sock stat"));
       $machine->log($machine->succeed("curl --unix-socket /run/rspamd-worker.sock http://localhost/ping"));
     '';
   };
+  customLuaRules = makeTest {
+    name = "rspamd-custom-lua-rules";
+    machine = {
+      environment.etc."tests/no-muh.eml".text = ''
+        From: Sheep1<bah@example.com>
+        To: Sheep2<mah@example.com>
+        Subject: Evil cows
+
+        I find cows to be evil don't you?
+      '';
+      environment.etc."tests/muh.eml".text = ''
+        From: Cow<cow@example.com>
+        To: Sheep2<mah@example.com>
+        Subject: Evil cows
+
+        Cows are majestic creatures don't Muh agree?
+      '';
+      services.rspamd = {
+        enable = true;
+        locals."groups.conf".text = ''
+          group "cows" {
+            symbol {
+              NO_MUH = {
+                weight = 1.0;
+                description = "Mails should not muh";
+              }
+            }
+          }
+        '';
+        localLuaRules = pkgs.writeText "rspamd.local.lua" ''
+          local rspamd_logger = require "rspamd_logger"
+          rspamd_config.NO_MUH = {
+            callback = function (task)
+              local parts = task:get_text_parts()
+              if parts then
+                for _,part in ipairs(parts) do
+                  local content = tostring(part:get_content())
+                  rspamd_logger.infox(rspamd_config, 'Found content %s', content)
+                  local found = string.find(content, "Muh");
+                  rspamd_logger.infox(rspamd_config, 'Found muh %s', tostring(found))
+                  if found then
+                    return true
+                  end
+                end
+              end
+              return false
+            end,
+            score = 5.0,
+            description = 'Allow no cows',
+            group = "cows",
+          }
+          rspamd_logger.infox(rspamd_config, 'Work dammit!!!')
+        '';
+      };
+    };
+    testScript = ''
+      ${initMachine}
+      $machine->waitForOpenPort(11334);
+      $machine->log($machine->succeed("cat /etc/rspamd/rspamd.conf"));
+      $machine->log($machine->succeed("cat /etc/rspamd/rspamd.local.lua"));
+      $machine->log($machine->succeed("cat /etc/rspamd/local.d/groups.conf"));
+      ${checkSocket "/run/rspamd/rspamd.sock" "rspamd" "rspamd" "660" }
+      $machine->log($machine->succeed("curl --unix-socket /run/rspamd/rspamd.sock http://localhost/ping"));
+      $machine->log($machine->succeed("rspamc -h 127.0.0.1:11334 stat"));
+      $machine->log($machine->succeed("cat /etc/tests/no-muh.eml | rspamc -h 127.0.0.1:11334"));
+      $machine->log($machine->succeed("cat /etc/tests/muh.eml | rspamc -h 127.0.0.1:11334 symbols"));
+      $machine->waitUntilSucceeds("journalctl -u rspamd | grep -i muh >&2");
+      $machine->log($machine->fail("cat /etc/tests/no-muh.eml | rspamc -h 127.0.0.1:11334 symbols | grep NO_MUH"));
+      $machine->log($machine->succeed("cat /etc/tests/muh.eml | rspamc -h 127.0.0.1:11334 symbols | grep NO_MUH"));
+    '';
+  };
 }
nixos/tests/rsyslogd.nix (new file, 38 lines)
@ -0,0 +1,38 @@
{ system ? builtins.currentSystem }:

with import ../lib/testing.nix { inherit system; };
with pkgs.lib;
{
  test1 = makeTest {
    name = "rsyslogd-test1";
    meta.maintainers = [ maintainers.aanderse ];

    machine =
      { config, pkgs, ... }:
      { services.rsyslogd.enable = true;
        services.journald.forwardToSyslog = false;
      };

    # ensure rsyslogd isn't receiving messages from journald if explicitly disabled
    testScript = ''
      $machine->waitForUnit("default.target");
      $machine->fail("test -f /var/log/messages");
    '';
  };

  test2 = makeTest {
    name = "rsyslogd-test2";
    meta.maintainers = [ maintainers.aanderse ];

    machine =
      { config, pkgs, ... }:
      { services.rsyslogd.enable = true;
      };

    # ensure rsyslogd is receiving messages from journald
    testScript = ''
      $machine->waitForUnit("default.target");
      $machine->succeed("test -f /var/log/messages");
    '';
  };
}
@ -1,22 +1,27 @@
-import ./make-test.nix ({ ... }:
-let mungekey = "mungeverryweakkeybuteasytointegratoinatest";
+import ./make-test.nix ({ lib, ... }:
+let
+    mungekey = "mungeverryweakkeybuteasytointegratoinatest";
+
     slurmconfig = {
       controlMachine = "control";
-      nodeName = ''
-        control
-        NodeName=node[1-3] CPUs=1 State=UNKNOWN
+      nodeName = [ "node[1-3] CPUs=1 State=UNKNOWN" ];
+      partitionName = [ "debug Nodes=node[1-3] Default=YES MaxTime=INFINITE State=UP" ];
+      extraConfig = ''
+        AccountingStorageHost=dbd
+        AccountingStorageType=accounting_storage/slurmdbd
       '';
-      partitionName = "debug Nodes=node[1-3] Default=YES MaxTime=INFINITE State=UP";
     };
 in {
   name = "slurm";

+  meta.maintainers = [ lib.maintainers.markuskowa ];
+
   nodes =
     let
       computeNode =
         { ...}:
         {
-          # TODO slrumd port and slurmctld port should be configurations and
+          # TODO slurmd port and slurmctld port should be configurations and
           # automatically allowed by the firewall.
           networking.firewall.enable = false;
           services.slurm = {
@ -43,6 +48,24 @@ in {
         } // slurmconfig;
       };

+      dbd =
+        { pkgs, ... } :
+        {
+          networking.firewall.enable = false;
+          services.slurm.dbdserver = {
+            enable = true;
+          };
+          services.mysql = {
+            enable = true;
+            package = pkgs.mysql;
+            ensureDatabases = [ "slurm_acct_db" ];
+            ensureUsers = [{
+              ensurePermissions = { "slurm_acct_db.*" = "ALL PRIVILEGES"; };
+              name = "slurm";
+            }];
+          };
+        };
+
       node1 = computeNode;
       node2 = computeNode;
       node3 = computeNode;
@ -54,7 +77,7 @@ in {
     startAll;

     # Set up authentification across the cluster
-    foreach my $node (($submit,$control,$node1,$node2,$node3))
+    foreach my $node (($submit,$control,$dbd,$node1,$node2,$node3))
     {
       $node->waitForUnit("default.target");

@ -63,10 +86,22 @@ in {
       $node->succeed("chmod 0400 /etc/munge/munge.key");
       $node->succeed("chown munge:munge /etc/munge/munge.key");
       $node->succeed("systemctl restart munged");
-    }
+
+      $node->waitForUnit("munged");
+    };

     # Restart the services since they have probably failed due to the munge init
     # failure
+    subtest "can_start_slurmdbd", sub {
+      $dbd->succeed("systemctl restart slurmdbd");
+      $dbd->waitForUnit("slurmdbd.service");
+    };
+
+    # there needs to be an entry for the current
+    # cluster in the database before slurmctld is restarted
+    subtest "add_account", sub {
+      $control->succeed("sacctmgr -i add cluster default");
+    };
+
     subtest "can_start_slurmctld", sub {
       $control->succeed("systemctl restart slurmctld");
@ -81,12 +116,17 @@ in {
       }
     };

-    # Test that the cluster work and can distribute jobs;
+    # Test that the cluster works and can distribute jobs;

     subtest "run_distributed_command", sub {
       # Run `hostname` on 3 nodes of the partition (so on all the 3 nodes).
       # The output must contain the 3 different names
       $submit->succeed("srun -N 3 hostname | sort | uniq | wc -l | xargs test 3 -eq");
     };

+    subtest "check_slurm_dbd", sub {
+      # find the srun job from above in the database
+      $submit->succeed("sacct | grep hostname");
+    };
   '';
 })
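Outside of the test harness, the same accounting setup boils down to a slurmdbd host, a MySQL database named slurm_acct_db, and the AccountingStorage* lines in the shared Slurm config. The sketch below only restates what the test above wires together, with the host name "dbd" taken from the test.

    # Sketch of the accounting wiring exercised by the test.
    services.slurm.dbdserver.enable = true;
    services.slurm.extraConfig = ''
      AccountingStorageHost=dbd
      AccountingStorageType=accounting_storage/slurmdbd
    '';
    services.mysql = {
      enable = true;
      ensureDatabases = [ "slurm_acct_db" ];
      ensureUsers = [ { name = "slurm"; ensurePermissions = { "slurm_acct_db.*" = "ALL PRIVILEGES"; }; } ];
    };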
nixos/tests/solr.nix (new file, 47 lines)
@ -0,0 +1,47 @@
import ./make-test.nix ({ pkgs, lib, ... }:
{
  name = "solr";
  meta.maintainers = [ lib.maintainers.aanderse ];

  machine =
    { config, pkgs, ... }:
    {
      # Ensure the virtual machine has enough memory for Solr to avoid the following error:
      #
      #   OpenJDK 64-Bit Server VM warning:
      #     INFO: os::commit_memory(0x00000000e8000000, 402653184, 0)
      #     failed; error='Cannot allocate memory' (errno=12)
      #
      #   There is insufficient memory for the Java Runtime Environment to continue.
      #   Native memory allocation (mmap) failed to map 402653184 bytes for committing reserved memory.
      virtualisation.memorySize = 2000;

      services.solr.enable = true;
    };

  testScript = ''
    startAll;

    $machine->waitForUnit('solr.service');
    $machine->waitForOpenPort('8983');
    $machine->succeed('curl --fail http://localhost:8983/solr/');

    # adapted from pkgs.solr/examples/films/README.txt
    $machine->succeed('sudo -u solr solr create -c films');
    $machine->succeed(q(curl http://localhost:8983/solr/films/schema -X POST -H 'Content-type:application/json' --data-binary '{
      "add-field" : {
        "name":"name",
        "type":"text_general",
        "multiValued":false,
        "stored":true
      },
      "add-field" : {
        "name":"initial_release_date",
        "type":"pdate",
        "stored":true
      }
    }')) =~ /"status":0/ or die;
    $machine->succeed('sudo -u solr post -c films ${pkgs.solr}/example/films/films.json');
    $machine->succeed('curl http://localhost:8983/solr/films/query?q=name:batman') =~ /"name":"Batman Begins"/ or die;
  '';
})
|
@@ -90,5 +90,5 @@ rec {
   parity-beta = callPackage ./parity/beta.nix { };
   parity-ui = callPackage ./parity-ui { };
 
-  particl-core = callPackage ./particl/particl-core.nix { boost = boost165; miniupnpc = miniupnpc_2; };
+  particl-core = callPackage ./particl/particl-core.nix { miniupnpc = miniupnpc_2; };
 }
@@ -11,13 +11,13 @@ with stdenv.lib;
 stdenv.mkDerivation rec {
 
   name = "litecoin" + (toString (optional (!withGui) "d")) + "-" + version;
-  version = "0.16.2";
+  version = "0.16.3";
 
   src = fetchFromGitHub {
     owner = "litecoin-project";
     repo = "litecoin";
     rev = "v${version}";
-    sha256 = "0xfwh7cxxz6w8kgr4kl48w3zm81n1hv8fxb5l9zx3460im1ffgy6";
+    sha256 = "0vc184qfdkjky1qffa7309k6973k4197bkzwcmffc9r5sdfhrhkp";
   };
 
   nativeBuildInputs = [ pkgconfig autoreconfHook ];
@@ -10,31 +10,40 @@
 , zeromq
 , zlib
 , unixtools
+, python3
 }:
 
 with stdenv.lib;
 
 stdenv.mkDerivation rec {
   name = "particl-core-${version}";
-  version = "0.16.2.0";
+  version = "0.17.0.2";
 
   src = fetchurl {
     url = "https://github.com/particl/particl-core/archive/v${version}.tar.gz";
-    sha256 = "1d2vvg7avlhsg0rcpd5pbzafnk1w51a2y29xjjkpafi6iqs2l617";
+    sha256 = "0bkxdayl0jrfhgz8qzqqpwzv0yavz3nwsn6c8k003jnbcw65fkhx";
   };
 
   nativeBuildInputs = [ pkgconfig autoreconfHook ];
-  buildInputs = [
-    openssl db48 boost zlib miniupnpc libevent zeromq
-    unixtools.hexdump
+  buildInputs = [ openssl db48 boost zlib miniupnpc libevent zeromq unixtools.hexdump python3 ];
+
+  configureFlags = [
+    "--disable-bench"
+    "--with-boost-libdir=${boost.out}/lib"
+  ] ++ optionals (!doCheck) [
+    "--enable-tests=no"
   ];
 
-  configureFlags = [ "--with-boost-libdir=${boost.out}/lib" ];
+  # Always check during Hydra builds
+  doCheck = true;
+  preCheck = "patchShebangs test";
+  enableParallelBuilding = true;
 
   meta = {
     description = "Privacy-Focused Marketplace & Decentralized Application Platform";
     longDescription= ''
       An open source, decentralized privacy platform built for global person to person eCommerce.
+      RPC daemon and CLI client only.
     '';
     homepage = https://particl.io/;
     maintainers = with maintainers; [ demyanrogozhin ];
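The new configureFlags list above relies on lib.optionals to append "--enable-tests=no" only when doCheck is false (doCheck is forced to true here so Hydra always runs the test suite). A minimal standalone sketch of that behaviour, assuming a nixpkgs checkout is on NIX_PATH; it can be evaluated with nix-instantiate --eval --strict:

    # optionals cond list returns list when cond holds and [ ] otherwise,
    # so with doCheck = true nothing is appended.
    with (import <nixpkgs> {}).lib;
    let doCheck = true;
    in [ "--disable-bench" ] ++ optionals (!doCheck) [ "--enable-tests=no" ]
    # => [ "--disable-bench" ]   (with doCheck = false, both flags appear)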
pkgs/applications/audio/avldrums-lv2/default.nix (new file, 30 lines)
@@ -0,0 +1,30 @@
+{ stdenv, fetchFromGitHub, pkgconfig, pango, cairo, libGLU, lv2 }:
+
+stdenv.mkDerivation rec {
+  name = "${pname}-${version}";
+  pname = "avldrums.lv2";
+  version = "0.3.0";
+
+  src = fetchFromGitHub {
+    owner = "x42";
+    repo = pname;
+    rev = "v${version}";
+    sha256 = "0w51gdshq2i5bix2x5l3g3gnycy84nlzf5sj0jkrw0zrnbk6ghwg";
+    fetchSubmodules = true;
+  };
+
+  installFlags = "PREFIX=$(out)";
+
+  nativeBuildInputs = [ pkgconfig ];
+  buildInputs = [
+    pango cairo libGLU lv2
+  ];
+
+  meta = with stdenv.lib; {
+    description = "Dedicated AVLDrumkits LV2 Plugin";
+    homepage = http://x42-plugins.com/x42/x42-avldrums;
+    license = licenses.gpl2;
+    maintainers = [ maintainers.magnetophon ];
+    platforms = [ "i686-linux" "x86_64-linux" ];
+  };
+}
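The new derivation would normally be exposed through a callPackage entry in pkgs/top-level/all-packages.nix; that hunk is not part of this excerpt. As an illustration only, the expression can also be built directly from a nixpkgs checkout, letting callPackage fill in the arguments declared at the top of the file:

    # build-avldrums.nix — hypothetical driver file, saved in the nixpkgs root and
    # built with `nix-build build-avldrums.nix`; callPackage supplies stdenv,
    # fetchFromGitHub, pkgconfig, pango, cairo, libGLU and lv2 from the package set.
    with import ./. {};
    callPackage ./pkgs/applications/audio/avldrums-lv2 { }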
Some files were not shown because too many files have changed in this diff.