Merge branch 'master' into systemd-219

Conflicts:
	nixos/modules/system/boot/systemd.nix
	pkgs/applications/networking/p2p/transmission/default.nix
	pkgs/development/libraries/libseccomp/default.nix
	pkgs/os-specific/linux/systemd/default.nix
	pkgs/top-level/all-packages.nix

Commit: 916793cf13
@@ -33,8 +33,10 @@ For pull-requests, please rebase onto nixpkgs `master`.
 * [Manual (NixOS)](https://nixos.org/nixos/manual/)
 * [Continuous package builds for unstable/master](https://hydra.nixos.org/jobset/nixos/trunk-combined)
 * [Continuous package builds for 14.12 release](https://hydra.nixos.org/jobset/nixos/release-14.12)
+* [Continuous package builds for 15.09 release](https://hydra.nixos.org/jobset/nixos/release-15.09)
 * [Tests for unstable/master](https://hydra.nixos.org/job/nixos/trunk-combined/tested#tabs-constituents)
 * [Tests for 14.12 release](https://hydra.nixos.org/job/nixos/release-14.12/tested#tabs-constituents)
+* [Tests for 15.09 release](https://hydra.nixos.org/job/nixos/release-15.09/tested#tabs-constituents)

 Communication:
@@ -85,6 +85,44 @@ in ...</programlisting>

  </section>

  <section xml:id="sec-pkg-overrideDerivation">
    <title><pkg>.overrideDerivation</title>

    <para>
      The function <varname>overrideDerivation</varname> is usually available for all the
      derivations in the nixpkgs expression (<varname>pkgs</varname>).
    </para>

    <para>
      It is used to create a new derivation by overriding the attributes of
      the original derivation according to the given function.
    </para>

    <para>
      Example usage:

      <programlisting>mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
  name = "sed-4.2.2-pre";
  src = fetchurl {
    url = ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2;
    sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
  };
  patches = [];
});</programlisting>
    </para>

    <para>
      In the above example, the name, src and patches of the derivation
      will be overridden, while all other attributes will be retained from the
      original derivation.
    </para>

    <para>
      The argument <varname>oldAttrs</varname> is used to refer to the attribute set of
      the original derivation.
    </para>
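
    <para>
      Since <varname>oldAttrs</varname> gives access to the original attributes, it can
      also be used to extend them rather than replace them outright. The following is a
      minimal sketch (the patch file path is only a placeholder), appending a patch to
      whatever patches the original derivation already had:

      <programlisting>myHello = pkgs.hello.overrideDerivation (oldAttrs: {
  # keep the original patches (if any) and add one more -- illustrative only
  patches = (oldAttrs.patches or []) ++ [ ./my-local.patch ];
});</programlisting>
    </para>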

  </section>

  <section xml:id="sec-lib-makeOverridable">
    <title>lib.makeOverridable</title>
@@ -117,4 +155,119 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>

  </section>


  <section xml:id="sec-fhs-environments">
    <title>buildFHSChrootEnv/buildFHSUserEnv</title>

    <para>
      <function>buildFHSChrootEnv</function> and
      <function>buildFHSUserEnv</function> provide a way to build and run
      FHS-compatible lightweight sandboxes. They get their own isolated root with
      a bind-mounted <filename>/nix/store</filename>, so their footprint in terms of
      disk space needed is quite small. This allows one to run software which is hard
      or infeasible to patch for NixOS -- 3rd-party source trees with FHS assumptions,
      games distributed as tarballs, and software with integrity checking and/or
      external self-updating binaries.
    </para>

    <para>
      <function>buildFHSChrootEnv</function> allows one to create persistent
      environments, which can be constructed, deconstructed and entered by
      multiple users at once. A downside is that it requires
      <literal>root</literal> access both for those who create or destroy the
      environment and for those who enter it. It can be useful to create environments
      for daemons that one can enter and observe.
    </para>

    <para>
      <function>buildFHSUserEnv</function> uses the Linux namespaces feature to create
      temporary lightweight environments which are destroyed after all child
      processes exit. It does not require root access, and can be useful for creating
      sandboxes and wrapping applications.
    </para>
|
||||
|
||||
    <para>
      Both functions rely on <function>buildFHSEnv</function>, which creates
      an actual directory structure given a list of necessary packages and extra
      build commands.
      <function>buildFHSChrootEnv</function> and <function>buildFHSUserEnv</function>
      both accept the following arguments, which are passed on to
      <function>buildFHSEnv</function>:
    </para>

    <variablelist>
      <varlistentry>
        <term><literal>name</literal></term>

        <listitem><para>Environment name.</para></listitem>
      </varlistentry>

      <varlistentry>
        <term><literal>targetPkgs</literal></term>

        <listitem><para>Packages to be installed for the main host's architecture
        (i.e. x86_64 on x86_64 installations).</para></listitem>
      </varlistentry>

      <varlistentry>
        <term><literal>multiPkgs</literal></term>

        <listitem><para>Packages to be installed for all architectures supported by
        a host (i.e. i686 and x86_64 on x86_64 installations).</para></listitem>
      </varlistentry>

      <varlistentry>
        <term><literal>extraBuildCommands</literal></term>

        <listitem><para>Additional commands to be executed for finalizing the
        directory structure.</para></listitem>
      </varlistentry>

      <varlistentry>
        <term><literal>extraBuildCommandsMulti</literal></term>

        <listitem><para>Like <literal>extraBuildCommands</literal>, but
        executed only on multilib architectures.</para></listitem>
      </varlistentry>
    </variablelist>
|
||||
|
||||
    <para>
      Additionally, <function>buildFHSUserEnv</function> accepts a
      <literal>runScript</literal> parameter, which is the command that is
      executed inside the sandbox and passed all the command line arguments. It
      defaults to <literal>bash</literal>.
      One can create a simple environment using a <literal>shell.nix</literal>
      like this:
    </para>

    <programlisting><![CDATA[
{ pkgs ? import <nixpkgs> {} }:

(pkgs.buildFHSUserEnv {
  name = "simple-x11-env";
  targetPkgs = pkgs: (with pkgs;
    [ udev
      alsaLib
    ]) ++ (with pkgs.xlibs;
    [ libX11
      libXcursor
      libXrandr
    ]);
  multiPkgs = pkgs: (with pkgs;
    [ udev
      alsaLib
    ]);
  runScript = "bash";
}).env
]]></programlisting>
|
||||
|
||||
    <para>
      Running <literal>nix-shell</literal> would then drop you into a shell with
      these libraries and binaries available. You can use this to run
      closed-source applications that expect an FHS structure without hassle:
      simply change <literal>runScript</literal> to the application path,
      e.g. <filename>./bin/start.sh</filename> -- relative paths are supported.
    </para>
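
    <para>
      As a hedged illustration (the environment name and the start script below are
      placeholders, not a real package), the earlier <literal>shell.nix</literal>
      could be adapted so that <literal>nix-shell</literal> launches the application
      directly instead of dropping into a shell:

      <programlisting><![CDATA[
{ pkgs ? import <nixpkgs> {} }:

(pkgs.buildFHSUserEnv {
  name = "some-game-env";
  targetPkgs = pkgs: [ pkgs.alsaLib ];
  # run the vendor-provided launcher instead of an interactive bash
  runScript = "./bin/start.sh";
}).env
]]></programlisting>
    </para>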
  </section>

</chapter>
@@ -92,18 +92,18 @@ error: attribute ‘haskellPackages’ in selection path
  </para>
  <programlisting>
$ nix-env -qaP coreutils
-nixos.pkgs.coreutils  coreutils-8.23
+nixos.coreutils  coreutils-8.23
  </programlisting>
  <para>
-   If your system responds like that (most NixOS installatios will),
+   If your system responds like that (most NixOS installations will),
    then the attribute path to <literal>haskellPackages</literal> is
-   <literal>nixos.pkgs.haskellPackages</literal>. Thus, if you want to
+   <literal>nixos.haskellPackages</literal>. Thus, if you want to
    use <literal>nix-env</literal> without giving an explicit
    <literal>-f</literal> flag, then that's the way to do it:
  </para>
  <programlisting>
-$ nix-env -qaP -A nixos.pkgs.haskellPackages
-$ nix-env -iA nixos.pkgs.haskellPackages.cabal-install
+$ nix-env -qaP -A nixos.haskellPackages
+$ nix-env -iA nixos.haskellPackages.cabal-install
  </programlisting>
  <para>
    Our current default compiler is GHC 7.10.x and the

@@ -600,6 +600,12 @@ $ nix-shell "<nixpkgs>" -A haskellPackages.bar.env
    };
  }
  </programlisting>
  <para>
    Then, replace instances of <literal>haskellPackages</literal> in the
    <literal>cabal2nix</literal>-generated <literal>default.nix</literal>
    or <literal>shell.nix</literal> files with
    <literal>profiledHaskellPackages</literal>.
  </para>
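  <para>
    As a rough sketch only: the generated files ultimately invoke
    <literal>haskellPackages.callPackage</literal>, so the edit amounts to pointing
    that call at the profiled set instead. How the package set is brought into scope
    depends on the generated file, so the names below are illustrative:
  </para>
  <programlisting>
# before (as generated by cabal2nix):
#   haskellPackages.callPackage ./default.nix { }
# after, using the profiled set defined in ~/.nixpkgs/config.nix:
profiledHaskellPackages.callPackage ./default.nix { }
  </programlisting>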
  </section>
  <section xml:id="how-to-override-package-versions-in-a-compiler-specific-package-set">
    <title>How to override package versions in a compiler-specific
@@ -712,7 +718,7 @@ text-1.2.0.4-98506efb1b9ada233bb5c2b2db516d91
  </para>
  <programlisting>
# nix-store -q --referrers /nix/store/*-haskell-text-1.2.0.4 \
-  | nix-store --repair-path --option binary-caches http://hydra.nixos.org
+  | xargs -L 1 nix-store --repair-path --option binary-caches http://hydra.nixos.org
  </programlisting>
  <para>
    If you're using additional Hydra servers other than

@@ -755,4 +761,69 @@ export NIX_CFLAGS_LINK="-L/usr/lib"
  </section>
  </section>

  <section xml:id="other-resources">
    <title>Other resources</title>
    <itemizedlist>
      <listitem>
        <para>
          The YouTube video
          <link xlink:href="https://www.youtube.com/watch?v=BsBhi_r-OeE">Nix
          Loves Haskell</link> provides an introduction to Haskell NG
          aimed at beginners. The slides are available at
          http://cryp.to/nixos-meetup-3-slides.pdf and also -- in a form
          ready for cut &amp; paste -- at
          https://github.com/NixOS/cabal2nix/blob/master/doc/nixos-meetup-3-slides.md.
        </para>
      </listitem>
      <listitem>
        <para>
          Another YouTube video is
          <link xlink:href="https://www.youtube.com/watch?v=mQd3s57n_2Y">Escaping
          Cabal Hell with Nix</link>, which discusses Haskell development
          with Nix and also provides a basic introduction to Nix itself,
          so it's suitable for viewers with almost no prior Nix experience.
        </para>
      </listitem>
      <listitem>
        <para>
          Oliver Charles wrote a very nice
          <link xlink:href="http://wiki.ocharles.org.uk/Nix">tutorial on how to
          develop Haskell packages with Nix</link>.
        </para>
      </listitem>
      <listitem>
        <para>
          The <emphasis>Journey into the Haskell NG
          infrastructure</emphasis> series of postings describes the new
          Haskell infrastructure in great detail:
        </para>
        <itemizedlist>
          <listitem>
            <para>
              <link xlink:href="http://lists.science.uu.nl/pipermail/nix-dev/2015-January/015591.html">Part
              1</link> explains the differences between the old and the
              new code and gives instructions on how to migrate to the new
              setup.
            </para>
          </listitem>
          <listitem>
            <para>
              <link xlink:href="http://lists.science.uu.nl/pipermail/nix-dev/2015-January/015608.html">Part
              2</link> looks in-depth at how to tweak and configure your
              setup by means of overrides.
            </para>
          </listitem>
          <listitem>
            <para>
              <link xlink:href="http://lists.science.uu.nl/pipermail/nix-dev/2015-April/016912.html">Part
              3</link> describes the infrastructure that keeps the
              Haskell package set in Nixpkgs up-to-date.
            </para>
          </listitem>
        </itemizedlist>
      </listitem>
    </itemizedlist>
  </section>

</chapter>
@@ -1,3 +1,4 @@

<chapter xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xml:id="chap-language-support">

@@ -341,7 +342,14 @@ twisted = buildPythonPackage {
 <para>By default <varname>doCheck = true</varname> is set and tests are run with
 <literal>${python.interpreter} setup.py test</literal> command in <varname>checkPhase</varname>.</para>

-<para><varname>propagatedBuildInputs</varname> packages are propagated to user environment.</para>
+<para>
+  As in Perl, dependencies on other Python packages can be specified in the
+  <varname>buildInputs</varname> and
+  <varname>propagatedBuildInputs</varname> attributes. If something is
+  exclusively a build-time dependency, use
+  <varname>buildInputs</varname>; if it’s (also) a runtime dependency,
+  use <varname>propagatedBuildInputs</varname>.
+</para>
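
<para>
  As a minimal sketch (the package name and source are placeholders, and the
  dependency attribute paths are written here only for illustration), the
  distinction looks like this in a <varname>buildPythonPackage</varname> call:
</para>

<programlisting>
myPackage = buildPythonPackage rec {
  name = "mypackage-1.0";
  src = ./.;  # placeholder: a local source tree
  # nose is only needed while building and running the test suite
  buildInputs = with pythonPackages; [ nose ];
  # requests is imported at run time, so it must be propagated
  propagatedBuildInputs = with pythonPackages; [ requests ];
};
</programlisting>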

<para>
  By default <varname>meta.platforms</varname> is set to the same value

@@ -456,6 +464,27 @@ python.buildEnv.override {
  with wrapped binaries in <filename>bin/</filename>.
</para>

<para>
  You can also use the <varname>env</varname> attribute to create local
  environments with the needed packages installed (somewhat comparable to
  <literal>virtualenv</literal>). For example, with the following
  <filename>shell.nix</filename>:

<programlisting language="nix">
<![CDATA[with import <nixpkgs> {};

(python3.buildEnv.override {
  extraLibs = with python3Packages;
    [ numpy
      requests
    ];
}).env]]>
</programlisting>

  Running <command>nix-shell</command> will drop you into a shell where
  <command>python</command> will have the specified packages in its path.
</para>

<variablelist>
  <title>
    <function>python.buildEnv</function> arguments
@@ -219,5 +219,151 @@ you should modify
</section>
-->

<!--============================================================-->

<section xml:id="sec-eclipse">

<title>Eclipse</title>

<para>
  The Nix expressions related to the Eclipse platform and IDE are in
  <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/eclipse"><filename>pkgs/applications/editors/eclipse</filename></link>.
</para>

<para>
  Nixpkgs provides a number of packages that will install Eclipse in
  its various forms. These range from the bare-bones Eclipse
  Platform to the more fully featured Eclipse SDK or Scala-IDE
  packages, and multiple versions are often available. It is possible
  to list the available Eclipse packages by issuing the command:

<screen>
$ nix-env -f '<nixpkgs>' -qaP -A eclipses --description
</screen>

  Once an Eclipse variant is installed it can be run using the
  <command>eclipse</command> command, as expected. From within
  Eclipse it is then possible to install plugins in the usual manner
  by either manually specifying an Eclipse update site or by
  installing the Marketplace Client plugin and using it to discover
  and install other plugins. This installation method provides an
  Eclipse installation that closely resembles a manually installed
  Eclipse.
</para>

<para>
  If you prefer to install plugins in a more declarative manner, then
  Nixpkgs also offers a number of Eclipse plugins that can be
  installed in an <emphasis>Eclipse environment</emphasis>. This
  type of environment is created using the function
  <varname>eclipseWithPlugins</varname> found inside the
  <varname>nixpkgs.eclipses</varname> attribute set. This function
  takes as argument <literal>{ eclipse, plugins ? [], jvmArgs ? []
  }</literal> where <varname>eclipse</varname> is one of the
  Eclipse packages described above, <varname>plugins</varname> is a
  list of plugin derivations, and <varname>jvmArgs</varname> is a
  list of arguments given to the JVM running Eclipse. For
  example, say you wish to install the latest Eclipse Platform with
  the popular Eclipse Color Theme plugin and also allow Eclipse to
  use more RAM. You could then add

<screen>
packageOverrides = pkgs: {
  myEclipse = with pkgs.eclipses; eclipseWithPlugins {
    eclipse = eclipse-platform;
    jvmArgs = [ "-Xmx2048m" ];
    plugins = [ plugins.color-theme ];
  };
}
</screen>

  to your Nixpkgs configuration
  (<filename>~/.nixpkgs/config.nix</filename>) and install it by
  running <command>nix-env -f '<nixpkgs>' -iA
  myEclipse</command> and afterward run Eclipse as usual. It is
  possible to find out which plugins are available for installation
  using <varname>eclipseWithPlugins</varname> by running

<screen>
$ nix-env -f '<nixpkgs>' -qaP -A eclipses.plugins --description
</screen>
</para>

<para>
  If there is a need to install plugins that are not available in
  Nixpkgs then it may be possible to define these plugins outside
  Nixpkgs using the <varname>buildEclipseUpdateSite</varname> and
  <varname>buildEclipsePlugin</varname> functions found in the
  <varname>nixpkgs.eclipses.plugins</varname> attribute set. Use the
  <varname>buildEclipseUpdateSite</varname> function to install a
  plugin distributed as an Eclipse update site. This function takes
  <literal>{ name, src }</literal> as argument, where
  <literal>src</literal> indicates the Eclipse update site archive.
  All Eclipse features and plugins within the downloaded update site
  will be installed. When an update site archive is not available,
  the <varname>buildEclipsePlugin</varname> function can be
  used to install a plugin that consists of a pair of feature and
  plugin JARs. This function takes an argument <literal>{ name,
  srcFeature, srcPlugin }</literal> where
  <literal>srcFeature</literal> and <literal>srcPlugin</literal> are
  the feature and plugin JARs, respectively.
</para>

<para>
  Expanding the previous example with two plugins defined using the above
  functions, we have

<screen>
packageOverrides = pkgs: {
  myEclipse = with pkgs.eclipses; eclipseWithPlugins {
    eclipse = eclipse-platform;
    jvmArgs = [ "-Xmx2048m" ];
    plugins = [
      plugins.color-theme
      (plugins.buildEclipsePlugin {
        name = "myplugin1-1.0";
        srcFeature = fetchurl {
          url = "http://…/features/myplugin1.jar";
          sha256 = "123…";
        };
        srcPlugin = fetchurl {
          url = "http://…/plugins/myplugin1.jar";
          sha256 = "123…";
        };
      })
      (plugins.buildEclipseUpdateSite {
        name = "myplugin2-1.0";
        src = fetchurl {
          stripRoot = false;
          url = "http://…/myplugin2.zip";
          sha256 = "123…";
        };
      })
    ];
  };
}
</screen>
</para>

</section>

<section xml:id="sec-elm">

<title>Elm</title>

<para>
  The Nix expressions for Elm reside in
  <filename>pkgs/development/compilers/elm</filename>. They are generated
  automatically by the <command>update-elm.rb</command> script. One should
  specify the versions of Elm packages inside the script, clear the
  <filename>packages</filename> directory and run the script from inside it.
  <literal>elm-reactor</literal> is special because it also has Elm package
  dependencies. The process is not fully automated for now -- you should
  get the <literal>elm-reactor</literal> source tree (e.g. with
  <command>nix-shell</command>) and run <command>elm2nix.rb</command> inside
  it. Place the resulting <filename>package.nix</filename> file into
  <filename>packages/elm-reactor-elm.nix</filename>.
</para>
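
<para>
  A rough sketch of that update workflow (the exact paths are assumptions and may
  differ in your checkout) might look like:

<screen>
$ cd pkgs/development/compilers/elm
$ rm packages/*.nix        # clear the previously generated expressions
$ cd packages
$ ../update-elm.rb         # regenerate them with the versions set in the script
</screen>
</para>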

</section>

</chapter>
@ -6,7 +6,6 @@ with {
|
||||
inherit (import ./default.nix) fold;
|
||||
inherit (import ./strings.nix) concatStringsSep;
|
||||
inherit (import ./lists.nix) concatMap concatLists all deepSeqList;
|
||||
inherit (import ./misc.nix) maybeAttr;
|
||||
};
|
||||
|
||||
rec {
|
||||
@ -76,7 +75,7 @@ rec {
|
||||
=> { foo = 1; }
|
||||
*/
|
||||
filterAttrs = pred: set:
|
||||
listToAttrs (fold (n: ys: let v = set.${n}; in if pred n v then [(nameValuePair n v)] ++ ys else ys) [] (attrNames set));
|
||||
listToAttrs (concatMap (name: let v = set.${name}; in if pred name v then [(nameValuePair name v)] else []) (attrNames set));
|
||||
|
||||
|
||||
/* foldAttrs: apply fold functions to values grouped by key. Eg accumulate values as list:
|
||||
@ -86,7 +85,7 @@ rec {
|
||||
foldAttrs = op: nul: list_of_attrs:
|
||||
fold (n: a:
|
||||
fold (name: o:
|
||||
o // (listToAttrs [{inherit name; value = op n.${name} (maybeAttr name nul a); }])
|
||||
o // (listToAttrs [{inherit name; value = op n.${name} (a.${name} or nul); }])
|
||||
) a (attrNames n)
|
||||
) {} list_of_attrs;
|
||||
|
||||
@ -222,6 +221,16 @@ rec {
|
||||
isDerivation = x: isAttrs x && x ? type && x.type == "derivation";
|
||||
|
||||
|
||||
/* Convert a store path to a fake derivation. */
|
||||
toDerivation = path:
|
||||
let path' = builtins.storePath path; in
|
||||
{ type = "derivation";
|
||||
name = builtins.unsafeDiscardStringContext (builtins.substring 33 (-1) (baseNameOf path'));
|
||||
outPath = path';
|
||||
outputs = [ "out" ];
|
||||
};
|
||||
|
||||
|
||||
/* If the Boolean `cond' is true, return the attribute set `as',
|
||||
otherwise an empty attribute set. */
|
||||
optionalAttrs = cond: as: if cond then as else {};
|
||||
|
@ -1,6 +1,8 @@
|
||||
let
|
||||
|
||||
lib = import ./default.nix;
|
||||
inherit (builtins) attrNames isFunction;
|
||||
|
||||
in
|
||||
|
||||
rec {
|
||||
@ -49,10 +51,6 @@ rec {
|
||||
else { }));
|
||||
|
||||
|
||||
# usage: (you can use override multiple times)
|
||||
# let d = makeOverridable stdenv.mkDerivation { name = ..; buildInputs; }
|
||||
# noBuildInputs = d.override { buildInputs = []; }
|
||||
# additionalBuildInputs = d.override ( args : args // { buildInputs = args.buildInputs ++ [ additional ]; } )
|
||||
makeOverridable = f: origArgs:
|
||||
let
|
||||
ff = f origArgs;
|
||||
@ -60,24 +58,16 @@ rec {
|
||||
in
|
||||
if builtins.isAttrs ff then (ff //
|
||||
{ override = newArgs: makeOverridable f (overrideWith newArgs);
|
||||
deepOverride = newArgs:
|
||||
makeOverridable f (lib.overrideExisting (lib.mapAttrs (deepOverrider newArgs) origArgs) newArgs);
|
||||
overrideDerivation = fdrv:
|
||||
makeOverridable (args: overrideDerivation (f args) fdrv) origArgs;
|
||||
})
|
||||
else if builtins.isFunction ff then
|
||||
{ override = newArgs: makeOverridable f (overrideWith newArgs);
|
||||
__functor = self: ff;
|
||||
deepOverride = throw "deepOverride not yet supported for functors";
|
||||
overrideDerivation = throw "overrideDerivation not yet supported for functors";
|
||||
}
|
||||
else ff;
|
||||
|
||||
deepOverrider = newArgs: name: x: if builtins.isAttrs x then (
|
||||
if x ? deepOverride then (x.deepOverride newArgs) else
|
||||
if x ? override then (x.override newArgs) else
|
||||
x) else x;
|
||||
|
||||
|
||||
/* Call the package function in the file `fn' with the required
|
||||
arguments automatically. The function is called with the
|
||||
@ -102,12 +92,28 @@ rec {
|
||||
*/
|
||||
callPackageWith = autoArgs: fn: args:
|
||||
let
|
||||
f = if builtins.isFunction fn then fn else import fn;
|
||||
f = if builtins.isFunction fn then fn else import fn;
|
||||
auto = builtins.intersectAttrs (builtins.functionArgs f) autoArgs;
|
||||
in makeOverridable f (auto // args);
|
||||
|
||||
|
||||
/* Add attributes to each output of a derivation without changing the derivation itself */
|
||||
/* Like callPackage, but for a function that returns an attribute
|
||||
set of derivations. The override function is added to the
|
||||
individual attributes. */
|
||||
callPackagesWith = autoArgs: fn: args:
|
||||
let
|
||||
f = if builtins.isFunction fn then fn else import fn;
|
||||
auto = builtins.intersectAttrs (builtins.functionArgs f) autoArgs;
|
||||
finalArgs = auto // args;
|
||||
pkgs = f finalArgs;
|
||||
mkAttrOverridable = name: pkg: pkg // {
|
||||
override = newArgs: mkAttrOverridable name (f (finalArgs // newArgs)).${name};
|
||||
};
|
||||
in lib.mapAttrs mkAttrOverridable pkgs;
|
||||
|
||||
|
||||
/* Add attributes to each output of a derivation without changing
|
||||
the derivation itself. */
|
||||
addPassthru = drv: passthru:
|
||||
let
|
||||
outputs = drv.outputs or [ "out" ];
|
||||
|
@ -11,7 +11,7 @@ let
|
||||
types = import ./types.nix;
|
||||
meta = import ./meta.nix;
|
||||
debug = import ./debug.nix;
|
||||
misc = import ./misc.nix;
|
||||
misc = import ./deprecated.nix;
|
||||
maintainers = import ./maintainers.nix;
|
||||
platforms = import ./platforms.nix;
|
||||
systems = import ./systems.nix;
|
||||
|
@ -29,7 +29,6 @@ rec {
|
||||
} ));
|
||||
withStdOverrides = base // {
|
||||
override = base.passthru.function;
|
||||
deepOverride = a : (base.passthru.function ((lib.mapAttrs (lib.deepOverrider a) base.passthru.args) // a));
|
||||
} ;
|
||||
in
|
||||
withStdOverrides;
|
||||
@ -203,8 +202,6 @@ rec {
|
||||
in
|
||||
work startSet [] [];
|
||||
|
||||
genericClosure = builtins.genericClosure or lazyGenericClosure;
|
||||
|
||||
innerModifySumArgs = f: x: a: b: if b == null then (f a b) // x else
|
||||
innerModifySumArgs f x (a // b);
|
||||
modifySumArgs = f: x: innerModifySumArgs f x {};
|
lib/lists.nix
@ -4,7 +4,7 @@ with import ./trivial.nix;
|
||||
|
||||
rec {
|
||||
|
||||
inherit (builtins) head tail length isList elemAt concatLists filter elem;
|
||||
inherit (builtins) head tail length isList elemAt concatLists filter elem genList;
|
||||
|
||||
|
||||
# Create a list consisting of a single element. `singleton x' is
|
||||
@ -38,16 +38,24 @@ rec {
|
||||
in foldl' (length list - 1);
|
||||
|
||||
|
||||
# map with index: `imap (i: v: "${v}-${toString i}") ["a" "b"] ==
|
||||
# ["a-1" "b-2"]'
|
||||
imap = f: list:
|
||||
let
|
||||
len = length list;
|
||||
imap' = n:
|
||||
if n == len
|
||||
then []
|
||||
else [ (f (n + 1) (elemAt list n)) ] ++ imap' (n + 1);
|
||||
in imap' 0;
|
||||
# Strict version of foldl.
|
||||
foldl' = builtins.foldl' or foldl;
|
||||
|
||||
|
||||
# Map with index: `imap (i: v: "${v}-${toString i}") ["a" "b"] ==
|
||||
# ["a-1" "b-2"]'. FIXME: why does this start to count at 1?
|
||||
imap =
|
||||
if builtins ? genList then
|
||||
f: list: genList (n: f (n + 1) (elemAt list n)) (length list)
|
||||
else
|
||||
f: list:
|
||||
let
|
||||
len = length list;
|
||||
imap' = n:
|
||||
if n == len
|
||||
then []
|
||||
else [ (f (n + 1) (elemAt list n)) ] ++ imap' (n + 1);
|
||||
in imap' 0;
|
||||
|
||||
|
||||
# Map and concatenate the result.
|
||||
@ -59,7 +67,7 @@ rec {
|
||||
# == [1 2 3 4 5]' and `flatten 1 == [1]'.
|
||||
flatten = x:
|
||||
if isList x
|
||||
then fold (x: y: (flatten x) ++ y) [] x
|
||||
then foldl' (x: y: x ++ (flatten y)) [] x
|
||||
else [x];
|
||||
|
||||
|
||||
@ -86,17 +94,17 @@ rec {
|
||||
|
||||
  # Return true iff function `pred' returns true for at least one element
  # of `list'.
|
||||
any = pred: fold (x: y: if pred x then true else y) false;
|
||||
any = builtins.any or (pred: fold (x: y: if pred x then true else y) false);
|
||||
|
||||
|
||||
# Return true iff function `pred' returns true for all elements of
|
||||
# `list'.
|
||||
all = pred: fold (x: y: if pred x then y else false) true;
|
||||
all = builtins.all or (pred: fold (x: y: if pred x then y else false) true);
|
||||
|
||||
|
||||
# Count how many times function `pred' returns true for the elements
|
||||
# of `list'.
|
||||
count = pred: fold (x: c: if pred x then c + 1 else c) 0;
|
||||
count = pred: foldl' (c: x: if pred x then c + 1 else c) 0;
|
||||
|
||||
|
||||
# Return a singleton list or an empty list, depending on a boolean
|
||||
@ -116,10 +124,17 @@ rec {
|
||||
|
||||
|
||||
# Return a list of integers from `first' up to and including `last'.
|
||||
range = first: last:
|
||||
if last < first
|
||||
then []
|
||||
else [first] ++ range (first + 1) last;
|
||||
range =
|
||||
if builtins ? genList then
|
||||
first: last:
|
||||
if first > last
|
||||
then []
|
||||
else genList (n: first + n) (last - first + 1)
|
||||
else
|
||||
first: last:
|
||||
if last < first
|
||||
then []
|
||||
else [first] ++ range (first + 1) last;
|
||||
|
||||
|
||||
# Partition the elements of a list in two lists, `right' and
|
||||
@ -132,30 +147,37 @@ rec {
|
||||
) { right = []; wrong = []; };
|
||||
|
||||
|
||||
zipListsWith = f: fst: snd:
|
||||
let
|
||||
len1 = length fst;
|
||||
len2 = length snd;
|
||||
len = if len1 < len2 then len1 else len2;
|
||||
zipListsWith' = n:
|
||||
if n != len then
|
||||
[ (f (elemAt fst n) (elemAt snd n)) ]
|
||||
++ zipListsWith' (n + 1)
|
||||
else [];
|
||||
in zipListsWith' 0;
|
||||
zipListsWith =
|
||||
if builtins ? genList then
|
||||
f: fst: snd: genList (n: f (elemAt fst n) (elemAt snd n)) (min (length fst) (length snd))
|
||||
else
|
||||
f: fst: snd:
|
||||
let
|
||||
len = min (length fst) (length snd);
|
||||
zipListsWith' = n:
|
||||
if n != len then
|
||||
[ (f (elemAt fst n) (elemAt snd n)) ]
|
||||
++ zipListsWith' (n + 1)
|
||||
else [];
|
||||
in zipListsWith' 0;
|
||||
|
||||
zipLists = zipListsWith (fst: snd: { inherit fst snd; });
|
||||
|
||||
|
||||
# Reverse the order of the elements of a list. FIXME: O(n^2)!
|
||||
reverseList = fold (e: acc: acc ++ [ e ]) [];
|
||||
# Reverse the order of the elements of a list.
|
||||
reverseList =
|
||||
if builtins ? genList then
|
||||
xs: let l = length xs; in genList (n: elemAt xs (l - n - 1)) l
|
||||
else
|
||||
fold (e: acc: acc ++ [ e ]) [];
|
||||
|
||||
|
||||
# Sort a list based on a comparator function which compares two
|
||||
# elements and returns true if the first argument is strictly below
|
||||
# the second argument. The returned list is sorted in an increasing
|
||||
# order. The implementation does a quick-sort.
|
||||
sort = strictLess: list:
|
||||
sort = builtins.sort or (
|
||||
strictLess: list:
|
||||
let
|
||||
len = length list;
|
||||
first = head list;
|
||||
@ -169,31 +191,50 @@ rec {
|
||||
pivot = pivot' 1 { left = []; right = []; };
|
||||
in
|
||||
if len < 2 then list
|
||||
else (sort strictLess pivot.left) ++ [ first ] ++ (sort strictLess pivot.right);
|
||||
else (sort strictLess pivot.left) ++ [ first ] ++ (sort strictLess pivot.right));
|
||||
|
||||
|
||||
# Return the first (at most) N elements of a list.
|
||||
take = count: list:
|
||||
let
|
||||
len = length list;
|
||||
take' = n:
|
||||
if n == len || n == count
|
||||
then []
|
||||
else
|
||||
[ (elemAt list n) ] ++ take' (n + 1);
|
||||
in take' 0;
|
||||
take =
|
||||
if builtins ? genList then
|
||||
count: sublist 0 count
|
||||
else
|
||||
count: list:
|
||||
let
|
||||
len = length list;
|
||||
take' = n:
|
||||
if n == len || n == count
|
||||
then []
|
||||
else
|
||||
[ (elemAt list n) ] ++ take' (n + 1);
|
||||
in take' 0;
|
||||
|
||||
|
||||
# Remove the first (at most) N elements of a list.
|
||||
drop = count: list:
|
||||
let
|
||||
len = length list;
|
||||
drop' = n:
|
||||
if n == -1 || n < count
|
||||
then []
|
||||
else
|
||||
drop' (n - 1) ++ [ (elemAt list n) ];
|
||||
in drop' (len - 1);
|
||||
drop =
|
||||
if builtins ? genList then
|
||||
count: list: sublist count (length list) list
|
||||
else
|
||||
count: list:
|
||||
let
|
||||
len = length list;
|
||||
drop' = n:
|
||||
if n == -1 || n < count
|
||||
then []
|
||||
else
|
||||
drop' (n - 1) ++ [ (elemAt list n) ];
|
||||
in drop' (len - 1);
|
||||
|
||||
|
||||
# Return a list consisting of at most ‘count’ elements of ‘list’,
|
||||
# starting at index ‘start’.
|
||||
sublist = start: count: list:
|
||||
let len = length list; in
|
||||
genList
|
||||
(n: elemAt list (n + start))
|
||||
(if start >= len then 0
|
||||
else if start + count > len then len - start
|
||||
else count);
|
||||
|
||||
|
||||
# Return the last element of a list.
|
||||
@ -205,25 +246,13 @@ rec {
|
||||
init = list: assert list != []; take (length list - 1) list;
|
||||
|
||||
|
||||
# Zip two lists together.
|
||||
zipTwoLists = xs: ys:
|
||||
let
|
||||
len1 = length xs;
|
||||
len2 = length ys;
|
||||
len = if len1 < len2 then len1 else len2;
|
||||
zipTwoLists' = n:
|
||||
if n != len then
|
||||
[ { first = elemAt xs n; second = elemAt ys n; } ]
|
||||
++ zipTwoLists' (n + 1)
|
||||
else [];
|
||||
in zipTwoLists' 0;
|
||||
|
||||
|
||||
deepSeqList = xs: y: if any (x: deepSeq x false) xs then y else y;
|
||||
|
||||
|
||||
crossLists = f: foldl (fs: args: concatMap (f: map f args) fs) [f];
|
||||
|
||||
# Remove duplicate elements from the list
|
||||
|
||||
# Remove duplicate elements from the list. O(n^2) complexity.
|
||||
unique = list:
|
||||
if list == [] then
|
||||
[]
|
||||
@ -233,9 +262,12 @@ rec {
|
||||
xs = unique (drop 1 list);
|
||||
in [x] ++ remove x xs;
|
||||
|
||||
# Intersects list 'e' and another list
|
||||
|
||||
# Intersects list 'e' and another list. O(nm) complexity.
|
||||
intersectLists = e: filter (x: elem x e);
|
||||
|
||||
# Subtracts list 'e' from another list
|
||||
|
||||
# Subtracts list 'e' from another list. O(nm) complexity.
|
||||
subtractLists = e: filter (x: !(elem x e));
|
||||
|
||||
}
|
||||
|
@ -14,6 +14,7 @@
|
||||
aflatter = "Alexander Flatter <flatter@fastmail.fm>";
|
||||
aherrmann = "Andreas Herrmann <andreash87@gmx.ch>";
|
||||
ak = "Alexander Kjeldaas <ak@formalprivacy.com>";
|
||||
akaWolf = "Artjom Vejsel <akawolf0@gmail.com>";
|
||||
akc = "Anders Claesson <akc@akc.is>";
|
||||
algorith = "Dries Van Daele <dries_van_daele@telenet.be>";
|
||||
all = "Nix Committers <nix-commits@lists.science.uu.nl>";
|
||||
@ -33,6 +34,7 @@
|
||||
auntie = "Jonathan Glines <auntieNeo@gmail.com>";
|
||||
avnik = "Alexander V. Nikolaev <avn@avnik.info>";
|
||||
aycanirican = "Aycan iRiCAN <iricanaycan@gmail.com>";
|
||||
badi = "Badi' Abdul-Wahid <abdulwahidc@gmail.com>";
|
||||
balajisivaraman = "Balaji Sivaraman<sivaraman.balaji@gmail.com>";
|
||||
bbenoist = "Baptist BENOIST <return_0@live.com>";
|
||||
bcarrell = "Brandon Carrell <brandoncarrell@gmail.com>";
|
||||
@ -66,12 +68,15 @@
|
||||
couchemar = "Andrey Pavlov <couchemar@yandex.ru>";
|
||||
cstrahan = "Charles Strahan <charles.c.strahan@gmail.com>";
|
||||
cwoac = "Oliver Matthews <oliver@codersoffortune.net>";
|
||||
DamienCassou = "Damien Cassou <damien.cassou@gmail.com>";
|
||||
DamienCassou = "Damien Cassou <damien@cassou.me>";
|
||||
davidrusu = "David Rusu <davidrusu.me@gmail.com>";
|
||||
dbohdan = "Danyil Bohdan <danyil.bohdan@gmail.com>";
|
||||
DerGuteMoritz = "Moritz Heidkamp <moritz@twoticketsplease.de>";
|
||||
deepfire = "Kosyrev Serge <_deepfire@feelingofgreen.ru>";
|
||||
desiderius = "Didier J. Devroye <didier@devroye.name>";
|
||||
devhell = "devhell <\"^\"@regexmail.net>";
|
||||
dezgeg = "Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>";
|
||||
dfoxfranke = "Daniel Fox Franke <dfoxfranke@gmail.com>";
|
||||
dmalikov = "Dmitry Malikov <malikov.d.y@gmail.com>";
|
||||
doublec = "Chris Double <chris.double@double.co.nz>";
|
||||
ederoyd46 = "Matthew Brown <matt@ederoyd.co.uk>";
|
||||
@ -81,6 +86,7 @@
|
||||
eikek = "Eike Kettner <eike.kettner@posteo.de>";
|
||||
ellis = "Ellis Whitehead <nixos@ellisw.net>";
|
||||
emery = "Emery Hemingway <emery@vfemail.net>";
|
||||
epitrochoid = "Mabry Cervin <mpcervin@uncg.edu>";
|
||||
ericbmerritt = "Eric Merritt <eric@afiniate.com>";
|
||||
ertes = "Ertugrul Söylemez <ertesx@gmx.de>";
|
||||
exlevan = "Alexey Levan <exlevan@gmail.com>";
|
||||
@ -118,6 +124,7 @@
|
||||
jagajaga = "Arseniy Seroka <ars.seroka@gmail.com>";
|
||||
jb55 = "William Casarin <bill@casarin.me>";
|
||||
jcumming = "Jack Cummings <jack@mudshark.org>";
|
||||
jefdaj = "Jeffrey David Johnson <jefdaj@gmail.com>";
|
||||
jfb = "James Felix Black <james@yamtime.com>";
|
||||
jgeerds = "Jascha Geerds <jg@ekby.de>";
|
||||
jirkamarsik = "Jirka Marsik <jiri.marsik89@gmail.com>";
|
||||
@ -129,12 +136,15 @@
|
||||
jwiegley = "John Wiegley <johnw@newartisans.com>";
|
||||
jwilberding = "Jordan Wilberding <jwilberding@afiniate.com>";
|
||||
jzellner = "Jeff Zellner <jeffz@eml.cc>";
|
||||
kamilchm = "Kamil Chmielewski <kamil.chm@gmail.com>";
|
||||
kkallio = "Karn Kallio <tierpluspluslists@gmail.com>";
|
||||
koral = "Koral <koral@mailoo.org>";
|
||||
kovirobi = "Kovacsics Robert <kovirobi@gmail.com>";
|
||||
kragniz = "Louis Taylor <kragniz@gmail.com>";
|
||||
ktosiek = "Tomasz Kontusz <tomasz.kontusz@gmail.com>";
|
||||
lassulus = "Lassulus <lassulus@gmail.com>";
|
||||
layus = "Guillaume Maudoux <layus.on@gmail.com>";
|
||||
lebastr = "Alexander Lebedev <lebastr@gmail.com>";
|
||||
leonardoce = "Leonardo Cecchi <leonardo.cecchi@gmail.com>";
|
||||
lethalman = "Luca Bruno <lucabru@src.gnome.org>";
|
||||
lhvwb = "Nathaniel Baxter <nathaniel.baxter@gmail.com>";
|
||||
@ -143,15 +153,19 @@
|
||||
linus = "Linus Arver <linusarver@gmail.com>";
|
||||
lnl7 = "Daiderd Jordan <daiderd@gmail.com>";
|
||||
lovek323 = "Jason O'Conal <jason@oconal.id.au>";
|
||||
lowfatcomputing = "Andreas Wagner <andreas.wagner@lowfatcomputing.org>";
|
||||
lsix = "Lancelot SIX <lsix@lancelotsix.com>";
|
||||
ludo = "Ludovic Courtès <ludo@gnu.org>";
|
||||
madjar = "Georges Dubus <georges.dubus@compiletoi.net>";
|
||||
magnetophon = "Bart Brouns <bart@magnetophon.nl>";
|
||||
mahe = "Matthias Herrmann <matthias.mh.herrmann@gmail.com>";
|
||||
makefu = "Felix Richter <makefu@syntax-fehler.de>";
|
||||
malyn = "Michael Alyn Miller <malyn@strangeGizmo.com>";
|
||||
manveru = "Michael Fellinger <m.fellinger@gmail.com>";
|
||||
marcweber = "Marc Weber <marco-oweber@gmx.de>";
|
||||
maurer = "Matthew Maurer <matthew.r.maurer+nix@gmail.com>";
|
||||
matejc = "Matej Cotman <cotman.matej@gmail.com>";
|
||||
mathnerd314 = "Mathnerd314 <mathnerd314.gph+hs@gmail.com>";
|
||||
matthiasbeyer = "Matthias Beyer <mail@beyermatthias.de>";
|
||||
mbakke = "Marius Bakke <ymse@tuta.io>";
|
||||
meditans = "Carlo Nucera <meditans@gmail.com>";
|
||||
@ -173,6 +187,7 @@
|
||||
nslqqq = "Nikita Mikhailov <nslqqq@gmail.com>";
|
||||
obadz = "obadz <dav-nixos@odav.org>";
|
||||
ocharles = "Oliver Charles <ollie@ocharles.org.uk>";
|
||||
odi = "Oliver Dunkl <oliver.dunkl@gmail.com>";
|
||||
offline = "Jaka Hudoklin <jakahudoklin@gmail.com>";
|
||||
olcai = "Erik Timan <dev@timan.info>";
|
||||
orbitz = "Malcolm Matalka <mmatalka@gmail.com>";
|
||||
@ -193,6 +208,7 @@
|
||||
pmahoney = "Patrick Mahoney <pat@polycrystal.org>";
|
||||
pmiddend = "Philipp Middendorf <pmidden@secure.mailbox.org>";
|
||||
prikhi = "Pavan Rikhi <pavan.rikhi@gmail.com>";
|
||||
psibi = "Sibi <sibi@psibi.in>";
|
||||
pSub = "Pascal Wittmann <mail@pascal-wittmann.de>";
|
||||
puffnfresh = "Brian McKenna <brian@brianmckenna.org>";
|
||||
qknight = "Joachim Schiele <js@lastlog.de>";
|
||||
@ -202,6 +218,7 @@
|
||||
refnil = "Martin Lavoie <broemartino@gmail.com>";
|
||||
relrod = "Ricky Elrod <ricky@elrod.me>";
|
||||
renzo = "Renzo Carbonara <renzocarbonara@gmail.com>";
|
||||
rick68 = "Wei-Ming Yang <rick68@gmail.com>";
|
||||
rickynils = "Rickard Nilsson <rickynils@gmail.com>";
|
||||
rob = "Rob Vermaas <rob.vermaas@gmail.com>";
|
||||
robberer = "Longrin Wischnewski <robberer@freakmail.de>";
|
||||
@ -229,6 +246,7 @@
|
||||
sprock = "Roger Mason <rmason@mun.ca>";
|
||||
spwhitt = "Spencer Whitt <sw@swhitt.me>";
|
||||
stephenmw = "Stephen Weinberg <stephen@q5comm.com>";
|
||||
szczyp = "Szczyp <qb@szczyp.com>";
|
||||
sztupi = "Attila Sztupak <attila.sztupak@gmail.com>";
|
||||
tailhook = "Paul Colomiets <paul@colomiets.name>";
|
||||
taktoa = "Remy Goldschmidt <taktoa@gmail.com>";
|
||||
|
lib/modules.nix
@ -55,7 +55,7 @@ rec {
|
||||
};
|
||||
};
|
||||
|
||||
closed = closeModules (modules ++ [ internalModule ]) (specialArgs // { inherit config options; lib = import ./.; });
|
||||
closed = closeModules (modules ++ [ internalModule ]) ({ inherit config options; lib = import ./.; } // specialArgs);
|
||||
|
||||
# Note: the list of modules is reversed to maintain backward
|
||||
# compatibility with the old module system. Not sure if this is
|
||||
@ -76,8 +76,8 @@ rec {
|
||||
else yieldConfig (prefix ++ [n]) v) set) ["_definedNames"];
|
||||
in
|
||||
if options._module.check.value && set ? _definedNames then
|
||||
fold (m: res:
|
||||
fold (name: res:
|
||||
foldl' (res: m:
|
||||
foldl' (res: name:
|
||||
if set ? ${name} then res else throw "The option `${showOption (prefix ++ [name])}' defined in `${m.file}' does not exist.")
|
||||
res m.names)
|
||||
res set._definedNames
|
||||
@ -182,18 +182,18 @@ rec {
|
||||
let
|
||||
loc = prefix ++ [name];
|
||||
# Get all submodules that declare ‘name’.
|
||||
decls = concatLists (map (m:
|
||||
decls = concatMap (m:
|
||||
if m.options ? ${name}
|
||||
then [ { inherit (m) file; options = m.options.${name}; } ]
|
||||
else []
|
||||
) options);
|
||||
) options;
|
||||
# Get all submodules that define ‘name’.
|
||||
defns = concatLists (map (m:
|
||||
defns = concatMap (m:
|
||||
if m.config ? ${name}
|
||||
then map (config: { inherit (m) file; inherit config; })
|
||||
(pushDownProperties m.config.${name})
|
||||
else []
|
||||
) configs);
|
||||
) configs;
|
||||
nrOptions = count (m: isOption m.options) decls;
|
||||
# Extract the definitions for this loc
|
||||
defns' = map (m: { inherit (m) file; value = m.config.${name}; })
|
||||
@ -225,7 +225,7 @@ rec {
|
||||
'opts' is a list of modules. Each module has an options attribute which
|
||||
correspond to the definition of 'loc' in 'opt.file'. */
|
||||
mergeOptionDecls = loc: opts:
|
||||
fold (opt: res:
|
||||
foldl' (res: opt:
|
||||
if opt.options ? default && res ? default ||
|
||||
opt.options ? example && res ? example ||
|
||||
opt.options ? description && res ? description ||
|
||||
@ -251,7 +251,7 @@ rec {
|
||||
else if opt.options ? options then map (coerceOption opt.file) options' ++ res.options
|
||||
else res.options;
|
||||
in opt.options // res //
|
||||
{ declarations = [opt.file] ++ res.declarations;
|
||||
{ declarations = res.declarations ++ [opt.file];
|
||||
options = submodules;
|
||||
}
|
||||
) { inherit loc; declarations = []; options = []; } opts;
|
||||
@ -261,58 +261,67 @@ rec {
|
||||
evalOptionValue = loc: opt: defs:
|
||||
let
|
||||
# Add in the default value for this option, if any.
|
||||
defs' = (optional (opt ? default)
|
||||
{ file = head opt.declarations; value = mkOptionDefault opt.default; }) ++ defs;
|
||||
defs' =
|
||||
(optional (opt ? default)
|
||||
{ file = head opt.declarations; value = mkOptionDefault opt.default; }) ++ defs;
|
||||
|
||||
# Handle properties, check types, and merge everything together
|
||||
inherit (mergeDefinitions loc opt.type defs') isDefined defsFinal mergedValue;
|
||||
files = map (def: def.file) defsFinal;
|
||||
merged =
|
||||
if isDefined then mergedValue
|
||||
else throw "The option `${showOption loc}' is used but not defined.";
|
||||
# Handle properties, check types, and merge everything together.
|
||||
res =
|
||||
if opt.readOnly or false && length defs' > 1 then
|
||||
throw "The option `${showOption loc}' is read-only, but it's set multiple times."
|
||||
else
|
||||
mergeDefinitions loc opt.type defs';
|
||||
|
||||
# Check whether the option is defined, and apply the ‘apply’
|
||||
# function to the merged value. This allows options to yield a
|
||||
# value computed from the definitions.
|
||||
value =
|
||||
if !res.isDefined then
|
||||
throw "The option `${showOption loc}' is used but not defined."
|
||||
else if opt ? apply then
|
||||
opt.apply res.mergedValue
|
||||
else
|
||||
res.mergedValue;
|
||||
|
||||
# Finally, apply the ‘apply’ function to the merged
|
||||
# value. This allows options to yield a value computed
|
||||
# from the definitions.
|
||||
value = (opt.apply or id) merged;
|
||||
in opt //
|
||||
{ value = addErrorContext "while evaluating the option `${showOption loc}':" value;
|
||||
definitions = map (def: def.value) defsFinal;
|
||||
inherit isDefined files;
|
||||
definitions = map (def: def.value) res.defsFinal;
|
||||
files = map (def: def.file) res.defsFinal;
|
||||
inherit (res) isDefined;
|
||||
};
|
||||
|
||||
# Merge definitions of a value of a given type
|
||||
mergeDefinitions = loc: type: defs: rec {
|
||||
defsFinal =
|
||||
let
|
||||
# Process mkMerge and mkIf properties
|
||||
processIfAndMerge = defs: concatMap (m:
|
||||
map (value: { inherit (m) file; inherit value; }) (dischargeProperties m.value)
|
||||
) defs;
|
||||
# Merge definitions of a value of a given type.
|
||||
mergeDefinitions = loc: type: defs: rec {
|
||||
defsFinal =
|
||||
let
|
||||
# Process mkMerge and mkIf properties.
|
||||
defs' = concatMap (m:
|
||||
map (value: { inherit (m) file; inherit value; }) (dischargeProperties m.value)
|
||||
) defs;
|
||||
|
||||
# Process mkOverride properties
|
||||
processOverride = defs: filterOverrides defs;
|
||||
# Process mkOverride properties.
|
||||
defs'' = filterOverrides defs';
|
||||
|
||||
# Sort mkOrder properties
|
||||
processOrder = defs:
|
||||
# Avoid sorting if we don't have to.
|
||||
if any (def: def.value._type or "" == "order") defs
|
||||
then sortProperties defs
|
||||
else defs;
|
||||
in
|
||||
processOrder (processOverride (processIfAndMerge defs));
|
||||
# Sort mkOrder properties.
|
||||
defs''' =
|
||||
# Avoid sorting if we don't have to.
|
||||
if any (def: def.value._type or "" == "order") defs''
|
||||
then sortProperties defs''
|
||||
else defs'';
|
||||
in defs''';
|
||||
|
||||
# Type-check the remaining definitions, and merge them
|
||||
mergedValue = fold (def: res:
|
||||
if type.check def.value then res
|
||||
else throw "The option value `${showOption loc}' in `${def.file}' is not a ${type.name}.")
|
||||
(type.merge loc defsFinal) defsFinal;
|
||||
# Type-check the remaining definitions, and merge them.
|
||||
mergedValue = foldl' (res: def:
|
||||
if type.check def.value then res
|
||||
else throw "The option value `${showOption loc}' in `${def.file}' is not a ${type.name}.")
|
||||
(type.merge loc defsFinal) defsFinal;
|
||||
|
||||
isDefined = defsFinal != [];
|
||||
optionalValue =
|
||||
if isDefined then { value = mergedValue; }
|
||||
else {};
|
||||
};
|
||||
isDefined = defsFinal != [];
|
||||
|
||||
optionalValue =
|
||||
if isDefined then { value = mergedValue; }
|
||||
else {};
|
||||
};
|
||||
|
||||
/* Given a config set, expand mkMerge properties, and push down the
|
||||
other properties into the children. The result is a list of
|
||||
@ -383,8 +392,7 @@ rec {
|
||||
let
|
||||
defaultPrio = 100;
|
||||
getPrio = def: if def.value._type or "" == "override" then def.value.priority else defaultPrio;
|
||||
min = x: y: if x < y then x else y;
|
||||
highestPrio = fold (def: prio: min (getPrio def) prio) 9999 defs;
|
||||
highestPrio = foldl' (prio: def: min (getPrio def) prio) 9999 defs;
|
||||
strip = def: if def.value._type or "" == "override" then def // { value = def.value.content; } else def;
|
||||
in concatMap (def: if getPrio def == highestPrio then [(strip def)] else []) defs;
|
||||
|
||||
|
@ -4,7 +4,6 @@ let lib = import ./default.nix; in
|
||||
|
||||
with import ./trivial.nix;
|
||||
with import ./lists.nix;
|
||||
with import ./misc.nix;
|
||||
with import ./attrsets.nix;
|
||||
with import ./strings.nix;
|
||||
|
||||
@ -20,6 +19,7 @@ rec {
|
||||
, apply ? null # Function that converts the option value to something else.
|
||||
, internal ? null # Whether the option is for NixOS developers only.
|
||||
, visible ? null # Whether the option shows up in the manual.
|
||||
, readOnly ? null # Whether the option can be set only once
|
||||
, options ? null # Obsolete, used by types.optionSet.
|
||||
} @ attrs:
|
||||
attrs // { _type = "option"; };
|
||||
@ -53,8 +53,8 @@ rec {
|
||||
if length list == 1 then head list
|
||||
else if all isFunction list then x: mergeDefaultOption loc (map (f: f x) list)
|
||||
else if all isList list then concatLists list
|
||||
else if all isAttrs list then fold lib.mergeAttrs {} list
|
||||
else if all isBool list then fold lib.or false list
|
||||
else if all isAttrs list then foldl' lib.mergeAttrs {} list
|
||||
else if all isBool list then foldl' lib.or false list
|
||||
else if all isString list then lib.concatStrings list
|
||||
else if all isInt list && all (x: x == head list) list then head list
|
||||
else throw "Cannot merge definitions of `${showOption loc}' given in ${showFiles (getFiles defs)}.";
|
||||
@ -68,7 +68,7 @@ rec {
|
||||
/* "Merge" option definitions by checking that they all have the same value. */
|
||||
mergeEqualOption = loc: defs:
|
||||
if defs == [] then abort "This case should never happen."
|
||||
else fold (def: val:
|
||||
else foldl' (val: def:
|
||||
if def.value != val then
|
||||
throw "The option `${showOption loc}' has conflicting definitions, in ${showFiles (getFiles defs)}."
|
||||
else
|
||||
@ -83,7 +83,7 @@ rec {
|
||||
optionAttrSetToDocList = optionAttrSetToDocList' [];
|
||||
|
||||
optionAttrSetToDocList' = prefix: options:
|
||||
fold (opt: rest:
|
||||
concatMap (opt:
|
||||
let
|
||||
docOption = rec {
|
||||
name = showOption opt.loc;
|
||||
@ -91,6 +91,7 @@ rec {
|
||||
declarations = filter (x: x != unknownModule) opt.declarations;
|
||||
internal = opt.internal or false;
|
||||
visible = opt.visible or true;
|
||||
readOnly = opt.readOnly or false;
|
||||
type = opt.type.name or null;
|
||||
}
|
||||
// (if opt ? example then { example = scrubOptionValue opt.example; } else {})
|
||||
@ -101,8 +102,7 @@ rec {
|
||||
let ss = opt.type.getSubOptions opt.loc;
|
||||
in if ss != {} then optionAttrSetToDocList' opt.loc ss else [];
|
||||
in
|
||||
# FIXME: expensive, O(n^2)
|
||||
[ docOption ] ++ subOptions ++ rest) [] (collect isOption options);
|
||||
[ docOption ] ++ subOptions) (collect isOption options);
|
||||
|
||||
|
||||
/* This function recursively removes all derivation attributes from
|
||||
|
@ -8,11 +8,15 @@ in
|
||||
|
||||
rec {
|
||||
|
||||
inherit (builtins) stringLength substring head tail isString;
|
||||
inherit (builtins) stringLength substring head tail isString replaceStrings;
|
||||
|
||||
|
||||
# Concatenate a list of strings.
|
||||
concatStrings = lib.fold (x: y: x + y) "";
|
||||
concatStrings =
|
||||
if builtins ? concatStringsSep then
|
||||
builtins.concatStringsSep ""
|
||||
else
|
||||
lib.foldl' (x: y: x + y) "";
|
||||
|
||||
|
||||
# Map a function over a list and concatenate the resulting strings.
|
||||
@ -25,14 +29,13 @@ rec {
|
||||
intersperse = separator: list:
|
||||
if list == [] || length list == 1
|
||||
then list
|
||||
else [(head list) separator]
|
||||
++ (intersperse separator (tail list));
|
||||
else tail (lib.concatMap (x: [separator x]) list);
|
||||
|
||||
|
||||
# Concatenate a list of strings with a separator between each element, e.g.
|
||||
# concatStringsSep " " ["foo" "bar" "xyzzy"] == "foo bar xyzzy"
|
||||
concatStringsSep = separator: list:
|
||||
concatStrings (intersperse separator list);
|
||||
concatStringsSep = builtins.concatStringsSep or (separator: list:
|
||||
concatStrings (intersperse separator list));
|
||||
|
||||
concatMapStringsSep = sep: f: list: concatStringsSep sep (map f list);
|
||||
concatImapStringsSep = sep: f: list: concatStringsSep sep (lib.imap f list);
|
||||
@ -61,13 +64,13 @@ rec {
|
||||
|
||||
# Determine whether a string has given prefix/suffix.
|
||||
hasPrefix = pref: str:
|
||||
eqStrings (substring 0 (stringLength pref) str) pref;
|
||||
substring 0 (stringLength pref) str == pref;
|
||||
hasSuffix = suff: str:
|
||||
let
|
||||
lenStr = stringLength str;
|
||||
lenSuff = stringLength suff;
|
||||
in lenStr >= lenSuff &&
|
||||
eqStrings (substring (lenStr - lenSuff) lenStr str) suff;
|
||||
substring (lenStr - lenSuff) lenStr str == suff;
|
||||
|
||||
|
||||
# Convert a string to a list of characters (i.e. singleton strings).
|
||||
@ -76,36 +79,31 @@ rec {
|
||||
# will likely be horribly inefficient; Nix is not a general purpose
|
||||
# programming language. Complex string manipulations should, if
|
||||
# appropriate, be done in a derivation.
|
||||
stringToCharacters = s: let l = stringLength s; in
|
||||
if l == 0
|
||||
then []
|
||||
else map (p: substring p 1 s) (lib.range 0 (l - 1));
|
||||
stringToCharacters = s:
|
||||
map (p: substring p 1 s) (lib.range 0 (stringLength s - 1));
|
||||
|
||||
|
||||
-  # Manipulate a string charcater by character and replace them by strings
-  # before concatenating the results.
+  # Manipulate a string character by character and replace them by
+  # strings before concatenating the results.
|
||||
stringAsChars = f: s:
|
||||
concatStrings (
|
||||
map f (stringToCharacters s)
|
||||
);
|
||||
|
||||
|
||||
# same as vim escape function.
|
||||
# Each character contained in list is prefixed by "\"
|
||||
escape = list : string :
|
||||
stringAsChars (c: if lib.elem c list then "\\${c}" else c) string;
|
||||
# Escape occurrence of the elements of ‘list’ in ‘string’ by
|
||||
# prefixing it with a backslash. For example, ‘escape ["(" ")"]
|
||||
# "(foo)"’ returns the string ‘\(foo\)’.
|
||||
escape = list: replaceChars list (map (c: "\\${c}") list);
|
||||
|
||||
|
||||
# still ugly slow. But more correct now
|
||||
# [] for zsh
|
||||
# Escape all characters that have special meaning in the Bourne shell.
|
||||
escapeShellArg = lib.escape (stringToCharacters "\\ ';$`()|<>\t*[]");
|
||||
|
||||
|
||||
# replace characters by their substitutes. This function is equivalent to
|
||||
# the `tr' command except that one character can be replace by multiple
|
||||
# ones. e.g.,
|
||||
# replaceChars ["<" ">"] ["<" ">"] "<foo>" returns "<foo>".
|
||||
replaceChars = del: new: s:
|
||||
# Obsolete - use replaceStrings instead.
|
||||
replaceChars = builtins.replaceStrings or (
|
||||
del: new: s:
|
||||
let
|
||||
substList = lib.zipLists del new;
|
||||
subst = c:
|
||||
@ -115,26 +113,23 @@ rec {
|
||||
else
|
||||
found.snd;
|
||||
in
|
||||
stringAsChars subst s;
|
||||
stringAsChars subst s);
|
||||
|
||||
|
||||
# Case conversion utilities
|
||||
# Case conversion utilities.
|
||||
lowerChars = stringToCharacters "abcdefghijklmnopqrstuvwxyz";
|
||||
upperChars = stringToCharacters "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
|
||||
toLower = replaceChars upperChars lowerChars;
|
||||
toUpper = replaceChars lowerChars upperChars;
|
||||
|
||||
# Appends string context from another string
|
||||
|
||||
# Appends string context from another string.
|
||||
addContextFrom = a: b: substring 0 0 a + b;
|
||||
|
||||
# Compares strings not requiring context equality
|
||||
# Obviously, a workaround but works on all Nix versions
|
||||
eqStrings = a: b: addContextFrom b a == addContextFrom a b;
|
||||
|
||||
|
||||
# Cut a string with a separator and produces a list of strings which were
|
||||
# separated by this separator. e.g.,
|
||||
# `splitString "." "foo.bar.baz"' returns ["foo" "bar" "baz"].
|
||||
# Cut a string with a separator and produces a list of strings which
|
||||
# were separated by this separator; e.g., `splitString "."
|
||||
# "foo.bar.baz"' returns ["foo" "bar" "baz"].
|
||||
splitString = _sep: _s:
|
||||
let
|
||||
sep = addContextFrom _s _sep;
|
||||
@ -177,7 +172,7 @@ rec {
|
||||
sufLen = stringLength suf;
|
||||
sLen = stringLength s;
|
||||
in
|
||||
if sufLen <= sLen && eqStrings suf (substring (sLen - sufLen) sufLen s) then
|
||||
if sufLen <= sLen && suf == substring (sLen - sufLen) sufLen s then
|
||||
substring 0 (sLen - sufLen) s
|
||||
else
|
||||
s;
|
||||
@ -196,21 +191,22 @@ rec {
|
||||
|
||||
|
||||
# Extract name with version from URL. Ask for separator which is
|
||||
# supposed to start extension
|
||||
nameFromURL = url: sep: let
|
||||
components = splitString "/" url;
|
||||
filename = lib.last components;
|
||||
name = builtins.head (splitString sep filename);
|
||||
in
|
||||
assert ! eqStrings name filename;
|
||||
name;
|
||||
# supposed to start extension.
|
||||
nameFromURL = url: sep:
|
||||
let
|
||||
components = splitString "/" url;
|
||||
filename = lib.last components;
|
||||
name = builtins.head (splitString sep filename);
|
||||
in assert name != filename; name;
|
||||
|
||||
|
||||
# Create an --{enable,disable}-<feat> string that can be passed to
|
||||
# standard GNU Autoconf scripts.
|
||||
enableFeature = enable: feat: "--${if enable then "enable" else "disable"}-${feat}";
|
||||
|
||||
# Create a fixed width string with additional prefix to match required width
|
||||
|
||||
# Create a fixed width string with additional prefix to match
|
||||
# required width.
|
||||
fixedWidthString = width: filler: str:
|
||||
let
|
||||
strw = lib.stringLength str;
|
||||
@ -219,6 +215,12 @@ rec {
|
||||
assert strw <= width;
|
||||
if strw == width then str else filler + fixedWidthString reqWidth filler str;
|
||||
|
||||
# Format a number adding leading zeroes up to fixed width
|
||||
|
||||
# Format a number adding leading zeroes up to fixed width.
|
||||
fixedWidthNumber = width: n: fixedWidthString width "0" (toString n);
|
||||
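# A small usage sketch (values are illustrative, worked out by hand):
#   fixedWidthNumber 4 7          evaluates to "0007"
#   fixedWidthString 5 "x" "ab"   evaluates to "xxxab"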
|
||||
|
||||
# Check whether a value is a store path.
|
||||
isStorePath = x: builtins.substring 0 1 (toString x) == "/" && dirOf (builtins.toPath x) == builtins.storeDir;
|
||||
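# Illustrative checks (the hash below is a made-up placeholder):
#   isStorePath "/nix/store/<hash>-hello-2.10"      is true
#   isStorePath "/nix/store/<hash>-hello-2.10/bin"  is false (not a top-level store path)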
|
||||
}
|
||||
|
@ -22,7 +22,7 @@ rec {
|
||||
inherit (builtins)
|
||||
pathExists readFile isBool isFunction
|
||||
isInt add sub lessThan
|
||||
seq deepSeq;
|
||||
seq deepSeq genericClosure;
|
||||
|
||||
# Return the Nixpkgs version number.
|
||||
nixpkgsVersion =
|
||||
|
@ -88,20 +88,22 @@ rec {
|
||||
attrs = mkOptionType {
|
||||
name = "attribute set";
|
||||
check = isAttrs;
|
||||
merge = loc: fold (def: mergeAttrs def.value) {};
|
||||
merge = loc: foldl' (res: def: mergeAttrs res def.value) {};
|
||||
};
|
||||
|
||||
# derivation is a reserved keyword.
|
||||
package = mkOptionType {
|
||||
name = "derivation";
|
||||
check = isDerivation;
|
||||
merge = mergeOneOption;
|
||||
check = x: isDerivation x || isStorePath x;
|
||||
merge = loc: defs:
|
||||
let res = mergeOneOption loc defs;
|
||||
in if isDerivation res then res else toDerivation res;
|
||||
};
|
||||
|
||||
path = mkOptionType {
|
||||
name = "path";
|
||||
# Hacky: there is no ‘isPath’ primop.
|
||||
check = x: builtins.unsafeDiscardStringContext (builtins.substring 0 1 (toString x)) == "/";
|
||||
check = x: builtins.substring 0 1 (toString x) == "/";
|
||||
merge = mergeOneOption;
|
||||
};
|
||||
|
||||
|
@ -11,7 +11,7 @@ uninstall packages from the command line. For instance, to install
|
||||
Mozilla Thunderbird:
|
||||
|
||||
<screen>
|
||||
$ nix-env -iA nixos.pkgs.thunderbird</screen>
|
||||
$ nix-env -iA nixos.thunderbird</screen>
|
||||
|
||||
If you invoke this as root, the package is installed in the Nix
|
||||
profile <filename>/nix/var/nix/profiles/default</filename> and visible
|
||||
|
@ -23,13 +23,13 @@ Nixpkgs will be built or downloaded as part of the system when you run
|
||||
<para>You can get a list of the available packages as follows:
|
||||
<screen>
|
||||
$ nix-env -qaP '*' --description
|
||||
nixos.pkgs.firefox firefox-23.0 Mozilla Firefox - the browser, reloaded
|
||||
nixos.firefox firefox-23.0 Mozilla Firefox - the browser, reloaded
|
||||
<replaceable>...</replaceable>
|
||||
</screen>
|
||||
|
||||
The first column in the output is the <emphasis>attribute
|
||||
name</emphasis>, such as
|
||||
<literal>nixos.pkgs.thunderbird</literal>. (The
|
||||
<literal>nixos.thunderbird</literal>. (The
|
||||
<literal>nixos</literal> prefix allows distinguishing between
|
||||
different channels that you might have.)</para>
|
||||
|
||||
|
@ -61,6 +61,12 @@ by default because it’s not free software. You can enable it as follows:
|
||||
<programlisting>
|
||||
services.xserver.videoDrivers = [ "nvidia" ];
|
||||
</programlisting>
|
||||
Or if you have an older card, you may have to use one of the legacy drivers:
|
||||
<programlisting>
|
||||
services.xserver.videoDrivers = [ "nvidiaLegacy340" ];
|
||||
services.xserver.videoDrivers = [ "nvidiaLegacy304" ];
|
||||
services.xserver.videoDrivers = [ "nvidiaLegacy173" ];
|
||||
</programlisting>
|
||||
You may need to reboot after enabling this driver to prevent a clash
|
||||
with other kernel modules.</para>
|
||||
|
||||
|
@ -61,6 +61,16 @@ let
|
||||
echo "${version}" > version
|
||||
'';
|
||||
|
||||
toc = builtins.toFile "toc.xml"
|
||||
''
|
||||
<toc role="chunk-toc">
|
||||
<d:tocentry xmlns:d="http://docbook.org/ns/docbook" linkend="book-nixos-manual"><?dbhtml filename="index.html"?>
|
||||
<d:tocentry linkend="ch-options"><?dbhtml filename="options.html"?></d:tocentry>
|
||||
<d:tocentry linkend="ch-release-notes"><?dbhtml filename="release-notes.html"?></d:tocentry>
|
||||
</d:tocentry>
|
||||
</toc>
|
||||
'';
|
||||
|
||||
in rec {
|
||||
|
||||
# The NixOS options in JSON format.
|
||||
@ -113,9 +123,10 @@ in rec {
|
||||
--param chunk.section.depth 0 \
|
||||
--param chunk.first.sections 1 \
|
||||
--param use.id.as.filename 1 \
|
||||
--stringparam generate.toc "book toc chapter toc appendix toc" \
|
||||
--stringparam generate.toc "book toc appendix toc" \
|
||||
--stringparam chunk.toc ${toc} \
|
||||
--nonet --xinclude --output $dst/ \
|
||||
${docbook5_xsl}/xml/xsl/docbook/xhtml/chunkfast.xsl ./manual.xml
|
||||
${docbook5_xsl}/xml/xsl/docbook/xhtml/chunktoc.xsl ./manual.xml
|
||||
|
||||
mkdir -p $dst/images/callouts
|
||||
cp ${docbook5_xsl}/xml/xsl/docbook/images/callouts/*.gif $dst/images/callouts/
|
||||
|
@ -106,6 +106,15 @@ options = {
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry>
|
||||
<term><varname>types.package</varname></term>
|
||||
<listitem>
|
||||
<para>A derivation (such as <literal>pkgs.hello</literal>) or a
|
||||
store path (such as
|
||||
<filename>/nix/store/1ifi1cfbfs5iajmvwgrbmrnrw3a147h9-hello-2.10</filename>).</para>
|
||||
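<para>For illustration only (the option name is made up), a module could
declare
<programlisting>
foo.package = mkOption {
  type = types.package;
  default = pkgs.hello;
};
</programlisting>
and a definition may then supply either a derivation or a store
path.</para>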
</listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry>
|
||||
<term><varname>types.listOf</varname> <replaceable>t</replaceable></term>
|
||||
<listitem>
|
||||
@ -138,4 +147,4 @@ You can also create new types using the function
|
||||
<varname>mkOptionType</varname>. See
|
||||
<filename>lib/types.nix</filename> in Nixpkgs for details.</para>
|
||||
|
||||
</section>
|
||||
</section>
|
||||
|
@ -6,8 +6,8 @@
|
||||
|
||||
<title>Booting from a USB Drive</title>
|
||||
|
||||
<para>For systems without CD drive, the NixOS livecd can be booted from
|
||||
a usb stick. For non-UEFI installations,
|
||||
<para>For systems without a CD drive, the NixOS live CD can be booted from
|
||||
a USB stick. For non-UEFI installations,
|
||||
<link xlink:href="http://unetbootin.sourceforge.net/">unetbootin</link>
|
||||
will work. For UEFI installations, you should mount the ISO, copy its contents
|
||||
verbatim to your drive, then either:
|
||||
|
@ -107,4 +107,30 @@ newer Nix version, which may involve an upgrade of Nix’s database
|
||||
schema. This cannot be undone easily, so in that case you will not be
|
||||
able to go back to your original channel.</para></warning>
|
||||
|
||||
|
||||
<section><title>Automatic Upgrades</title>
|
||||
|
||||
<para>You can keep a NixOS system up-to-date automatically by adding
|
||||
the following to <filename>configuration.nix</filename>:
|
||||
|
||||
<programlisting>
|
||||
system.autoUpgrade.enable = true;
|
||||
</programlisting>
|
||||
|
||||
This enables a periodically executed systemd service named
|
||||
<literal>nixos-upgrade.service</literal>. It runs
|
||||
<command>nixos-rebuild switch --upgrade</command> to upgrade NixOS to
|
||||
the latest version in the current channel. (To see when the service
|
||||
runs, see <command>systemctl list-timers</command>.) You can also
|
||||
specify a channel explicitly, e.g.
|
||||
|
||||
<programlisting>
|
||||
system.autoUpgrade.channel = https://nixos.org/channels/nixos-15.09;
|
||||
</programlisting>
|
||||
|
||||
</para>
|
||||
|
||||
</section>
|
||||
|
||||
|
||||
</chapter>
|
||||
|
@ -2,7 +2,7 @@
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="NixOSManual">
|
||||
xml:id="book-nixos-manual">
|
||||
|
||||
<info>
|
||||
<title>NixOS Manual</title>
|
||||
@ -33,11 +33,12 @@
|
||||
<xi:include href="administration/running.xml" />
|
||||
<!-- <xi:include href="userconfiguration.xml" /> -->
|
||||
<xi:include href="development/development.xml" />
|
||||
<xi:include href="release-notes/release-notes.xml" />
|
||||
|
||||
<appendix xml:id="ch-options">
|
||||
<title>Configuration Options</title>
|
||||
<xi:include href="options-db.xml" />
|
||||
</appendix>
|
||||
|
||||
<xi:include href="release-notes/release-notes.xml" />
|
||||
|
||||
</book>
|
||||
|
@ -25,61 +25,65 @@
|
||||
<option>
|
||||
<xsl:value-of select="attr[@name = 'name']/string/@value" />
|
||||
</option>
|
||||
</term>
|
||||
</term>
|
||||
|
||||
<listitem>
|
||||
<listitem>
|
||||
|
||||
<para>
|
||||
<xsl:value-of disable-output-escaping="yes"
|
||||
select="attr[@name = 'description']/string/@value" />
|
||||
</para>
|
||||
<para>
|
||||
<xsl:value-of disable-output-escaping="yes"
|
||||
select="attr[@name = 'description']/string/@value" />
|
||||
</para>
|
||||
|
||||
<xsl:if test="attr[@name = 'type']">
|
||||
<para>
|
||||
<emphasis>Type:</emphasis>
|
||||
<xsl:text> </xsl:text>
|
||||
<xsl:apply-templates select="attr[@name = 'type']" mode="top" />
|
||||
</para>
|
||||
</xsl:if>
|
||||
<xsl:if test="attr[@name = 'type']">
|
||||
<para>
|
||||
<emphasis>Type:</emphasis>
|
||||
<xsl:text> </xsl:text>
|
||||
<xsl:value-of select="attr[@name = 'type']/string/@value"/>
|
||||
<xsl:if test="attr[@name = 'readOnly']/bool/@value = 'true'">
|
||||
<xsl:text> </xsl:text>
|
||||
<emphasis>(read only)</emphasis>
|
||||
</xsl:if>
|
||||
</para>
|
||||
</xsl:if>
|
||||
|
||||
<xsl:if test="attr[@name = 'default']">
|
||||
<para>
|
||||
<emphasis>Default:</emphasis>
|
||||
<xsl:text> </xsl:text>
|
||||
<xsl:apply-templates select="attr[@name = 'default']" mode="top" />
|
||||
</para>
|
||||
</xsl:if>
|
||||
<xsl:if test="attr[@name = 'default']">
|
||||
<para>
|
||||
<emphasis>Default:</emphasis>
|
||||
<xsl:text> </xsl:text>
|
||||
<xsl:apply-templates select="attr[@name = 'default']" mode="top" />
|
||||
</para>
|
||||
</xsl:if>
|
||||
|
||||
<xsl:if test="attr[@name = 'example']">
|
||||
<para>
|
||||
<emphasis>Example:</emphasis>
|
||||
<xsl:text> </xsl:text>
|
||||
<xsl:choose>
|
||||
<xsl:when test="attr[@name = 'example']/attrs[attr[@name = '_type' and string[@value = 'literalExample']]]">
|
||||
<programlisting><xsl:value-of select="attr[@name = 'example']/attrs/attr[@name = 'text']/string/@value" /></programlisting>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
<xsl:apply-templates select="attr[@name = 'example']" mode="top" />
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</para>
|
||||
</xsl:if>
|
||||
<xsl:if test="attr[@name = 'example']">
|
||||
<para>
|
||||
<emphasis>Example:</emphasis>
|
||||
<xsl:text> </xsl:text>
|
||||
<xsl:choose>
|
||||
<xsl:when test="attr[@name = 'example']/attrs[attr[@name = '_type' and string[@value = 'literalExample']]]">
|
||||
<programlisting><xsl:value-of select="attr[@name = 'example']/attrs/attr[@name = 'text']/string/@value" /></programlisting>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
<xsl:apply-templates select="attr[@name = 'example']" mode="top" />
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</para>
|
||||
</xsl:if>
|
||||
|
||||
<xsl:if test="count(attr[@name = 'declarations']/list/*) != 0">
|
||||
<para>
|
||||
<emphasis>Declared by:</emphasis>
|
||||
</para>
|
||||
<xsl:apply-templates select="attr[@name = 'declarations']" />
|
||||
</xsl:if>
|
||||
<xsl:if test="count(attr[@name = 'declarations']/list/*) != 0">
|
||||
<para>
|
||||
<emphasis>Declared by:</emphasis>
|
||||
</para>
|
||||
<xsl:apply-templates select="attr[@name = 'declarations']" />
|
||||
</xsl:if>
|
||||
|
||||
<xsl:if test="count(attr[@name = 'definitions']/list/*) != 0">
|
||||
<para>
|
||||
<emphasis>Defined by:</emphasis>
|
||||
</para>
|
||||
<xsl:apply-templates select="attr[@name = 'definitions']" />
|
||||
</xsl:if>
|
||||
<xsl:if test="count(attr[@name = 'definitions']/list/*) != 0">
|
||||
<para>
|
||||
<emphasis>Defined by:</emphasis>
|
||||
</para>
|
||||
<xsl:apply-templates select="attr[@name = 'definitions']" />
|
||||
</xsl:if>
|
||||
|
||||
</listitem>
|
||||
</listitem>
|
||||
|
||||
</varlistentry>
|
||||
|
||||
|
@ -1,19 +1,18 @@
|
||||
<part xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="ch-release-notes">
|
||||
<appendix xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="ch-release-notes">
|
||||
|
||||
<title>Release Notes</title>
|
||||
|
||||
<partintro>
|
||||
<para>This section lists the release notes for each stable version of NixOS
|
||||
and the current unstable revision.</para>
|
||||
</partintro>
|
||||
|
||||
<xi:include href="rl-unstable.xml" />
|
||||
<xi:include href="rl-1509.xml" />
|
||||
<xi:include href="rl-1412.xml" />
|
||||
<xi:include href="rl-1404.xml" />
|
||||
<xi:include href="rl-1310.xml" />
|
||||
|
||||
</part>
|
||||
</appendix>
|
||||
|
@ -1,11 +1,11 @@
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-release-13.10">
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-release-13.10">
|
||||
|
||||
<title>Release 13.10 (“Aardvark”, 2013/10/31)</title>
|
||||
|
||||
<para>This is the first stable release branch of NixOS.</para>
|
||||
|
||||
</chapter>
|
||||
</section>
|
||||
|
@ -1,8 +1,8 @@
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-release-14.04">
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-release-14.04">
|
||||
|
||||
<title>Release 14.04 (“Baboon”, 2014/04/30)</title>
|
||||
|
||||
@ -157,4 +157,4 @@ networking.firewall.enable = false;
|
||||
|
||||
</para>
|
||||
|
||||
</chapter>
|
||||
</section>
|
||||
|
@ -1,8 +1,8 @@
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-release-14.12">
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-release-14.12">
|
||||
|
||||
<title>Release 14.12 (“Caterpillar”, 2014/12/30)</title>
|
||||
|
||||
@ -174,4 +174,4 @@ now.</para></listitem>
|
||||
|
||||
</para>
|
||||
|
||||
</chapter>
|
||||
</section>
|
||||
|
231
nixos/doc/manual/release-notes/rl-1509.xml
Normal file
@ -0,0 +1,231 @@
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-release-15.09">
|
||||
|
||||
<title>Release 15.09 (“Dingo”, 2015/09/??)</title>
|
||||
|
||||
<para>In addition to numerous new and upgraded packages, this release has the following highlights:
|
||||
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
The Haskell packages infrastructure has been re-designed from the ground up.
|
||||
NixOS now distributes the latest version of every single package registered on
|
||||
<link xlink:href="http://hackage.haskell.org/">Hackage</link>, i.e. well over
|
||||
8000 Haskell packages. Further information and usage instructions for the
|
||||
improved infrastructure are available at <link
|
||||
xlink:href="https://nixos.org/wiki/Haskell">https://nixos.org/wiki/Haskell</link>.
|
||||
Users migrating from an earlier release will also find helpful information
|
||||
below, in the list of backwards-incompatible changes.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
Users running an SSH server who worry about the quality of their
|
||||
<literal>/etc/ssh/moduli</literal> file with respect to the <link
|
||||
xlink:href="https://stribika.github.io/2015/01/04/secure-secure-shell.html">vulnerabilities
|
||||
discovered in the Diffie-Hellman key exchange</link> can now replace OpenSSH's
|
||||
default version with one they generated themselves using the new
|
||||
<literal>services.openssh.moduliFile</literal> option.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
|
||||
</para>
|
||||
|
||||
|
||||
<para>When upgrading from a previous release, please be aware of the
|
||||
following incompatible changes:
|
||||
|
||||
<itemizedlist>
|
||||
|
||||
<listitem><para><command>sshd</command> no longer supports DSA and ECDSA
|
||||
host keys by default. If you have existing systems with such host keys
|
||||
and want to continue to use them, please set
|
||||
|
||||
<programlisting>
|
||||
system.stateVersion = "14.12";
|
||||
</programlisting>
|
||||
|
||||
(The new option <option>system.stateVersion</option> ensures that
|
||||
certain configuration changes that could break existing systems (such
|
||||
as the <command>sshd</command> host key setting) will maintain
|
||||
compatibility with the specified NixOS release.)</para></listitem>
|
||||
|
||||
<listitem><para><command>cron</command> is no longer enabled by
|
||||
default, unless you have a non-empty
|
||||
<option>services.cron.systemCronJobs</option>. To force
|
||||
<command>cron</command> to be enabled, set
|
||||
<option>services.cron.enable = true</option>.</para></listitem>
|
||||
|
||||
<listitem><para>Nix now requires binary caches to be cryptographically
|
||||
signed. If you have unsigned binary caches that you want to continue
|
||||
to use, you should set <option>nix.requireSignedBinaryCaches =
|
||||
false</option>.</para></listitem>
|
||||
|
||||
<listitem><para>Steam now doesn't need root rights to work. Instead of using
|
||||
<literal>*-steam-chrootenv</literal>, you should now just run <literal>steam</literal>.
|
||||
The <literal>steamChrootEnv</literal> package was renamed to <literal>steam</literal>,
|
||||
and the old <literal>steam</literal> package was renamed to <literal>steamOriginal</literal>.
|
||||
</para></listitem>
|
||||
|
||||
<listitem><para>CMPlayer has been renamed to bomi upstream. Package <literal>cmplayer</literal>
|
||||
was accordingly renamed to <literal>bomi</literal>.
|
||||
</para></listitem>
|
||||
|
||||
<listitem><para>Atom Shell has been renamed to Electron upstream. Package <literal>atom-shell</literal>
|
||||
was accordingly renamed to <literal>electron</literal>.
|
||||
</para></listitem>
|
||||
|
||||
<listitem><para>Elm is not released on Hackage anymore. You should now use <literal>elmPackages.elm</literal>
|
||||
which contains the latest Elm platform.</para></listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
The CUPS printing service has been updated to version <literal>2.0.2</literal>.
|
||||
Furthermore its systemd service has been renamed to <literal>cups.service</literal>.
|
||||
</para>
|
||||
<para>
|
||||
Local printers are no longer shared or advertised by default. This behavior
|
||||
can be changed by enabling <literal>services.printing.defaultShared</literal>
|
||||
or <literal>services.printing.browsing</literal> respectively.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
The VirtualBox host and guest options have been moved and renamed more
consistently; they are now found in
|
||||
<literal>virtualisation.virtualbox.host.*</literal> instead of
|
||||
<literal>services.virtualboxHost.*</literal> and
|
||||
<literal>virtualisation.virtualbox.guest.*</literal> instead of
|
||||
<literal>services.virtualboxGuest.*</literal>.
|
||||
</para>
|
||||
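<para>For example (illustrative only), a configuration that previously set
<literal>services.virtualboxHost.enable = true;</literal> would now use:
<programlisting>
virtualisation.virtualbox.host.enable = true;
</programlisting>
</para>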
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
Haskell packages can no longer be found by name, i.e. the commands
|
||||
<literal>nix-env -qa cabal-install</literal> and <literal>nix-env -i
|
||||
ghc</literal> will fail, even though we <emphasis>do</emphasis> ship
|
||||
both <literal>cabal-install</literal> and <literal>ghc</literal>.
|
||||
The reason for this inconvenience is the sheer size of the Haskell
|
||||
package set: name-based lookups such as these would become much
|
||||
slower than they are today if we'd add the entire Hackage database
|
||||
into the top level attribute set. Instead, the list of Haskell
|
||||
packages can be displayed by
|
||||
</para>
|
||||
<programlisting>
|
||||
nix-env -f "<nixpkgs>" -qaP -A haskellPackages
|
||||
</programlisting>
|
||||
<para>
|
||||
and packages can be installed with:
|
||||
</para>
|
||||
<programlisting>
|
||||
nix-env -f "<nixpkgs>" -iA haskellPackages.cabal-install
|
||||
</programlisting>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
Previous versions of NixOS came with a feature called
|
||||
<literal>ghc-wrapper</literal>, a small wrapper script that allows
|
||||
GHC to transparently pick up on libraries installed in the user's
|
||||
profile. This feature has been deprecated;
|
||||
<literal>ghc-wrapper</literal> was removed from the distribution.
|
||||
The proper way to register Haskell libraries with the compiler now
|
||||
is the <literal>haskellPackages.ghcWithPackages</literal>
|
||||
function.
|
||||
<link xlink:href="https://nixos.org/wiki/Haskell">https://nixos.org/wiki/Haskell</link>
|
||||
provides much information about this subject.
|
||||
</para>
|
||||
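<para>As a minimal sketch (the package names below are arbitrary examples),
such a compiler can be put into the system profile with:
<programlisting>
environment.systemPackages = [
  (pkgs.haskellPackages.ghcWithPackages (ps: [ ps.mtl ps.lens ]))
];
</programlisting>
</para>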
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
All Haskell builds that have been generated with version 1.x of
|
||||
the <literal>cabal2nix</literal> utility are now invalid and need
|
||||
to be re-generated with a current version of
|
||||
<literal>cabal2nix</literal> to function. The most recent version
|
||||
of this tool can be installed by running
|
||||
<literal>nix-env -i cabal2nix</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>haskellPackages</literal> set in Nixpkgs used to have a
|
||||
function attribute called <literal>extension</literal> that users
|
||||
could override in their <literal>~/.nixpkgs/config.nix</literal>
|
||||
files to configure additional attributes, etc. That function still
|
||||
exists, but it's now called <literal>overrides</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
The OpenBLAS library has been updated to version
|
||||
<literal>0.2.14</literal>. Support for the
|
||||
<literal>x86_64-darwin</literal> platform was added. Dynamic
|
||||
architecture detection was enabled; OpenBLAS now selects
|
||||
microarchitecture-optimized routines at runtime, so optimal
|
||||
performance is achieved without the need to rebuild OpenBLAS
|
||||
locally. OpenBLAS has replaced ATLAS in most packages which use an
|
||||
optimized BLAS or LAPACK implementation.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>phpfpm</literal> service now uses the default PHP version
|
||||
(<literal>pkgs.php</literal>) instead of PHP 5.4 (<literal>pkgs.php54</literal>).
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>locate</literal> service no longer indexes the Nix store
|
||||
by default, preventing packages with potentially numerous versions from
|
||||
cluttering the output. Indexing the store can be activated by setting
|
||||
<literal>services.locate.includeStore = true</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
The Nix expression search path (<envar>NIX_PATH</envar>) no longer
|
||||
contains <filename>/etc/nixos/nixpkgs</filename> by default. You
|
||||
can override <envar>NIX_PATH</envar> by setting
|
||||
<option>nix.nixPath</option>.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
|
||||
<para>The following new services were added since the last release:
|
||||
|
||||
<itemizedlist>
|
||||
<listitem><para><literal>brltty</literal></para></listitem>
|
||||
<listitem><para><literal>marathon</literal></para></listitem>
|
||||
<listitem><para><literal>tvheadend</literal></para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
|
||||
<para>Other notable improvements:
|
||||
|
||||
<itemizedlist>
|
||||
<listitem><para>The nixos and nixpkgs channels were unified,
|
||||
so one <emphasis>can</emphasis> use <literal>nix-env -iA nixos.bash</literal>
|
||||
instead of <literal>nix-env -iA nixos.pkgs.bash</literal>.
|
||||
See <link xlink:href="https://github.com/NixOS/nixpkgs/commit/2cd7c1f198">the commit</link> for details.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
</section>
|
@ -1,184 +1,27 @@
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-release-unstable">
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="sec-release-unstable">
|
||||
|
||||
<title>Unstable revision</title>
|
||||
|
||||
<para>In addition to numerous new and upgraded packages, this release has the following highlights:
|
||||
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
The Haskell packages infrastructure has been re-designed from the ground up.
|
||||
NixOS now distributes the latest version of every single package registered on
|
||||
<link xlink:href="http://hackage.haskell.org/">Hackage</link>, i.e. well over
|
||||
8000 Haskell packages. Further information and usage instructions for the
|
||||
improved infrastructure are available at <link
|
||||
xlink:href="https://nixos.org/wiki/Haskell">https://nixos.org/wiki/Haskell</link>.
|
||||
Users migrating from an earlier release will also find helpful information
|
||||
below, in the list of backwards-incompatible changes.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
Users running an SSH server who worry about the quality of their
|
||||
<literal>/etc/ssh/moduli</literal> file with respect to the <link
|
||||
xlink:href="https://stribika.github.io/2015/01/04/secure-secure-shell.html">vulnerabilities
|
||||
discovered in the Diffie-Hellman key exchange</link> can now replace OpenSSH's
|
||||
default version with one they generated themselves using the new
|
||||
<literal>services.openssh.moduliFile</literal> option.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
|
||||
</para>
|
||||
|
||||
<para>The following new services were added since the last release:
|
||||
|
||||
<itemizedlist>
|
||||
<listitem><para><literal>brltty</literal></para></listitem>
|
||||
<listitem><para><literal>marathon</literal></para></listitem>
|
||||
<listitem><para><literal>Tvheadend</literal></para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
<title>Unstable</title>
|
||||
|
||||
<para>When upgrading from a previous release, please be aware of the
|
||||
following incompatible changes:
|
||||
following incompatible changes:
|
||||
|
||||
<itemizedlist>
|
||||
|
||||
<listitem><para>Steam now doesn't need root rights to work. Instead of using
|
||||
<literal>*-steam-chrootenv</literal>, you should now just run <literal>steam</literal>.
|
||||
The <literal>steamChrootEnv</literal> package was renamed to <literal>steam</literal>,
|
||||
and the old <literal>steam</literal> package was renamed to <literal>steamOriginal</literal>.
|
||||
</para></listitem>
|
||||
|
||||
<listitem><para>CMPlayer has been renamed to bomi upstream. Package <literal>cmplayer</literal>
|
||||
was accordingly renamed to <literal>bomi</literal>
|
||||
</para></listitem>
|
||||
|
||||
<listitem><para>Atom Shell has been renamed to Electron upstream. Package <literal>atom-shell</literal>
|
||||
was accordingly renamed to <literal>electron</literal>
|
||||
</para></listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
The default <literal>NIX_PATH</literal> for NixOS now includes
|
||||
<literal>/nix/var/nix/profiles/per-user/root/channels</literal>, so it's
|
||||
easy to add custom channels.
|
||||
</para>
|
||||
<para>
|
||||
Moreover, whenever a <command>nixos-rebuild <action>
|
||||
--upgrade</command> is issued, every channel that includes a file
|
||||
called <filename>.update-on-nixos-rebuild</filename> will be upgraded
|
||||
alongside of the <literal>nixos</literal> channel.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
The CUPS printing service has been updated to version <literal>2.0.2</literal>.
|
||||
Furthermore its systemd service has been renamed to <literal>cups.service</literal>.
|
||||
</para>
|
||||
<para>
|
||||
Local printers are no longer shared or advertised by default. This behavior
|
||||
can be changed by enabling <literal>services.printing.defaultShared</literal>
|
||||
or <literal>services.printing.browsing</literal> respectively.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
HPLIP (printer, scanner, and fax drivers for HP devices) has
|
||||
been updated to version <literal>3.15.4</literal>. This release
|
||||
adds support for the <literal>arm6l-linux</literal> and
|
||||
<literal>arm7l-linux</literal> platforms.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
Haskell packages can no longer be found by name, i.e. the commands
|
||||
<literal>nix-env -qa cabal-install</literal> and <literal>nix-env -i
|
||||
ghc</literal> will fail, even though we <emphasis>do</emphasis> ship
|
||||
both <literal>cabal-install</literal> and <literal>ghc</literal>.
|
||||
The reason for this inconvenience is the sheer size of the Haskell
|
||||
package set: name-based lookups such as these would become much
|
||||
slower than they are today if we'd add the entire Hackage database
|
||||
into the top level attribute set. Instead, the list of Haskell
|
||||
packages can be displayed by
|
||||
</para>
|
||||
<programlisting>
|
||||
nix-env -f "<nixpkgs>" -qaP -A haskellPackages
|
||||
</programlisting>
|
||||
<para>
|
||||
and packages can be installed with:
|
||||
</para>
|
||||
<programlisting>
|
||||
nix-env -f "<nixpkgs>" -iA haskellPackages.cabal-install
|
||||
</programlisting>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
Previous versions of NixOS came with a feature called
|
||||
<literal>ghc-wrapper</literal>, a small wrapper script that allows
|
||||
GHC to transparently pick up on libraries installed in the user's
|
||||
profile. This feature has been deprecated;
|
||||
<literal>ghc-wrapper</literal> was removed from the distribution.
|
||||
The proper way to register Haskell libraries with the compiler now
|
||||
is the <literal>haskellPackages.ghcWithPackages</literal>
|
||||
function.
|
||||
<link xlink:href="https://nixos.org/wiki/Haskell">https://nixos.org/wiki/Haskell</link>
|
||||
provides much information about this subject.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
All Haskell builds that have been generated with version 1.x of
|
||||
the <literal>cabal2nix</literal> utility are now invalid and need
|
||||
to be re-generated with a current version of
|
||||
<literal>cabal2nix</literal> to function. The most recent version
|
||||
of this tool can be installed by running
|
||||
<literal>nix-env -i cabal2nix</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>haskellPackages</literal> set in Nixpkgs used to have a
|
||||
function attribute called <literal>extension</literal> that users
|
||||
could override in their <literal>~/.nixpkgs/config.nix</literal>
|
||||
files to configure additional attributes, etc. That function still
|
||||
exists, but it's now called <literal>overrides</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
The OpenBLAS library has been updated to version
|
||||
<literal>0.2.14</literal>. Support for the
|
||||
<literal>x86_64-darwin</literal> platform was added. Dynamic
|
||||
architecture detection was enabled; OpenBLAS now selects
|
||||
microarchitecture-optimized routines at runtime, so optimal
|
||||
performance is achieved without the need to rebuild OpenBLAS
|
||||
locally. OpenBLAS has replaced ATLAS in most packages which use an
|
||||
optimized BLAS or LAPACK implementation.
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>phpfpm</literal> is now using the default PHP version
|
||||
(<literal>pkgs.php</literal>) instead of PHP 5.4 (<literal>pkgs.php54</literal>).
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
</itemizedlist>
|
||||
<itemizedlist>
|
||||
<listitem><para>
|
||||
<command>wmiiSnap</command> has been replaced with
|
||||
<command>wmii_hg</command>, but
|
||||
<command>services.xserver.windowManager.wmii.enable</command>
|
||||
has been updated respectively so this only affects you if you
|
||||
have explicitly installed <command>wmiiSnap</command>.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<command>wmiimenu</command> is removed, as it has been removed by
|
||||
the developers upstream. Use <command>wimenu</command> from the
|
||||
<command>wmii-hg</command> package.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
</chapter>
|
||||
</section>
|
||||
|
@ -1,6 +1,6 @@
|
||||
{ system, minimal ? false }:
|
||||
|
||||
let pkgs = import ./nixpkgs.nix { config = {}; inherit system; }; in
|
||||
let pkgs = import ../.. { config = {}; inherit system; }; in
|
||||
|
||||
with pkgs.lib;
|
||||
with import ../lib/qemu-flags.nix;
|
||||
@ -41,22 +41,22 @@ rec {
|
||||
|
||||
machines = attrNames nodes;
|
||||
|
||||
machinesNumbered = zipTwoLists machines (range 1 254);
|
||||
machinesNumbered = zipLists machines (range 1 254);
|
||||
|
||||
nodes_ = flip map machinesNumbered (m: nameValuePair m.first
|
||||
nodes_ = flip map machinesNumbered (m: nameValuePair m.fst
|
||||
[ ( { config, pkgs, nodes, ... }:
|
||||
let
|
||||
interfacesNumbered = zipTwoLists config.virtualisation.vlans (range 1 255);
|
||||
interfaces = flip map interfacesNumbered ({ first, second }:
|
||||
nameValuePair "eth${toString second}" { ip4 =
|
||||
[ { address = "192.168.${toString first}.${toString m.second}";
|
||||
interfacesNumbered = zipLists config.virtualisation.vlans (range 1 255);
|
||||
interfaces = flip map interfacesNumbered ({ fst, snd }:
|
||||
nameValuePair "eth${toString snd}" { ip4 =
|
||||
[ { address = "192.168.${toString fst}.${toString m.snd}";
|
||||
prefixLength = 24;
|
||||
} ];
|
||||
});
|
||||
in
|
||||
{ key = "ip-address";
|
||||
config =
|
||||
{ networking.hostName = m.first;
|
||||
{ networking.hostName = m.fst;
|
||||
|
||||
networking.interfaces = listToAttrs interfaces;
|
||||
|
||||
@ -76,11 +76,11 @@ rec {
|
||||
|
||||
virtualisation.qemu.options =
|
||||
flip map interfacesNumbered
|
||||
({ first, second }: qemuNICFlags second first m.second);
|
||||
({ fst, snd }: qemuNICFlags snd fst m.snd);
|
||||
};
|
||||
}
|
||||
)
|
||||
(getAttr m.first nodes)
|
||||
(getAttr m.fst nodes)
|
||||
] );
|
||||
|
||||
in listToAttrs nodes_;
|
||||
|
@ -1,6 +0,0 @@
|
||||
{ system ? builtins.currentSystem }:
|
||||
|
||||
{ pkgs =
|
||||
(import nixpkgs/default.nix { inherit system; })
|
||||
// { recurseForDerivations = true; };
|
||||
}
|
@ -17,6 +17,8 @@
|
||||
baseModules ? import ../modules/module-list.nix
|
||||
, # !!! See comment about args in lib/modules.nix
|
||||
extraArgs ? {}
|
||||
, # !!! See comment about args in lib/modules.nix
|
||||
specialArgs ? {}
|
||||
, modules
|
||||
, # !!! See comment about check in lib/modules.nix
|
||||
check ? true
|
||||
@ -47,16 +49,11 @@ in rec {
|
||||
inherit prefix check;
|
||||
modules = modules ++ extraModules ++ baseModules ++ [ pkgsModule ];
|
||||
args = extraArgs;
|
||||
specialArgs = { modulesPath = ../modules; };
|
||||
specialArgs = { modulesPath = ../modules; } // specialArgs;
|
||||
}) config options;
|
||||
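# For instance (a sketch; argument names abbreviated), a caller can now
# inject a pre-computed `config` for all modules:
#
#   import ./eval-config.nix {
#     inherit system modules;
#     specialArgs = { config = someFixedConfig; };
#   }
#
# which is how the option-usage script later in this change uses it.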
|
||||
# These are the extra arguments passed to every module. In
|
||||
# particular, Nixpkgs is passed through the "pkgs" argument.
|
||||
# FIXME: we enable config.allowUnfree to make packages like
|
||||
# nvidia-x11 available. This isn't a problem because if the user has
|
||||
# ‘nixpkgs.config.allowUnfree = false’, then evaluation will fail on
|
||||
# the 64-bit package anyway. However, it would be cleaner to respect
|
||||
# nixpkgs.config here.
|
||||
extraArgs = extraArgs_ // {
|
||||
inherit modules baseModules;
|
||||
};
|
||||
|
@ -8,9 +8,7 @@ pkgs.releaseTools.makeSourceTarball {
|
||||
officialRelease = false; # FIXME: fix this in makeSourceTarball
|
||||
inherit version versionSuffix;
|
||||
|
||||
buildInputs = [ pkgs.nixUnstable ];
|
||||
|
||||
expr = builtins.readFile ./channel-expr.nix;
|
||||
buildInputs = [ pkgs.nix ];
|
||||
|
||||
distPhase = ''
|
||||
rm -rf .git
|
||||
@ -18,11 +16,9 @@ pkgs.releaseTools.makeSourceTarball {
|
||||
echo -n ${nixpkgs.rev or nixpkgs.shortRev} > .git-revision
|
||||
releaseName=nixos-$VERSION$VERSION_SUFFIX
|
||||
mkdir -p $out/tarballs
|
||||
mkdir ../$releaseName
|
||||
cp -prd . ../$releaseName/nixpkgs
|
||||
cp -prd . ../$releaseName
|
||||
chmod -R u+w ../$releaseName
|
||||
ln -s nixpkgs/nixos ../$releaseName/nixos
|
||||
echo "$expr" > ../$releaseName/default.nix
|
||||
ln -s . ../$releaseName/nixpkgs # hack to make ‘<nixpkgs>’ work
|
||||
NIX_STATE_DIR=$TMPDIR nix-env -f ../$releaseName/default.nix -qaP --meta --xml \* > /dev/null
|
||||
cd ..
|
||||
chmod -R u+w $releaseName
|
||||
|
88
nixos/lib/make-ext4-fs.nix
Normal file
@ -0,0 +1,88 @@
|
||||
# Builds an ext4 image containing a populated /nix/store with the closure
|
||||
# of store paths passed in the storePaths parameter. The generated image
|
||||
# is sized to only fit its contents, with the expectation that a script
|
||||
# resizes the filesystem at boot time.
|
||||
{ pkgs
|
||||
, storePaths
|
||||
, volumeLabel
|
||||
}:
|
||||
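# A hypothetical call site (not part of this file), e.g. from another
# expression in this repository:
#
#   import ./make-ext4-fs.nix {
#     inherit pkgs;
#     storePaths = [ config.system.build.toplevel ];
#     volumeLabel = "NIXOS_SD";
#   }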
|
||||
pkgs.stdenv.mkDerivation {
|
||||
name = "ext4-fs.img";
|
||||
|
||||
buildInputs = with pkgs; [e2fsprogs libfaketime perl];
|
||||
|
||||
# For obtaining the closure of `storePaths'.
|
||||
exportReferencesGraph =
|
||||
map (x: [("closure-" + baseNameOf x) x]) storePaths;
|
||||
|
||||
buildCommand =
|
||||
''
|
||||
# Add the closures of the top-level store objects.
|
||||
storePaths=$(perl ${pkgs.pathsFromGraph} closure-*)
|
||||
|
||||
# Also include a manifest of the closures in a format suitable
|
||||
# for nix-store --load-db.
|
||||
printRegistration=1 perl ${pkgs.pathsFromGraph} closure-* > nix-path-registration
|
||||
|
||||
# Make a crude approximation of the size of the target image.
|
||||
# If the script starts failing, increase the fudge factors here.
|
||||
numInodes=$(find $storePaths | wc -l)
|
||||
numDataBlocks=$(du -c -B 4096 --apparent-size $storePaths | awk '$2 == "total" { print int($1 * 1.03) }')
|
||||
bytes=$((2 * 4096 * $numInodes + 4096 * $numDataBlocks))
|
||||
echo "Creating an EXT4 image of $bytes bytes (numInodes=$numInodes, numDataBlocks=$numDataBlocks)"
|
||||
|
||||
truncate -s $bytes $out
|
||||
faketime "1970-01-01 00:00:00" mkfs.ext4 -L ${volumeLabel} -U 44444444-4444-4444-8888-888888888888 $out
|
||||
|
||||
# Populate the image contents by piping a bunch of commands to the `debugfs` tool from e2fsprogs.
|
||||
# For example, to copy /nix/store/abcd...efg-coreutils-8.23/bin/sleep:
|
||||
# cd /nix/store/abcd...efg-coreutils-8.23/bin
|
||||
# write /nix/store/abcd...efg-coreutils-8.23/bin/sleep sleep
|
||||
# sif sleep mode 040555
|
||||
# sif sleep gid 30000
|
||||
# In particular, debugfs doesn't handle absolute target paths; you have to 'cd' in the virtual
|
||||
# filesystem first. Likewise the intermediate directories must already exist (using `find`
|
||||
# handles that for us). And when setting the file's permissions, the inode type flags (__S_IFDIR,
|
||||
# __S_IFREG) need to be set as well.
|
||||
(
|
||||
echo write nix-path-registration nix-path-registration
|
||||
echo mkdir nix
|
||||
echo cd /nix
|
||||
echo mkdir store
|
||||
|
||||
# XXX: This explodes in exciting ways if anything in /nix/store has a space in it.
|
||||
find $storePaths -printf '%y %f %h %m\n'| while read -r type file dir perms; do
|
||||
# echo "TYPE=$type DIR=$dir FILE=$file PERMS=$perms" >&2
|
||||
|
||||
echo "cd $dir"
|
||||
case $type in
|
||||
d)
|
||||
echo "mkdir $file"
|
||||
echo sif $file mode $((040000 | 0$perms)) # magic constant is __S_IFDIR
|
||||
;;
|
||||
f)
|
||||
echo "write $dir/$file $file"
|
||||
echo sif $file mode $((0100000 | 0$perms)) # magic constant is __S_IFREG
|
||||
;;
|
||||
l)
|
||||
echo "symlink $file $(readlink "$dir/$file")"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown entry: $type $dir $file $perms" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo sif $file gid 30000 # chgrp to nixbld
|
||||
done
|
||||
) | faketime "1970-01-01 00:00:00" debugfs -w $out -f /dev/stdin > errorlog 2>&1
|
||||
|
||||
# The debugfs tool doesn't terminate on error nor exit with a non-zero status. Check manually.
|
||||
if egrep -q 'Could not allocate|File not found' errorlog; then
|
||||
cat errorlog
|
||||
echo "--- Failed to create EXT4 image of $bytes bytes (numInodes=$numInodes, numDataBlocks=$numDataBlocks) ---"
|
||||
return 1
|
||||
fi
|
||||
'';
|
||||
}
|
@ -1,8 +0,0 @@
|
||||
/* Terrible backward compatibility hack to get the path to Nixpkgs
|
||||
from here. Usually, that's the relative path ‘../..’. However,
|
||||
when using the NixOS channel, <nixos> resolves to a symlink to
|
||||
nixpkgs/nixos, so ‘../..’ doesn't resolve to the top-level Nixpkgs
|
||||
directory but one above it. So check for that situation. */
|
||||
if builtins.pathExists ../../.version then import ../..
|
||||
else if builtins.pathExists ../../nixpkgs then import ../../nixpkgs
|
||||
else abort "Can't find Nixpkgs, please set ‘NIX_PATH=nixpkgs=/path/to/nixpkgs’."
|
@ -15,6 +15,8 @@ rec {
|
||||
|
||||
unpackPhase = "true";
|
||||
|
||||
preferLocalBuild = true;
|
||||
|
||||
installPhase =
|
||||
''
|
||||
mkdir -p $out/bin
|
||||
|
@ -1,59 +1,125 @@
|
||||
{ configuration ? import ../lib/from-env.nix "NIXOS_CONFIG" <nixos-config>
|
||||
|
||||
# []: display all options
|
||||
# [<option names>]: display the selected options
|
||||
, displayOptions ? [
|
||||
"hardware.pcmcia.enable"
|
||||
"environment.systemPackages"
|
||||
"boot.kernelModules"
|
||||
"services.udev.packages"
|
||||
"jobs"
|
||||
"environment.etc"
|
||||
"system.activationScripts"
|
||||
]
|
||||
# provide an option name, as a string literal.
|
||||
, testOption ? null
|
||||
|
||||
# provide a list of option names, as string literals.
|
||||
, testOptions ? [ ]
|
||||
}:
|
||||
|
||||
# This file is used to generate a dot graph which contains all options and
|
||||
# their dependencies, to track problems and their sources.
|
||||
# This file is meant to be used as follows:
|
||||
#
|
||||
# $ nix-instantiate ./option-usage.nix --argstr testOption service.xserver.enable -A txtContent --eval
|
||||
#
|
||||
# or
|
||||
#
|
||||
# $ nix-build ./option-usage.nix --argstr testOption service.xserver.enable -A txt -o service.xserver.enable._txt
|
||||
#
|
||||
# Other targets exist, such as `dotContent`, `dot`, and `pdf`. If you are
|
||||
# looking for the option usage of multiple options, you can provide a list
|
||||
# as argument.
|
||||
#
|
||||
# $ nix-build ./option-usage.nix --arg testOptions \
|
||||
# '["boot.loader.gummiboot.enable" "boot.loader.gummiboot.timeout"]' \
|
||||
# -A txt -o gummiboot.list
|
||||
#
|
||||
# Note, this script is slow as it has to evaluate all options of the system
|
||||
# once per queried option.
|
||||
#
|
||||
# This nix expression works by doing a first evaluation, which evaluates the
|
||||
# result of every option.
|
||||
#
|
||||
# Then, for each queried option, we evaluate the NixOS modules a second
|
||||
# time, except that we replace the `config` argument of all the modules with
|
||||
# the result of the original evaluation, except for the tested option, whose
|
||||
# value is replaced by a `throw` statement which is caught by the `tryEval`
|
||||
# evaluation of each option value.
|
||||
#
|
||||
# We then compare the result of the evaluation of the original module with
|
||||
# the result of the second evaluation, and consider that the new failures are
|
||||
# caused by our mutation of the `config` argument.
|
||||
#
|
||||
# Doing so returns all option results which are directly using the
|
||||
# tested option result.
|
||||
|
||||
with import ../../lib;
|
||||
|
||||
let
|
||||
|
||||
evalFun = {
|
||||
extraArgs ? {}
|
||||
specialArgs ? {}
|
||||
}: import ../lib/eval-config.nix {
|
||||
modules = [ configuration ];
|
||||
inherit extraArgs;
|
||||
inherit specialArgs;
|
||||
};
|
||||
|
||||
eval = evalFun {};
|
||||
inherit (eval) pkgs;
|
||||
|
||||
reportNewFailures = old: new: with pkgs.lib;
|
||||
excludedTestOptions = [
|
||||
# We cannot evaluate _module.args, as it is used during the computation
|
||||
# of the modules list.
|
||||
"_module.args"
|
||||
|
||||
# For reasons we have yet to investigate, some options cannot
# be replaced by a throw without causing a non-catchable failure.
|
||||
"networking.bonds"
|
||||
"networking.bridges"
|
||||
"networking.interfaces"
|
||||
"networking.macvlans"
|
||||
"networking.sits"
|
||||
"networking.vlans"
|
||||
"services.openssh.startWhenNeeded"
|
||||
];
|
||||
|
||||
# For reasons we have yet to investigate, some options are
# time-consuming to compute, so we filter them out for the moment.
|
||||
excludedOptions = [
|
||||
"boot.systemd.services"
|
||||
"systemd.services"
|
||||
"environment.gnome3.packageSet"
|
||||
"kde.extraPackages"
|
||||
];
|
||||
excludeOptions = list:
|
||||
filter (opt: !(elem (showOption opt.loc) excludedOptions)) list;
|
||||
|
||||
|
||||
reportNewFailures = old: new:
|
||||
let
|
||||
filterChanges =
|
||||
filter ({fst, snd}:
|
||||
!(fst.config.success -> snd.config.success)
|
||||
!(fst.success -> snd.success)
|
||||
);
|
||||
|
||||
keepNames =
|
||||
map ({fst, snd}:
|
||||
assert fst.name == snd.name; snd.name
|
||||
/* assert fst.name == snd.name; */ snd.name
|
||||
);
|
||||
|
||||
# Use tryEval (strict ...) to know if there is any failure while
|
||||
# evaluating the option value.
|
||||
#
|
||||
# Note: the `strict` function is not strict enough, but using the toXML
# builtin multiplies by 4 the memory usage and the time needed to compute
# each option.
|
||||
tryCollectOptions = moduleResult:
|
||||
flip map (excludeOptions (collect isOption moduleResult)) (opt:
|
||||
{ name = showOption opt.loc; } // builtins.tryEval (strict opt.value));
|
||||
in
|
||||
keepNames (
|
||||
filterChanges (
|
||||
zipLists (collect isOption old) (collect isOption new)
|
||||
zipLists (tryCollectOptions old) (tryCollectOptions new)
|
||||
)
|
||||
);
|
||||
|
||||
|
||||
# Create a list of modules where each module contains only one failing
# option.
|
||||
introspectionModules = with pkgs.lib;
|
||||
introspectionModules =
|
||||
let
|
||||
setIntrospection = opt: rec {
|
||||
name = opt.name;
|
||||
path = splitString "." opt.name;
|
||||
name = showOption opt.loc;
|
||||
path = opt.loc;
|
||||
config = setAttrByPath path
|
||||
(throw "Usage introspection of '${name}' by forced failure.");
|
||||
};
|
||||
@ -61,39 +127,67 @@ let
|
||||
map setIntrospection (collect isOption eval.options);
|
||||
|
||||
overrideConfig = thrower:
|
||||
pkgs.lib.recursiveUpdateUntil (path: old: new:
|
||||
recursiveUpdateUntil (path: old: new:
|
||||
path == thrower.path
|
||||
) eval.config thrower.config;
|
||||
|
||||
|
||||
graph = with pkgs.lib;
|
||||
graph =
|
||||
map (thrower: {
|
||||
option = thrower.name;
|
||||
usedBy = reportNewFailures eval.options (evalFun {
|
||||
extraArgs = {
|
||||
config = overrideConfig thrower;
|
||||
};
|
||||
}).options;
|
||||
usedBy = assert __trace "Investigate ${thrower.name}" true;
|
||||
reportNewFailures eval.options (evalFun {
|
||||
specialArgs = {
|
||||
config = overrideConfig thrower;
|
||||
};
|
||||
}).options;
|
||||
}) introspectionModules;
|
||||
|
||||
graphToDot = graph: with pkgs.lib; ''
|
||||
displayOptionsGraph =
|
||||
let
|
||||
checkList =
|
||||
if !(isNull testOption) then [ testOption ]
|
||||
else testOptions;
|
||||
checkAll = checkList == [];
|
||||
in
|
||||
flip filter graph ({option, usedBy}:
|
||||
(checkAll || elem option checkList)
|
||||
&& !(elem option excludedTestOptions)
|
||||
);
|
||||
|
||||
graphToDot = graph: ''
|
||||
digraph "Option Usages" {
|
||||
${concatMapStrings ({option, usedBy}:
|
||||
assert __trace option true;
|
||||
if displayOptions == [] || elem option displayOptions then
|
||||
concatMapStrings (user: ''
|
||||
"${option}" -> "${user}"''
|
||||
) usedBy
|
||||
else ""
|
||||
) graph}
|
||||
concatMapStrings (user: ''
|
||||
"${option}" -> "${user}"''
|
||||
) usedBy
|
||||
) displayOptionsGraph}
|
||||
}
|
||||
'';
|
||||
|
||||
graphToText = graph:
|
||||
concatMapStrings ({option, usedBy}:
|
||||
concatMapStrings (user: ''
|
||||
${user}
|
||||
'') usedBy
|
||||
) displayOptionsGraph;
|
||||
|
||||
in
|
||||
|
||||
pkgs.texFunctions.dot2pdf {
|
||||
dotGraph = pkgs.writeTextFile {
|
||||
rec {
|
||||
dotContent = graphToDot graph;
|
||||
dot = pkgs.writeTextFile {
|
||||
name = "option_usages.dot";
|
||||
text = graphToDot graph;
|
||||
text = dotContent;
|
||||
};
|
||||
|
||||
pdf = pkgs.texFunctions.dot2pdf {
|
||||
dotGraph = dot;
|
||||
};
|
||||
|
||||
txtContent = graphToText graph;
|
||||
txt = pkgs.writeTextFile {
|
||||
name = "option_usages.txt";
|
||||
text = txtContent;
|
||||
};
|
||||
}
|
||||
|
@ -142,7 +142,7 @@ with lib;
|
||||
config =
|
||||
let fontconfig = config.fonts.fontconfig;
|
||||
fcBool = x: "<bool>" + (if x then "true" else "false") + "</bool>";
|
||||
nixosConf = ''
|
||||
renderConf = ''
|
||||
<?xml version='1.0'?>
|
||||
<!DOCTYPE fontconfig SYSTEM 'fonts.dtd'>
|
||||
<fontconfig>
|
||||
@ -169,6 +169,21 @@ with lib;
|
||||
</edit>
|
||||
</match>
|
||||
|
||||
${optionalString (fontconfig.dpi != 0) ''
|
||||
<match target="pattern">
|
||||
<edit name="dpi" mode="assign">
|
||||
<double>${toString fontconfig.dpi}</double>
|
||||
</edit>
|
||||
</match>
|
||||
''}
|
||||
|
||||
</fontconfig>
|
||||
'';
|
||||
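# Illustrative only: with the block above, a configuration setting e.g.
#   fonts.fontconfig.dpi = 144;
# gets a corresponding <match> rule in 10-nixos-rendering.conf, while a
# value of 0 omits it.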
genericAliasConf = ''
|
||||
<?xml version='1.0'?>
|
||||
<!DOCTYPE fontconfig SYSTEM 'fonts.dtd'>
|
||||
<fontconfig>
|
||||
|
||||
<!-- Default fonts -->
|
||||
${optionalString (fontconfig.defaultFonts.sansSerif != []) ''
|
||||
<alias>
|
||||
@ -201,14 +216,6 @@ with lib;
|
||||
</alias>
|
||||
''}
|
||||
|
||||
${optionalString (fontconfig.dpi != 0) ''
|
||||
<match target="pattern">
|
||||
<edit name="dpi" mode="assign">
|
||||
<double>${toString fontconfig.dpi}</double>
|
||||
</edit>
|
||||
</match>
|
||||
''}
|
||||
|
||||
</fontconfig>
|
||||
'';
|
||||
in mkIf fontconfig.enable {
|
||||
@ -219,7 +226,8 @@ with lib;
|
||||
environment.etc."fonts/fonts.conf".source =
|
||||
pkgs.makeFontsConf { fontconfig = pkgs.fontconfig_210; fontDirectories = config.fonts.fonts; };
|
||||
|
||||
environment.etc."fonts/conf.d/98-nixos.conf".text = nixosConf;
|
||||
environment.etc."fonts/conf.d/10-nixos-rendering.conf".text = renderConf;
|
||||
environment.etc."fonts/conf.d/60-nixos-generic-alias.conf".text = genericAliasConf;
|
||||
|
||||
# Versioned fontconfig > 2.10. Take shared fonts.conf from fontconfig.
|
||||
# Otherwise specify only font directories.
|
||||
@ -236,7 +244,8 @@ with lib;
|
||||
</fontconfig>
|
||||
'';
|
||||
|
||||
environment.etc."fonts/${pkgs.fontconfig.configVersion}/conf.d/98-nixos.conf".text = nixosConf;
|
||||
environment.etc."fonts/${pkgs.fontconfig.configVersion}/conf.d/10-nixos-rendering.conf".text = renderConf;
|
||||
environment.etc."fonts/${pkgs.fontconfig.configVersion}/conf.d/60-nixos-generic-alias.conf".text = genericAliasConf;
|
||||
|
||||
environment.etc."fonts/${pkgs.fontconfig.configVersion}/conf.d/99-user.conf" = {
|
||||
enable = fontconfig.includeUserConf;
|
||||
|
@ -45,6 +45,7 @@ let
|
||||
pkgs.strace
|
||||
pkgs.su
|
||||
pkgs.time
|
||||
pkgs.texinfoInteractive
|
||||
pkgs.utillinux
|
||||
extraManpages
|
||||
];
|
||||
@ -57,7 +58,7 @@ in
|
||||
environment = {
|
||||
|
||||
systemPackages = mkOption {
|
||||
type = types.listOf types.path;
|
||||
type = types.listOf types.package;
|
||||
default = [];
|
||||
example = literalExample "[ pkgs.firefox pkgs.thunderbird ]";
|
||||
description = ''
|
||||
@ -105,12 +106,14 @@ in
|
||||
"/lib"
|
||||
"/man"
|
||||
"/sbin"
|
||||
"/share/doc"
|
||||
"/share/emacs"
|
||||
"/share/vim-plugins"
|
||||
"/share/org"
|
||||
"/share/info"
|
||||
"/share/terminfo"
|
||||
"/share/man"
|
||||
"/share/nano"
|
||||
"/share/org"
|
||||
"/share/terminfo"
|
||||
"/share/vim-plugins"
|
||||
];
|
||||
|
||||
system.path = pkgs.buildEnv {
|
||||
@ -136,6 +139,13 @@ in
|
||||
if [ -x $out/bin/update-desktop-database -a -w $out/share/applications ]; then
|
||||
$out/bin/update-desktop-database $out/share/applications
|
||||
fi
|
||||
|
||||
if [ -x $out/bin/install-info -a -w $out/share/info ]; then
|
||||
shopt -s nullglob
|
||||
for i in $out/share/info/*.info $out/share/info/*.info.gz; do
|
||||
$out/bin/install-info $i $out/share/info/dir
|
||||
done
|
||||
fi
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -26,6 +26,7 @@ in
|
||||
|
||||
hardwareClockInLocalTime = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = "If set, keep the hardware clock in local time instead of UTC.";
|
||||
};
|
||||
|
||||
|
@ -216,7 +216,7 @@ let
|
||||
exist. If <option>users.mutableUsers</option> is true, the
|
||||
password can be changed subsequently using the
|
||||
<command>passwd</command> command. Otherwise, it's
|
||||
equivalent to setting the <option>password</option> option.
|
||||
equivalent to setting the <option>hashedPassword</option> option.
|
||||
|
||||
${hashedPasswordDescription}
|
||||
'';
|
||||
@ -336,13 +336,13 @@ let
|
||||
map (range: "${user.name}:${toString range.startUid}:${toString range.count}\n")
|
||||
user.subUidRanges);
|
||||
|
||||
subuidFile = concatStrings (map mkSubuidEntry (attrValues cfg.extraUsers));
|
||||
subuidFile = concatStrings (map mkSubuidEntry (attrValues cfg.users));
|
||||
|
||||
mkSubgidEntry = user: concatStrings (
|
||||
map (range: "${user.name}:${toString range.startGid}:${toString range.count}\n")
|
||||
user.subGidRanges);
|
||||
|
||||
subgidFile = concatStrings (map mkSubgidEntry (attrValues cfg.extraUsers));
|
||||
subgidFile = concatStrings (map mkSubgidEntry (attrValues cfg.users));
|
||||
|
||||
idsAreUnique = set: idAttr: !(fold (name: args@{ dup, acc }:
|
||||
let
|
||||
@ -354,8 +354,8 @@ let
|
||||
else { dup = false; acc = newAcc; }
|
||||
) { dup = false; acc = {}; } (builtins.attrNames set)).dup;
|
||||
|
||||
uidsAreUnique = idsAreUnique (filterAttrs (n: u: u.uid != null) cfg.extraUsers) "uid";
|
||||
gidsAreUnique = idsAreUnique (filterAttrs (n: g: g.gid != null) cfg.extraGroups) "gid";
|
||||
uidsAreUnique = idsAreUnique (filterAttrs (n: u: u.uid != null) cfg.users) "uid";
|
||||
gidsAreUnique = idsAreUnique (filterAttrs (n: g: g.gid != null) cfg.groups) "gid";
|
||||
|
||||
spec = pkgs.writeText "users-groups.json" (builtins.toJSON {
|
||||
inherit (cfg) mutableUsers;
|
||||
@ -364,13 +364,13 @@ let
|
||||
name uid group description home shell createHome isSystemUser
|
||||
password passwordFile hashedPassword
|
||||
initialPassword initialHashedPassword;
|
||||
}) cfg.extraUsers;
|
||||
}) cfg.users;
|
||||
groups = mapAttrsToList (n: g:
|
||||
{ inherit (g) name gid;
|
||||
members = g.members ++ (mapAttrsToList (n: u: u.name) (
|
||||
filterAttrs (n: u: elem g.name u.extraGroups) cfg.extraUsers
|
||||
filterAttrs (n: u: elem g.name u.extraGroups) cfg.users
|
||||
));
|
||||
}) cfg.extraGroups;
|
||||
}) cfg.groups;
|
||||
});
|
||||
|
||||
in {
|
||||
@ -388,10 +388,10 @@ in {
|
||||
<literal>groupadd</literal> commands. On system activation, the
|
||||
existing contents of the <literal>/etc/passwd</literal> and
|
||||
<literal>/etc/group</literal> files will be merged with the
|
||||
contents generated from the <literal>users.extraUsers</literal> and
|
||||
<literal>users.extraGroups</literal> options.
|
||||
contents generated from the <literal>users.users</literal> and
|
||||
<literal>users.groups</literal> options.
|
||||
The initial password for a user will be set
|
||||
according to <literal>users.extraUsers</literal>, but existing passwords
|
||||
according to <literal>users.users</literal>, but existing passwords
|
||||
will not be changed.
|
||||
|
||||
<warning><para>
|
||||
@ -399,7 +399,7 @@ in {
|
||||
group files will simply be replaced on system activation. This also
|
||||
holds for the user passwords; all changed
|
||||
passwords will be reset according to the
|
||||
<literal>users.extraUsers</literal> configuration on activation.
|
||||
<literal>users.users</literal> configuration on activation.
|
||||
</para></warning>
|
||||
'';
|
||||
};
|
||||
@ -412,7 +412,7 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
users.extraUsers = mkOption {
|
||||
users.users = mkOption {
|
||||
default = {};
|
||||
type = types.loaOf types.optionSet;
|
||||
example = {
|
||||
@ -433,7 +433,7 @@ in {
|
||||
options = [ userOpts ];
|
||||
};
|
||||
|
||||
users.extraGroups = mkOption {
|
||||
users.groups = mkOption {
|
||||
default = {};
|
||||
example =
|
||||
{ students.gid = 1001;
|
||||
@ -461,7 +461,7 @@ in {
|
||||
|
||||
config = {
|
||||
|
||||
users.extraUsers = {
|
||||
users.users = {
|
||||
root = {
|
||||
uid = ids.uids.root;
|
||||
description = "System administrator";
|
||||
@ -478,7 +478,7 @@ in {
|
||||
};
|
||||
};
|
||||
|
||||
users.extraGroups = {
|
||||
users.groups = {
|
||||
root.gid = ids.gids.root;
|
||||
wheel.gid = ids.gids.wheel;
|
||||
disk.gid = ids.gids.disk;
|
||||
@ -525,6 +525,27 @@ in {
|
||||
{ assertion = !cfg.enforceIdUniqueness || (uidsAreUnique && gidsAreUnique);
|
||||
message = "UIDs and GIDs must be unique!";
|
||||
}
|
||||
{ # If mutableUsers is false, to prevent users creating a
|
||||
# configuration that locks them out of the system, ensure that
|
||||
# there is at least one "privileged" account that has a
|
||||
# password or an SSH authorized key. Privileged accounts are
|
||||
# root and users in the wheel group.
|
||||
assertion = !cfg.mutableUsers ->
|
||||
any id (mapAttrsToList (name: cfg:
|
||||
(name == "root"
|
||||
|| cfg.group == "wheel"
|
||||
|| elem "wheel" cfg.extraGroups)
|
||||
&&
|
||||
((cfg.hashedPassword != null && cfg.hashedPassword != "!")
|
||||
|| cfg.password != null
|
||||
|| cfg.passwordFile != null
|
||||
|| cfg.openssh.authorizedKeys.keys != []
|
||||
|| cfg.openssh.authorizedKeys.keyFiles != [])
|
||||
) cfg.users);
|
||||
message = ''
|
||||
Neither the root account nor any wheel user has a password or SSH authorized key.
|
||||
You must set one to prevent being locked out of your system.'';
|
||||
}
|
||||
];
|
||||
|
||||
};
|
||||
|
@ -22,9 +22,7 @@ with lib;
###### implementation

config = mkIf config.hardware.enableAllFirmware {
  hardware.firmware = [
    "${pkgs.firmwareLinuxNonfree}/lib/firmware"
  ];
  hardware.firmware = [ pkgs.firmwareLinuxNonfree ];
};

}

@ -52,9 +52,9 @@ in
systemd.services.bumblebeed = {
  description = "Bumblebee Hybrid Graphics Switcher";
  wantedBy = [ "display-manager.service" ];
  script = "bumblebeed --use-syslog -g ${config.hardware.bumblebee.group}";
  path = [ kernel.bbswitch bumblebee ];
  serviceConfig = {
    ExecStart = "${bumblebee}/bin/bumblebeed --use-syslog -g ${config.hardware.bumblebee.group}";
    Restart = "always";
    RestartSec = 60;
    CPUSchedulingPolicy = "idle";

@ -5,11 +5,11 @@ let
in

{
  boot.extraModulePackages = [wis_go7007];
  boot.extraModulePackages = [ wis_go7007 ];

  environment.systemPackages = [wis_go7007];
  environment.systemPackages = [ wis_go7007 ];

  hardware.firmware = ["${wis_go7007}/firmware"];
  hardware.firmware = [ wis_go7007 ];

  services.udev.packages = [wis_go7007];
  services.udev.packages = [ wis_go7007 ];
}
|
||||
|
@ -9,18 +9,17 @@ let
|
||||
|
||||
# We need a copy of the Nix expressions for Nixpkgs and NixOS on the
|
||||
# CD. These are installed into the "nixos" channel of the root
|
||||
# user, as expected by nixos-rebuild/nixos-install.
|
||||
# user, as expected by nixos-rebuild/nixos-install. FIXME: merge
|
||||
# with make-channel.nix.
|
||||
channelSources = pkgs.runCommand "nixos-${config.system.nixosVersion}"
|
||||
{ expr = readFile ../../../lib/channel-expr.nix; }
|
||||
{ }
|
||||
''
|
||||
mkdir -p $out/nixos
|
||||
cp -prd ${pkgs.path} $out/nixos/nixpkgs
|
||||
ln -s nixpkgs/nixos $out/nixos/nixos
|
||||
mkdir -p $out
|
||||
cp -prd ${pkgs.path} $out/nixos
|
||||
chmod -R u+w $out/nixos
|
||||
rm -rf $out/nixos/nixpkgs/.git
|
||||
echo -n ${config.system.nixosVersion} > $out/nixos/nixpkgs/.version
|
||||
echo -n "" > $out/nixos/nixpkgs/.version-suffix
|
||||
echo "$expr" > $out/nixos/default.nix
|
||||
ln -s . $out/nixos/nixpkgs
|
||||
rm -rf $out/nixos/.git
|
||||
echo -n ${config.system.nixosVersionSuffix} > $out/nixos/.version-suffix
|
||||
'';
|
||||
|
||||
in
|
||||
|
@ -30,8 +30,7 @@ let
|
||||
# * COM32 entries (chainload, reboot, poweroff) are not recognized. They
|
||||
# result in incorrect boot entries.
|
||||
|
||||
baseIsolinuxCfg =
|
||||
''
|
||||
baseIsolinuxCfg = ''
|
||||
SERIAL 0 38400
|
||||
TIMEOUT ${builtins.toString syslinuxTimeout}
|
||||
UI vesamenu.c32
|
||||
@ -44,7 +43,7 @@ let
|
||||
LINUX /boot/bzImage
|
||||
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
|
||||
INITRD /boot/initrd
|
||||
'';
|
||||
'';
|
||||
|
||||
isolinuxMemtest86Entry = ''
|
||||
LABEL memtest
|
||||
@ -55,12 +54,12 @@ let
|
||||
|
||||
isolinuxCfg = baseIsolinuxCfg + (optionalString config.boot.loader.grub.memtest86.enable isolinuxMemtest86Entry);
|
||||
|
||||
# The efi boot image
|
||||
# The EFI boot image.
|
||||
efiDir = pkgs.runCommand "efi-directory" {} ''
|
||||
mkdir -p $out/EFI/boot
|
||||
cp -v ${pkgs.gummiboot}/lib/gummiboot/gummiboot${targetArch}.efi $out/EFI/boot/boot${targetArch}.efi
|
||||
mkdir -p $out/loader/entries
|
||||
echo "title NixOS LiveCD" > $out/loader/entries/nixos-livecd.conf
|
||||
echo "title NixOS Live CD" > $out/loader/entries/nixos-livecd.conf
|
||||
echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd.conf
|
||||
echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd.conf
|
||||
echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}" >> $out/loader/entries/nixos-livecd.conf
|
||||
@ -218,6 +217,8 @@ in
|
||||
system.boot.loader.kernelFile = "bzImage";
|
||||
environment.systemPackages = [ pkgs.grub2 pkgs.grub2_efi pkgs.syslinux ];
|
||||
|
||||
boot.consoleLogLevel = mkDefault 7;
|
||||
|
||||
# In stage 1 of the boot, mount the CD as the root FS by label so
|
||||
# that we don't need to know its device. We pass the label of the
|
||||
# root filesystem on the kernel command line, rather than in
|
||||
@ -229,6 +230,7 @@ in
|
||||
boot.kernelParams =
|
||||
[ "root=LABEL=${config.isoImage.volumeID}"
|
||||
"boot.shell_on_fail"
|
||||
"nomodeset"
|
||||
];
|
||||
|
||||
fileSystems."/" =
|
||||
@ -268,6 +270,8 @@ in
|
||||
|
||||
boot.initrd.availableKernelModules = [ "squashfs" "iso9660" "usb-storage" ];
|
||||
|
||||
boot.blacklistedKernelModules = [ "nouveau" ];
|
||||
|
||||
boot.initrd.kernelModules = [ "loop" ];
|
||||
|
||||
# Closures to be copied to the Nix store on the CD, namely the init
|
||||
|
@ -0,0 +1,40 @@
{ config, lib, pkgs, ... }:

let
  extlinux-conf-builder =
    import ../../system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.nix {
      inherit pkgs;
    };
in
{
  imports = [
    ../../profiles/minimal.nix
    ../../profiles/installation-device.nix
    ./sd-image.nix
  ];

  assertions = lib.singleton {
    assertion = pkgs.stdenv.system == "armv7l-linux";
    message = "sd-image-armv7l-multiplatform.nix can only be built natively on ARMv7; " +
      "it cannot be cross-compiled";
  };

  boot.loader.grub.enable = false;
  boot.loader.generic-extlinux-compatible.enable = true;

  # FIXME: change this to linuxPackages_latest once v4.2 is out
  boot.kernelPackages = pkgs.linuxPackages_testing;
  boot.kernelParams = ["console=ttyS0,115200n8" "console=ttyAMA0,115200n8" "console=tty0"];

  # FIXME: fix manual evaluation on ARM
  services.nixosManual.enable = lib.mkOverride 0 false;

  # FIXME: this probably should be in installation-device.nix
  users.extraUsers.root.initialHashedPassword = "";

  sdImage = {
    populateBootCommands = ''
      ${extlinux-conf-builder} -t 3 -c ${config.system.build.toplevel} -d ./boot
    '';
  };
}
46
nixos/modules/installer/cd-dvd/sd-image-raspberrypi.nix
Normal file
@ -0,0 +1,46 @@
{ config, lib, pkgs, ... }:

let
  extlinux-conf-builder =
    import ../../system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.nix {
      inherit pkgs;
    };
in
{
  imports = [
    ../../profiles/minimal.nix
    ../../profiles/installation-device.nix
    ./sd-image.nix
  ];

  assertions = lib.singleton {
    assertion = pkgs.stdenv.system == "armv6l-linux";
    message = "sd-image-raspberrypi.nix can only be built natively on ARMv6; " +
      "it cannot be cross-compiled";
  };

  # Needed by RPi firmware
  nixpkgs.config.allowUnfree = true;

  boot.loader.grub.enable = false;
  boot.loader.generic-extlinux-compatible.enable = true;

  boot.kernelPackages = pkgs.linuxPackages_rpi;

  # FIXME: fix manual evaluation on ARM
  services.nixosManual.enable = lib.mkOverride 0 false;

  # FIXME: this probably should be in installation-device.nix
  users.extraUsers.root.initialHashedPassword = "";

  sdImage = {
    populateBootCommands = ''
      for f in bootcode.bin fixup.dat start.elf; do
        cp ${pkgs.raspberrypifw}/share/raspberrypi/boot/$f boot/
      done
      cp ${pkgs.ubootRaspberryPi}/u-boot.bin boot/u-boot-rpi.bin
      echo 'kernel u-boot-rpi.bin' > boot/config.txt
      ${extlinux-conf-builder} -t 3 -c ${config.system.build.toplevel} -d ./boot
    '';
  };
}
127
nixos/modules/installer/cd-dvd/sd-image.nix
Normal file
@ -0,0 +1,127 @@
|
||||
# This module creates a bootable SD card image containing the given NixOS
|
||||
# configuration. The generated image is MBR partitioned, with a FAT /boot
|
||||
# partition, and ext4 root partition. The generated image is sized to fit
|
||||
# its contents, and a boot script automatically resizes the root partition
|
||||
# to fit the device on the first boot.
|
||||
#
|
||||
# The derivation for the SD image will be placed in
|
||||
# config.system.build.sdImage
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
rootfsImage = import ../../../lib/make-ext4-fs.nix {
|
||||
inherit pkgs;
|
||||
inherit (config.sdImage) storePaths;
|
||||
volumeLabel = "NIXOS_SD";
|
||||
};
|
||||
in
|
||||
{
|
||||
options.sdImage = {
|
||||
storePaths = mkOption {
|
||||
type = with types; listOf package;
|
||||
example = literalExample "[ pkgs.stdenv ]";
|
||||
description = ''
|
||||
Derivations to be included in the Nix store in the generated SD image.
|
||||
'';
|
||||
};
|
||||
|
||||
bootSize = mkOption {
|
||||
type = types.int;
|
||||
default = 128;
|
||||
description = ''
|
||||
Size of the /boot partition, in megabytes.
|
||||
'';
|
||||
};
|
||||
|
||||
populateBootCommands = mkOption {
|
||||
example = literalExample "'' cp \${pkgs.myBootLoader}/u-boot.bin boot/ ''";
|
||||
description = ''
|
||||
Shell commands to populate the ./boot directory.
|
||||
All files in that directory are copied to the
|
||||
/boot partition on the SD image.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
fileSystems = {
|
||||
"/boot" = {
|
||||
device = "/dev/disk/by-label/NIXOS_BOOT";
|
||||
fsType = "vfat";
|
||||
};
|
||||
"/" = {
|
||||
device = "/dev/disk/by-label/NIXOS_SD";
|
||||
fsType = "ext4";
|
||||
};
|
||||
};
|
||||
|
||||
sdImage.storePaths = [ config.system.build.toplevel ];
|
||||
|
||||
system.build.sdImage = pkgs.stdenv.mkDerivation {
|
||||
name = "sd-image-${pkgs.stdenv.system}.img";
|
||||
|
||||
buildInputs = with pkgs; [ dosfstools e2fsprogs mtools libfaketime utillinux ];
|
||||
|
||||
buildCommand = ''
|
||||
# Create the image file sized to fit /boot and /, plus 4M of slack
|
||||
rootSizeBlocks=$(du -B 512 --apparent-size ${rootfsImage} | awk '{ print $1 }')
|
||||
bootSizeBlocks=$((${toString config.sdImage.bootSize} * 1024 * 1024 / 512))
|
||||
imageSize=$((rootSizeBlocks * 512 + bootSizeBlocks * 512 + 4096 * 1024))
|
||||
truncate -s $imageSize $out
|
||||
|
||||
# type=b is 'W95 FAT32', type=83 is 'Linux'.
|
||||
sfdisk $out <<EOF
|
||||
label: dos
|
||||
label-id: 0x2178694e
|
||||
|
||||
start=1M, size=$bootSizeBlocks, type=b, bootable
|
||||
type=83
|
||||
EOF
|
||||
|
||||
# Copy the rootfs into the SD image
|
||||
eval $(partx $out -o START,SECTORS --nr 2 --pairs)
|
||||
dd conv=notrunc if=${rootfsImage} of=$out seek=$START count=$SECTORS
|
||||
|
||||
# Create a FAT32 /boot partition of suitable size into bootpart.img
|
||||
eval $(partx $out -o START,SECTORS --nr 1 --pairs)
|
||||
truncate -s $((SECTORS * 512)) bootpart.img
|
||||
faketime "1970-01-01 00:00:00" mkfs.vfat -i 0x2178694e -n NIXOS_BOOT bootpart.img
|
||||
|
||||
# Populate the files intended for /boot
|
||||
mkdir boot
|
||||
${config.sdImage.populateBootCommands}
|
||||
|
||||
# Copy the populated /boot into the SD image
|
||||
(cd boot; mcopy -bpsvm -i ../bootpart.img ./* ::)
|
||||
dd conv=notrunc if=bootpart.img of=$out seek=$START count=$SECTORS
|
||||
'';
|
||||
};
|
||||
|
||||
boot.postBootCommands = ''
|
||||
# On the first boot do some maintenance tasks
|
||||
if [ -f /nix-path-registration ]; then
|
||||
# Figure out device names for the boot device and root filesystem.
|
||||
rootPart=$(readlink -f /dev/disk/by-label/NIXOS_SD)
|
||||
bootDevice=$(lsblk -npo PKNAME $rootPart)
|
||||
|
||||
# Resize the root partition and the filesystem to fit the disk
|
||||
echo ",+," | sfdisk -N2 --no-reread $bootDevice
|
||||
${pkgs.parted}/bin/partprobe
|
||||
${pkgs.e2fsprogs}/bin/resize2fs $rootPart
|
||||
|
||||
# Register the contents of the initial Nix store
|
||||
${config.nix.package}/bin/nix-store --load-db < /nix-path-registration
|
||||
|
||||
# nixos-rebuild also requires a "system" profile and an /etc/NIXOS tag.
|
||||
touch /etc/NIXOS
|
||||
${config.nix.package}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
|
||||
|
||||
# Prevents this from running on later boots.
|
||||
rm -f /nix-path-registration
|
||||
fi
|
||||
'';
|
||||
};
|
||||
}
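
For illustration only (not part of this commit): a minimal configuration consuming the sd-image.nix module above might look like the sketch below. The u-boot file name is a made-up placeholder; only the sdImage options themselves come from the module. The resulting image is then built via the config.system.build.sdImage attribute, as the module's header comment describes.

  # Hypothetical consumer of sd-image.nix; ./my-u-boot.bin is a placeholder file.
  { config, pkgs, ... }:

  {
    imports = [ ./sd-image.nix ];

    # /boot partition size in megabytes (the module default is 128).
    sdImage.bootSize = 64;

    # Shell commands that fill ./boot; everything placed there ends up on the FAT partition.
    sdImage.populateBootCommands = ''
      cp ${./my-u-boot.bin} boot/u-boot.bin
    '';
  }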
|
81
nixos/modules/installer/tools/auto-upgrade.nix
Normal file
@ -0,0 +1,81 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let cfg = config.system.autoUpgrade; in
|
||||
|
||||
{
|
||||
|
||||
options = {
|
||||
|
||||
system.autoUpgrade = {
|
||||
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to periodically upgrade NixOS to the latest
|
||||
version. If enabled, a systemd timer will run
|
||||
<literal>nixos-rebuild switch --upgrade</literal> once a
|
||||
day.
|
||||
'';
|
||||
};
|
||||
|
||||
channel = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = https://nixos.org/channels/nixos-14.12-small;
|
||||
description = ''
|
||||
The URI of the NixOS channel to use for automatic
|
||||
upgrades. By default, this is the channel set using
|
||||
<command>nix-channel</command> (run <literal>nix-channel
|
||||
--list</literal> to see the current value).
|
||||
'';
|
||||
};
|
||||
|
||||
flags = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
example = [ "-I" "stuff=/home/alice/nixos-stuff" "--option" "extra-binary-caches" "http://my-cache.example.org/" ];
|
||||
description = ''
|
||||
Any additional flags passed to <command>nixos-rebuild</command>.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = {
|
||||
|
||||
system.autoUpgrade.flags =
|
||||
[ "--no-build-output" ]
|
||||
++ (if cfg.channel == null
|
||||
then [ "--upgrade" ]
|
||||
else [ "-I" "nixpkgs=${cfg.channel}/nixexprs.tar.xz" ]);
|
||||
|
||||
systemd.services.nixos-upgrade = {
|
||||
description = "NixOS Upgrade";
|
||||
|
||||
restartIfChanged = false;
|
||||
unitConfig.X-StopOnRemoval = false;
|
||||
|
||||
serviceConfig.Type = "oneshot";
|
||||
|
||||
environment = config.nix.envVars //
|
||||
{ inherit (config.environment.sessionVariables) NIX_PATH SSL_CERT_FILE;
|
||||
HOME = "/root";
|
||||
};
|
||||
|
||||
path = [ pkgs.gnutar pkgs.xz config.nix.package ];
|
||||
|
||||
script = ''
|
||||
${config.system.build.nixos-rebuild}/bin/nixos-rebuild test ${toString cfg.flags}
|
||||
'';
|
||||
|
||||
startAt = mkIf cfg.enable "04:40";
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
}
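
A short illustration (not from this commit) of how the options declared above could be used in configuration.nix; the channel URL is the one given in the module's own example, the extra flags are assumptions:

  # Hypothetical use of the new system.autoUpgrade module.
  {
    system.autoUpgrade = {
      enable = true;                                           # run nixos-rebuild once a day (04:40)
      channel = https://nixos.org/channels/nixos-14.12-small;  # optional; defaults to the configured channel
      flags = [ "--option" "extra-binary-caches" "http://my-cache.example.org/" ];
    };
  }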
|
@ -53,5 +53,5 @@ fi
|
||||
|
||||
# Build a network of VMs
|
||||
|
||||
nix-build '<nixos/modules/installer/tools/nixos-build-vms/build-vms.nix>' \
|
||||
nix-build '<nixpkgs/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix>' \
|
||||
--argstr networkExpr $networkExpr $noOutLinkArg $showTraceArg
|
||||
|
@ -27,7 +27,7 @@ let
|
||||
|
||||
if [ -z "$(type -P git)" ]; then
|
||||
echo "installing Git..."
|
||||
nix-env -iA nixos.pkgs.git || nix-env -i git
|
||||
nix-env -iA nixos.git
|
||||
fi
|
||||
|
||||
# Move any old nixpkgs directories out of the way.
|
||||
|
@ -235,7 +235,7 @@ chomp $virt;
|
||||
# Check if we're a VirtualBox guest. If so, enable the guest
|
||||
# additions.
|
||||
if ($virt eq "oracle") {
|
||||
push @attrs, "services.virtualboxGuest.enable = true;"
|
||||
push @attrs, "virtualisation.virtualbox.guest.enable = true;"
|
||||
}
|
||||
|
||||
|
||||
@ -544,6 +544,9 @@ $bootLoaderConfig
|
||||
# uid = 1000;
|
||||
# };
|
||||
|
||||
# The NixOS release to be compatible with for stateful data such as databases.
|
||||
system.stateVersion = "@nixosRelease@";
|
||||
|
||||
}
|
||||
EOF
|
||||
} else {
|
||||
|
@ -256,14 +256,8 @@ NIXOS_INSTALL_GRUB=1 chroot $mountPoint \
|
||||
chroot $mountPoint /nix/var/nix/profiles/system/activate
|
||||
|
||||
|
||||
# Some systems may not be prepared to use NixOS' paths.
|
||||
export PATH=/run/current-system/sw/bin:/run/current-system/sw/sbin:$PATH
|
||||
export NIX_PATH=/nix/var/nix/profiles/per-user/root/channels/nixos:nixpkgs=/etc/nixos/nixpkgs
|
||||
export NIX_PATH=$NIX_PATH:nixos-config=/etc/nixos/configuration.nix:/nix/var/nix/profiles/per-user/root/channels
|
||||
|
||||
|
||||
# Ask the user to set a root password.
|
||||
if [ "$(chroot $mountPoint nix-instantiate --eval '<nixpkgs/nixos>' -A config.users.mutableUsers)" = true ] && [ -t 0 ] ; then
|
||||
if [ "$(chroot $mountPoint /run/current-system/sw/bin/sh -l -c "nix-instantiate --eval '<nixpkgs/nixos>' -A config.users.mutableUsers")" = true ] && [ -t 0 ] ; then
|
||||
echo "setting root password..."
|
||||
chroot $mountPoint /var/setuid-wrappers/passwd
|
||||
fi
|
||||
|
@ -157,9 +157,9 @@ if [ -n "$buildNix" ]; then
|
||||
if ! nix-build '<nixpkgs>' -A nix -o $tmpDir/nix "${extraBuildFlags[@]}" > /dev/null; then
|
||||
machine="$(uname -m)"
|
||||
if [ "$machine" = x86_64 ]; then
|
||||
nixStorePath=/nix/store/ffig6yaggbh12dh9y5pnf1grf5lqyipz-nix-1.8
|
||||
nixStorePath=/nix/store/664kxr14kfgx4dl095crvmr7pbh9xlh5-nix-1.9
|
||||
elif [[ "$machine" =~ i.86 ]]; then
|
||||
nixStorePath=/nix/store/lglhfp4mimfa5wzjjf1kqz6f5wlsj2mn-nix-1.8
|
||||
nixStorePath=/nix/store/p7xdvz72xx3rhm121jclsbdmmcds7xh6-nix-1.9
|
||||
else
|
||||
echo "$0: unsupported platform"
|
||||
exit 1
|
||||
|
@ -40,6 +40,7 @@ let
|
||||
src = ./nixos-generate-config.pl;
|
||||
path = [ pkgs.btrfsProgs ];
|
||||
perl = "${pkgs.perl}/bin/perl -I${pkgs.perlPackages.FileSlurp}/lib/perl5/site_perl";
|
||||
inherit (config.system) nixosRelease;
|
||||
};
|
||||
|
||||
nixos-option = makeProg {
|
||||
@ -56,7 +57,9 @@ let
|
||||
in
|
||||
|
||||
{
|
||||
|
||||
config = {
|
||||
|
||||
environment.systemPackages =
|
||||
[ nixos-build-vms
|
||||
nixos-install
|
||||
@ -69,5 +72,7 @@ in
|
||||
system.build = {
|
||||
inherit nixos-install nixos-generate-config nixos-option nixos-rebuild;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -30,5 +30,5 @@ with lib;
|
||||
};
|
||||
|
||||
};
|
||||
# impl of assertions is in <nixos/modules/system/activation/top-level.nix>
|
||||
# impl of assertions is in <nixpkgs/nixos/modules/system/activation/top-level.nix>
|
||||
}
|
||||
|
@ -2,8 +2,13 @@
|
||||
|
||||
{
|
||||
_module.args = {
|
||||
pkgs_i686 = import ../../lib/nixpkgs.nix {
|
||||
pkgs_i686 = import ../../.. {
|
||||
system = "i686-linux";
|
||||
# FIXME: we enable config.allowUnfree to make packages like
|
||||
# nvidia-x11 available. This isn't a problem because if the user has
|
||||
# ‘nixpkgs.config.allowUnfree = false’, then evaluation will fail on
|
||||
# the 64-bit package anyway. However, it would be cleaner to respect
|
||||
# nixpkgs.config here.
|
||||
config.allowUnfree = true;
|
||||
};
|
||||
|
||||
|
@ -226,6 +226,9 @@
|
||||
gitit = 202;
|
||||
riemanntools = 203;
|
||||
subsonic = 204;
|
||||
riak = 205;
|
||||
shout = 206;
|
||||
gateone = 207;
|
||||
|
||||
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
|
||||
|
||||
@ -430,6 +433,9 @@
|
||||
gitit = 202;
|
||||
riemanntools = 203;
|
||||
subsonic = 204;
|
||||
riak = 205;
|
||||
#shout = 206; #unused
|
||||
gateone = 207;
|
||||
|
||||
# When adding a gid, make sure it doesn't match an existing
|
||||
# uid. Users and groups with the same name should have equal
|
||||
|
@ -35,7 +35,7 @@ in {
|
||||
type = types.listOf types.str;
|
||||
default = [ ];
|
||||
description = ''
|
||||
Extra flags to append to <command>updatedb</command>.
|
||||
Extra flags to pass to <command>updatedb</command>.
|
||||
'';
|
||||
};
|
||||
|
||||
@ -56,6 +56,14 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
includeStore = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to include <filename>/nix/store</filename> in the locate database.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
@ -63,7 +71,6 @@ in {
|
||||
###### implementation
|
||||
|
||||
config = {
|
||||
|
||||
systemd.services.update-locatedb =
|
||||
{ description = "Update Locate Database";
|
||||
path = [ pkgs.su ];
|
||||
@ -71,8 +78,9 @@ in {
|
||||
''
|
||||
mkdir -m 0755 -p $(dirname ${toString cfg.output})
|
||||
exec updatedb \
|
||||
--localuser=${cfg.localuser} \
|
||||
--output=${toString cfg.output} ${concatStringsSep " " cfg.extraFlags}
|
||||
--localuser=${cfg.localuser} \
|
||||
${optionalString (!cfg.includeStore) "--prunepaths='/nix/store'"} \
|
||||
--output=${toString cfg.output} ${concatStringsSep " " cfg.extraFlags}
|
||||
'';
|
||||
serviceConfig.Nice = 19;
|
||||
serviceConfig.IOSchedulingClass = "idle";
|
||||
|
@ -72,7 +72,7 @@ in
|
||||
};
|
||||
|
||||
config = {
|
||||
_module.args.pkgs = import ../../lib/nixpkgs.nix {
|
||||
_module.args.pkgs = import ../../.. {
|
||||
system = config.nixpkgs.system;
|
||||
|
||||
inherit (config.nixpkgs) config;
|
||||
|
@ -6,12 +6,35 @@ with lib;
|
||||
|
||||
options = {
|
||||
|
||||
system.stateVersion = mkOption {
|
||||
type = types.str;
|
||||
default = config.system.nixosRelease;
|
||||
description = ''
|
||||
Every once in a while, a new NixOS release may change
|
||||
configuration defaults in a way incompatible with stateful
|
||||
data. For instance, if the default version of PostgreSQL
|
||||
changes, the new version will probably be unable to read your
|
||||
existing databases. To prevent such breakage, you can set the
|
||||
value of this option to the NixOS release with which you want
|
||||
to be compatible. The effect is that NixOS will use option
|
||||
defaults corresponding to the specified release (such as using
|
||||
an older version of PostgreSQL).
|
||||
'';
|
||||
};
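
As a one-line illustration (not part of the diff), a host would typically pin the option declared above to the release it was first installed with, so later upgrades keep the old stateful defaults; the value shown is an assumption:

  # Hypothetical pin of the release this machine was installed from.
  system.stateVersion = "15.09";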
|
||||
|
||||
system.nixosVersion = mkOption {
|
||||
internal = true;
|
||||
type = types.str;
|
||||
description = "NixOS version.";
|
||||
};
|
||||
|
||||
system.nixosRelease = mkOption {
|
||||
readOnly = true;
|
||||
type = types.str;
|
||||
default = readFile "${toString pkgs.path}/.version";
|
||||
description = "NixOS release.";
|
||||
};
|
||||
|
||||
system.nixosVersionSuffix = mkOption {
|
||||
internal = true;
|
||||
type = types.str;
|
||||
@ -25,7 +48,7 @@ with lib;
|
||||
};
|
||||
|
||||
system.nixosCodeName = mkOption {
|
||||
internal = true;
|
||||
readOnly = true;
|
||||
type = types.str;
|
||||
description = "NixOS release code name.";
|
||||
};
|
||||
@ -41,8 +64,7 @@ with lib;
|
||||
|
||||
config = {
|
||||
|
||||
system.nixosVersion =
|
||||
mkDefault (readFile "${toString pkgs.path}/.version" + config.system.nixosVersionSuffix);
|
||||
system.nixosVersion = mkDefault (config.system.nixosRelease + config.system.nixosVersionSuffix);
|
||||
|
||||
system.nixosVersionSuffix =
|
||||
let suffixFile = "${toString pkgs.path}/.version-suffix"; in
|
||||
@ -53,7 +75,7 @@ with lib;
|
||||
mkDefault (if pathExists fn then readFile fn else "master");
|
||||
|
||||
# Note: code names must only increase in alphabetical order.
|
||||
system.nixosCodeName = "Dingo";
|
||||
system.nixosCodeName = "Emu";
|
||||
|
||||
# Generate /etc/os-release. See
|
||||
# http://0pointer.de/public/systemd-man/os-release.html for the
|
||||
|
@ -41,6 +41,7 @@
|
||||
./hardware/video/bumblebee.nix
|
||||
./hardware/video/nvidia.nix
|
||||
./hardware/video/ati.nix
|
||||
./installer/tools/auto-upgrade.nix
|
||||
./installer/tools/nixos-checkout.nix
|
||||
./installer/tools/tools.nix
|
||||
./misc/assertions.nix
|
||||
@ -60,7 +61,6 @@
|
||||
./programs/command-not-found/command-not-found.nix
|
||||
./programs/dconf.nix
|
||||
./programs/environment.nix
|
||||
./programs/info.nix
|
||||
./programs/ibus.nix
|
||||
./programs/kbdlight.nix
|
||||
./programs/light.nix
|
||||
@ -72,9 +72,9 @@
|
||||
./programs/ssmtp.nix
|
||||
./programs/uim.nix
|
||||
./programs/venus.nix
|
||||
./programs/virtualbox-host.nix
|
||||
./programs/wvdial.nix
|
||||
./programs/freetds.nix
|
||||
./programs/xfs_quota.nix
|
||||
./programs/zsh/zsh.nix
|
||||
./rename.nix
|
||||
./security/apparmor.nix
|
||||
@ -131,6 +131,7 @@
|
||||
./services/databases/opentsdb.nix
|
||||
./services/databases/postgresql.nix
|
||||
./services/databases/redis.nix
|
||||
./services/databases/riak.nix
|
||||
./services/databases/virtuoso.nix
|
||||
./services/desktops/accountsservice.nix
|
||||
./services/desktops/geoclue2.nix
|
||||
@ -278,15 +279,18 @@
|
||||
./services/networking/dnsmasq.nix
|
||||
./services/networking/docker-registry-server.nix
|
||||
./services/networking/ejabberd.nix
|
||||
./services/networking/fan.nix
|
||||
./services/networking/firefox/sync-server.nix
|
||||
./services/networking/firewall.nix
|
||||
./services/networking/flashpolicyd.nix
|
||||
./services/networking/freenet.nix
|
||||
./services/networking/gateone.nix
|
||||
./services/networking/git-daemon.nix
|
||||
./services/networking/gnunet.nix
|
||||
./services/networking/gogoclient.nix
|
||||
./services/networking/gvpe.nix
|
||||
./services/networking/haproxy.nix
|
||||
./services/networking/heyefi.nix
|
||||
./services/networking/hostapd.nix
|
||||
./services/networking/i2pd.nix
|
||||
./services/networking/i2p.nix
|
||||
@ -326,6 +330,8 @@
|
||||
./services/networking/searx.nix
|
||||
./services/networking/seeks.nix
|
||||
./services/networking/skydns.nix
|
||||
./services/networking/shout.nix
|
||||
./services/networking/softether.nix
|
||||
./services/networking/spiped.nix
|
||||
./services/networking/sslh.nix
|
||||
./services/networking/ssh/lshd.nix
|
||||
@ -387,6 +393,7 @@
|
||||
./services/web-servers/lighttpd/default.nix
|
||||
./services/web-servers/lighttpd/gitweb.nix
|
||||
./services/web-servers/nginx/default.nix
|
||||
./services/web-servers/nginx/reverse_proxy.nix
|
||||
./services/web-servers/phpfpm.nix
|
||||
./services/web-servers/shellinabox.nix
|
||||
./services/web-servers/tomcat.nix
|
||||
@ -483,6 +490,7 @@
|
||||
./virtualisation/openvswitch.nix
|
||||
./virtualisation/parallels-guest.nix
|
||||
./virtualisation/virtualbox-guest.nix
|
||||
./virtualisation/virtualbox-host.nix
|
||||
./virtualisation/vmware-guest.nix
|
||||
./virtualisation/xen-dom0.nix
|
||||
]
|
||||
|
@ -30,7 +30,7 @@ let
|
||||
relocatedModuleFiles =
|
||||
let
|
||||
relocateNixOS = path:
|
||||
"<nixos" + removePrefix nixosPath (toString path) + ">";
|
||||
"<nixpkgs/nixos" + removePrefix nixosPath (toString path) + ">";
|
||||
relocateOthers = null;
|
||||
in
|
||||
{ nixos = map relocateNixOS partitionedModuleFiles.nixos;
|
||||
|
@ -7,6 +7,8 @@ with lib;
|
||||
|
||||
{
|
||||
environment.noXlibs = mkDefault true;
|
||||
i18n.supportedLocales = [ config.i18n.defaultLocale ];
|
||||
|
||||
# This isn't perfect, but let's expect the user specifies an UTF-8 defaultLocale
|
||||
i18n.supportedLocales = [ (config.i18n.defaultLocale + "/UTF-8") ];
|
||||
services.nixosManual.enable = mkDefault false;
|
||||
}
|
||||
|
@ -57,9 +57,9 @@ in
|
||||
if [ $? = 126 ]; then
|
||||
"$@"
|
||||
fi
|
||||
else
|
||||
else
|
||||
# Indicate that there was an error so ZSH falls back to its default handler
|
||||
return 127
|
||||
return 127
|
||||
fi
|
||||
}
|
||||
'';
|
||||
|
@ -30,11 +30,11 @@ The program ‘$program’ is currently not installed. It is provided by
|
||||
the package ‘$package’, which I will now install for you.
|
||||
EOF
|
||||
;
|
||||
exit 126 if system("nix-env", "-i", $package) == 0;
|
||||
exit 126 if system("nix-env", "-iA", "nixos.$package") == 0;
|
||||
} else {
|
||||
print STDERR <<EOF;
|
||||
The program ‘$program’ is currently not installed. You can install it by typing:
|
||||
nix-env -i $package
|
||||
nix-env -iA nixos.$package
|
||||
EOF
|
||||
}
|
||||
} else {
|
||||
@ -42,7 +42,7 @@ EOF
|
||||
The program ‘$program’ is currently not installed. It is provided by
|
||||
several packages. You can install it by typing one of the following:
|
||||
EOF
|
||||
print STDERR " nix-env -i $_->{package}\n" foreach @$res;
|
||||
print STDERR " nix-env -iA nixos.$_->{package}\n" foreach @$res;
|
||||
}
|
||||
|
||||
exit 127;
|
||||
|
@ -23,15 +23,6 @@ in
|
||||
EDITOR = mkDefault "nano";
|
||||
};
|
||||
|
||||
environment.sessionVariables =
|
||||
{ NIX_PATH =
|
||||
[ "/nix/var/nix/profiles/per-user/root/channels/nixos"
|
||||
"nixpkgs=/etc/nixos/nixpkgs"
|
||||
"nixos-config=/etc/nixos/configuration.nix"
|
||||
"/nix/var/nix/profiles/per-user/root/channels"
|
||||
];
|
||||
};
|
||||
|
||||
environment.profiles =
|
||||
[ "$HOME/.nix-profile"
|
||||
"/nix/var/nix/profiles/default"
|
||||
|
@ -1,38 +0,0 @@
|
||||
{config, pkgs, ...}:
|
||||
|
||||
let
|
||||
|
||||
texinfo = pkgs.texinfoInteractive;
|
||||
|
||||
# Quick hack to make the `info' command work properly. `info' needs
|
||||
# a "dir" file containing all the installed Info files, which we
|
||||
# don't have (it would be impure to have a package installation
|
||||
# update some global "dir" file). So this wrapper script around
|
||||
# "info" builds a temporary "dir" file on the fly. This is a bit
|
||||
# slow (on a cold cache) but not unacceptably so.
|
||||
infoWrapper = pkgs.writeScriptBin "info"
|
||||
''
|
||||
#! ${pkgs.stdenv.shell}
|
||||
|
||||
dir=$(mktemp --tmpdir -d "info.dir.XXXXXX")
|
||||
|
||||
if test -z "$dir"; then exit 1; fi
|
||||
|
||||
trap 'rm -rf "$dir"' EXIT
|
||||
|
||||
shopt -s nullglob
|
||||
|
||||
for i in $(IFS=:; echo $INFOPATH); do
|
||||
for j in $i/*.info; do
|
||||
${texinfo}/bin/install-info --quiet $j $dir/dir
|
||||
done
|
||||
done
|
||||
|
||||
INFOPATH=$dir:$INFOPATH ${texinfo}/bin/info "$@"
|
||||
''; # */
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
environment.systemPackages = [ infoWrapper texinfo ];
|
||||
}
|
@ -18,6 +18,14 @@ let
|
||||
exec ${askPassword}
|
||||
'';
|
||||
|
||||
knownHosts = map (h: getAttr h cfg.knownHosts) (attrNames cfg.knownHosts);
|
||||
|
||||
knownHostsText = flip (concatMapStringsSep "\n") knownHosts
|
||||
(h: assert h.hostNames != [];
|
||||
concatStringsSep "," h.hostNames + " "
|
||||
+ (if h.publicKey != null then h.publicKey else readFile h.publicKeyFile)
|
||||
);
|
||||
|
||||
in
|
||||
{
|
||||
###### interface
|
||||
@ -92,31 +100,93 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
knownHosts = mkOption {
|
||||
default = {};
|
||||
type = types.loaOf (types.submodule ({ name, ... }: {
|
||||
options = {
|
||||
hostNames = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
description = ''
|
||||
A list of host names and/or IP numbers used for accessing
|
||||
the host's ssh service.
|
||||
'';
|
||||
};
|
||||
publicKey = mkOption {
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
example = "ecdsa-sha2-nistp521 AAAAE2VjZHN...UEPg==";
|
||||
description = ''
|
||||
The public key data for the host. You can fetch a public key
|
||||
from a running SSH server with the <command>ssh-keyscan</command>
|
||||
command. The public key should not include any host names, only
|
||||
the key type and the key itself.
|
||||
'';
|
||||
};
|
||||
publicKeyFile = mkOption {
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
description = ''
|
||||
The path to the public key file for the host. The public
|
||||
key file is read at build time and saved in the Nix store.
|
||||
You can fetch a public key file from a running SSH server
|
||||
with the <command>ssh-keyscan</command> command. The content
|
||||
of the file should follow the same format as described for
|
||||
the <literal>publicKey</literal> option.
|
||||
'';
|
||||
};
|
||||
};
|
||||
config = {
|
||||
hostNames = mkDefault [ name ];
|
||||
};
|
||||
}));
|
||||
description = ''
|
||||
The set of system-wide known SSH hosts.
|
||||
'';
|
||||
example = [
|
||||
{
|
||||
hostNames = [ "myhost" "myhost.mydomain.com" "10.10.1.4" ];
|
||||
publicKeyFile = literalExample "./pubkeys/myhost_ssh_host_dsa_key.pub";
|
||||
}
|
||||
{
|
||||
hostNames = [ "myhost2" ];
|
||||
publicKeyFile = literalExample "./pubkeys/myhost2_ssh_host_dsa_key.pub";
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = {
|
||||
|
||||
assertions = singleton
|
||||
{ assertion = cfg.forwardX11 -> cfg.setXAuthLocation;
|
||||
message = "cannot enable X11 forwarding without setting XAuth location";
|
||||
};
|
||||
|
||||
environment.etc =
|
||||
[ { # SSH configuration. Slight duplication of the sshd_config
|
||||
# generation in the sshd service.
|
||||
source = pkgs.writeText "ssh_config" ''
|
||||
AddressFamily ${if config.networking.enableIPv6 then "any" else "inet"}
|
||||
${optionalString cfg.setXAuthLocation ''
|
||||
XAuthLocation ${pkgs.xorg.xauth}/bin/xauth
|
||||
''}
|
||||
ForwardX11 ${if cfg.forwardX11 then "yes" else "no"}
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
target = "ssh/ssh_config";
|
||||
assertions =
|
||||
[ { assertion = cfg.forwardX11 -> cfg.setXAuthLocation;
|
||||
message = "cannot enable X11 forwarding without setting XAuth location";
|
||||
}
|
||||
];
|
||||
] ++ flip mapAttrsToList cfg.knownHosts (name: data: {
|
||||
assertion = (data.publicKey == null && data.publicKeyFile != null) ||
|
||||
(data.publicKey != null && data.publicKeyFile == null);
|
||||
message = "knownHost ${name} must contain either a publicKey or publicKeyFile";
|
||||
});
|
||||
|
||||
# SSH configuration. Slight duplication of the sshd_config
|
||||
# generation in the sshd service.
|
||||
environment.etc."ssh/ssh_config".text =
|
||||
''
|
||||
AddressFamily ${if config.networking.enableIPv6 then "any" else "inet"}
|
||||
|
||||
${optionalString cfg.setXAuthLocation ''
|
||||
XAuthLocation ${pkgs.xorg.xauth}/bin/xauth
|
||||
''}
|
||||
|
||||
ForwardX11 ${if cfg.forwardX11 then "yes" else "no"}
|
||||
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
|
||||
environment.etc."ssh/ssh_known_hosts".text = knownHostsText;
|
||||
|
||||
# FIXME: this should really be socket-activated for über-awesomeness.
|
||||
systemd.user.services.ssh-agent =
|
||||
|
110
nixos/modules/programs/xfs_quota.nix
Normal file
@ -0,0 +1,110 @@
|
||||
# Configuration for the xfs_quota command
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
cfg = config.programs.xfs_quota;
|
||||
|
||||
limitOptions = opts: concatStringsSep " " [
|
||||
(optionalString (opts.sizeSoftLimit != null) "bsoft=${opts.sizeSoftLimit}")
|
||||
(optionalString (opts.sizeHardLimit != null) "bhard=${opts.sizeHardLimit}")
|
||||
];
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
|
||||
###### interface
|
||||
|
||||
options = {
|
||||
|
||||
programs.xfs_quota = {
|
||||
projects = mkOption {
|
||||
default = {};
|
||||
type = types.attrsOf (types.submodule {
|
||||
options = {
|
||||
id = mkOption {
|
||||
type = types.int;
|
||||
description = "Project ID.";
|
||||
};
|
||||
|
||||
fileSystem = mkOption {
|
||||
type = types.string;
|
||||
description = "XFS filesystem hosting the xfs_quota project.";
|
||||
default = "/";
|
||||
};
|
||||
|
||||
path = mkOption {
|
||||
type = types.string;
|
||||
description = "Project directory.";
|
||||
};
|
||||
|
||||
sizeSoftLimit = mkOption {
|
||||
type = types.nullOr types.string;
|
||||
default = null;
|
||||
example = "30g";
|
||||
description = "Soft limit of the project size";
|
||||
};
|
||||
|
||||
sizeHardLimit = mkOption {
|
||||
type = types.nullOr types.string;
|
||||
default = null;
|
||||
example = "50g";
|
||||
description = "Hard limit of the project size.";
|
||||
};
|
||||
};
|
||||
});
|
||||
|
||||
description = "Setup of xfs_quota projects. Make sure the filesystem is mounted with the pquota option.";
|
||||
|
||||
example = {
|
||||
"projname" = {
|
||||
id = 50;
|
||||
path = "/xfsprojects/projname";
|
||||
sizeHardLimit = "50g";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf (cfg.projects != {}) {
|
||||
|
||||
environment.etc.projects.source = pkgs.writeText "etc-project"
|
||||
(concatStringsSep "\n" (mapAttrsToList
|
||||
(name: opts: "${toString opts.id}:${opts.path}") cfg.projects));
|
||||
|
||||
environment.etc.projid.source = pkgs.writeText "etc-projid"
|
||||
(concatStringsSep "\n" (mapAttrsToList
|
||||
(name: opts: "${name}:${toString opts.id}") cfg.projects));
|
||||
|
||||
systemd.services = mapAttrs' (name: opts:
|
||||
nameValuePair "xfs_quota-${name}" {
|
||||
description = "Setup xfs_quota for project ${name}";
|
||||
script = ''
|
||||
${pkgs.xfsprogs}/bin/xfs_quota -x -c 'project -s ${name}' ${opts.fileSystem}
|
||||
${pkgs.xfsprogs}/bin/xfs_quota -x -c 'limit -p ${limitOptions opts} ${name}' ${opts.fileSystem}
|
||||
'';
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ ((replaceChars [ "/" ] [ "-" ] opts.fileSystem) + ".mount") ];
|
||||
|
||||
restartTriggers = [ config.environment.etc.projects.source ];
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
};
|
||||
}
|
||||
) cfg.projects;
|
||||
|
||||
};
|
||||
|
||||
}
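
An illustrative snippet (not in the diff) using the xfs_quota module above; the project name, id, paths and limit are placeholders:

  # Hypothetical xfs_quota project definition.
  programs.xfs_quota.projects."backups" = {
    id = 51;                  # written to /etc/projects and /etc/projid
    fileSystem = "/data";     # XFS filesystem mounted with the pquota option
    path = "/data/backups";   # project directory
    sizeHardLimit = "100g";   # becomes: xfs_quota -x -c 'limit -p bhard=100g backups' /data
  };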
|
@ -77,6 +77,8 @@ in zipModules ([]
|
||||
++ obsolete [ "environment" "nix" ] [ "nix" "package" ]
|
||||
++ obsolete [ "fonts" "enableFontConfig" ] [ "fonts" "fontconfig" "enable" ]
|
||||
++ obsolete [ "fonts" "extraFonts" ] [ "fonts" "fonts" ]
|
||||
++ alias [ "users" "extraUsers" ] [ "users" "users" ]
|
||||
++ alias [ "users" "extraGroups" ] [ "users" "groups" ]
|
||||
|
||||
++ obsolete [ "security" "extraSetuidPrograms" ] [ "security" "setuidPrograms" ]
|
||||
++ obsolete [ "networking" "enableWLAN" ] [ "networking" "wireless" "enable" ]
|
||||
@ -98,6 +100,9 @@ in zipModules ([]
|
||||
++ obsolete [ "boot" "initrd" "extraKernelModules" ] [ "boot" "initrd" "kernelModules" ]
|
||||
++ obsolete [ "boot" "extraKernelParams" ] [ "boot" "kernelParams" ]
|
||||
|
||||
# smartd
|
||||
++ obsolete [ "services" "smartd" "deviceOpts" ] [ "services" "smartd" "defaults" "monitored" ]
|
||||
|
||||
# OpenSSH
|
||||
++ obsolete [ "services" "sshd" "ports" ] [ "services" "openssh" "ports" ]
|
||||
++ alias [ "services" "sshd" "enable" ] [ "services" "openssh" "enable" ]
|
||||
@ -107,9 +112,17 @@ in zipModules ([]
|
||||
++ obsolete [ "services" "sshd" "permitRootLogin" ] [ "services" "openssh" "permitRootLogin" ]
|
||||
++ obsolete [ "services" "xserver" "startSSHAgent" ] [ "services" "xserver" "startOpenSSHAgent" ]
|
||||
++ obsolete [ "services" "xserver" "startOpenSSHAgent" ] [ "programs" "ssh" "startAgent" ]
|
||||
++ alias [ "services" "openssh" "knownHosts" ] [ "programs" "ssh" "knownHosts" ]
|
||||
|
||||
# VirtualBox
|
||||
++ obsolete [ "services" "virtualbox" "enable" ] [ "services" "virtualboxGuest" "enable" ]
|
||||
++ obsolete [ "services" "virtualbox" "enable" ] [ "virtualisation" "virtualbox" "guest" "enable" ]
|
||||
++ obsolete [ "services" "virtualboxGuest" "enable" ] [ "virtualisation" "virtualbox" "guest" "enable" ]
|
||||
++ obsolete [ "programs" "virtualbox" "enable" ] [ "virtualisation" "virtualbox" "host" "enable" ]
|
||||
++ obsolete [ "programs" "virtualbox" "addNetworkInterface" ] [ "virtualisation" "virtualbox" "host" "addNetworkInterface" ]
|
||||
++ obsolete [ "programs" "virtualbox" "enableHardening" ] [ "virtualisation" "virtualbox" "host" "enableHardening" ]
|
||||
++ obsolete [ "services" "virtualboxHost" "enable" ] [ "virtualisation" "virtualbox" "host" "enable" ]
|
||||
++ obsolete [ "services" "virtualboxHost" "addNetworkInterface" ] [ "virtualisation" "virtualbox" "host" "addNetworkInterface" ]
|
||||
++ obsolete [ "services" "virtualboxHost" "enableHardening" ] [ "virtualisation" "virtualbox" "host" "enableHardening" ]
|
||||
|
||||
# Tarsnap
|
||||
++ obsolete [ "services" "tarsnap" "config" ] [ "services" "tarsnap" "archives" ]
|
||||
|
@ -37,13 +37,5 @@ in
|
||||
) cfg.profiles;
|
||||
};
|
||||
};
|
||||
|
||||
security.pam.services.apparmor.text = ''
|
||||
## AppArmor changes hats according to `order`: first try user, then
|
||||
## group, and finally fall back to a hat called "DEFAULT"
|
||||
##
|
||||
## For now, enable debugging as this is an experimental feature.
|
||||
session optional ${pkgs.apparmor-pam}/lib/security/pam_apparmor.so order=user,group,default debug
|
||||
'';
|
||||
};
|
||||
}
|
||||
|
@ -192,6 +192,16 @@ let
|
||||
description = "Whether to log authentication failures in <filename>/var/log/faillog</filename>.";
|
||||
};
|
||||
|
||||
enableAppArmor = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = ''
|
||||
Enable support for attaching AppArmor profiles at the
|
||||
user/group level, e.g., as part of a role based access
|
||||
control scheme.
|
||||
'';
|
||||
};
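
For illustration only: based on the per-service option declared above, attaching AppArmor hats to a PAM service would presumably look like the following; "su" is just an example service name, and security.apparmor.enable must also be set for the session line to be emitted.

  # Hypothetical configuration enabling pam_apparmor for one PAM service.
  security.apparmor.enable = true;
  security.pam.services.su.enableAppArmor = true;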
|
||||
|
||||
text = mkOption {
|
||||
type = types.nullOr types.lines;
|
||||
description = "Contents of the PAM service file.";
|
||||
@ -251,9 +261,9 @@ let
|
||||
${optionalString (!(config.security.pam.enableEcryptfs || cfg.pamMount)) "auth required pam_deny.so"}
|
||||
|
||||
# Password management.
|
||||
password requisite pam_unix.so nullok sha512
|
||||
${optionalString config.security.pam.enableEcryptfs
|
||||
"password optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
|
||||
password requisite pam_unix.so nullok sha512
|
||||
${optionalString cfg.pamMount
|
||||
"password optional ${pkgs.pam_mount}/lib/security/pam_mount.so"}
|
||||
${optionalString config.users.ldap.enable
|
||||
@ -294,6 +304,8 @@ let
|
||||
"session optional ${pkgs.pam}/lib/security/pam_motd.so motd=${motd}"}
|
||||
${optionalString cfg.pamMount
|
||||
"session optional ${pkgs.pam_mount}/lib/security/pam_mount.so"}
|
||||
${optionalString (cfg.enableAppArmor && config.security.apparmor.enable)
|
||||
"session optional ${pkgs.apparmor-pam}/lib/security/pam_apparmor.so order=user,group,default debug"}
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -78,12 +78,6 @@ in {
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
readOnlyPort = mkOption {
|
||||
description = "Kubernetes apiserver read-only port.";
|
||||
default = 7080;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
securePort = mkOption {
|
||||
description = "Kubernetes apiserver secure port.";
|
||||
default = 6443;
|
||||
@ -102,10 +96,16 @@ in {
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
clientCaFile = mkOption {
|
||||
description = "Kubernetes apiserver CA file for client auth.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
tokenAuth = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver token authentication file. See
|
||||
<link xlink:href="https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/authentication.md"/>
|
||||
<link xlink:href="http://kubernetes.io/v1.0/docs/admin/authentication.html"/>
|
||||
'';
|
||||
default = {};
|
||||
example = literalExample ''
|
||||
@ -120,7 +120,7 @@ in {
|
||||
authorizationMode = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC). See
|
||||
<link xlink:href="https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/authorization.md"/>
|
||||
<link xlink:href="http://kubernetes.io/v1.0/docs/admin/authorization.html"/>
|
||||
'';
|
||||
default = "AlwaysAllow";
|
||||
type = types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC"];
|
||||
@ -129,7 +129,7 @@ in {
|
||||
authorizationPolicy = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver authorization policy file. See
|
||||
<link xlink:href="https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/authorization.md"/>
|
||||
<link xlink:href="http://kubernetes.io/v1.0/docs/admin/authorization.html"/>
|
||||
'';
|
||||
default = [];
|
||||
example = literalExample ''
|
||||
@ -158,6 +158,38 @@ in {
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
runtimeConfig = mkOption {
|
||||
description = ''
|
||||
Api runtime configuration. See
|
||||
<link xlink:href="http://kubernetes.io/v1.0/docs/admin/cluster-management.html"/>
|
||||
'';
|
||||
default = "";
|
||||
example = "api/all=false,api/v1=true";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
admissionControl = mkOption {
|
||||
description = ''
|
||||
Kubernetes admission control plugins to use. See
|
||||
<link xlink:href="http://kubernetes.io/v1.0/docs/admin/admission-controllers.html"/>
|
||||
'';
|
||||
default = ["AlwaysAdmit"];
|
||||
example = [
|
||||
"NamespaceLifecycle" "NamespaceExists" "LimitRanger"
|
||||
"SecurityContextDeny" "ServiceAccount" "ResourceQuota"
|
||||
];
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
|
||||
serviceAccountKey = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
|
||||
used to verify ServiceAccount tokens.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes apiserver extra command line options.";
|
||||
default = "";
|
||||
@ -222,14 +254,26 @@ in {
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
machines = mkOption {
|
||||
description = "Kubernetes controller list of machines to schedule onto.";
|
||||
default = [];
|
||||
type = types.listOf types.str;
|
||||
serviceAccountPrivateKey = mkOption {
|
||||
description = ''
|
||||
Kubernetes controller manager PEM-encoded private RSA key file used to
|
||||
sign service account tokens
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
rootCaFile = mkOption {
|
||||
description = ''
|
||||
Kubernetes controller manager certificate authority file included in
|
||||
service account's token secret.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes controller extra command line options.";
|
||||
description = "Kubernetes controller manager extra command line options.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
@ -260,6 +304,20 @@ in {
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
healthz = {
|
||||
bind = mkOption {
|
||||
description = "Kubernetes kubelet healthz listening address.";
|
||||
default = "127.0.0.1";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Kubernetes kubelet healthz port.";
|
||||
default = 10248;
|
||||
type = types.int;
|
||||
};
|
||||
};
|
||||
|
||||
hostname = mkOption {
|
||||
description = "Kubernetes kubelet hostname override";
|
||||
default = config.networking.hostName;
|
||||
@ -273,7 +331,10 @@ in {
|
||||
};
|
||||
|
||||
apiServers = mkOption {
|
||||
description = "Kubernetes kubelet list of Kubernetes API servers for publishing events, and reading pods and services.";
|
||||
description = ''
|
||||
Kubernetes kubelet list of Kubernetes API servers for publishing events,
|
||||
and reading pods and services.
|
||||
'';
|
||||
default = ["${cfg.apiserver.address}:${toString cfg.apiserver.port}"];
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
@ -374,7 +435,6 @@ in {
|
||||
--etcd-servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \
|
||||
--insecure-bind-address=${cfg.apiserver.address} \
|
||||
--insecure-port=${toString cfg.apiserver.port} \
|
||||
--read-only-port=${toString cfg.apiserver.readOnlyPort} \
|
||||
--bind-address=${cfg.apiserver.publicAddress} \
|
||||
--allow-privileged=${if cfg.apiserver.allowPrivileged then "true" else "false"} \
|
||||
${optionalString (cfg.apiserver.tlsCertFile!="")
|
||||
@ -383,22 +443,24 @@ in {
|
||||
"--tls-private-key-file=${cfg.apiserver.tlsPrivateKeyFile}"} \
|
||||
${optionalString (cfg.apiserver.tokenAuth!=[])
|
||||
"--token-auth-file=${tokenAuthFile}"} \
|
||||
${optionalString (cfg.apiserver.clientCaFile!="")
|
||||
"--client-ca-file=${cfg.apiserver.clientCaFile}"} \
|
||||
--authorization-mode=${cfg.apiserver.authorizationMode} \
|
||||
${optionalString (cfg.apiserver.authorizationMode == "ABAC")
|
||||
"--authorization-policy-file=${authorizationPolicyFile}"} \
|
||||
--secure-port=${toString cfg.apiserver.securePort} \
|
||||
--service-cluster-ip-range=${cfg.apiserver.portalNet} \
|
||||
${optionalString (cfg.apiserver.runtimeConfig!="")
|
||||
"--runtime-config=${cfg.apiserver.runtimeConfig}"} \
|
||||
--admission_control=${concatStringsSep "," cfg.apiserver.admissionControl} \
|
||||
${optionalString (cfg.apiserver.serviceAccountKey!=null)
|
||||
"--service-account-key-file=${cfg.apiserver.serviceAccountKey}"} \
|
||||
--logtostderr=true \
|
||||
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
|
||||
${cfg.apiserver.extraOpts}
|
||||
'';
|
||||
User = "kubernetes";
|
||||
};
|
||||
postStart = ''
|
||||
until ${pkgs.curl}/bin/curl -s -o /dev/null 'http://${cfg.apiserver.address}:${toString cfg.apiserver.port}/'; do
|
||||
sleep 1;
|
||||
done
|
||||
'';
|
||||
};
|
||||
})
|
||||
|
||||
@ -431,7 +493,10 @@ in {
|
||||
--address=${cfg.controllerManager.address} \
|
||||
--port=${toString cfg.controllerManager.port} \
|
||||
--master=${cfg.controllerManager.master} \
|
||||
--machines=${concatStringsSep "," cfg.controllerManager.machines} \
|
||||
${optionalString (cfg.controllerManager.serviceAccountPrivateKey!=null)
|
||||
"--service-account-private-key-file=${cfg.controllerManager.serviceAccountPrivateKey}"} \
|
||||
${optionalString (cfg.controllerManager.rootCaFile!=null)
|
||||
"--root-ca-file=${cfg.controllerManager.rootCaFile}"} \
|
||||
--logtostderr=true \
|
||||
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
|
||||
${cfg.controllerManager.extraOpts}
|
||||
@ -454,6 +519,8 @@ in {
|
||||
--register-node=${if cfg.kubelet.registerNode then "true" else "false"} \
|
||||
--address=${cfg.kubelet.address} \
|
||||
--port=${toString cfg.kubelet.port} \
|
||||
--healthz-bind-address=${cfg.kubelet.healthz.bind} \
|
||||
--healthz-port=${toString cfg.kubelet.healthz.port} \
|
||||
--hostname-override=${cfg.kubelet.hostname} \
|
||||
--allow-privileged=${if cfg.kubelet.allowPrivileged then "true" else "false"} \
|
||||
--root-dir=${cfg.dataDir} \
|
||||
@ -483,6 +550,8 @@ in {
|
||||
${optionalString cfg.verbose "--v=6 --log-flush-frequency=1s"} \
|
||||
${cfg.proxy.extraOpts}
|
||||
'';
|
||||
Restart = "always"; # Retry connection
|
||||
RestartSec = "5s";
|
||||
};
|
||||
};
|
||||
})
|
||||
@ -504,9 +573,6 @@ in {
|
||||
User = "kubernetes";
|
||||
};
|
||||
};
|
||||
|
||||
services.skydns.enable = mkDefault true;
|
||||
services.skydns.domain = mkDefault cfg.kubelet.clusterDomain;
|
||||
})
|
||||
|
||||
(mkIf (any (el: el == "master") cfg.roles) {
|
||||
@ -524,6 +590,9 @@ in {
|
||||
|
||||
(mkIf (any (el: el == "node" || el == "master") cfg.roles) {
|
||||
services.etcd.enable = mkDefault true;
|
||||
|
||||
services.skydns.enable = mkDefault true;
|
||||
services.skydns.domain = mkDefault cfg.kubelet.clusterDomain;
|
||||
})
|
||||
|
||||
(mkIf (
|
||||
@ -538,8 +607,10 @@ in {
|
||||
serviceConfig.Type = "oneshot";
|
||||
script = ''
|
||||
mkdir -p /var/run/kubernetes
|
||||
chown kubernetes /var/run/kubernetes
|
||||
ln -fs ${pkgs.writeText "kubernetes-dockercfg" cfg.dockerCfg} /var/run/kubernetes/.dockercfg
|
||||
chown kubernetes /var/lib/kubernetes
|
||||
|
||||
rm ${cfg.dataDir}/.dockercfg || true
|
||||
ln -fs ${pkgs.writeText "kubernetes-dockercfg" cfg.dockerCfg} ${cfg.dataDir}/.dockercfg
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -154,6 +154,12 @@ in
|
||||
|
||||
config = mkIf config.services.postgresql.enable {
|
||||
|
||||
services.postgresql.package =
|
||||
# Note: when changing the default, make it conditional on
|
||||
# ‘system.stateVersion’ to maintain compatibility with existing
|
||||
# systems!
|
||||
mkDefault pkgs.postgresql94;
|
||||
|
||||
services.postgresql.authentication = mkAfter
|
||||
''
|
||||
# Generated file; do not edit!
|
||||
@ -207,6 +213,7 @@ in
|
||||
|
||||
serviceConfig =
|
||||
{ ExecStart = "@${postgresql}/bin/postgres postgres ${toString flags}";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
User = "postgres";
|
||||
Group = "postgres";
|
||||
PermissionsStartOnly = true;
|
||||
|
148
nixos/modules/services/databases/riak.nix
Normal file
@ -0,0 +1,148 @@
{ config, lib, pkgs, ... }:

with lib;

let

  cfg = config.services.riak;

in

{

  ###### interface

  options = {

    services.riak = {

      enable = mkEnableOption "riak";

      package = mkOption {
        type = types.package;
        example = literalExample "pkgs.riak2";
        description = ''
          Riak package to use.
        '';
      };

      nodeName = mkOption {
        type = types.string;
        default = "riak@127.0.0.1";
        description = ''
          Name of the Erlang node.
        '';
      };

      distributedCookie = mkOption {
        type = types.string;
        default = "riak";
        description = ''
          Cookie for distributed node communication. All nodes in the
          same cluster should use the same cookie or they will not be able to
          communicate.
        '';
      };

      dataDir = mkOption {
        type = types.path;
        default = "/var/db/riak";
        description = ''
          Data directory for Riak.
        '';
      };

      logDir = mkOption {
        type = types.path;
        default = "/var/log/riak";
        description = ''
          Log directory for Riak.
        '';
      };

      extraConfig = mkOption {
        type = types.lines;
        default = "";
        description = ''
          Additional text to be appended to <filename>riak.conf</filename>.
        '';
      };

    };

  };

  ###### implementation

  config = mkIf cfg.enable {

    environment.systemPackages = [ cfg.package ];
    environment.etc."riak/riak.conf".text = ''
      nodename = ${cfg.nodeName}
      distributed_cookie = ${cfg.distributedCookie}

      platform_log_dir = ${cfg.logDir}
      platform_etc_dir = /etc/riak
      platform_data_dir = ${cfg.dataDir}

      ${cfg.extraConfig}
    '';

    users.extraUsers.riak = {
      name = "riak";
      uid = config.ids.uids.riak;
      group = "riak";
      description = "Riak server user";
    };

    users.extraGroups.riak.gid = config.ids.gids.riak;

    systemd.services.riak = {
      description = "Riak Server";

      wantedBy = [ "multi-user.target" ];
      after = [ "network.target" ];

      path = [
        pkgs.utillinux # for `logger`
        pkgs.bash
      ];

      environment.RIAK_DATA_DIR = "${cfg.dataDir}";
      environment.RIAK_LOG_DIR = "${cfg.logDir}";
      environment.RIAK_ETC_DIR = "/etc/riak";

      preStart = ''
        if ! test -e ${cfg.logDir}; then
          mkdir -m 0755 -p ${cfg.logDir}
          chown -R riak ${cfg.logDir}
        fi

        if ! test -e ${cfg.dataDir}; then
          mkdir -m 0700 -p ${cfg.dataDir}
          chown -R riak ${cfg.dataDir}
        fi
      '';

      serviceConfig = {
        ExecStart = "${cfg.package}/bin/riak console";
        ExecStop = "${cfg.package}/bin/riak stop";
        StandardInput = "tty";
        User = "riak";
        Group = "riak";
        PermissionsStartOnly = true;
        # Give Riak a decent amount of time to clean up.
        TimeoutStopSec = 120;
        LimitNOFILE = 65536;
      };

      unitConfig.RequiresMountsFor = [
        "${cfg.dataDir}"
        "${cfg.logDir}"
        "/etc/riak"
      ];
    };

  };

}
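A minimal configuration using the new module might look like this sketch (pkgs.riak2 is the package suggested by the option example above; the ring_size setting is an assumed riak.conf entry for illustration):

  services.riak = {
    enable = true;
    package = pkgs.riak2;
    extraConfig = ''
      ring_size = 64   # example riak.conf setting, appended verbatim
    '';
  };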
@@ -39,11 +39,14 @@ with lib;

mkService = dev:
assert dev != "";
let dev' = utils.escapeSystemdPath dev; in
nameValuePair "freefall-${dev'}"
{ description = "Free-fall protection for ${dev}";
nameValuePair "freefall-${dev'}" {
description = "Free-fall protection for ${dev}";
after = [ "${dev'}.device" ];
wantedBy = [ "${dev'}.device" ];
path = [ pkgs.freefall ];
unitConfig = {
DefaultDependencies = false;
};
serviceConfig = {
ExecStart = "${pkgs.freefall}/bin/freefall ${dev}";
Restart = "on-failure";
@@ -17,8 +17,8 @@ let

# what is available directly from the PCR registers.
firmware_log_file = /sys/kernel/security/tpm0/binary_bios_measurements
kernel_log_file = /sys/kernel/security/ima/binary_runtime_measurements
#firmware_pcrs = 0,1,2,3,4,5,6,7
#kernel_pcrs = 10,11
firmware_pcrs = ${cfg.firmwarePCRs}
kernel_pcrs = ${cfg.kernelPCRs}
platform_cred = ${cfg.platformCred}
conformance_cred = ${cfg.conformanceCred}
endorsement_cred = ${cfg.endorsementCred}
@ -60,20 +60,32 @@ in
|
||||
};
|
||||
|
||||
stateDir = mkOption {
|
||||
default = "/var/lib/tpm";
|
||||
default = "/var/lib/tpm";
|
||||
type = types.path;
|
||||
description = ''
|
||||
description = ''
|
||||
The location of the system persistent storage file.
|
||||
The system persistent storage file holds keys and data across
|
||||
restarts of the TCSD and system reboots.
|
||||
'';
|
||||
'';
|
||||
};
|
||||
|
||||
firmwarePCRs = mkOption {
|
||||
default = "0,1,2,3,4,5,6,7";
|
||||
type = types.string;
|
||||
description = "PCR indices used in the TPM for firmware measurements.";
|
||||
};
|
||||
|
||||
kernelPCRs = mkOption {
|
||||
default = "8,9,10,11,12";
|
||||
type = types.string;
|
||||
description = "PCR indices used in the TPM for kernel measurements.";
|
||||
};
|
||||
|
||||
platformCred = mkOption {
|
||||
default = "${cfg.stateDir}/platform.cert";
|
||||
type = types.path;
|
||||
description = ''
|
||||
Path to the platform credential for your TPM. Your TPM
|
||||
Path to the platform credential for your TPM. Your TPM
|
||||
manufacturer may have provided you with a set of credentials
|
||||
(certificates) that should be used when creating identities
|
||||
using your TPM. When a user of your TPM makes an identity,
|
||||
|
@ -171,25 +171,23 @@ in
|
||||
};
|
||||
|
||||
hardware.firmware = mkOption {
|
||||
type = types.listOf types.path;
|
||||
type = types.listOf types.package;
|
||||
default = [];
|
||||
description = ''
|
||||
List of directories containing firmware files. Such files
|
||||
List of packages containing firmware files. Such files
|
||||
will be loaded automatically if the kernel asks for them
|
||||
(i.e., when it has detected specific hardware that requires
|
||||
firmware to function). If more than one path contains a
|
||||
firmware file with the same name, the first path in the list
|
||||
takes precedence. Note that you must rebuild your system if
|
||||
you add files to any of these directories. For quick testing,
|
||||
firmware to function). If multiple packages contain firmware
|
||||
files with the same name, the first package in the list takes
|
||||
precedence. Note that you must rebuild your system if you add
|
||||
files to any of these directories. For quick testing,
|
||||
put firmware files in <filename>/root/test-firmware</filename>
|
||||
and add that directory to the list. Note that you can also
|
||||
add firmware packages to this list as these are directories in
|
||||
the nix store.
|
||||
and add that directory to the list.
|
||||
'';
|
||||
apply = list: pkgs.buildEnv {
|
||||
name = "firmware";
|
||||
paths = list;
|
||||
pathsToLink = [ "/" ];
|
||||
pathsToLink = [ "/lib/firmware" ];
|
||||
ignoreCollisions = true;
|
||||
};
|
||||
};
|
||||
@ -236,7 +234,7 @@ in
|
||||
(isYes "NET")
|
||||
];
|
||||
|
||||
boot.extraModprobeConfig = "options firmware_class path=${config.hardware.firmware}";
|
||||
boot.extraModprobeConfig = "options firmware_class path=${config.hardware.firmware}/lib/firmware";
|
||||
|
||||
system.activationScripts.udevd =
|
||||
''
|
||||
@ -254,7 +252,7 @@ in
|
||||
|
||||
# Allow the kernel to find our firmware.
|
||||
if [ -e /sys/module/firmware_class/parameters/path ]; then
|
||||
echo -n "${config.hardware.firmware}" > /sys/module/firmware_class/parameters/path
|
||||
echo -n "${config.hardware.firmware}/lib/firmware" > /sys/module/firmware_class/parameters/path
|
||||
fi
|
||||
'';
|
||||
|
||||
|
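With hardware.firmware now typed as a list of packages, firmware is added by listing packages rather than bare directories. A sketch (the package name is an example and not part of this diff):

  hardware.firmware = [ pkgs.firmwareLinuxNonfree ];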
@@ -10,7 +10,7 @@ let

''
base_dir = /var/run/dovecot2/

protocols = ${optionalString cfg.enableImap "imap"} ${optionalString cfg.enablePop3 "pop3"}
protocols = ${optionalString cfg.enableImap "imap"} ${optionalString cfg.enablePop3 "pop3"} ${optionalString cfg.enableLmtp "lmtp"}
''
+ (if cfg.sslServerCert!="" then
''

@@ -70,6 +70,11 @@ in

description = "Start the IMAP listener (when Dovecot is enabled).";
};

enableLmtp = mkOption {
  default = false;
  description = "Start the LMTP listener (when Dovecot is enabled).";
};

user = mkOption {
  default = "dovecot2";
  description = "Dovecot user name.";
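The new LMTP listener can be switched on alongside the existing ones. A sketch, assuming the usual services.dovecot2 option prefix:

  services.dovecot2 = {
    enable = true;
    enableImap = true;
    enableLmtp = true;
  };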
@@ -11,7 +11,7 @@ let

listCtl = domain: list: "${listDir domain list}/control";
transport = domain: list: "${domain}--${list}@local.list.mlmmj mlmmj:${domain}/${list}";
virtual = domain: list: "${list}@${domain} ${domain}--${list}@local.list.mlmmj";
alias = domain: list: "${list}: \"|${pkgs.mlmmj}/mlmmj-receive -L ${listDir domain list}/\"";
alias = domain: list: "${list}: \"|${pkgs.mlmmj}/bin/mlmmj-receive -L ${listDir domain list}/\"";
subjectPrefix = list: "[${list}]";
listAddress = domain: list: "${list}@${domain}";
customHeaders = list: domain: [ "List-Id: ${list}" "Reply-To: ${list}@${domain}" ];
@@ -63,7 +63,7 @@ in {

package = mkOption {
  description = "Confd package to use.";
  default = pkgs.goPackages.confd;
  default = pkgs.confd;
  type = types.package;
};
};
@@ -122,14 +122,6 @@ in {

mkdir -m 0700 -p ${cfg.dataDir}
if [ "$(id -u)" = 0 ]; then chown etcd ${cfg.dataDir}; fi
'';
postStart = ''
  until ${pkgs.etcdctl}/bin/etcdctl set /nixos/state 'up'; do
    sleep 1;
  done
  until ${pkgs.etcdctl}/bin/etcdctl get /nixos/state | grep up; do
    sleep 1;
  done
'';
};

environment.systemPackages = [ pkgs.etcdctl ];
@ -8,6 +8,8 @@ let
|
||||
|
||||
homeDir = "/var/lib/gitit";
|
||||
|
||||
toYesNo = b: if b then "yes" else "no";
|
||||
|
||||
gititShared = with cfg.haskellPackages; gitit + "/share/" + pkgs.stdenv.system + "-" + ghc.name + "/" + gitit.pname + "-" + gitit.version;
|
||||
|
||||
gititWithPkgs = hsPkgs: extras: hsPkgs.ghcWithPackages (self: with self; [ gitit ] ++ (extras self));
|
||||
@ -17,9 +19,6 @@ let
|
||||
in writeScript "gitit" ''
|
||||
#!${stdenv.shell}
|
||||
cd $HOME
|
||||
export PATH="${makeSearchPath "bin" (
|
||||
[ git curl ] ++ (if cfg.pdfExport == "yes" then [texLiveFull] else [])
|
||||
)}:$PATH";
|
||||
export NIX_GHC="${env}/bin/ghc"
|
||||
export NIX_GHCPKG="${env}/bin/ghc-pkg"
|
||||
export NIX_GHC_DOCDIR="${env}/share/doc/ghc/html"
|
||||
@ -27,11 +26,7 @@ let
|
||||
${env}/bin/gitit -f ${configFile}
|
||||
'';
|
||||
|
||||
gititOptions = let
|
||||
|
||||
yesNo = types.enum [ "yes" "no" ];
|
||||
|
||||
in {
|
||||
gititOptions = {
|
||||
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
@ -40,7 +35,6 @@ let
|
||||
};
|
||||
|
||||
haskellPackages = mkOption {
|
||||
default = pkgs.haskellPackages;
|
||||
defaultText = "pkgs.haskellPackages";
|
||||
example = literalExample "pkgs.haskell.packages.ghc784";
|
||||
description = "haskellPackages used to build gitit and plugins.";
|
||||
@ -143,7 +137,6 @@ let
|
||||
|
||||
staticDir = mkOption {
|
||||
type = types.path;
|
||||
default = gititShared + "/data/static";
|
||||
description = ''
|
||||
Specifies the path of the static directory (containing javascript,
|
||||
css, and images). If it does not exist, gitit will create it and
|
||||
@ -204,8 +197,8 @@ let
|
||||
};
|
||||
|
||||
showLhsBirdTracks = mkOption {
|
||||
type = yesNo;
|
||||
default = "no";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Specifies whether to show Haskell code blocks in "bird style", with
|
||||
"> " at the beginning of each line.
|
||||
@ -214,7 +207,6 @@ let
|
||||
|
||||
templatesDir = mkOption {
|
||||
type = types.path;
|
||||
default = gititShared + "/data/templates";
|
||||
description = ''
|
||||
Specifies the path of the directory containing page templates. If it
|
||||
does not exist, gitit will create it with default templates. Users
|
||||
@ -286,8 +278,8 @@ let
|
||||
};
|
||||
|
||||
tableOfContents = mkOption {
|
||||
type = yesNo;
|
||||
default = "yes";
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Specifies whether to print a tables of contents (with links to
|
||||
sections) on each wiki page.
|
||||
@ -295,23 +287,18 @@ let
|
||||
};
|
||||
|
||||
plugins = mkOption {
|
||||
type = types.path;
|
||||
default = gititShared + "/plugins/Dot.hs";
|
||||
type = with types; listOf str;
|
||||
description = ''
|
||||
Specifies a list of plugins to load. Plugins may be specified either
|
||||
by their path or by their module name. If the plugin name starts
|
||||
Specifies a list of plugins to load. Plugins may be specified either
|
||||
by their path or by their module name. If the plugin name starts
|
||||
with Gitit.Plugin., gitit will assume that the plugin is an installed
|
||||
module and will not try to find a source file.
|
||||
Examples:
|
||||
plugins: plugins/DotPlugin.hs, CapitalizeEmphasisPlugin.hs
|
||||
plugins: plugins/DotPlugin
|
||||
plugins: Gitit.Plugin.InterwikiLinks
|
||||
'';
|
||||
};
|
||||
|
||||
useCache = mkOption {
|
||||
type = yesNo;
|
||||
default = "no";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Specifies whether to cache rendered pages. Note that if use-feed is
|
||||
selected, feeds will be cached regardless of the value of use-cache.
|
||||
@ -342,14 +329,14 @@ let
|
||||
};
|
||||
|
||||
debugMode = mkOption {
|
||||
type = yesNo;
|
||||
default = "no";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Causes debug information to be logged while gitit is running.";
|
||||
};
|
||||
|
||||
compressResponses = mkOption {
|
||||
type = yesNo;
|
||||
default = "yes";
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = "Specifies whether HTTP responses should be compressed.";
|
||||
};
|
||||
|
||||
@ -361,16 +348,18 @@ let
|
||||
line of the file should contain two fields, separated by whitespace.
|
||||
The first field is the mime type, the second is a file extension.
|
||||
For example:
|
||||
video/x-ms-wmx wmx
|
||||
<programlisting>
|
||||
video/x-ms-wmx wmx
|
||||
</programlisting>
|
||||
If the file is not found, some simple defaults will be used.
|
||||
'';
|
||||
};
|
||||
|
||||
useReCaptcha = mkOption {
|
||||
type = yesNo;
|
||||
default = "no";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
If "yes", causes gitit to use the reCAPTCHA service
|
||||
If true, causes gitit to use the reCAPTCHA service
|
||||
(http://recaptcha.net) to prevent bots from creating accounts.
|
||||
'';
|
||||
};
|
||||
@ -475,8 +464,8 @@ let
|
||||
};
|
||||
|
||||
useFeed = mkOption {
|
||||
type = yesNo;
|
||||
default = "no";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Specifies whether an ATOM feed should be enabled (for the site and
|
||||
for individual pages).
|
||||
@ -488,19 +477,19 @@ let
|
||||
default = null;
|
||||
description = ''
|
||||
The base URL of the wiki, to be used in constructing feed IDs and RPX
|
||||
token_urls. Set this if use-feed is 'yes' or authentication-method
|
||||
token_urls. Set this if useFeed is false or authentication-method
|
||||
is 'rpx'.
|
||||
'';
|
||||
};
|
||||
|
||||
absoluteUrls = mkOption {
|
||||
type = yesNo;
|
||||
default = "no";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Make wikilinks absolute with respect to the base-url. So, for
|
||||
example, in a wiki served at the base URL '/wiki', on a page
|
||||
Sub/Page, the wikilink '[Cactus]()' will produce a link to
|
||||
'/wiki/Cactus' if absolute-urls is 'yes', and a relative link to
|
||||
'/wiki/Cactus' if absoluteUrls is true, and a relative link to
|
||||
'Cactus' (referring to '/wiki/Sub/Cactus') if absolute-urls is 'no'.
|
||||
'';
|
||||
};
|
||||
@ -518,10 +507,10 @@ let
|
||||
};
|
||||
|
||||
pdfExport = mkOption {
|
||||
type = yesNo;
|
||||
default = "no";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
If yes, PDF will appear in export options. PDF will be created using
|
||||
If true, PDF will appear in export options. PDF will be created using
|
||||
pdflatex, which must be installed and in the path. Note that PDF
|
||||
exports create significant additional server load.
|
||||
'';
|
||||
@ -541,10 +530,10 @@ let
|
||||
};
|
||||
|
||||
xssSanitize = mkOption {
|
||||
type = yesNo;
|
||||
default = "yes";
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
If yes, all HTML (including that produced by pandoc) is filtered
|
||||
If true, all HTML (including that produced by pandoc) is filtered
|
||||
through xss-sanitize. Set to no only if you trust all of your users.
|
||||
'';
|
||||
};
|
||||
@ -564,7 +553,7 @@ let
|
||||
default-page-type: ${cfg.defaultPageType}
|
||||
math: ${cfg.math}
|
||||
mathjax-script: ${cfg.mathJaxScript}
|
||||
show-lhs-bird-tracks: ${cfg.showLhsBirdTracks}
|
||||
show-lhs-bird-tracks: ${toYesNo cfg.showLhsBirdTracks}
|
||||
templates-dir: ${cfg.templatesDir}
|
||||
log-file: ${cfg.logFile}
|
||||
log-level: ${cfg.logLevel}
|
||||
@ -572,16 +561,16 @@ let
|
||||
no-delete: ${cfg.noDelete}
|
||||
no-edit: ${cfg.noEdit}
|
||||
default-summary: ${cfg.defaultSummary}
|
||||
table-of-contents: ${cfg.tableOfContents}
|
||||
plugins: ${cfg.plugins}
|
||||
use-cache: ${cfg.useCache}
|
||||
table-of-contents: ${toYesNo cfg.tableOfContents}
|
||||
plugins: ${concatStringsSep "," cfg.plugins}
|
||||
use-cache: ${toYesNo cfg.useCache}
|
||||
cache-dir: ${cfg.cacheDir}
|
||||
max-upload-size: ${cfg.maxUploadSize}
|
||||
max-page-size: ${cfg.maxPageSize}
|
||||
debug-mode: ${cfg.debugMode}
|
||||
compress-responses: ${cfg.compressResponses}
|
||||
debug-mode: ${toYesNo cfg.debugMode}
|
||||
compress-responses: ${toYesNo cfg.compressResponses}
|
||||
mime-types-file: ${cfg.mimeTypesFile}
|
||||
use-recaptcha: ${cfg.useReCaptcha}
|
||||
use-recaptcha: ${toYesNo cfg.useReCaptcha}
|
||||
recaptcha-private-key: ${toString cfg.reCaptchaPrivateKey}
|
||||
recaptcha-public-key: ${toString cfg.reCaptchaPublicKey}
|
||||
access-question: ${cfg.accessQuestion}
|
||||
@ -590,14 +579,14 @@ let
|
||||
rpx-key: ${toString cfg.rpxKey}
|
||||
mail-command: ${cfg.mailCommand}
|
||||
reset-password-message: ${cfg.resetPasswordMessage}
|
||||
use-feed: ${cfg.useFeed}
|
||||
use-feed: ${toYesNo cfg.useFeed}
|
||||
base-url: ${toString cfg.baseUrl}
|
||||
absolute-urls: ${cfg.absoluteUrls}
|
||||
absolute-urls: ${toYesNo cfg.absoluteUrls}
|
||||
feed-days: ${toString cfg.feedDays}
|
||||
feed-refresh-time: ${toString cfg.feedRefreshTime}
|
||||
pdf-export: ${cfg.pdfExport}
|
||||
pdf-export: ${toYesNo cfg.pdfExport}
|
||||
pandoc-user-data: ${toString cfg.pandocUserData}
|
||||
xss-sanitize: ${cfg.xssSanitize}
|
||||
xss-sanitize: ${toYesNo cfg.xssSanitize}
|
||||
'';
|
||||
|
||||
in
|
||||
@ -608,6 +597,13 @@ in
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
services.gitit = {
|
||||
haskellPackages = mkDefault pkgs.haskellPackages;
|
||||
staticDir = gititShared + "/data/static";
|
||||
templatesDir = gititShared + "/data/templates";
|
||||
plugins = [ ];
|
||||
};
|
||||
|
||||
users.extraUsers.gitit = {
|
||||
group = config.users.extraGroups.gitit.name;
|
||||
description = "Gitit user";
|
||||
@ -625,8 +621,16 @@ in
|
||||
description = "Git and Pandoc Powered Wiki";
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = with pkgs; [ curl ]
|
||||
++ optional cfg.pdfExport texLiveFull
|
||||
++ optional (cfg.repositoryType == "darcs") darcs
|
||||
++ optional (cfg.repositoryType == "mercurial") mercurial
|
||||
++ optional (cfg.repositoryType == "git") git;
|
||||
|
||||
preStart = with cfg; ''
|
||||
preStart = let
|
||||
gm = "gitit@${config.networking.hostName}";
|
||||
in
|
||||
with cfg; ''
|
||||
chown ${uid}:${gid} -R ${homeDir}
|
||||
for dir in ${repositoryPath} ${staticDir} ${templatesDir} ${cacheDir}
|
||||
do
|
||||
@ -638,14 +642,35 @@ in
|
||||
fi
|
||||
done
|
||||
cd ${repositoryPath}
|
||||
if [ ! -d .git ]
|
||||
then
|
||||
${pkgs.git}/bin/git init
|
||||
${pkgs.git}/bin/git config user.email "gitit@${config.networking.hostName}"
|
||||
${pkgs.git}/bin/git config user.name "gitit"
|
||||
chown ${uid}:${gid} -R {repositoryPath}
|
||||
fi
|
||||
cd -
|
||||
${
|
||||
if repositoryType == "darcs" then
|
||||
''
|
||||
if [ ! -d _darcs ]
|
||||
then
|
||||
${pkgs.darcs}/bin/darcs initialize
|
||||
echo "${gm}" > _darcs/prefs/email
|
||||
''
|
||||
else if repositoryType == "mercurial" then
|
||||
''
|
||||
if [ ! -d .hg ]
|
||||
then
|
||||
${pkgs.mercurial}/bin/hg init
|
||||
cat >> .hg/hgrc <<NAMED
|
||||
[ui]
|
||||
username = gitit ${gm}
|
||||
NAMED
|
||||
''
|
||||
else
|
||||
''
|
||||
if [ ! -d .git ]
|
||||
then
|
||||
${pkgs.git}/bin/git init
|
||||
${pkgs.git}/bin/git config user.email "${gm}"
|
||||
${pkgs.git}/bin/git config user.name "gitit"
|
||||
''}
|
||||
chown ${uid}:${gid} -R ${repositoryPath}
|
||||
fi
|
||||
cd -
|
||||
'';
|
||||
|
||||
serviceConfig = {
|
||||
|
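Since the former yes/no string options are now proper booleans and plugins is a list, an overriding gitit configuration reads more naturally. A sketch using only options shown in this diff:

  services.gitit = {
    enable = true;
    tableOfContents = true;
    pdfExport = false;
    plugins = [ ];        # plugin paths or module names
  };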
@ -78,8 +78,8 @@ in
|
||||
description = ''
|
||||
This option defines the maximum number of jobs that Nix will try
|
||||
to build in parallel. The default is 1. You should generally
|
||||
set it to the number of CPUs in your system (e.g., 2 on an Athlon
|
||||
64 X2).
|
||||
set it to the total number of logical cores in your system (e.g., 16
|
||||
for two CPUs with 4 cores each and hyper-threading).
|
||||
'';
|
||||
};
|
||||
|
||||
@ -254,7 +254,7 @@ in
|
||||
|
||||
requireSignedBinaryCaches = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
default = true;
|
||||
description = ''
|
||||
If enabled, Nix will only download binaries from binary
|
||||
caches if they are cryptographically signed with any of the
|
||||
@ -309,6 +309,20 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
nixPath = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default =
|
||||
[ "/nix/var/nix/profiles/per-user/root/channels/nixos"
|
||||
"nixos-config=/etc/nixos/configuration.nix"
|
||||
"/nix/var/nix/profiles/per-user/root/channels"
|
||||
];
|
||||
description = ''
|
||||
The default Nix expression search path, used by the Nix
|
||||
evaluator to look up paths enclosed in angle brackets
|
||||
(e.g. <literal><nixpkgs></literal>).
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
@ -378,7 +392,9 @@ in
|
||||
};
|
||||
|
||||
# Set up the environment variables for running Nix.
|
||||
environment.sessionVariables = cfg.envVars;
|
||||
environment.sessionVariables = cfg.envVars //
|
||||
{ NIX_PATH = concatStringsSep ":" cfg.nixPath;
|
||||
};
|
||||
|
||||
environment.extraInit =
|
||||
''
|
||||
|
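The new nix.nixPath option replaces ad-hoc NIX_PATH exports in the session environment. A sketch pointing <nixpkgs> at a local checkout (the checkout path is a hypothetical example):

  nix.nixPath = [
    "nixpkgs=/home/alice/nixpkgs"                  # hypothetical local checkout
    "nixos-config=/etc/nixos/configuration.nix"
  ];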
@@ -52,7 +52,7 @@ in

systemd.services.nix-gc =
{ description = "Nix Garbage Collector";
  script = "exec ${config.nix.package}/bin/nix-collect-garbage ${cfg.options}";
  script = "exec ${config.nix.package}/bin/nix-store --gc ${cfg.options}";
  startAt = optionalString cfg.automatic cfg.dates;
};
@@ -52,6 +52,7 @@ in

TTYPath = "/dev/${cfg.tty}";
TTYReset = true;
TTYVTDisallocate = true;
WorkingDirectory = "/tmp";
Restart = "always";
};
};
@ -51,12 +51,37 @@ let
|
||||
# ganglia_port: 8651
|
||||
'';
|
||||
|
||||
diskConfig = pkgs.writeText "disk.yaml" ''
|
||||
init_config:
|
||||
|
||||
instances:
|
||||
- use_mount: no
|
||||
'';
|
||||
|
||||
networkConfig = pkgs.writeText "network.yaml" ''
|
||||
init_config:
|
||||
|
||||
instances:
|
||||
# Network check only supports one configured instance
|
||||
- collect_connection_state: false
|
||||
excluded_interfaces:
|
||||
- lo
|
||||
- lo0
|
||||
'';
|
||||
|
||||
postgresqlConfig = pkgs.writeText "postgres.yaml" cfg.postgresqlConfig;
|
||||
nginxConfig = pkgs.writeText "nginx.yaml" cfg.nginxConfig;
|
||||
|
||||
mongoConfig = pkgs.writeText "mongo.yaml" cfg.mongoConfig;
|
||||
|
||||
etcfiles =
|
||||
[ { source = ddConf;
|
||||
target = "dd-agent/datadog.conf";
|
||||
}
|
||||
{ source = diskConfig;
|
||||
target = "dd-agent/conf.d/disk.yaml";
|
||||
}
|
||||
{ source = networkConfig;
|
||||
target = "dd-agent/conf.d/network.yaml";
|
||||
} ] ++
|
||||
(optional (cfg.postgresqlConfig != null)
|
||||
{ source = postgresqlConfig;
|
||||
@ -65,6 +90,10 @@ let
|
||||
(optional (cfg.nginxConfig != null)
|
||||
{ source = nginxConfig;
|
||||
target = "dd-agent/conf.d/nginx.yaml";
|
||||
}) ++
|
||||
(optional (cfg.mongoConfig != null)
|
||||
{ source = mongoConfig;
|
||||
target = "dd-agent/conf.d/mongo.yaml";
|
||||
});
|
||||
|
||||
in {
|
||||
@ -106,6 +135,12 @@ in {
|
||||
default = null;
|
||||
type = types.uniq (types.nullOr types.string);
|
||||
};
|
||||
|
||||
mongoConfig = mkOption {
|
||||
description = "MongoDB integration configuration";
|
||||
default = null;
|
||||
type = types.uniq (types.nullOr types.string);
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
@ -123,7 +158,7 @@ in {
|
||||
|
||||
systemd.services.dd-agent = {
|
||||
description = "Datadog agent monitor";
|
||||
path = [ pkgs."dd-agent" pkgs.python pkgs.sysstat pkgs.procps];
|
||||
path = [ pkgs."dd-agent" pkgs.python pkgs.sysstat pkgs.procps ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.dd-agent}/bin/dd-agent foreground";
|
||||
@ -132,7 +167,7 @@ in {
|
||||
Restart = "always";
|
||||
RestartSec = 2;
|
||||
};
|
||||
restartTriggers = [ pkgs.dd-agent ddConf postgresqlConfig nginxConfig ];
|
||||
restartTriggers = [ pkgs.dd-agent ddConf diskConfig networkConfig postgresqlConfig nginxConfig mongoConfig ];
|
||||
};
|
||||
|
||||
systemd.services.dogstatsd = {
|
||||
@ -149,7 +184,7 @@ in {
|
||||
RestartSec = 2;
|
||||
};
|
||||
environment.SSL_CERT_FILE = "/etc/ssl/certs/ca-bundle.crt";
|
||||
restartTriggers = [ pkgs.dd-agent ddConf postgresqlConfig nginxConfig ];
|
||||
restartTriggers = [ pkgs.dd-agent ddConf diskConfig networkConfig postgresqlConfig nginxConfig mongoConfig ];
|
||||
};
|
||||
|
||||
environment.etc = etcfiles;
|
||||
|
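The new mongoConfig option is written verbatim to conf.d/mongo.yaml. A sketch using the customary dd-agent check layout (the connection string is a hypothetical local instance):

  services.dd-agent.mongoConfig = ''
    init_config:

    instances:
      - server: mongodb://localhost:27017   # hypothetical instance
  '';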
@@ -200,13 +200,13 @@ in {

staticRootPath = mkOption {
  description = "Root path for static assets.";
  default = "${cfg.package}/share/go/src/github.com/grafana/grafana/public";
  default = "${cfg.package.out}/share/go/src/github.com/grafana/grafana/public";
  type = types.str;
};

package = mkOption {
  description = "Package to use.";
  default = pkgs.goPackages.grafana;
  default = pkgs.grafana-backend;
  type = types.package;
};

@@ -319,7 +319,7 @@ in {

wantedBy = ["multi-user.target"];
after = ["networking.target"];
serviceConfig = {
  ExecStart = "${cfg.package}/bin/grafana --config ${cfgFile} web";
  ExecStart = "${cfg.package-backend}/bin/grafana --config ${cfgFile} web";
  WorkingDirectory = cfg.dataDir;
  User = "grafana";
};
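With the backend now built as a separate package, the module default can be overridden like any other package option. A sketch (enable is assumed to exist on this module):

  services.grafana = {
    enable = true;
    package = pkgs.grafana-backend;
  };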
@@ -20,9 +20,11 @@ let

cfg.collectors)}
'';

cmdLineOpts = concatStringsSep " " (
  [ "-h=${cfg.bosunHost}" "-c=${collectors}" ] ++ cfg.extraOpts
);
conf = pkgs.writeText "scollector.toml" ''
  Host = "${cfg.bosunHost}"
  ColDir = "${collectors}"
  ${cfg.extraConfig}
'';

in {

@@ -92,6 +94,14 @@ in {

'';
};

extraConfig = mkOption {
  type = types.lines;
  default = "";
  description = ''
    Extra scollector configuration added to the end of scollector.toml
  '';
};

};

};

@@ -108,7 +118,7 @@ in {

PermissionsStartOnly = true;
User = cfg.user;
Group = cfg.group;
ExecStart = "${cfg.package}/bin/scollector ${cmdLineOpts}";
ExecStart = "${cfg.package}/bin/scollector -conf=${conf} ${lib.concatStringsSep " " cfg.extraOpts}";
};
};
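extraConfig is appended verbatim to the generated scollector.toml. A sketch (the Bosun host is a hypothetical endpoint; enable is assumed to exist on this module):

  services.scollector = {
    enable = true;
    bosunHost = "bosun.example.com:8070";   # hypothetical Bosun endpoint
    extraConfig = ''
      # additional scollector.toml settings, appended as-is
    '';
  };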
@ -4,8 +4,66 @@ with lib;
|
||||
|
||||
let
|
||||
|
||||
host = config.networking.hostName or "unknown"
|
||||
+ optionalString (config.networking.domain != null) ".${config.networking.domain}";
|
||||
|
||||
cfg = config.services.smartd;
|
||||
|
||||
nm = cfg.notifications.mail;
|
||||
nw = cfg.notifications.wall;
|
||||
nx = cfg.notifications.x11;
|
||||
|
||||
smartdNotify = pkgs.writeScript "smartd-notify.sh" ''
|
||||
#! ${pkgs.stdenv.shell}
|
||||
${optionalString nm.enable ''
|
||||
{
|
||||
cat << EOF
|
||||
From: smartd on ${host} <root>
|
||||
To: undisclosed-recipients:;
|
||||
Subject: SMART error on $SMARTD_DEVICESTRING: $SMARTD_FAILTYPE
|
||||
|
||||
$SMARTD_FULLMESSAGE
|
||||
EOF
|
||||
|
||||
${pkgs.smartmontools}/sbin/smartctl -a -d "$SMARTD_DEVICETYPE" "$SMARTD_DEVICE"
|
||||
} | ${nm.mailer} -i "${nm.recipient}"
|
||||
''}
|
||||
${optionalString nw.enable ''
|
||||
{
|
||||
cat << EOF
|
||||
Problem detected with disk: $SMARTD_DEVICESTRING
|
||||
Warning message from smartd is:
|
||||
|
||||
$SMARTD_MESSAGE
|
||||
EOF
|
||||
} | ${pkgs.utillinux}/bin/wall 2>/dev/null
|
||||
''}
|
||||
${optionalString nx.enable ''
|
||||
export DISPLAY=${nx.display}
|
||||
{
|
||||
cat << EOF
|
||||
Problem detected with disk: $SMARTD_DEVICESTRING
|
||||
Warning message from smartd is:
|
||||
|
||||
$SMARTD_FULLMESSAGE
|
||||
EOF
|
||||
} | ${pkgs.xorg.xmessage}/bin/xmessage -file - 2>/dev/null &
|
||||
''}
|
||||
'';
|
||||
|
||||
notifyOpts = optionalString (nm.enable || nw.enable || nx.enable)
|
||||
("-m <nomailer> -M exec ${smartdNotify} " + optionalString cfg.notifications.test "-M test ");
|
||||
|
||||
smartdConf = pkgs.writeText "smartd.conf" ''
|
||||
# Autogenerated smartd startup config file
|
||||
DEFAULT ${notifyOpts}${cfg.defaults.monitored}
|
||||
|
||||
${concatMapStringsSep "\n" (d: "${d.device} ${d.options}") cfg.devices}
|
||||
|
||||
${optionalString cfg.autodetect
|
||||
"DEVICESCAN ${notifyOpts}${cfg.defaults.autodetected}"}
|
||||
'';
|
||||
|
||||
smartdOpts = { name, ... }: {
|
||||
|
||||
options = {
|
||||
@ -22,34 +80,11 @@ let
|
||||
type = types.separatedString " ";
|
||||
description = "Options that determine how smartd monitors the device.";
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
smartdMail = pkgs.writeScript "smartdmail.sh" ''
|
||||
#! ${pkgs.stdenv.shell}
|
||||
TMPNAM=/tmp/smartd-message.$$.tmp
|
||||
if test -n "$SMARTD_ADDRESS"; then
|
||||
echo >"$TMPNAM" "From: smartd <root>"
|
||||
echo >>"$TMPNAM" 'To: undisclosed-recipients:;'
|
||||
echo >>"$TMPNAM" "Subject: $SMARTD_SUBJECT"
|
||||
echo >>"$TMPNAM"
|
||||
echo >>"$TMPNAM" "Failure on $SMARTD_DEVICESTRING: $SMARTD_FAILTYPE"
|
||||
echo >>"$TMPNAM"
|
||||
cat >>"$TMPNAM"
|
||||
${pkgs.smartmontools}/sbin/smartctl >>"$TMPNAM" -a -d "$SMARTD_DEVICETYPE" "$SMARTD_DEVICE"
|
||||
/var/setuid-wrappers/sendmail <"$TMPNAM" -f "$SENDER" -i "$SMARTD_ADDRESS"
|
||||
fi
|
||||
'';
|
||||
|
||||
smartdConf = pkgs.writeText "smartd.conf" (concatMapStrings (device:
|
||||
''
|
||||
${device.device} -a -m root -M exec ${smartdMail} ${device.options} ${cfg.deviceOpts}
|
||||
''
|
||||
) cfg.devices);
|
||||
|
||||
smartdFlags = if (cfg.devices == []) then "" else "--configfile=${smartdConf}";
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
@ -59,26 +94,104 @@ in
|
||||
|
||||
services.smartd = {
|
||||
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
enable = mkEnableOption "smartd daemon from <literal>smartmontools</literal> package";
|
||||
|
||||
autodetect = mkOption {
|
||||
default = true;
|
||||
type = types.bool;
|
||||
example = true;
|
||||
description = ''
|
||||
Run smartd from the smartmontools package. Note that e-mail
|
||||
notifications will not be enabled unless you configure the list of
|
||||
devices with <varname>services.smartd.devices</varname> as well.
|
||||
Whenever smartd should monitor all devices connected to the
|
||||
machine at the time it's being started (the default).
|
||||
|
||||
Set to false to monitor the devices listed in
|
||||
<option>services.smartd.devices</option> only.
|
||||
'';
|
||||
};
|
||||
|
||||
deviceOpts = mkOption {
|
||||
default = "";
|
||||
type = types.string;
|
||||
example = "-o on -s (S/../.././02|L/../../7/04)";
|
||||
description = ''
|
||||
Additional options for each device that is monitored. The example
|
||||
turns on SMART Automatic Offline Testing on startup, and schedules short
|
||||
self-tests daily, and long self-tests weekly.
|
||||
'';
|
||||
notifications = {
|
||||
|
||||
mail = {
|
||||
enable = mkOption {
|
||||
default = config.services.mail.sendmailSetuidWrapper != null;
|
||||
type = types.bool;
|
||||
description = "Whenever to send e-mail notifications.";
|
||||
};
|
||||
|
||||
recipient = mkOption {
|
||||
default = "root";
|
||||
type = types.string;
|
||||
description = "Recipient of the notification messages.";
|
||||
};
|
||||
|
||||
mailer = mkOption {
|
||||
default = "/var/setuid-wrappers/sendmail";
|
||||
type = types.path;
|
||||
description = ''
|
||||
Sendmail-compatible binary to be used to send the messages.
|
||||
|
||||
You should probably enable
|
||||
<option>services.postfix</option> or some other MTA for
|
||||
this to work.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
wall = {
|
||||
enable = mkOption {
|
||||
default = true;
|
||||
type = types.bool;
|
||||
description = "Whenever to send wall notifications to all users.";
|
||||
};
|
||||
};
|
||||
|
||||
x11 = {
|
||||
enable = mkOption {
|
||||
default = config.services.xserver.enable;
|
||||
type = types.bool;
|
||||
description = "Whenever to send X11 xmessage notifications.";
|
||||
};
|
||||
|
||||
display = mkOption {
|
||||
default = ":${toString config.services.xserver.display}";
|
||||
type = types.string;
|
||||
description = "DISPLAY to send X11 notifications to.";
|
||||
};
|
||||
};
|
||||
|
||||
test = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = "Whenever to send a test notification on startup.";
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
defaults = {
|
||||
monitored = mkOption {
|
||||
default = "-a";
|
||||
type = types.separatedString " ";
|
||||
example = "-a -o on -s (S/../.././02|L/../../7/04)";
|
||||
description = ''
|
||||
Common default options for explicitly monitored (listed in
|
||||
<option>services.smartd.devices</option>) devices.
|
||||
|
||||
The default value turns on monitoring of all the things (see
|
||||
<literal>man 5 smartd.conf</literal>).
|
||||
|
||||
The example also turns on SMART Automatic Offline Testing on
|
||||
startup, and schedules short self-tests daily, and long
|
||||
self-tests weekly.
|
||||
'';
|
||||
};
|
||||
|
||||
autodetected = mkOption {
|
||||
default = cfg.defaults.monitored;
|
||||
type = types.separatedString " ";
|
||||
description = ''
|
||||
Like <option>services.smartd.defaults.monitored</option>, but for the
|
||||
autodetected devices.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
devices = mkOption {
|
||||
@ -86,14 +199,9 @@ in
|
||||
example = [ { device = "/dev/sda"; } { device = "/dev/sdb"; options = "-d sat"; } ];
|
||||
type = types.listOf types.optionSet;
|
||||
options = [ smartdOpts ];
|
||||
description = ''
|
||||
List of devices to monitor. By default -- if this list is empty --,
|
||||
smartd will monitor all devices connected to the machine at the time
|
||||
it's being run. Configuring this option has the added benefit of
|
||||
enabling e-mail notifications to "root" every time smartd detects an
|
||||
error.
|
||||
'';
|
||||
};
|
||||
description = "List of devices to monitor.";
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
@ -103,12 +211,19 @@ in
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
assertions = [ {
|
||||
assertion = cfg.autodetect || cfg.devices != [];
|
||||
message = "smartd can't run with both disabled autodetect and an empty list of devices to monitor.";
|
||||
} ];
|
||||
|
||||
systemd.services.smartd = {
|
||||
description = "S.M.A.R.T. Daemon";
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
serviceConfig.ExecStart = "${pkgs.smartmontools}/sbin/smartd --no-fork ${smartdFlags}";
|
||||
path = [ pkgs.nettools ]; # for hostname and dnsdomanname calls in smartd
|
||||
|
||||
serviceConfig.ExecStart = "${pkgs.smartmontools}/sbin/smartd --no-fork --configfile=${smartdConf}";
|
||||
};
|
||||
|
||||
};
|
||||
|
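A configuration exercising the new smartd notification options might look like the following sketch (the recipient is an example; the devices list repeats the example from the option above):

  services.smartd = {
    enable = true;
    notifications = {
      mail.enable = true;
      mail.recipient = "admin";   # hypothetical recipient
      wall.enable = false;
      test = true;
    };
    devices = [ { device = "/dev/sda"; } { device = "/dev/sdb"; options = "-d sat"; } ];
  };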
@@ -88,10 +88,7 @@ in

environment.systemPackages = [ pkgs.nfs-utils ];

environment.etc = singleton
  { source = exports;
    target = "exports";
  };
environment.etc.exports.source = exports;

boot.kernelModules = [ "nfsd" ];
@ -1,53 +1,53 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
cfg = config.services.copy-com;
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
options = {
|
||||
|
||||
services.copy-com = {
|
||||
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
description = "
|
||||
Enable the copy.com client.
|
||||
|
||||
The first time copy.com is run, it needs to be configured. Before enabling run
|
||||
copy_console manually.
|
||||
";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
description = "The user for which copy should run.";
|
||||
};
|
||||
|
||||
debug = mkOption {
|
||||
default = false;
|
||||
description = "Output more.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
environment.systemPackages = [ pkgs.postfix ];
|
||||
|
||||
systemd.services."copy-com-${cfg.user}" = {
|
||||
description = "Copy.com Client";
|
||||
after = [ "network.target" "local-fs.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.copy-com}/bin/copy_console ${if cfg.debug then "-consoleOutput -debugToConsole=dirwatch,path-watch,csm_path,csm -debug -console" else ""}";
|
||||
User = "${cfg.user}";
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
cfg = config.services.copy-com;
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
options = {
|
||||
|
||||
services.copy-com = {
|
||||
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
description = "
|
||||
Enable the Copy.com client.
|
||||
NOTE: before enabling the client for the first time, it must be
|
||||
configured by first running CopyConsole (command line) or CopyAgent
|
||||
(graphical) as the appropriate user.
|
||||
";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
description = "The user for which the Copy.com client should be run.";
|
||||
};
|
||||
|
||||
debug = mkOption {
|
||||
default = false;
|
||||
description = "Output more (debugging) messages to the console.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
environment.systemPackages = [ pkgs.postfix ];
|
||||
|
||||
systemd.services."copy-com-${cfg.user}" = {
|
||||
description = "Copy.com client";
|
||||
after = [ "network.target" "local-fs.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.copy-com}/bin/CopyConsole ${if cfg.debug then "-consoleOutput -debugToConsole=dirwatch,path-watch,csm_path,csm -debug -console" else ""}";
|
||||
User = "${cfg.user}";
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
|
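A sketch of enabling the Copy.com client for one user (the user name is a hypothetical example; as the description notes, run CopyConsole by hand once before enabling):

  services.copy-com = {
    enable = true;
    user = "alice";    # hypothetical user
    debug = false;
  };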
@@ -98,6 +98,7 @@ in

ExecStart = "${dnsmasq}/bin/dnsmasq -k --enable-dbus --user=dnsmasq -C ${dnsmasqConf}";
ExecReload = "${dnsmasq}/bin/kill -HUP $MAINPID";
};
restartTriggers = [ config.environment.etc.hosts.source ];
};

};
nixos/modules/services/networking/fan.nix | 60 (new file)
@@ -0,0 +1,60 @@
{ config, lib, pkgs, ... }:

with lib;

let

  cfg = config.networking.fan;
  modprobe = "${config.system.sbin.modprobe}/sbin/modprobe";

in

{

  ###### interface

  options = {

    networking.fan = {

      enable = mkEnableOption "FAN Networking";

    };

  };


  ###### implementation

  config = mkIf cfg.enable {

    environment.systemPackages = [ pkgs.fanctl ];

    systemd.services.fan = {
      description = "FAN Networking";
      wantedBy = [ "multi-user.target" ];
      after = [ "network-online.target" ];
      before = [ "docker.service" ];
      restartIfChanged = false;
      preStart = ''
        if [ ! -f /proc/sys/net/fan/version ]; then
          ${modprobe} ipip
          if [ ! -f /proc/sys/net/fan/version ]; then
            echo "The Fan Networking patches have not been applied to this kernel!" 1>&2
            exit 1
          fi
        fi

        mkdir -p /var/lib/fan-networking
      '';
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
        ExecStart = "${pkgs.fanctl}/bin/fanctl up -a";
        ExecStop = "${pkgs.fanctl}/bin/fanctl down -a";
      };
    };

  };

}
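Enabling the new module is a single switch; as the preStart check above enforces, the kernel must carry the Fan Networking patches:

  networking.fan.enable = true;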
@@ -420,6 +420,16 @@ in

'';
};

networking.firewall.extraPackages = mkOption {
  default = [ ];
  example = [ pkgs.ipset ];
  description =
    ''
      Additional packages to be included in the environment of the system
      as well as the path of networking.firewall.extraCommands.
    '';
};

networking.firewall.extraStopCommands = mkOption {
  type = types.lines;
  default = "";

@@ -443,7 +453,7 @@ in

networking.firewall.trustedInterfaces = [ "lo" ];

environment.systemPackages = [ pkgs.iptables pkgs.ipset ];
environment.systemPackages = [ pkgs.iptables ] ++ cfg.extraPackages;

boot.kernelModules = map (x: "nf_conntrack_${x}") cfg.connectionTrackingModules;
boot.extraModprobeConfig = optionalString (!cfg.autoLoadConntrackHelpers) ''

@@ -462,7 +472,7 @@ in

before = [ "network-pre.target" ];
after = [ "systemd-modules-load.service" ];

path = [ pkgs.iptables pkgs.ipset ];
path = [ pkgs.iptables ] ++ cfg.extraPackages;

# FIXME: this module may also try to load kernel modules, but
# containers don't have CAP_SYS_MODULE. So the host system had
Some files were not shown because too many files have changed in this diff.