setcap-wrapper: Merging with upstream master and resolving conflicts

Parnell Springmeyer 2017-01-25 11:08:05 -08:00
commit bae00e8aa8
No known key found for this signature in database
GPG Key ID: DCCF89258EAD874A
4612 changed files with 200761 additions and 124566 deletions

.editorconfig Normal file

@ -0,0 +1,24 @@
# EditorConfig configuration for nixpkgs
# http://EditorConfig.org
# Top-most EditorConfig file
root = true
# Unix-style newlines with a newline ending every file, utf-8 charset
[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
charset = utf-8
# see https://nixos.org/nixpkgs/manual/#chap-conventions
# Match nix/ruby files, set indent to spaces with width of two
[*.{nix,rb}]
indent_style = space
indent_size = 2
# Match shell/python/perl scripts, set indent to spaces with width of four
[*.{sh,py,pl}]
indent_style = space
indent_size = 4


@ -28,5 +28,8 @@ under the terms of [COPYING](../COPYING), which is an MIT-like license.
* Not start with the package name
* Not have a dot at the end
See the nixpkgs manual for more details on how to [Submit changes to nixpkgs](http://hydra.nixos.org/job/nixpkgs/trunk/manual/latest/download-by-type/doc/manual#chap-submitting-changes).
See the nixpkgs manual for more details on how to [Submit changes to nixpkgs](https://nixos.org/nixpkgs/manual/#chap-submitting-changes).
## Reviewing contributions
See the nixpkgs manual for more details on how to [Review contributions](https://nixos.org/nixpkgs/manual/#sec-reviewing-contributions).


@ -4,12 +4,12 @@
###### Things done
- [ ] Tested using sandboxing
([nix.useChroot](http://nixos.org/nixos/manual/options.html#opt-nix.useChroot) on NixOS,
or option `build-use-chroot` in [`nix.conf`](http://nixos.org/nix/manual/#sec-conf-file)
([nix.useSandbox](http://nixos.org/nixos/manual/options.html#opt-nix.useSandbox) on NixOS,
or option `build-use-sandbox` in [`nix.conf`](http://nixos.org/nix/manual/#sec-conf-file)
on non-NixOS)
- Built on platform(s)
- [ ] NixOS
- [ ] OS X
- [ ] macOS
- [ ] Linux
- [ ] Tested compilation of all pkgs that depend on this change using `nix-shell -p nox --run "nox-review wip"`
- [ ] Tested execution of all binary files (usually in `./result/bin/`)


@ -1,6 +1,13 @@
{
"userBlacklist": [
"civodul",
"jhasse"
]
"jhasse",
"shlevy"
],
"alwaysNotifyForPaths": [
{ "name": "FRidh", "files": ["pkgs/top-level/python-packages.nix", "pkgs/development/interpreters/python/*", "pkgs/development/python-modules/*" ] },
{ "name": "LnL7", "files": ["pkgs/stdenv/darwin/*", "pkgs/os-specific/darwin/*"] },
{ "name": "copumpkin", "files": ["pkgs/stdenv/darwin/*", "pkgs/os-specific/darwin/apple-source-releases/*"] }
],
"fileBlacklist": ["pkgs/top-level/all-packages.nix"]
}


@ -4,7 +4,7 @@ matrix:
- os: linux
sudo: false
script:
- ./maintainers/scripts/travis-nox-review-pr.sh nixpkgs-verify nixpkgs-manual nixpkgs-tarball
- ./maintainers/scripts/travis-nox-review-pr.sh nixpkgs-verify nixpkgs-manual nixpkgs-tarball nixpkgs-unstable
- ./maintainers/scripts/travis-nox-review-pr.sh nixos-options nixos-manual
- os: linux
sudo: required
@ -15,8 +15,6 @@ matrix:
- os: osx
osx_image: xcode7.3
script: ./maintainers/scripts/travis-nox-review-pr.sh nox pr
git:
depth: 1
env:
global:
- GITHUB_TOKEN=5edaaf1017f691ed34e7f80878f8f5fbd071603f


@ -1 +1 @@
16.09
17.03


@ -1,4 +1,4 @@
Copyright (c) 2003-2016 Eelco Dolstra and the Nixpkgs/NixOS contributors
Copyright (c) 2003-2017 Eelco Dolstra and the Nixpkgs/NixOS contributors
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the


@ -2,8 +2,6 @@
[![Build Status](https://travis-ci.org/NixOS/nixpkgs.svg?branch=master)](https://travis-ci.org/NixOS/nixpkgs)
[![Code Triagers Badge](https://www.codetriage.com/nixos/nixpkgs/badges/users.svg)](https://www.codetriage.com/nixos/nixpkgs)
[![Issue Stats](http://www.issuestats.com/github/nixos/nixpkgs/badge/pr?style=flat)](http://www.issuestats.com/github/nixos/nixpkgs)
[![Issue Stats](http://www.issuestats.com/github/nixos/nixpkgs/badge/issue?style=flat)](http://www.issuestats.com/github/nixos/nixpkgs)
Nixpkgs is a collection of packages for the [Nix](https://nixos.org/nix/) package
manager. It is periodically built and tested by the [hydra](http://hydra.nixos.org/)
@ -15,12 +13,12 @@ build daemon as so-called channels. To get channel information via git, add
```
For stability and maximum binary package support, it is recommended to maintain
custom changes on top of one of the channels, e.g. `nixos-16.03` for the latest
custom changes on top of one of the channels, e.g. `nixos-16.09` for the latest
release and `nixos-unstable` for the latest successful build of master:
```
% git remote update channels
% git rebase channels/nixos-16.03
% git rebase channels/nixos-16.09
```
For pull-requests, please rebase onto nixpkgs `master`.
@ -34,9 +32,9 @@ For pull-requests, please rebase onto nixpkgs `master`.
* [Manual (NixOS)](https://nixos.org/nixos/manual/)
* [Nix Wiki](https://nixos.org/wiki/) (deprecated, see milestone ["Move the Wiki!"](https://github.com/NixOS/nixpkgs/issues?q=is%3Aopen+is%3Aissue+milestone%3A%22Move+the+wiki%21%22))
* [Continuous package builds for unstable/master](https://hydra.nixos.org/jobset/nixos/trunk-combined)
* [Continuous package builds for 16.03 release](https://hydra.nixos.org/jobset/nixos/release-16.03)
* [Continuous package builds for 16.09 release](https://hydra.nixos.org/jobset/nixos/release-16.09)
* [Tests for unstable/master](https://hydra.nixos.org/job/nixos/trunk-combined/tested#tabs-constituents)
* [Tests for 16.03 release](https://hydra.nixos.org/job/nixos/release-16.03/tested#tabs-constituents)
* [Tests for 16.09 release](https://hydra.nixos.org/job/nixos/release-16.09/tested#tabs-constituents)
Communication:


@ -60,6 +60,10 @@ pkgs.stdenv.mkDerivation {
inputFile = ../pkgs/development/idris-modules/README.md;
outputFile = "languages-frameworks/idris.xml";
}
+ toDocbook {
inputFile = ../pkgs/development/node-packages/README.md;
outputFile = "languages-frameworks/node.xml";
}
+ toDocbook {
inputFile = ../pkgs/development/r-modules/README.md;
outputFile = "languages-frameworks/r.xml";
@ -93,7 +97,9 @@ pkgs.stdenv.mkDerivation {
cp -r $dst/images $dst/epub/OEBPS
echo "application/epub+zip" > mimetype
zip -0Xq "$dst/Nixpkgs Contributors Guide - NixOS community.epub" mimetype
zip -Xr9D "$dst/Nixpkgs Contributors Guide - NixOS community.epub" $dst/epub/*
manual="$dst/nixpkgs-manual.epub"
zip -0Xq "$manual" mimetype
cd $dst/epub && zip -Xr9D "$manual" *
rm -rf $dst/epub
'';
}


@ -8,177 +8,295 @@
The nixpkgs repository has several utility functions to manipulate Nix expressions.
</para>
<section xml:id="sec-pkgs-overridePackages">
<title>pkgs.overridePackages</title>
<section xml:id="sec-overrides">
<title>Overriding</title>
<para>
This function inside the nixpkgs expression (<varname>pkgs</varname>)
can be used to override the set of packages itself.
</para>
<para>
Warning: this function is expensive and must not be used from within
the nixpkgs repository.
</para>
<para>
Example usage:
<programlisting>let
pkgs = import &lt;nixpkgs&gt; {};
newpkgs = pkgs.overridePackages (self: super: {
foo = super.foo.override { ... };
};
in ...</programlisting>
Sometimes one wants to override parts of
<literal>nixpkgs</literal>, e.g. derivation attributes, the results of
derivations or even the whole package set.
</para>
<para>
The resulting <varname>newpkgs</varname> will have the new <varname>foo</varname>
expression, and all other expressions depending on <varname>foo</varname> will also
use the new <varname>foo</varname> expression.
</para>
<section xml:id="sec-pkgs-overridePackages">
<title>pkgs.overridePackages</title>
<para>
The behavior of this function is similar to <link
linkend="sec-modify-via-packageOverrides">config.packageOverrides</link>.
</para>
<para>
The <varname>self</varname> parameter refers to the final package set with the
applied overrides. Using this parameter may lead to infinite recursion if not
used consciously.
</para>
<para>
The <varname>super</varname> parameter refers to the old package set.
It's equivalent to <varname>pkgs</varname> in the above example.
</para>
</section>
<section xml:id="sec-pkg-override">
<title>&lt;pkg&gt;.override</title>
<para>
The function <varname>override</varname> is usually available for all the
derivations in the nixpkgs expression (<varname>pkgs</varname>).
</para>
<para>
It is used to override the arguments passed to a function.
</para>
<para>
Example usages:
<programlisting>pkgs.foo.override { arg1 = val1; arg2 = val2; ... }</programlisting>
<programlisting>pkgs.overridePackages (self: super: {
foo = super.foo.override { barSupport = true ; };
})</programlisting>
<programlisting>mypkg = pkgs.callPackage ./mypkg.nix {
mydep = pkgs.mydep.override { ... };
})</programlisting>
</para>
<para>
In the first example, <varname>pkgs.foo</varname> is the result of a function call
with some default arguments, usually a derivation.
Using <varname>pkgs.foo.override</varname> will call the same function with
the given new arguments.
</para>
</section>
<section xml:id="sec-pkg-overrideDerivation">
<title>&lt;pkg&gt;.overrideDerivation</title>
<warning>
<para>Do not use this function in Nixpkgs as it evaluates a Derivation
before modifying it, which breaks package abstraction and removes
error-checking of function arguments. In addition, this
evaluation-per-function application incurs a performance penalty,
which can become a problem if many overrides are used.
It is only intended for ad-hoc customisation, such as in
<filename>~/.nixpkgs/config.nix</filename>.
</para>
</warning>
<para>
The function <varname>overrideDerivation</varname> creates a new derivation
based on an existing one by overriding the original's attributes with
the attribute set produced by the specified function.
This function is available on all
derivations defined using the <varname>makeOverridable</varname> function.
Most standard derivation-producing functions, such as
<varname>stdenv.mkDerivation</varname>, are defined using this
function, which means most packages in the nixpkgs expression,
<varname>pkgs</varname>, have this function.
</para>
<para>
Example usage:
<programlisting>mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
name = "sed-4.2.2-pre";
src = fetchurl {
url = ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2;
sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
};
patches = [];
});</programlisting>
</para>
<para>
In the above example, the <varname>name</varname>, <varname>src</varname>,
and <varname>patches</varname> of the derivation will be overridden, while
all other attributes will be retained from the original derivation.
</para>
<para>
The argument <varname>oldAttrs</varname> is used to refer to the attribute set of
the original derivation.
</para>
<note>
<para>
A package's attributes are evaluated *before* being modified by
the <varname>overrideDerivation</varname> function.
For example, the <varname>name</varname> attribute reference
in <varname>url = "mirror://gnu/hello/${name}.tar.gz";</varname>
is filled-in *before* the <varname>overrideDerivation</varname> function
modifies the attribute set. This means that overriding the
<varname>name</varname> attribute, in this example, *will not* change the
value of the <varname>url</varname> attribute. Instead, we need to override
both the <varname>name</varname> *and* <varname>url</varname> attributes.
This function inside the nixpkgs expression (<varname>pkgs</varname>)
can be used to override the set of packages itself.
</para>
</note>
<para>
Warning: this function is expensive and must not be used from within
the nixpkgs repository.
</para>
<para>
Example usage:
<programlisting>let
pkgs = import &lt;nixpkgs&gt; {};
newpkgs = pkgs.overridePackages (self: super: {
foo = super.foo.override { ... };
});
in ...</programlisting>
</para>
<para>
The resulting <varname>newpkgs</varname> will have the new <varname>foo</varname>
expression, and all other expressions depending on <varname>foo</varname> will also
use the new <varname>foo</varname> expression.
</para>
<para>
The behavior of this function is similar to <link
linkend="sec-modify-via-packageOverrides">config.packageOverrides</link>.
</para>
<para>
The <varname>self</varname> parameter refers to the final package set with the
applied overrides. Using this parameter may lead to infinite recursion if not
used consciously.
</para>
<para>
The <varname>super</varname> parameter refers to the old package set.
It's equivalent to <varname>pkgs</varname> in the above example.
</para>
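<para>
As an illustration (a sketch only; <varname>foo</varname>,
<varname>fooUser</varname> and <varname>enableBar</varname> are hypothetical
names), <varname>super</varname> is typically used to take the unmodified
package as a base, while <varname>self</varname> refers to the final,
already-overridden set:
<programlisting>pkgs.overridePackages (self: super: {
  foo = super.foo.override { enableBar = true; };
  # other overrides can refer to the final foo via self
  fooUser = super.fooUser.override { foo = self.foo; };
})</programlisting>
</para>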
<para>
Note that in previous versions of nixpkgs, this method replaced any changes from <link
linkend="sec-modify-via-packageOverrides">config.packageOverrides</link>,
along with that from previous calls if this function was called repeatedly.
Now those previous changes will be preserved so this function can be "chained" meaningfully.
To recover the old behavior, make sure <varname>config.packageOverrides</varname> is unset,
and call this only once off a "freshly" imported nixpkgs:
<programlisting>let
pkgs = import &lt;nixpkgs&gt; { config = {}; };
newpkgs = pkgs.overridePackages ...;
in ...</programlisting>
</para>
</section>
<section xml:id="sec-pkg-override">
<title>&lt;pkg&gt;.override</title>
<para>
The function <varname>override</varname> is usually available for all the
derivations in the nixpkgs expression (<varname>pkgs</varname>).
</para>
<para>
It is used to override the arguments passed to a function.
</para>
<para>
Example usages:
<programlisting>pkgs.foo.override { arg1 = val1; arg2 = val2; ... }</programlisting>
<programlisting>pkgs.overridePackages (self: super: {
foo = super.foo.override { barSupport = true ; };
})</programlisting>
<programlisting>mypkg = pkgs.callPackage ./mypkg.nix {
mydep = pkgs.mydep.override { ... };
};</programlisting>
</para>
<para>
In the first example, <varname>pkgs.foo</varname> is the result of a function call
with some default arguments, usually a derivation.
Using <varname>pkgs.foo.override</varname> will call the same function with
the given new arguments.
</para>
</section>
<section xml:id="sec-pkg-overrideAttrs">
<title>&lt;pkg&gt;.overrideAttrs</title>
<para>
The function <varname>overrideAttrs</varname> allows overriding the
attribute set passed to a <varname>stdenv.mkDerivation</varname> call,
producing a new derivation based on the original one.
This function is available on all derivations produced by the
<varname>stdenv.mkDerivation</varname> function, which is most packages
in the nixpkgs expression <varname>pkgs</varname>.
</para>
<para>
Example usage:
<programlisting>helloWithDebug = pkgs.hello.overrideAttrs (oldAttrs: rec {
separateDebugInfo = true;
});</programlisting>
</para>
<para>
In the above example, the <varname>separateDebugInfo</varname> attribute is
overridden to be true, thus building debug info for
<varname>helloWithDebug</varname>, while all other attributes will be
retained from the original <varname>hello</varname> package.
</para>
<para>
The argument <varname>oldAttrs</varname> is conventionally used to refer to
the attr set originally passed to <varname>stdenv.mkDerivation</varname>.
</para>
<note>
<para>
Note that <varname>separateDebugInfo</varname> is processed only by the
<varname>stdenv.mkDerivation</varname> function, not the generated, raw
Nix derivation. Thus, using <varname>overrideDerivation</varname> will
not work in this case, as it overrides only the attributes of the final
derivation. It is for this reason that <varname>overrideAttrs</varname>
should be preferred in (almost) all cases to
<varname>overrideDerivation</varname>, i.e. to allow using
<varname>stdenv.mkDerivation</varname> to process input arguments, as well
as the fact that it is easier to use (you can use the same attribute
names you see in your Nix code, instead of the ones generated, e.g.
<varname>buildInputs</varname> vs <varname>nativeBuildInputs</varname>)
and involves less typing.
</para>
</note>
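<para>
As a further sketch (using <varname>hello</varname> and <varname>zlib</varname>
purely as placeholder packages), attributes from the original argument set can
also be extended rather than replaced:
<programlisting>helloWithZlib = pkgs.hello.overrideAttrs (oldAttrs: {
  # append to whatever buildInputs the original expression passed in
  buildInputs = (oldAttrs.buildInputs or []) ++ [ pkgs.zlib ];
});</programlisting>
</para>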
</section>
<section xml:id="sec-pkg-overrideDerivation">
<title>&lt;pkg&gt;.overrideDerivation</title>
<warning>
<para>You should prefer <varname>overrideAttrs</varname> in almost all
cases, see its documentation for the reasons why.
<varname>overrideDerivation</varname> is not deprecated and will continue
to work, but is less nice to use and does not have as many abilities as
<varname>overrideAttrs</varname>.
</para>
</warning>
<warning>
<para>Do not use this function in Nixpkgs as it evaluates a Derivation
before modifying it, which breaks package abstraction and removes
error-checking of function arguments. In addition, this
evaluation-per-function application incurs a performance penalty,
which can become a problem if many overrides are used.
It is only intended for ad-hoc customisation, such as in
<filename>~/.nixpkgs/config.nix</filename>.
</para>
</warning>
<para>
The function <varname>overrideDerivation</varname> creates a new derivation
based on an existing one by overriding the original's attributes with
the attribute set produced by the specified function.
This function is available on all
derivations defined using the <varname>makeOverridable</varname> function.
Most standard derivation-producing functions, such as
<varname>stdenv.mkDerivation</varname>, are defined using this
function, which means most packages in the nixpkgs expression,
<varname>pkgs</varname>, have this function.
</para>
<para>
Example usage:
<programlisting>mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
name = "sed-4.2.2-pre";
src = fetchurl {
url = ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2;
sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
};
patches = [];
});</programlisting>
</para>
<para>
In the above example, the <varname>name</varname>, <varname>src</varname>,
and <varname>patches</varname> of the derivation will be overridden, while
all other attributes will be retained from the original derivation.
</para>
<para>
The argument <varname>oldAttrs</varname> is used to refer to the attribute set of
the original derivation.
</para>
<note>
<para>
A package's attributes are evaluated *before* being modified by
the <varname>overrideDerivation</varname> function.
For example, the <varname>name</varname> attribute reference
in <varname>url = "mirror://gnu/hello/${name}.tar.gz";</varname>
is filled-in *before* the <varname>overrideDerivation</varname> function
modifies the attribute set. This means that overriding the
<varname>name</varname> attribute, in this example, *will not* change the
value of the <varname>url</varname> attribute. Instead, we need to override
both the <varname>name</varname> *and* <varname>url</varname> attributes.
</para>
</note>
</section>
<section xml:id="sec-lib-makeOverridable">
<title>lib.makeOverridable</title>
<para>
The function <varname>lib.makeOverridable</varname> is used to make the result
of a function easily customizable. This utility only makes sense for functions
that accept an argument set and return an attribute set.
</para>
<para>
Example usage:
<programlisting>f = { a, b }: { result = a+b; }
c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</para>
<para>
The variable <varname>c</varname> is the value of the <varname>f</varname> function
applied with some default arguments. Hence the value of <varname>c.result</varname>
is <literal>3</literal>, in this example.
</para>
<para>
The variable <varname>c</varname> however also has some additional functions, like
<link linkend="sec-pkg-override">c.override</link> which can be used to
override the default arguments. In this example the value of
<varname>(c.override { a = 4; }).result</varname> is 6.
</para>
</section>
</section>
<section xml:id="sec-lib-makeOverridable">
<title>lib.makeOverridable</title>
<section xml:id="sec-generators">
<title>Generators</title>
<para>
The function <varname>lib.makeOverridable</varname> is used to make the result
of a function easily customizable. This utility only makes sense for functions
that accept an argument set and return an attribute set.
Generators are functions that create file formats from nix
data structures, e.g. for configuration files.
There are generators available for: <literal>INI</literal>,
<literal>JSON</literal> and <literal>YAML</literal>
</para>
<para>
Example usage:
<programlisting>f = { a, b }: { result = a+b; }
c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
All generators follow a similar call interface: <code>generatorName
configFunctions data</code>, where <literal>configFunctions</literal> is a
set of user-defined functions that format variable parts of the content.
They each have common defaults, so often they do not need to be set
manually. An example is <code>mkSectionName ? (name: libStr.escape [ "[" "]"
] name)</code> from the <literal>INI</literal> generator. It gets the name
of a section and returns a sanitized name. The default
<literal>mkSectionName</literal> escapes <literal>[</literal> and
<literal>]</literal> with a backslash.
</para>
<para>
The variable <varname>c</varname> is the value of the <varname>f</varname> function
applied with some default arguments. Hence the value of <varname>c.result</varname>
is <literal>3</literal>, in this example.
</para>
<note><para>Nix store paths can be converted to strings by enclosing a
derivation attribute like so: <code>"${drv}"</code>.</para></note>
<para>
The variable <varname>c</varname> however also has some additional functions, like
<link linkend="sec-pkg-override">c.override</link> which can be used to
override the default arguments. In this example the value of
<varname>(c.override { a = 4; }).result</varname> is 6.
Detailed documentation for each generator can be found in
<literal>lib/generators.nix</literal>.
</para>
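<para>
As a rough sketch (assuming the <literal>INI</literal> generator is exposed as
<literal>lib.generators.toINI</literal>, as documented in
<literal>lib/generators.nix</literal>), an attribute set of sections can be
rendered like this:
<programlisting>let
  generators = (import &lt;nixpkgs&gt; {}).lib.generators;
in generators.toINI {} {
  # one nested attribute set per INI section; the empty set keeps
  # the default configFunctions described above
  global = { port = 8080; logLevel = "info"; };
}</programlisting>
</para>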
</section>
@ -295,37 +413,37 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</section>
<section xml:id="sec-pkgs-dockerTools">
<title>pkgs.dockerTools</title>
<title>pkgs.dockerTools</title>
<para>
<para>
<varname>pkgs.dockerTools</varname> is a set of functions for creating and
manipulating Docker images according to the
<link xlink:href="https://github.com/docker/docker/blob/master/image/spec/v1.md#docker-image-specification-v100">
Docker Image Specification v1.0.0
Docker Image Specification v1.0.0
</link>. Docker itself is not used to perform any of the operations done by these
functions.
</para>
</para>
<warning>
<warning>
<para>
The <varname>dockerTools</varname> API is unstable and may be subject to
backwards-incompatible changes in the future.
The <varname>dockerTools</varname> API is unstable and may be subject to
backwards-incompatible changes in the future.
</para>
</warning>
</warning>
<section xml:id="ssec-pkgs-dockerTools-buildImage">
<section xml:id="ssec-pkgs-dockerTools-buildImage">
<title>buildImage</title>
<para>
This function is analogous to the <command>docker build</command> command,
in that can used to build a Docker-compatible repository tarball containing
a single image with one or multiple layers. As such, the result
is suitable for being loaded in Docker with <command>docker load</command>.
This function is analogous to the <command>docker build</command> command,
in that can used to build a Docker-compatible repository tarball containing
a single image with one or multiple layers. As such, the result
is suitable for being loaded in Docker with <command>docker load</command>.
</para>
<para>
The parameters of <varname>buildImage</varname> with relative example values are
described below:
The parameters of <varname>buildImage</varname> with relative example values are
described below:
</para>
<example xml:id='ex-dockerTools-buildImage'><title>Docker build</title>
@ -333,11 +451,11 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
buildImage {
name = "redis"; <co xml:id='ex-dockerTools-buildImage-1' />
tag = "latest"; <co xml:id='ex-dockerTools-buildImage-2' />
fromImage = someBaseImage; <co xml:id='ex-dockerTools-buildImage-3' />
fromImageName = null; <co xml:id='ex-dockerTools-buildImage-4' />
fromImageTag = "latest"; <co xml:id='ex-dockerTools-buildImage-5' />
contents = pkgs.redis; <co xml:id='ex-dockerTools-buildImage-6' />
runAsRoot = '' <co xml:id='ex-dockerTools-buildImage-runAsRoot' />
#!${stdenv.shell}
@ -356,131 +474,131 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</example>
<para>The above example will build a Docker image <literal>redis/latest</literal>
from the given base image. Loading and running this image in Docker results in
<literal>redis-server</literal> being started automatically.
from the given base image. Loading and running this image in Docker results in
<literal>redis-server</literal> being started automatically.
</para>
<calloutlist>
<callout arearefs='ex-dockerTools-buildImage-1'>
<callout arearefs='ex-dockerTools-buildImage-1'>
<para>
<varname>name</varname> specifies the name of the resulting image.
This is the only required argument for <varname>buildImage</varname>.
<varname>name</varname> specifies the name of the resulting image.
This is the only required argument for <varname>buildImage</varname>.
</para>
</callout>
</callout>
<callout arearefs='ex-dockerTools-buildImage-2'>
<callout arearefs='ex-dockerTools-buildImage-2'>
<para>
<varname>tag</varname> specifies the tag of the resulting image.
By default it's <literal>latest</literal>.
<varname>tag</varname> specifies the tag of the resulting image.
By default it's <literal>latest</literal>.
</para>
</callout>
</callout>
<callout arearefs='ex-dockerTools-buildImage-3'>
<callout arearefs='ex-dockerTools-buildImage-3'>
<para>
<varname>fromImage</varname> is the repository tarball containing the base image.
It must be a valid Docker image, such as exported by <command>docker save</command>.
By default it's <literal>null</literal>, which can be seen as equivalent
to <literal>FROM scratch</literal> of a <filename>Dockerfile</filename>.
<varname>fromImage</varname> is the repository tarball containing the base image.
It must be a valid Docker image, such as exported by <command>docker save</command>.
By default it's <literal>null</literal>, which can be seen as equivalent
to <literal>FROM scratch</literal> of a <filename>Dockerfile</filename>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-4'>
<para>
<varname>fromImageName</varname> can be used to further specify
the base image within the repository, in case it contains multiple images.
By default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will peek the first image available
in the repository.
</para>
</callout>
</callout>
<callout arearefs='ex-dockerTools-buildImage-5'>
<callout arearefs='ex-dockerTools-buildImage-4'>
<para>
<varname>fromImageTag</varname> can be used to further specify the tag
of the base image within the repository, in case an image contains multiple tags.
By default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will peek the first tag available for the base image.
<varname>fromImageName</varname> can be used to further specify
the base image within the repository, in case it contains multiple images.
By default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will peek the first image available
in the repository.
</para>
</callout>
</callout>
<callout arearefs='ex-dockerTools-buildImage-6'>
<callout arearefs='ex-dockerTools-buildImage-5'>
<para>
<varname>contents</varname> is a derivation that will be copied in the new
layer of the resulting image. This can be similarly seen as
<command>ADD contents/ /</command> in a <filename>Dockerfile</filename>.
By default it's <literal>null</literal>.
<varname>fromImageTag</varname> can be used to further specify the tag
of the base image within the repository, in case an image contains multiple tags.
By default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will peek the first tag available for the base image.
</para>
</callout>
</callout>
<callout arearefs='ex-dockerTools-buildImage-runAsRoot'>
<callout arearefs='ex-dockerTools-buildImage-6'>
<para>
<varname>runAsRoot</varname> is a bash script that will run as root
in an environment that overlays the existing layers of the base image with
the new resulting layer, including the previously copied
<varname>contents</varname> derivation.
This can be similarly seen as
<command>RUN ...</command> in a <filename>Dockerfile</filename>.
<note>
<varname>contents</varname> is a derivation that will be copied in the new
layer of the resulting image. This can be similarly seen as
<command>ADD contents/ /</command> in a <filename>Dockerfile</filename>.
By default it's <literal>null</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-runAsRoot'>
<para>
<varname>runAsRoot</varname> is a bash script that will run as root
in an environment that overlays the existing layers of the base image with
the new resulting layer, including the previously copied
<varname>contents</varname> derivation.
This can be similarly seen as
<command>RUN ...</command> in a <filename>Dockerfile</filename>.
<note>
<para>
Using this parameter requires the <literal>kvm</literal>
device to be available.
Using this parameter requires the <literal>kvm</literal>
device to be available.
</para>
</note>
</note>
</para>
</callout>
</callout>
<callout arearefs='ex-dockerTools-buildImage-8'>
<callout arearefs='ex-dockerTools-buildImage-8'>
<para>
<varname>config</varname> is used to specify the configuration of the
containers that will be started off the built image in Docker.
The available options are listed in the
<link xlink:href="https://github.com/docker/docker/blob/master/image/spec/v1.md#container-runconfig-field-descriptions">
<varname>config</varname> is used to specify the configuration of the
containers that will be started off the built image in Docker.
The available options are listed in the
<link xlink:href="https://github.com/docker/docker/blob/master/image/spec/v1.md#container-runconfig-field-descriptions">
Docker Image Specification v1.0.0
</link>.
</link>.
</para>
</callout>
</callout>
</calloutlist>
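<para>
A minimal invocation (a sketch, separate from the example above) only needs
<varname>name</varname>, optionally together with a
<varname>contents</varname> derivation for the single layer:
<programlisting>pkgs.dockerTools.buildImage {
  name = "hello";
  contents = pkgs.hello;
}</programlisting>
</para>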
<para>
After the new layer has been created, its closure
(to which <varname>contents</varname>, <varname>config</varname> and
<varname>runAsRoot</varname> contribute) will be copied in the layer itself.
Only new dependencies that are not already in the existing layers will be copied.
After the new layer has been created, its closure
(to which <varname>contents</varname>, <varname>config</varname> and
<varname>runAsRoot</varname> contribute) will be copied in the layer itself.
Only new dependencies that are not already in the existing layers will be copied.
</para>
<para>
At the end of the process, only one new single layer will be produced and
added to the resulting image.
At the end of the process, only one new single layer will be produced and
added to the resulting image.
</para>
<para>
The resulting repository will only list the single image
<varname>image/tag</varname>. In the case of <xref linkend='ex-dockerTools-buildImage'/>
it would be <varname>redis/latest</varname>.
The resulting repository will only list the single image
<varname>image/tag</varname>. In the case of <xref linkend='ex-dockerTools-buildImage'/>
it would be <varname>redis/latest</varname>.
</para>
<para>
It is possible to inspect the arguments with which an image was built
using its <varname>buildArgs</varname> attribute.
It is possible to inspect the arguments with which an image was built
using its <varname>buildArgs</varname> attribute.
</para>
</section>
</section>
<section xml:id="ssec-pkgs-dockerTools-fetchFromRegistry">
<section xml:id="ssec-pkgs-dockerTools-fetchFromRegistry">
<title>pullImage</title>
<para>
This function is analogous to the <command>docker pull</command> command,
in that can be used to fetch a Docker image from a Docker registry.
Currently only registry <literal>v1</literal> is supported.
By default <link xlink:href="https://hub.docker.com/">Docker Hub</link>
is used to pull images.
This function is analogous to the <command>docker pull</command> command,
in that can be used to fetch a Docker image from a Docker registry.
Currently only registry <literal>v1</literal> is supported.
By default <link xlink:href="https://hub.docker.com/">Docker Hub</link>
is used to pull images.
</para>
<para>
Its parameters are described in the example below:
Its parameters are described in the example below:
</para>
<example xml:id='ex-dockerTools-pullImage'><title>Docker pull</title>
@ -498,73 +616,73 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</example>
<calloutlist>
<callout arearefs='ex-dockerTools-pullImage-1'>
<callout arearefs='ex-dockerTools-pullImage-1'>
<para>
<varname>imageName</varname> specifies the name of the image to be downloaded,
which can also include the registry namespace (e.g. <literal>library/debian</literal>).
This argument is required.
<varname>imageName</varname> specifies the name of the image to be downloaded,
which can also include the registry namespace (e.g. <literal>library/debian</literal>).
This argument is required.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-2'>
<para>
<varname>imageTag</varname> specifies the tag of the image to be downloaded.
By default it's <literal>latest</literal>.
</para>
</callout>
</callout>
<callout arearefs='ex-dockerTools-pullImage-3'>
<callout arearefs='ex-dockerTools-pullImage-2'>
<para>
<varname>imageId</varname>, if specified this exact image will be fetched, instead
of <varname>imageName/imageTag</varname>. However, the resulting repository
will still be named <varname>imageName/imageTag</varname>.
By default it's <literal>null</literal>.
<varname>imageTag</varname> specifies the tag of the image to be downloaded.
By default it's <literal>latest</literal>.
</para>
</callout>
</callout>
<callout arearefs='ex-dockerTools-pullImage-4'>
<callout arearefs='ex-dockerTools-pullImage-3'>
<para>
<varname>sha256</varname> is the checksum of the whole fetched image.
This argument is required.
<varname>imageId</varname>, if specified this exact image will be fetched, instead
of <varname>imageName/imageTag</varname>. However, the resulting repository
will still be named <varname>imageName/imageTag</varname>.
By default it's <literal>null</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-4'>
<para>
<varname>sha256</varname> is the checksum of the whole fetched image.
This argument is required.
</para>
<note>
<para>The checksum is computed on the unpacked directory, not on the final tarball.</para>
<para>The checksum is computed on the unpacked directory, not on the final tarball.</para>
</note>
</callout>
</callout>
<callout arearefs='ex-dockerTools-pullImage-5'>
<callout arearefs='ex-dockerTools-pullImage-5'>
<para>
In the above example the default values are shown for the variables
<varname>indexUrl</varname> and <varname>registryVersion</varname>.
Hence by default the Docker.io registry is used to pull the images.
In the above example the default values are shown for the variables
<varname>indexUrl</varname> and <varname>registryVersion</varname>.
Hence by default the Docker.io registry is used to pull the images.
</para>
</callout>
</callout>
</calloutlist>
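<para>
A minimal call might look like the following sketch (the
<varname>sha256</varname> shown is a placeholder, not a real checksum):
<programlisting>debian = pkgs.dockerTools.pullImage {
  imageName = "debian";
  imageTag = "jessie";
  sha256 = "0000000000000000000000000000000000000000000000000000";
};</programlisting>
</para>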
</section>
<section xml:id="ssec-pkgs-dockerTools-exportImage">
</section>
<section xml:id="ssec-pkgs-dockerTools-exportImage">
<title>exportImage</title>
<para>
This function is analogous to the <command>docker export</command> command,
in that can used to flatten a Docker image that contains multiple layers.
It is in fact the result of the merge of all the layers of the image.
As such, the result is suitable for being imported in Docker
with <command>docker import</command>.
This function is analogous to the <command>docker export</command> command,
in that can used to flatten a Docker image that contains multiple layers.
It is in fact the result of the merge of all the layers of the image.
As such, the result is suitable for being imported in Docker
with <command>docker import</command>.
</para>
<note>
<para>
<para>
Using this function requires the <literal>kvm</literal>
device to be available.
</para>
</para>
</note>
<para>
The parameters of <varname>exportImage</varname> are the following:
The parameters of <varname>exportImage</varname> are the following:
</para>
<example xml:id='ex-dockerTools-exportImage'><title>Docker export</title>
@ -573,35 +691,35 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
fromImage = someLayeredImage;
fromImageName = null;
fromImageTag = null;
name = someLayeredImage.name;
}
</programlisting>
</example>
<para>
The parameters relative to the base image have the same synopsis as
described in <xref linkend='ssec-pkgs-dockerTools-buildImage'/>, except that
<varname>fromImage</varname> is the only required argument in this case.
The parameters relative to the base image have the same synopsis as
described in <xref linkend='ssec-pkgs-dockerTools-buildImage'/>, except that
<varname>fromImage</varname> is the only required argument in this case.
</para>
<para>
The <varname>name</varname> argument is the name of the derivation output,
which defaults to <varname>fromImage.name</varname>.
The <varname>name</varname> argument is the name of the derivation output,
which defaults to <varname>fromImage.name</varname>.
</para>
</section>
</section>
<section xml:id="ssec-pkgs-dockerTools-shadowSetup">
<section xml:id="ssec-pkgs-dockerTools-shadowSetup">
<title>shadowSetup</title>
<para>
This constant string is a helper for setting up the base files for managing
users and groups, only if such files don't exist already.
It is suitable for being used in a
<varname>runAsRoot</varname> <xref linkend='ex-dockerTools-buildImage-runAsRoot'/> script for cases like
in the example below:
This constant string is a helper for setting up the base files for managing
users and groups, only if such files don't exist already.
It is suitable for being used in a
<varname>runAsRoot</varname> <xref linkend='ex-dockerTools-buildImage-runAsRoot'/> script for cases like
in the example below:
</para>
<example xml:id='ex-dockerTools-shadowSetup'><title>Shadow base files</title>
<programlisting>
buildImage {
@ -620,13 +738,13 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</example>
<para>
Creating base files like <literal>/etc/passwd</literal> or
<literal>/etc/login.defs</literal> are necessary for shadow-utils to
manipulate users and groups.
Creating base files like <literal>/etc/passwd</literal> or
<literal>/etc/login.defs</literal> are necessary for shadow-utils to
manipulate users and groups.
</para>
</section>
</section>
</section>
</chapter>


@ -248,7 +248,7 @@ $ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA beamPackages.ibrowse
development. Many times we need to create a
<literal>shell.nix</literal> file and do our development inside
of the environment specified by that file. This file looks a lot
like the packageing described above. The main difference is that
like the packaging described above. The main difference is that
<literal>src</literal> points to project root and we call the
package directly.
</para>
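<para>
A rough sketch of such a <literal>shell.nix</literal> (assuming a
rebar3-based project and a <literal>buildRebar3</literal> builder with a
<literal>beamDeps</literal> argument in <literal>beamPackages</literal>;
all names here are illustrative):
<programlisting>{ pkgs ? import &lt;nixpkgs&gt; {} }:

with pkgs;

let
  drv = beamPackages.buildRebar3 {
    name = "myapp";
    version = "0.1.0";
    src = ./.;                       # project root instead of a fetcher
    beamDeps = with beamPackages; [ ibrowse ];
  };
in drv</programlisting>
</para>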


@ -24,7 +24,7 @@ deis = buildGoPackage rec {
sha256 = "1qv9lxqx7m18029lj8cw3k7jngvxs4iciwrypdy0gd2nnghc68sw";
};
goDeps = ./deps.json; <co xml:id='ex-buildGoPackage-3' />
goDeps = ./deps.nix; <co xml:id='ex-buildGoPackage-3' />
buildFlags = "--tags release"; <co xml:id='ex-buildGoPackage-4' />
}
@ -56,7 +56,9 @@ the following arguments are of special significance to the function:
<callout arearefs='ex-buildGoPackage-3'>
<para>
<varname>goDeps</varname> is where the Go dependencies of a Go program are listed
in a JSON format described below.
as a list of package sources identified by Go import path.
It could be imported as a separate <varname>deps.nix</varname> file for
readability. The dependency data structure is described below.
</para>
</callout>
@ -70,23 +72,32 @@ the following arguments are of special significance to the function:
</para>
<para>The <varname>goDeps</varname> attribute should point to a JSON file that defines which Go libraries
are needed and should be included in <varname>GOPATH</varname> for <varname>buildPhase</varname>.
<para>The <varname>goDeps</varname> attribute can be imported from a separate
<varname>nix</varname> file that defines which Go libraries are needed and should
be included in <varname>GOPATH</varname> for <varname>buildPhase</varname>.
</para>
<example xml:id='ex-goDeps'><title>deps.json</title>
<example xml:id='ex-goDeps'><title>deps.nix</title>
<programlisting>
[ <co xml:id='ex-goDeps-1' />
{
"goPackagePath": "gopkg.in/yaml.v2", <co xml:id='ex-goDeps-2' />
"fetch": {
"type": "git", <co xml:id='ex-goDeps-3' />
"url": "https://gopkg.in/yaml.v2",
"rev": "a83829b6f1293c91addabc89d0571c246397bbf4",
"sha256": "1m4dsmk90sbi17571h6pld44zxz7jc4lrnl4f27dpd1l8g5xvjhh"
}
}
{
goPackagePath = "gopkg.in/yaml.v2"; <co xml:id='ex-goDeps-2' />
fetch = {
type = "git"; <co xml:id='ex-goDeps-3' />
url = "https://gopkg.in/yaml.v2";
rev = "a83829b6f1293c91addabc89d0571c246397bbf4";
sha256 = "1m4dsmk90sbi17571h6pld44zxz7jc4lrnl4f27dpd1l8g5xvjhh";
};
}
{
goPackagePath = "github.com/docopt/docopt-go";
fetch = {
type = "git";
url = "https://github.com/docopt/docopt-go";
rev = "784ddc588536785e7299f7272f39101f7faccc3f";
sha256 = "0wwz48jl9fvl1iknvn9dqr4gfy1qs03gxaikrxxp9gry6773v3sj";
};
}
]
</programlisting>
</example>


@ -383,7 +383,7 @@ You can select a particular GHC version to compile with by setting the
Stack choose what GHC version it wants based on the snapshot specified
in `stack.yaml` (only works with Stack >= 1.1.3):
{nixpkgs ? import <nixpkgs> { }, ghc ? nixpkgs.ghc}
{nixpkgs ? import <nixpkgs> { }, ghc ? nixpkgs.ghc}:
with nixpkgs;
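A sketch of how such a `shell.nix` is typically completed (assuming the
`haskell.lib.buildStackProject` helper; `myEnv` and the `zlib` dependency are
placeholders):

```nix
haskell.lib.buildStackProject {
  name = "myEnv";
  buildInputs = [ zlib ];   # native libraries the project needs
  inherit ghc;              # the GHC chosen above
}
```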
@ -633,7 +633,7 @@ Now the builds succeeds.
Of course, in the concrete example of `ghc-events` this whole exercise is not
an ideal solution, because `ghc-events` can analyze the output emitted by any
version of GHC later than 6.12 regardless of the compiler version that was used
to build the `ghc-events' executable, so strictly speaking there's no reason to
to build the `ghc-events` executable, so strictly speaking there's no reason to
prefer one built with GHC 7.8.x in the first place. However, for users who
cannot use GHC 7.10.x at all for some reason, the approach of downgrading to an
older version might be useful.


@ -21,6 +21,7 @@ such as Perl or Haskell. These are described in this chapter.</para>
<xi:include href="idris.xml" /> <!-- generated from ../../pkgs/development/idris-modules/README.md -->
<xi:include href="java.xml" />
<xi:include href="lua.xml" />
<xi:include href="node.xml" /> <!-- generated from ../../pkgs/development/node-packages/README.md -->
<xi:include href="perl.xml" />
<xi:include href="python.xml" />
<xi:include href="qt.xml" />


@ -157,16 +157,16 @@ expression on standard output. For example:
<screen>
$ nix-generate-from-cpan XML::Simple
XMLSimple = buildPerlPackage {
name = "XML-Simple-2.20";
XMLSimple = buildPerlPackage rec {
name = "XML-Simple-2.22";
src = fetchurl {
url = mirror://cpan/authors/id/G/GR/GRANTM/XML-Simple-2.20.tar.gz;
sha256 = "5cff13d0802792da1eb45895ce1be461903d98ec97c9c953bc8406af7294434a";
url = "mirror://cpan/authors/id/G/GR/GRANTM/${name}.tar.gz";
sha256 = "b9450ef22ea9644ae5d6ada086dc4300fa105be050a2030ebd4efd28c198eb49";
};
propagatedBuildInputs = [ XMLNamespaceSupport XMLSAX XMLSAXExpat ];
meta = {
description = "Easily read/write XML (esp config files)";
license = "perl";
description = "An API for simple XML files";
license = with stdenv.lib.licenses; [ artistic1 gpl1Plus ];
};
};
</screen>


@ -3,7 +3,7 @@
## User Guide
Several versions of Python are available on Nix as well as a high amount of
packages. The default interpreter is CPython 2.7.
packages. The default interpreter is CPython 3.5.
### Using Python
@ -97,7 +97,7 @@ We will first have a look at how Python packages are packaged on Nix. Then, we w
#### Python packaging on Nix
On Nix all packages are built by functions. The main function in Nix for building Python packages is [`buildPythonPackage`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/python-modules/generic/default.nix).
On Nix all packages are built by functions. The main function in Nix for building Python packages is [`buildPythonPackage`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/interpreters/python/build-python-package.nix).
Let's see how we would build the `toolz` package. According to [`python-packages.nix`](https://raw.githubusercontent.com/NixOS/nixpkgs/master/pkgs/top-level/python-packages.nix) `toolz` is build using
```nix
@ -141,13 +141,15 @@ with import <nixpkgs> {};
pkgs.python35Packages.buildPythonPackage rec {
name = "toolz-${version}";
version = "0.7.4";
version = "0.8.0";
src = pkgs.fetchurl{
url = "mirror://pypi/t/toolz/toolz-${version}.tar.gz";
sha256 = "43c2c9e5e7a16b6c88ba3088a9bfc82f7db8e13378be7c78d6c14a5f8ed05afd";
sha256 = "e8451af61face57b7c5d09e71c0d27b8005f001ead56e9fdf470417e5cc6d479";
};
doCheck = false;
meta = {
homepage = "http://github.com/pytoolz/toolz/";
description = "List processing tools and functional utilities";
@ -170,18 +172,18 @@ with import <nixpkgs> {};
( let
toolz = pkgs.python35Packages.buildPythonPackage rec {
name = "toolz-${version}";
version = "0.7.4";
version = "0.8.0";
src = pkgs.fetchurl{
url = "mirror://pypi/t/toolz/toolz-${version}.tar.gz";
sha256 = "43c2c9e5e7a16b6c88ba3088a9bfc82f7db8e13378be7c78d6c14a5f8ed05afd";
sha256 = "e8451af61face57b7c5d09e71c0d27b8005f001ead56e9fdf470417e5cc6d479";
};
doCheck = false;
meta = {
homepage = "http://github.com/pytoolz/toolz/";
description = "List processing tools and functional utilities";
license = licenses.bsd3;
maintainers = with maintainers; [ fridh ];
};
};
@ -308,11 +310,10 @@ Note also the line `doCheck = false;`, we explicitly disabled running the test-s
#### Develop local package
As a Python developer you're likely aware of [development mode](http://pythonhosted.org/setuptools/setuptools.html#development-mode) (`python setup.py develop`);
As a Python developer you're likely aware of [development mode](http://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode) (`python setup.py develop`);
instead of installing the package this command creates a special link to the project code.
That way, you can run updated code without having to reinstall after each and every change you make.
Development mode is also available on Nix as [explained](http://nixos.org/nixpkgs/manual/#ssec-python-development) in the Nixpkgs manual.
Let's see how you can use it.
Development mode is also available. Let's see how you can use it.
In the previous Nix expression the source was fetched from an url. We can also refer to a local source instead using
@ -409,36 +410,21 @@ and in this case the `python35` interpreter is automatically used.
### Interpreters
Versions 2.6, 2.7, 3.3, 3.4 and 3.5 of the CPython interpreter are available on
Nix and are available as `python26`, `python27`, `python33`, `python34` and
`python35`. The PyPy interpreter is also available as `pypy`. Currently, the
aliases `python` and `python3` correspond to respectively `python27` and
`python35`. The Nix expressions for the interpreters can be found in
Versions 2.6, 2.7, 3.3, 3.4 and 3.5 of the CPython interpreter are available as respectively
`python26`, `python27`, `python33`, `python34` and `python35`. The PyPy interpreter
is available as `pypy`. The aliases `python2` and `python3` correspond to respectively `python27` and
`python35`. The default interpreter, `python`, maps to `python2`.
The Nix expressions for the interpreters can be found in
`pkgs/development/interpreters/python`.
#### Missing modules standard library
The interpreters `python26` and `python27` do not include modules that
require external dependencies. This is done in order to reduce the closure size.
The following modules need to be added as `buildInput` explicitly:
* `python.modules.bsddb`
* `python.modules.curses`
* `python.modules.curses_panel`
* `python.modules.crypt`
* `python.modules.gdbm`
* `python.modules.sqlite3`
* `python.modules.tkinter`
* `python.modules.readline`
For convenience `python27Full` and `python26Full` are provided with all
modules included.
All packages depending on any Python interpreter get appended
`out/{python.sitePackages}` to `$PYTHONPATH` if such a directory
exists.
#### Missing `tkinter` module standard library
To reduce closure size, the `Tkinter`/`tkinter` module is available as a separate package, `pythonPackages.tkinter`.
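For example, an environment that includes it could look like the following
sketch (assuming `tkinter` can be pulled in like any other package from the
Python package set):

```nix
with import <nixpkgs> {};
python35.withPackages (ps: [ ps.tkinter ])
```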
#### Attributes on interpreters packages
Each interpreter has the following attributes:
@ -448,18 +434,21 @@ Each interpreter has the following attributes:
- `buildEnv`. Function to build python interpreter environments with extra packages bundled together. See section *python.buildEnv function* for usage and documentation.
- `withPackages`. Simpler interface to `buildEnv`. See section *python.withPackages function* for usage and documentation.
- `sitePackages`. Alias for `lib/${libPrefix}/site-packages`.
- `executable`. Name of the interpreter executable, ie `python3.4`.
- `executable`. Name of the interpreter executable, e.g. `python3.4`.
- `pkgs`. Set of Python packages for that specific interpreter. The package set can be modified by overriding the interpreter and passing `packageOverrides`.
### Building packages and applications
Python packages (libraries) and applications that use `setuptools` or
`distutils` are typically built with respectively the `buildPythonPackage` and
`buildPythonApplication` functions.
Python libraries and applications that use `setuptools` or
`distutils` are typically built with respectively the `buildPythonPackage` and
`buildPythonApplication` functions. These two functions also support installing a `wheel`.
All Python packages reside in `pkgs/top-level/python-packages.nix` and all
applications elsewhere. Some packages are also defined in
applications elsewhere. In case a package is used as both a library and an application,
then the package should be in `pkgs/top-level/python-packages.nix` since only those packages are made
available for all interpreter versions. The preferred location for library expressions is in
`pkgs/development/python-modules`. It is important that these packages are
called in `pkgs/top-level/python-packages.nix` and not elsewhere, to guarantee
called from `pkgs/top-level/python-packages.nix` and not elsewhere, to guarantee
the right version of the package is built.
Based on the packages defined in `pkgs/top-level/python-packages.nix` an
@ -475,15 +464,16 @@ sets are
and the aliases
* `pkgs.pythonPackages` pointing to `pkgs.python27Packages`
* `pkgs.python2Packages` pointing to `pkgs.python27Packages`
* `pkgs.python3Packages` pointing to `pkgs.python35Packages`
* `pkgs.pythonPackages` pointing to `pkgs.python2Packages`
#### `buildPythonPackage` function
The `buildPythonPackage` function is implemented in
`pkgs/development/interpreters/python/build-python-package.nix`
and can be used as:
The following is an example:
twisted = buildPythonPackage {
name = "twisted-8.1.0";
@ -534,7 +524,7 @@ All parameters from `mkDerivation` function are still supported.
* `postShellHook`: Hook to execute commands after `shellHook`.
* `makeWrapperArgs`: A list of strings. Arguments to be passed to `makeWrapper`, which wraps generated binaries. By default, the arguments to `makeWrapper` set `PATH` and `PYTHONPATH` environment variables before calling the binary. Additional arguments here can allow a developer to set environment variables which will be available when the binary is run. For example, `makeWrapperArgs = ["--set FOO BAR" "--set BAZ QUX"]`.
* `installFlags`: A list of strings. Arguments to be passed to `pip install`. To pass options to `python setup.py install`, use `--install-option`. E.g., `installFlags=["--install-option='--cpp_implementation'"]`.
* `format`: Format of the source. Options are `setup` for when the source has a `setup.py` and `setuptools` is used to build a wheel, and `wheel` in case the source is already a binary wheel. The default value is `setup`.
* `format`: Format of the source. Valid options are `setuptools` (default), `flit`, `wheel`, and `other`. `setuptools` is for when the source has a `setup.py` and `setuptools` is used to build a wheel, `flit`, in case `flit` should be used to build a wheel, and `wheel` in case a wheel is provided. In case you need to provide your own `buildPhase` and `installPhase` you can use `other`.
* `catchConflicts` If `true`, abort package build if a package name appears more than once in dependency tree. Default is `true`.
* `checkInputs` Dependencies needed for running the `checkPhase`. These are added to `buildInputs` when `doCheck = true`.
@ -669,9 +659,8 @@ when you try to install a second environment.
Create a file, e.g. `build.nix`, with the following expression
```nix
with import <nixpkgs> {};
with python35Packages;
python.withPackages (ps: with ps; [ numpy ipython ])
pkgs.python35.withPackages (ps: with ps; [ numpy ipython ])
```
and install it in your profile with
```
@ -683,14 +672,15 @@ Now you can use the Python interpreter, as well as the extra packages that you a
If you prefer to, you could also add the environment as a package override to the Nixpkgs set.
```
packageOverrides = pkgs: with pkgs; with python35Packages; {
myEnv = python.withPackages (ps: with ps; [ numpy ipython ]);
packageOverrides = pkgs: with pkgs; {
myEnv = python35.withPackages (ps: with ps; [ numpy ipython ]);
};
```
and install it in your profile with
```
nix-env -iA nixos.blogEnv
nix-env -iA nixpkgs.myEnv
```
We're installing using the attribute path and assume the channel is named `nixpkgs`.
Note that I'm using the attribute path here.
#### Environment defined in `/etc/nixos/configuration.nix`
@ -699,7 +689,7 @@ For the sake of completeness, here's another example how to install the environm
```nix
environment.systemPackages = with pkgs; [
(python35Packages.python.withPackages (ps: callPackage ../packages/common-python-packages.nix { pythonPackages = ps; }))
(python35.withPackages(ps: with ps; [ numpy ipython ]))
];
```
@ -711,59 +701,55 @@ should also be done when packaging `A`.
### How to override a Python package?
Recursively updating a package can be done with `pkgs.overridePackages` as explained in the Nixpkgs manual.
Python attribute sets are created for each interpreter version. We will therefore override the attribute set for the interpreter version we're interested.
In the following example we change the name of the package `pandas` to `foo`.
```
newpkgs = pkgs.overridePackages(self: super: rec {
python35Packages = (super.python35Packages.override { self = python35Packages;})
// { pandas = super.python35Packages.pandas.override {name = "foo";};
};
});
```
This can be tested with
```
We can override the interpreter and pass `packageOverrides`.
In the following example we rename the `pandas` package and build it.
```nix
with import <nixpkgs> {};
(let
let
python = let
packageOverrides = self: super: {
pandas = super.pandas.override {name="foo";};
};
in pkgs.python35.override {inherit packageOverrides;};
newpkgs = pkgs.overridePackages(self: super: rec {
python35Packages = (super.python35Packages.override { self = python35Packages;})
// { pandas = super.python35Packages.pandas.override {name = "foo";};
};
});
in newpkgs.python35.withPackages (ps: [ps.blaze])
).env
```
A typical use case is to switch to another version of a certain package. For example, in the Nixpkgs repository we have multiple versions of `django` and `scipy`.
In the following example we use a different version of `scipy`. All packages in `newpkgs` will now use the updated `scipy` version.
in python.pkgs.pandas
```
Using `nix-build` on this expression will build the package `pandas`
but with the new name `foo`.
All packages in the package set will use the renamed package.
A typical use case is to switch to another version of a certain package.
For example, in the Nixpkgs repository we have multiple versions of `django` and `scipy`.
In the following example we use a different version of `scipy` and create an environment that uses it.
All packages in the Python package set will now use the updated `scipy` version.
```nix
with import <nixpkgs> {};
(let
newpkgs = pkgs.overridePackages(self: super: rec {
python35Packages = super.python35Packages.override {
self = python35Packages // { scipy = python35Packages.scipy_0_17;};
(
let
packageOverrides = self: super: {
scipy = super.scipy_0_17;
};
});
in newpkgs.python35.withPackages (ps: [ps.blaze])
in (pkgs.python35.override {inherit packageOverrides;}).withPackages (ps: [ps.blaze])
).env
```
The requested package `blaze` depends upon `pandas` which itself depends on `scipy`.
The requested package `blaze` depends on `pandas` which itself depends on `scipy`.
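Because the expression ends in `.env`, it is meant to be used with `nix-shell`. Assuming it is saved as `scipy-env.nix` (again an illustrative file name), you can enter the environment with:
```
nix-shell scipy-env.nix
```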
A similar example but now using `django`
If you want the whole of Nixpkgs to use your modifications, then you can use `pkgs.overridePackages`
as explained in this manual. In the following example we build `inkscape` using a different version of `numpy`.
```
with import <nixpkgs> {};
(let
newpkgs = pkgs.overridePackages(self: super: rec {
python27Packages = (super.python27Packages.override {self = python27Packages;})
// { django = super.python27Packages.django_1_9; };
});
in newpkgs.python27.withPackages (ps: [ps.django_guardian ])
).env
let
pkgs = import <nixpkgs> {};
newpkgs = pkgs.overridePackages ( pkgsself: pkgssuper: {
python27 = let
packageOverrides = self: super: {
numpy = super.numpy_1_10;
};
in pkgssuper.python27.override {inherit packageOverrides;};
} );
in newpkgs.inkscape
```
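Assuming the expression above is saved as `inkscape-numpy.nix` (an illustrative file name), the rebuilt `inkscape` can be obtained with:
```
nix-build inkscape-numpy.nix
```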
### `python setup.py bdist_wheel` cannot create .whl
@ -784,9 +770,9 @@ or the current time:
nix-shell --run "SOURCE_DATE_EPOCH=$(date +%s) python3 setup.py bdist_wheel"
```
or unset:
"""
```
nix-shell --run "unset SOURCE_DATE_EPOCH; python3 setup.py bdist_wheel"
"""
```
### `install_data` / `data_files` problems

View File

@ -35,6 +35,7 @@ texlive.combine {
You can list packages e.g. by <command>nix-repl</command>.
<programlisting>
$ nix-repl
nix-repl> :l &lt;nixpkgs>
nix-repl> texlive.collection-&lt;TAB>
</programlisting>
</para></listitem>

View File

@ -20,6 +20,7 @@
<xi:include href="package-notes.xml" />
<xi:include href="coding-conventions.xml" />
<xi:include href="submitting-changes.xml" />
<xi:include href="reviewing-contributions.xml" />
<xi:include href="contributing.xml" />
</book>

View File

@ -45,34 +45,48 @@
<title>File type groups</title>
<para>The support code currently recognizes some particular kinds of outputs and either instructs the build system of the package to put files into their desired outputs or it moves the files during the fixup phase. Each group of file types has an <varname>outputFoo</varname> variable specifying the output name where they should go. If that variable isn't defined by the derivation writer, it is guessed &ndash; a default output name is defined, falling back to other possibilities if the output isn't defined.</para>
<variablelist>
<varlistentry><term><varname>
$outputDev</varname></term><listitem><para>
is for development-only files. These include C(++) headers, pkg-config, cmake and aclocal files. They go to <varname>dev</varname> or <varname>out</varname> by default.
</para></listitem></varlistentry>
</para></listitem>
</varlistentry>
<varlistentry><term><varname>
$outputBin</varname></term><listitem><para>
is meant for user-facing binaries, typically residing in bin/. They go to <varname>bin</varname> or <varname>out</varname> by default.
</para></listitem></varlistentry>
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputLib</varname></term><listitem><para>
is meant for libraries, typically residing in <filename>lib/</filename> and <filename>libexec/</filename>. They go to <varname>lib</varname> or <varname>out</varname> by default.
</para></listitem></varlistentry>
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputDoc</varname></term><listitem><para>
is for user documentation, typically residing in <filename>share/doc/</filename>. It goes to <varname>doc</varname> or <varname>out</varname> by default.
</para></listitem></varlistentry>
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputDocdev</varname></term><listitem><para>
is for <emphasis>developer</emphasis> documentation. Currently we count gtk-doc and man3 pages in there. It goes to <varname>docdev</varname> or is removed (!) by default. This is because e.g. gtk-doc tends to be rather large and completely unused by nixpkgs users.
</para></listitem></varlistentry>
$outputDevdoc</varname></term><listitem><para>
is for <emphasis>developer</emphasis> documentation. Currently we count gtk-doc in there. It goes to <varname>devdoc</varname> or is removed (!) by default. This is because e.g. gtk-doc tends to be rather large and completely unused by nixpkgs users.
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputMan</varname></term><listitem><para>
is for man pages (except for section 3). They go to <varname>man</varname> or <varname>doc</varname> or <varname>$outputBin</varname> by default.
</para></listitem></varlistentry>
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputDevman</varname></term><listitem><para>
is for section 3 man pages. They go to <varname>devman</varname> or <varname>$outputMan</varname> by default.
</para></listitem></varlistentry>
<varlistentry><term><varname>
$outputInfo</varname></term><listitem><para>
is for info pages. They go to <varname>info</varname> or <varname>doc</varname> or <varname>$outputMan</varname> by default.
</para></listitem></varlistentry>
</para></listitem></varlistentry>
</variablelist>
</section>
@ -88,4 +102,3 @@
</section><!--Writing a split derivation-->
</chapter>

View File

@ -382,4 +382,138 @@ it. Place the resulting <filename>package.nix</filename> file into
</section>
<section xml:id="sec-steam">
<title>Steam</title>
<section xml:id="sec-steam-nix">
<title>Steam in Nix</title>
<para>
Steam is distributed as a <filename>.deb</filename> file, for now only
as an i686 package (the amd64 package only has documentation).
When unpacked, it has a script called <filename>steam</filename> that
on Ubuntu (their target distro) would go to <filename>/usr/bin
</filename>. When run for the first time, this script copies some
files to the user's home, which include another script that is
ultimately responsible for launching the steam binary, which is also
in $HOME.
</para>
<para>
Nix problems and constraints:
<itemizedlist>
<listitem><para>We don't have <filename>/bin/bash</filename> and many
scripts point there. Similarly for <filename>/usr/bin/python</filename>
.</para></listitem>
<listitem><para>We don't have the dynamic loader in <filename>/lib
</filename>.</para></listitem>
<listitem><para>The <filename>steam.sh</filename> script in $HOME cannot
be patched, as it is checked and rewritten by steam.</para></listitem>
<listitem><para>The steam binary cannot be patched, it's also checked.</para></listitem>
</itemizedlist>
</para>
<para>
The current approach to deploying Steam in NixOS is to compose an FHS-compatible
chroot environment, as documented
<link xlink:href="http://sandervanderburg.blogspot.nl/2013/09/composing-fhs-compatible-chroot.html">here</link>.
This allows us to have binaries in the expected paths without disrupting the system,
and to avoid patching them to work in a non-FHS environment.
</para>
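<para>
As a minimal sketch, making the chroot-wrapped Steam available on a NixOS
system amounts to adding the <varname>steam</varname> package (the same
attribute used elsewhere in this section) to
<varname>environment.systemPackages</varname>:
<programlisting>environment.systemPackages = [ pkgs.steam ];</programlisting>
</para>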
</section>
<section xml:id="sec-steam-play">
<title>How to play</title>
<para>
For 64-bit systems it's important to have
<programlisting>hardware.opengl.driSupport32Bit = true;</programlisting>
in your <filename>/etc/nixos/configuration.nix</filename>. You'll also need
<programlisting>hardware.pulseaudio.support32Bit = true;</programlisting>
if you are using PulseAudio - this will enable 32-bit ALSA app integration.
To use the Steam controller, you need to add
<programlisting>services.udev.extraRules = ''
SUBSYSTEM=="usb", ATTRS{idVendor}=="28de", MODE="0666"
KERNEL=="uinput", MODE="0660", GROUP="users", OPTIONS+="static_node=uinput"
'';</programlisting>
to your configuration.
</para>
</section>
<section xml:id="sec-steam-troub">
<title>Troubleshooting</title>
<para>
<variablelist>
<varlistentry>
<term>Steam fails to start. What do I do?</term>
<listitem><para>Try to run
<programlisting>strace steam</programlisting>
to see what is causing steam to fail.</para></listitem>
</varlistentry>
<varlistentry>
<term>Using the FOSS Radeon drivers</term>
<listitem><itemizedlist><listitem><para>
The open source radeon drivers need a newer libc++ than is provided
by the default runtime, which leads to a crash on launch. Use
<programlisting>environment.systemPackages = [(pkgs.steam.override { newStdcpp = true; })];</programlisting>
in your config if you get an error like
<programlisting>
libGL error: unable to load driver: radeonsi_dri.so
libGL error: driver pointer missing
libGL error: failed to load driver: radeonsi
libGL error: unable to load driver: swrast_dri.so
libGL error: failed to load driver: swrast</programlisting></para></listitem>
<listitem><para>
Steam ships statically linked with a version of libcrypto that
conflicts with the one dynamically loaded by radeonsi_dri.so.
If you get the error
<programlisting>steam.sh: line 713: 7842 Segmentation fault (core dumped)</programlisting>
have a look at <link xlink:href="https://github.com/NixOS/nixpkgs/pull/20269">this pull request</link>.
</para></listitem>
</itemizedlist></listitem></varlistentry>
<varlistentry>
<term>Java</term>
<listitem><orderedlist>
<listitem><para>
There is no Java in the Steam chrootenv by default. If you get a message like
<programlisting>/home/foo/.local/share/Steam/SteamApps/common/towns/towns.sh: line 1: java: command not found</programlisting>
You need to add
<programlisting> steam.override { withJava = true; };</programlisting>
to your configuration.
</para></listitem>
</orderedlist></listitem></varlistentry>
</variablelist>
</para>
</section>
<section xml:id="sec-steam-run">
<title>steam-run</title>
<para>
The FHS-compatible chroot used for steam can also be used to run
other Linux games that expect an FHS environment.
To do so, add
<programlisting>(pkgs.steam.override {
nativeOnly = true;
newStdcpp = true;
}).run</programlisting>
to your configuration, rebuild, and run the game with
<programlisting>steam-run ./foo</programlisting>
</para>
</section>
</section>
</chapter>

View File

@ -0,0 +1,393 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-reviewing-contributions">
<title>Reviewing contributions</title>
<warning>
<para>The following section is a draft, and the reviewing policy is still being
discussed.</para>
</warning>
<para>The nixpkgs project receives a fairly high number of contributions via
GitHub pull-requests. Reviewing and approving these is an important task and a
way to contribute to the project.</para>
<para>The high change rate of nixpkgs makes any pull request that is open for
long enough subject to conflicts that will require extra work from the
submitter or the merger. Reviewing pull requests in a timely manner and being
responsive to the comments is the key to avoiding these. GitHub provides sort
filters that can be used to see the <link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc">most
recently</link> and the <link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-asc">least
recently</link> updated pull-requests.</para>
<para>When reviewing a pull request, please always be nice and polite.
Controversial changes can lead to controversial opinions, but it is important
to respect every community member and their work.</para>
<para>GitHub provides reactions; they are a simple and quick way to provide
feedback on pull-requests or any comments. The thumbs-down reaction should be
used with care and, if possible, accompanied by an explanation so the
submitter has directions to improve their contribution.</para>
<para>Pull-request reviews should include a list of what has been reviewed in a
comment, so other reviewers and mergers can know the state of the
review.</para>
<para>All the review template samples provided in this section are generic and
meant as examples. Their usage is optional and the reviewer is free to adapt
them to their liking.</para>
<section><title>Package updates</title>
<para>A package update is the most trivial and common type of pull-request.
These pull-requests mainly consist of updating the version part of the package
name and the source hash.</para>
<para>It can happen that non-trivial updates include patches or more complex
changes.</para>
<para>Reviewing process:</para>
<itemizedlist>
<listitem><para>Add labels to the pull-request. (Requires commit
rights)</para>
<itemizedlist>
<listitem><para><literal>8.has: package (update)</literal> and any topic
label that fits the updated package.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the package versioning fits the
guidelines.</para></listitem>
<listitem><para>Ensure that the commit text fits the
guidelines.</para></listitem>
<listitem><para>Ensure that the package maintainers are notified.</para>
<itemizedlist>
<listitem><para>mention-bot usually notifies GitHub users based on the
submitted changes, but it can happen that it misses some of the
package maintainers.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the meta field contains correct
information.</para>
<itemizedlist>
<listitem><para>The license can change with version updates, so it should be
checked against the upstream license.</para></listitem>
<listitem><para>If the package has no maintainer, a maintainer must be
set. This can be the update submitter or a community member who
agrees to take maintainership of the package.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the code contains no typos.</para></listitem>
<listitem><para>Building the package locally.</para>
<itemizedlist>
<listitem><para>Pull-requests are often targeted at the master or staging
branch, so building the pull-request locally as it is submitted can
trigger a large number of source builds.</para>
<para>It is possible to rebase the changes on nixos-unstable or
nixpkgs-unstable for easier review by running the following commands
from a nixpkgs clone.
<screen>
$ git remote add channels https://github.com/NixOS/nixpkgs-channels.git <co
xml:id='reviewing-rebase-1' />
$ git fetch channels nixos-unstable <co xml:id='reviewing-rebase-2' />
$ git fetch origin pull/PRNUMBER/head <co xml:id='reviewing-rebase-3' />
$ git rebase --onto nixos-unstable BASEBRANCH FETCH_HEAD <co
xml:id='reviewing-rebase-4' />
</screen>
<calloutlist>
<callout arearefs='reviewing-rebase-1'>
<para>This should be done only once to be able to fetch channel
branches from the nixpkgs-channels repository.</para>
</callout>
<callout arearefs='reviewing-rebase-2'>
<para>Fetching the nixos-unstable branch.</para>
</callout>
<callout arearefs='reviewing-rebase-3'>
<para>Fetching the pull-request changes, <varname>PRNUMBER</varname>
is the number at the end of the pull-request title and
<varname>BASEBRANCH</varname> the base branch of the
pull-request.</para>
</callout>
<callout arearefs='reviewing-rebase-4'>
<para>Rebasing the pull-request changes to the nixos-unstable
branch.</para>
</callout>
</calloutlist>
</para>
</listitem>
<listitem>
<para>The <link xlink:href="https://github.com/madjar/nox">nox</link>
tool can be used to review a pull-request's content in a single command.
It doesn't rebase on a channel branch so it might trigger multiple
source builds. <varname>PRNUMBER</varname> should be replaced by the
number at the end of the pull-request title.</para>
<screen>
$ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
</screen>
</listitem>
</itemizedlist>
</listitem>
<listitem><para>Running every binary.</para></listitem>
</itemizedlist>
<example><title>Sample template for a package update review</title>
<screen>
##### Reviewed points
- [ ] package name fits guidelines
- [ ] package version fits guidelines
- [ ] package build on ARCHITECTURE
- [ ] executables tested on ARCHITECTURE
- [ ] all depending packages build
##### Possible improvements
##### Comments
</screen></example>
</section>
<section><title>New packages</title>
<para>New packages are a common type of pull-request. These pull-requests
consist of adding a new nix-expression for a package.</para>
<para>Reviewing process:</para>
<itemizedlist>
<listitem><para>Add labels to the pull-request. (Requires commit
rights)</para>
<itemizedlist>
<listitem><para><literal>8.has: package (new)</literal> and any topic
label that fits the new package.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the package versioning fits the
guidelines.</para></listitem>
<listitem><para>Ensure that the commit name fits the
guidelines.</para></listitem>
<listitem><para>Ensure that the meta field contains correct
information.</para>
<itemizedlist>
<listitem><para>The license must be checked against the upstream
license.</para></listitem>
<listitem><para>Platforms should be set or the package will not get binary
substitutes.</para></listitem>
<listitem><para>A maintainer must be set; this can be the package
submitter or a community member who agrees to take maintainership of
the package.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the code contains no typos.</para></listitem>
<listitem><para>Ensure that the package source is appropriate.</para>
<itemizedlist>
<listitem><para>Mirror URLs should be used when
available.</para></listitem>
<listitem><para>The most appropriate function should be used (e.g.
packages from GitHub should use
<literal>fetchFromGitHub</literal>).</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Building the package locally.</para></listitem>
<listitem><para>Running every binary.</para></listitem>
</itemizedlist>
<example><title>Sample template for a new package review</title>
<screen>
##### Reviewed points
- [ ] package path fits guidelines
- [ ] package name fits guidelines
- [ ] package version fits guidelines
- [ ] package build on ARCHITECTURE
- [ ] executables tested on ARCHITECTURE
- [ ] `meta.description` is set and fits guidelines
- [ ] `meta.license` fits upstream license
- [ ] `meta.platforms` is set
- [ ] `meta.maintainers` is set
- [ ] build time only dependencies are declared in `nativeBuildInputs`
- [ ] source is fetched using the appropriate function
- [ ] phases are respected
- [ ] patches that are remotely available are fetched with `fetchpatch`
##### Possible improvements
##### Comments
</screen></example>
</section>
<section><title>Module updates</title>
<para>Module updates are submissions that change modules in some way. These
often contain changes to existing options or introduce new options.</para>
<para>Reviewing process:</para>
<itemizedlist>
<listitem><para>Add labels to the pull-request. (Requires commit
rights)</para>
<itemizedlist>
<listitem><para><literal>8.has: module (update)</literal> and any topic
label that fits the module.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module maintainers are notified.</para>
<itemizedlist>
<listitem><para>Mention-bot notifies GitHub users based on the submitted
changes, but it can happen that it misses some of the package
maintainers.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module tests, if any, are
succeeding.</para></listitem>
<listitem><para>Ensure that the introduced options are correct.</para>
<itemizedlist>
<listitem><para>Type should be appropriate (string-related types differ
in their merging capabilities, <literal>optionSet</literal> and
<literal>string</literal> types are deprecated).</para></listitem>
<listitem><para>Description, default and example should be
provided.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that option changes are backward compatible.</para>
<itemizedlist>
<listitem><para>The <literal>mkRenamedOptionModule</literal> and
<literal>mkAliasOptionModule</literal> functions provide a way to make
option changes backward compatible.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that removed options are declared with
<literal>mkRemovedOptionModule</literal>.</para></listitem>
<listitem><para>Ensure that changes that are not backward compatible are
mentioned in release notes.</para></listitem>
<listitem><para>Ensure that documentation affected by the change is
updated.</para></listitem>
</itemizedlist>
<example><title>Sample template for a module update review</title>
<screen>
##### Reviewed points
- [ ] changes are backward compatible
- [ ] removed options are declared with `mkRemovedOptionModule`
- [ ] changes that are not backward compatible are documented in release notes
- [ ] module tests succeed on ARCHITECTURE
- [ ] options types are appropriate
- [ ] options description is set
- [ ] options example is provided
- [ ] documentation affected by the changes is updated
##### Possible improvements
##### Comments
</screen></example>
</section>
<section><title>New modules</title>
<para>New module submissions introduce a new module to NixOS.</para>
<itemizedlist>
<listitem><para>Add labels to the pull-request. (Requires commit
rights)</para>
<itemizedlist>
<listitem><para><literal>8.has: module (new)</literal> and any topic label
that fit the module.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module tests, if any, are
succeeding.</para></listitem>
<listitem><para>Ensure that the introduced options are correct.</para>
<itemizedlist>
<listitem><para>Type should be appropriate (string-related types differ
in their merging capabilities, <literal>optionSet</literal> and
<literal>string</literal> types are deprecated).</para></listitem>
<listitem><para>Description, default and example should be
provided.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module's <literal>meta</literal> field is
present.</para>
<itemizedlist>
<listitem><para>Maintainers should be declared in
<literal>meta.maintainers</literal>.</para></listitem>
<listitem><para>Module documentation should be declared with
<literal>meta.doc</literal>.</para></listitem>
</itemizedlist>
</listitem>
<listitem><para>Ensure that the module respects other modules'
functionality.</para>
<itemizedlist>
<listitem><para>For example, enabling a module should not open firewall
ports by default.</para></listitem>
</itemizedlist>
</listitem>
</itemizedlist>
<example><title>Sample template for a new module review</title>
<screen>
##### Reviewed points
- [ ] module path fits the guidelines
- [ ] module tests succeed on ARCHITECTURE
- [ ] options have appropriate types
- [ ] options have default
- [ ] options have example
- [ ] options have descriptions
- [ ] No unneeded package is added to environment.systemPackages
- [ ] meta.maintainers is set
- [ ] module documentation is declared in meta.doc
##### Possible improvements
##### Comments
</screen></example>
</section>
<section><title>Other submissions</title>
<para>Other types of submissions require different reviewing steps.</para>
<para>If you consider that you have enough knowledge and experience in a topic and
would like to be a long-term reviewer for related submissions, please contact
the current reviewers for that topic. They will give you information about the
reviewing process.
The main reviewers for a topic can be hard to find as there is no list, but
checking past pull-requests to see who reviewed or git-blaming the code to see
who committed to that topic can give some hints.</para>
<para>Container system, boot system and library changes are some examples of the
pull requests fitting this category.</para>
</section>
<section><title>Merging pull-requests</title>
<para>It is possible for community members who have enough knowledge and
experience in a specific topic to contribute by merging pull requests.</para>
<para>TODO: add the procedure to request merging rights.</para>
<!--
The following paragraph about how to deal with inactive contributors is just a
proposition and should be modified to what the community agrees to be the right
policy.
<para>Please note that contributors with commit rights who are inactive for more
than three months will have their commit rights revoked.</para>
-->
<para>In case a contributor definitively leaves the Nix community, they should
create an issue or notify the mailing list with references to the packages and
modules they maintain so that maintainership can be taken over by other
contributors.</para>
</section>
</chapter>

View File

@ -27,7 +27,7 @@ stdenv.mkDerivation {
name = "libfoo-1.2.3";
src = fetchurl {
url = http://example.org/libfoo-1.2.3.tar.bz2;
md5 = "e1ec107956b6ddcb0b8b0679367e9ac9";
sha256 = "0x2g1jqygyr5wiwg4ma1nd7w4ydpy82z9gkcv8vh2v8dn3y58v5m";
};
}</programlisting>
@ -988,6 +988,41 @@ set debug-file-directory ~/.nix-profile/lib/debug
</section>
<section xml:id="ssec-installCheck-phase"><title>The installCheck phase</title>
<para>The installCheck phase checks whether the package was installed
correctly by running its test suite against the installed directories.
The default <function>installCheck</function> calls <command>make
installcheck</command>.</para>
<variablelist>
<title>Variables controlling the installCheck phase</title>
<varlistentry>
<term><varname>doInstallCheck</varname></term>
<listitem><para>If set to a non-empty string, the installCheck phase is
executed, otherwise it is skipped (default). Thus you should set
<programlisting>doInstallCheck = true;</programlisting>
in the derivation to enable install checks.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>preInstallCheck</varname></term>
<listitem><para>Hook executed at the start of the installCheck
phase.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>postInstallCheck</varname></term>
<listitem><para>Hook executed at the end of the installCheck
phase.</para></listitem>
</varlistentry>
</variablelist>
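<para>A minimal sketch of a derivation enabling this phase (the package name and
hook body are purely illustrative):
<programlisting>
stdenv.mkDerivation {
  name = "libfoo-1.2.3";
  # src and other attributes as usual
  doInstallCheck = true;
  postInstallCheck = ''
    echo "finished running make installcheck"
  '';
}
</programlisting>
</para>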
</section>
<section xml:id="ssec-distribution-phase"><title>The distribution
phase</title>
@ -1196,13 +1231,12 @@ echo @foo@
<term><function>stripHash</function>
<replaceable>path</replaceable></term>
<listitem><para>Strips the directory and hash part of a store
path, storing the name part in the environment variable
<literal>strippedName</literal>. For example:
path, outputting the name part to <literal>stdout</literal>.
For example:
<programlisting>
stripHash "/nix/store/9s9r019176g7cvn2nvcw41gsp862y6b4-coreutils-8.24"
# prints coreutils-8.24
echo $strippedName
stripHash "/nix/store/9s9r019176g7cvn2nvcw41gsp862y6b4-coreutils-8.24"
</programlisting>
If you wish to store the result in another variable, then the
@ -1210,7 +1244,7 @@ echo $strippedName
<programlisting>
name="/nix/store/9s9r019176g7cvn2nvcw41gsp862y6b4-coreutils-8.24"
someVar=$(stripHash $name; echo $strippedName)
someVar=$(stripHash $name)
</programlisting>
</para></listitem>

View File

@ -296,12 +296,17 @@ rec {
/* Converts a store path to a fake derivation. */
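/* Example (illustrative; builtins.storePath requires the given path to exist
   in the local store):

     (toDerivation /nix/store/9s9r019176g7cvn2nvcw41gsp862y6b4-coreutils-8.24).name
     => "coreutils-8.24"
*/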
toDerivation = path:
let path' = builtins.storePath path; in
{ type = "derivation";
name = builtins.unsafeDiscardStringContext (builtins.substring 33 (-1) (baseNameOf path'));
outPath = path';
outputs = [ "out" ];
};
let
path' = builtins.storePath path;
res =
{ type = "derivation";
name = builtins.unsafeDiscardStringContext (builtins.substring 33 (-1) (baseNameOf path'));
outPath = path';
outputs = [ "out" ];
out = res;
outputName = "out";
};
in res;
/* If `cond' is true, return the attribute set `as',
@ -386,7 +391,7 @@ rec {
);
in f [] [rhs lhs];
/* A recursive variant of the update operator //. The recusion
/* A recursive variant of the update operator //. The recursion
stops when one of the attribute values is not an attribute set,
in which case the right hand side value takes precedence over the
left hand side value.
@ -455,7 +460,7 @@ rec {
getDev = getOutput "dev";
/* Pick the outputs of packages to place in buildInputs */
chooseDevOutputs = drvs: builtins.map (drv: if drv.outputUnspecified or false then drv.dev or drv else drv) drvs;
chooseDevOutputs = drvs: builtins.map getDev drvs;
/*** deprecated stuff ***/

View File

@ -56,16 +56,18 @@ rec {
ff = f origArgs;
overrideWith = newArgs: origArgs // (if builtins.isFunction newArgs then newArgs origArgs else newArgs);
in
if builtins.isAttrs ff then (ff //
{ override = newArgs: makeOverridable f (overrideWith newArgs);
overrideDerivation = fdrv:
makeOverridable (args: overrideDerivation (f args) fdrv) origArgs;
})
else if builtins.isFunction ff then
{ override = newArgs: makeOverridable f (overrideWith newArgs);
__functor = self: ff;
overrideDerivation = throw "overrideDerivation not yet supported for functors";
}
if builtins.isAttrs ff then (ff // {
override = newArgs: makeOverridable f (overrideWith newArgs);
overrideDerivation = fdrv:
makeOverridable (args: overrideDerivation (f args) fdrv) origArgs;
${if ff ? overrideAttrs then "overrideAttrs" else null} = fdrv:
makeOverridable (args: (f args).overrideAttrs fdrv) origArgs;
})
else if builtins.isFunction ff then {
override = newArgs: makeOverridable f (overrideWith newArgs);
__functor = self: ff;
overrideDerivation = throw "overrideDerivation not yet supported for functors";
}
else ff;

View File

@ -1,27 +1,47 @@
let
let
# trivial, often used functions
trivial = import ./trivial.nix;
# datatypes
attrsets = import ./attrsets.nix;
lists = import ./lists.nix;
strings = import ./strings.nix;
stringsWithDeps = import ./strings-with-deps.nix;
attrsets = import ./attrsets.nix;
# packaging
customisation = import ./customisation.nix;
maintainers = import ./maintainers.nix;
meta = import ./meta.nix;
sources = import ./sources.nix;
# module system
modules = import ./modules.nix;
options = import ./options.nix;
types = import ./types.nix;
meta = import ./meta.nix;
debug = import ./debug.nix;
misc = import ./deprecated.nix;
maintainers = import ./maintainers.nix;
# constants
licenses = import ./licenses.nix;
platforms = import ./platforms.nix;
systems = import ./systems.nix;
customisation = import ./customisation.nix;
licenses = import ./licenses.nix;
# misc
debug = import ./debug.nix;
generators = import ./generators.nix;
misc = import ./deprecated.nix;
# domain-specific
sandbox = import ./sandbox.nix;
fetchers = import ./fetchers.nix;
in
{ inherit trivial lists strings stringsWithDeps attrsets sources options
modules types meta debug maintainers licenses platforms systems sandbox;
{ inherit trivial
attrsets lists strings stringsWithDeps
customisation maintainers meta sources
modules options types
licenses platforms systems
debug generators misc
sandbox fetchers;
}
# !!! don't include everything at top-level; perhaps only the most
# commonly used functions.

12
lib/fetchers.nix Normal file
View File

@ -0,0 +1,12 @@
# snippets that can be shared by multiple fetchers (pkgs/build-support)
{
proxyImpureEnvVars = [
# We borrow these environment variables from the caller to allow
# easy proxy configuration. This is impure, but a fixed-output
# derivation like fetchurl is allowed to do so since its result is
# by definition pure.
"http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
];
}

93
lib/generators.nix Normal file
View File

@ -0,0 +1,93 @@
/* Functions that generate widespread file
* formats from nix data structures.
*
* They all follow a similar interface:
* generator { config-attrs } data
*
* Tests can be found in ./tests.nix
* Documentation in the manual, #sec-generators
*/
with import ./trivial.nix;
let
libStr = import ./strings.nix;
libAttr = import ./attrsets.nix;
flipMapAttrs = flip libAttr.mapAttrs;
in
rec {
/* Generate a line of key k and value v, separated by
* character sep. If sep appears in k, it is escaped.
* Helper for syntaxes with different separators.
*
* mkKeyValueDefault ":" "f:oo" "bar"
* > "f\:oo:bar"
*/
mkKeyValueDefault = sep: k: v:
"${libStr.escape [sep] k}${sep}${toString v}";
/* Generate a key-value-style config file from an attrset.
*
* mkKeyValue is the same as in toINI.
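*
* Example (illustrative; attribute names are emitted in sorted order):
*
*   toKeyValue {} { foo = "bar"; baz = 2; }
*
*> baz=2
*> foo=bar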
*/
toKeyValue = {
mkKeyValue ? mkKeyValueDefault "="
}: attrs:
let mkLine = k: v: mkKeyValue k v + "\n";
in libStr.concatStrings (libAttr.mapAttrsToList mkLine attrs);
/* Generate an INI-style config file from an
* attrset of sections to an attrset of key-value pairs.
*
* generators.toINI {} {
* foo = { hi = "${pkgs.hello}"; ciao = "bar"; };
* baz = { "also, integers" = 42; };
* }
*
*> [baz]
*> also, integers=42
*>
*> [foo]
*> ciao=bar
*> hi=/nix/store/y93qql1p5ggfnaqjjqhxcw0vqw95rlz0-hello-2.10
*
* The mk* configuration attributes can generically change
* the way sections and key-value strings are generated.
*
* For more examples see the test cases in ./tests.nix.
*/
toINI = {
# apply transformations (e.g. escapes) to section names
mkSectionName ? (name: libStr.escape [ "[" "]" ] name),
# format a setting line from key and value
mkKeyValue ? mkKeyValueDefault "="
}: attrsOfAttrs:
let
# map function to string for each key val
mapAttrsToStringsSep = sep: mapFn: attrs:
libStr.concatStringsSep sep
(libAttr.mapAttrsToList mapFn attrs);
mkSection = sectName: sectValues: ''
[${mkSectionName sectName}]
'' + toKeyValue { inherit mkKeyValue; } sectValues;
in
# map input to ini sections
mapAttrsToStringsSep "\n" mkSection attrsOfAttrs;
/* Generates JSON from an arbitrary (non-function) value.
* For more information see the documentation of the builtin.
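*
* Example (illustrative):
*
*   toJSON {} { a = 1; b = "hi"; }
*
*> {"a":1,"b":"hi"}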
*/
toJSON = {}: builtins.toJSON;
/* YAML has been a strict superset of JSON since 1.2, so we
* use toJSON. Before it only had a few differences referring
* to implicit typing rules, so it should work with older
* parsers as well.
*/
toYAML = {}@args: toJSON args;
}

View File

@ -65,6 +65,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Boost Software License 1.0";
};
beerware = spdx {
spdxId = "Beerware";
fullName = ''Beerware License'';
};
bsd2 = spdx {
spdxId = "BSD-2-Clause";
fullName = ''BSD 2-clause "Simplified" License'';
@ -105,6 +110,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Creative Commons Attribution Non Commercial Share Alike 4.0";
};
cc-by-nd-30 = spdx {
spdxId = "CC-BY-ND-3.0";
fullName = "Creative Commons Attribution-No Derivative Works v3.00";
};
cc-by-sa-25 = spdx {
spdxId = "CC-BY-SA-2.5";
fullName = "Creative Commons Attribution Share Alike 2.5";
@ -439,6 +449,12 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Sleepycat License";
};
smail = {
shortName = "smail";
fullName = "SMAIL General Public License";
url = http://metadata.ftp-master.debian.org/changelogs/main/d/debianutils/debianutils_4.8.1_copyright;
};
tcltk = spdx {
spdxId = "TCL";
fullName = "TCL/TK License";
@ -470,6 +486,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "The Unlicense";
};
upl = {
fullName = "Universal Permissive License";
url = "https://oss.oracle.com/licenses/upl/";
};
vim = spdx {
spdxId = "Vim";
fullName = "Vim License";

View File

@ -10,8 +10,10 @@
aaronschif = "Aaron Schif <aaronschif@gmail.com>";
abaldeau = "Andreas Baldeau <andreas@baldeau.net>";
abbradar = "Nikolay Amiantov <ab@fmap.me>";
abigailbuccaneer = "Abigail Bunyan <abigailbuccaneer@gmail.com>";
aboseley = "Adam Boseley <adam.boseley@gmail.com>";
abuibrahim = "Ruslan Babayev <ruslan@babayev.com>";
acowley = "Anthony Cowley <acowley@gmail.com>";
adev = "Adrien Devresse <adev@adev.name>";
Adjective-Object = "Maxwell Huang-Hobbs <mhuan13@gmail.com>";
adnelson = "Allen Nelson <ithinkican@gmail.com>";
@ -28,24 +30,30 @@
all = "Nix Committers <nix-commits@lists.science.uu.nl>";
ambrop72 = "Ambroz Bizjak <ambrop7@gmail.com>";
amiddelk = "Arie Middelkoop <amiddelk@gmail.com>";
amiloradovsky = "Andrew Miloradovsky <miloradovsky@gmail.com>";
amorsillo = "Andrew Morsillo <andrew.morsillo@gmail.com>";
AndersonTorres = "Anderson Torres <torres.anderson.85@gmail.com>";
anderspapitto = "Anders Papitto <anderspapitto@gmail.com>";
andres = "Andres Loeh <ksnixos@andres-loeh.de>";
andrewrk = "Andrew Kelley <superjoe30@gmail.com>";
andsild = "Anders Sildnes <andsild@gmail.com>";
aneeshusa = "Aneesh Agrawal <aneeshusa@gmail.com>";
antono = "Antono Vasiljev <self@antono.info>";
apeyroux = "Alexandre Peyroux <alex@px.io>";
ardumont = "Antoine R. Dumont <eniotna.t@gmail.com>";
aristid = "Aristid Breitkreuz <aristidb@gmail.com>";
arobyn = "Alexei Robyn <shados@shados.net>";
artuuge = "Artur E. Ruuge <artuuge@gmail.com>";
ashalkhakov = "Artyom Shalkhakov <artyom.shalkhakov@gmail.com>";
aske = "Kirill Boltaev <aske@fmap.me>";
asppsa = "Alastair Pharo <asppsa@gmail.com>";
astsmtl = "Alexander Tsamutali <astsmtl@yandex.ru>";
asymmetric = "Lorenzo Manacorda <lorenzo@mailbox.org>";
aszlig = "aszlig <aszlig@redmoonstudios.org>";
auntie = "Jonathan Glines <auntieNeo@gmail.com>";
avnik = "Alexander V. Nikolaev <avn@avnik.info>";
aycanirican = "Aycan iRiCAN <iricanaycan@gmail.com>";
bachp = "Pascal Bach <pascal.bach@nextrem.ch>";
badi = "Badi' Abdul-Wahid <abdulwahidc@gmail.com>";
balajisivaraman = "Balaji Sivaraman<sivaraman.balaji@gmail.com>";
Baughn = "Svein Ove Aas <sveina@gmail.com>";
@ -81,39 +89,52 @@
chris-martin = "Chris Martin <ch.martin@gmail.com>";
chrisjefferson = "Christopher Jefferson <chris@bubblescope.net>";
christopherpoole = "Christopher Mark Poole <mail@christopherpoole.net>";
ckampka = "Christian Kampka <christian@kampka.net>";
cko = "Christine Koppelt <christine.koppelt@gmail.com>";
cleverca22 = "Michael Bishop <cleverca22@gmail.com>";
cmcdragonkai = "Roger Qiu <roger.qiu@matrix.ai>";
cmfwyp = "cmfwyp <cmfwyp@riseup.net>";
coconnor = "Corey O'Connor <coreyoconnor@gmail.com>";
codsl = "codsl <codsl@riseup.net>";
codyopel = "Cody Opel <codyopel@gmail.com>";
colemickens = "Cole Mickens <cole.mickens@gmail.com>";
copumpkin = "Dan Peebles <pumpkingod@gmail.com>";
corngood = "David McFarland <corngood@gmail.com>";
coroa = "Jonas Hörsch <jonas@chaoflow.net>";
couchemar = "Andrey Pavlov <couchemar@yandex.ru>";
cransom = "Casey Ransom <cransom@hubns.net>";
cryptix = "Henry Bubert <cryptix@riseup.net>";
CrystalGamma = "Jona Stubbe <nixos@crystalgamma.de>";
cstrahan = "Charles Strahan <charles@cstrahan.com>";
cwoac = "Oliver Matthews <oliver@codersoffortune.net>";
DamienCassou = "Damien Cassou <damien@cassou.me>";
danbst = "Danylo Hlynskyi <abcz2.uprola@gmail.com>";
danielfullmer = "Daniel Fullmer <danielrf12@gmail.com>";
dasuxullebt = "Christoph-Simon Senjak <christoph.senjak@googlemail.com>";
davidak = "David Kleuker <post@davidak.de>";
davidrusu = "David Rusu <davidrusu.me@gmail.com>";
davorb = "Davor Babic <davor@davor.se>";
dbohdan = "Danyil Bohdan <danyil.bohdan@gmail.com>";
dbrock = "Daniel Brockman <daniel@brockman.se>";
deepfire = "Kosyrev Serge <_deepfire@feelingofgreen.ru>";
demin-dmitriy = "Dmitriy Demin <demindf@gmail.com>";
DerGuteMoritz = "Moritz Heidkamp <moritz@twoticketsplease.de>";
DerTim1 = "Tim Digel <tim.digel@active-group.de>";
desiderius = "Didier J. Devroye <didier@devroye.name>";
devhell = "devhell <\"^\"@regexmail.net>";
dezgeg = "Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>";
dfoxfranke = "Daniel Fox Franke <dfoxfranke@gmail.com>";
dgonyeo = "Derek Gonyeo <derek@gonyeo.com>";
dipinhora = "Dipin Hora <dipinhora+github@gmail.com>";
dmalikov = "Dmitry Malikov <malikov.d.y@gmail.com>";
dochang = "Desmond O. Chang <dochang@gmail.com>";
domenkozar = "Domen Kozar <domen@dev.si>";
doublec = "Chris Double <chris.double@double.co.nz>";
dpaetzel = "David Pätzel <david.a.paetzel@gmail.com>";
drets = "Dmytro Rets <dmitryrets@gmail.com>";
drewkett = "Andrew Burkett <burkett.andrew@gmail.com>";
dtzWill = "Will Dietz <nix@wdtz.org>";
e-user = "Alexander Kahl <nixos@sodosopa.io>";
ebzzry = "Rommel Martinez <ebzzry@gmail.com>";
ederoyd46 = "Matthew Brown <matt@ederoyd.co.uk>";
eduarrrd = "Eduard Bachmakov <e.bachmakov@gmail.com>";
@ -123,6 +144,7 @@
ehmry = "Emery Hemingway <emery@vfemail.net>";
eikek = "Eike Kettner <eike.kettner@posteo.de>";
elasticdog = "Aaron Bull Schaefer <aaron@elasticdog.com>";
eleanor = "Dejan Lukan <dejan@proteansec.com>";
elitak = "Eric Litak <elitak@gmail.com>";
ellis = "Ellis Whitehead <nixos@ellisw.net>";
epitrochoid = "Mabry Cervin <mpcervin@uncg.edu>";
@ -130,6 +152,7 @@
ericsagnes = "Eric Sagnes <eric.sagnes@gmail.com>";
erikryb = "Erik Rybakken <erik.rybakken@math.ntnu.no>";
ertes = "Ertugrul Söylemez <esz@posteo.de>";
ethercrow = "Dmitry Ivanov <ethercrow@gmail.com>";
exi = "Reno Reckling <nixos@reckling.org>";
exlevan = "Alexey Levan <exlevan@gmail.com>";
expipiplus1 = "Joe Hermaszewski <nix@monoid.al>";
@ -158,32 +181,40 @@
giogadi = "Luis G. Torres <lgtorres42@gmail.com>";
gleber = "Gleb Peregud <gleber.p@gmail.com>";
globin = "Robin Gloster <mail@glob.in>";
gnidorah = "Alex Ivanov <yourbestfriend@opmbx.org>";
goibhniu = "Cillian de Róiste <cillian.deroiste@gmail.com>";
Gonzih = "Max Gonzih <gonzih@gmail.com>";
goodrone = "Andrew Trachenko <goodrone@gmail.com>";
gpyh = "Yacine Hmito <yacine.hmito@gmail.com>";
grahamc = "Graham Christensen <graham@grahamc.com>";
gridaphobe = "Eric Seidel <eric@seidel.io>";
guibert = "David Guibert <david.guibert@gmail.com>";
guillaumekoenig = "Guillaume Koenig <guillaume.edward.koenig@gmail.com>";
guyonvarch = "Joris Guyonvarch <joris@guyonvarch.me>";
hakuch = "Jesse Haber-Kucharsky <hakuch@gmail.com>";
havvy = "Ryan Scheel <ryan.havvy@gmail.com>";
hbunke = "Hendrik Bunke <bunke.hendrik@gmail.com>";
hce = "Hans-Christian Esperer <hc@hcesperer.org>";
henrytill = "Henry Till <henrytill@gmail.com>";
hiberno = "Christian Lask <hiberno@hiberno.net>";
hinton = "Tom Hinton <t@larkery.com>";
hrdinka = "Christoph Hrdinka <c.nix@hrdinka.at>";
iand675 = "Ian Duncan <ian@iankduncan.com>";
ianwookim = "Ian-Woo Kim <ianwookim@gmail.com>";
domenkozar = "Domen Kozar <domen@dev.si>";
igsha = "Igor Sharonov <igor.sharonov@gmail.com>";
ikervagyok = "Balázs Lengyel <ikervagyok@gmail.com>";
ivan-tkatchev = "Ivan Tkatchev <tkatchev@gmail.com>";
j-keck = "Jürgen Keck <jhyphenkeck@gmail.com>";
jagajaga = "Arseniy Seroka <ars.seroka@gmail.com>";
javaguirre = "Javier Aguirre <contacto@javaguirre.net>";
jb55 = "William Casarin <bill@casarin.me>";
jbedo = "Justin Bedő <cu@cua0.org>";
jcumming = "Jack Cummings <jack@mudshark.org>";
jdagilliland = "Jason Gilliland <jdagilliland@gmail.com>";
jefdaj = "Jeffrey David Johnson <jefdaj@gmail.com>";
jerith666 = "Matt McHenry <github@matt.mchenryfamily.org>";
jfb = "James Felix Black <james@yamtime.com>";
jgeerds = "Jascha Geerds <jascha@jgeerds.name>";
jgertm = "Tim Jaeger <jger.tm@gmail.com>";
jgillich = "Jakob Gillich <jakob@gillich.me>";
jirkamarsik = "Jirka Marsik <jiri.marsik89@gmail.com>";
joachifm = "Joachim Fasting <joachifm@fastmail.fm>";
@ -191,17 +222,22 @@
joelmo = "Joel Moberg <joel.moberg@gmail.com>";
joelteon = "Joel Taylor <me@joelt.io>";
joko = "Ioannis Koutras <ioannis.koutras@gmail.com>";
jonafato = "Jon Banafato <jon@jonafato.com>";
jpbernardy = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
jraygauthier = "Raymond Gauthier <jraygauthier@gmail.com>";
juliendehos = "Julien Dehos <dehos@lisic.univ-littoral.fr>";
jwiegley = "John Wiegley <johnw@newartisans.com>";
jwilberding = "Jordan Wilberding <jwilberding@afiniate.com>";
jzellner = "Jeff Zellner <jeffz@eml.cc>";
kaiha = "Kai Harries <kai.harries@gmail.com>";
kamilchm = "Kamil Chmielewski <kamil.chm@gmail.com>";
kampfschlaefer = "Arnold Krille <arnold@arnoldarts.de>";
kevincox = "Kevin Cox <kevincox@kevincox.ca>";
khumba = "Bryan Gardiner <bog@khumba.net>";
KibaFox = "Kiba Fox <kiba.fox@foxypossibilities.com>";
kierdavis = "Kier Davis <kierdavis@gmail.com>";
kkallio = "Karn Kallio <tierpluspluslists@gmail.com>";
knedlsepp = "Josef Kemetmüller <josef.kemetmueller@gmail.com>";
koral = "Koral <koral@mailoo.org>";
kovirobi = "Kovacsics Robert <kovirobi@gmail.com>";
kragniz = "Louis Taylor <louis@kragniz.eu>";
@ -209,22 +245,25 @@
lassulus = "Lassulus <lassulus@gmail.com>";
layus = "Guillaume Maudoux <layus.on@gmail.com>";
ldesgoui = "Lucas Desgouilles <ldesgoui@gmail.com>";
league = "Christopher League <league@contrapunctus.net>";
lebastr = "Alexander Lebedev <lebastr@gmail.com>";
leenaars = "Michiel Leenaars <ml.software@leenaa.rs>";
leonardoce = "Leonardo Cecchi <leonardo.cecchi@gmail.com>";
lethalman = "Luca Bruno <lucabru@src.gnome.org>";
lewo = "Antoine Eiche <lewo@abesis.fr>";
lheckemann = "Linus Heckemann <git@sphalerite.org>";
lhvwb = "Nathaniel Baxter <nathaniel.baxter@gmail.com>";
lihop = "Leroy Hopson <nixos@leroy.geek.nz>";
linquize = "Linquize <linquize@yahoo.com.hk>";
linus = "Linus Arver <linusarver@gmail.com>";
lnl7 = "Daiderd Jordan <daiderd@gmail.com>";
loskutov = "Ignat Loskutov <ignat.loskutov@gmail.com>";
lovek323 = "Jason O'Conal <jason@oconal.id.au>";
lowfatcomputing = "Andreas Wagner <andreas.wagner@lowfatcomputing.org>";
lsix = "Lancelot SIX <lsix@lancelotsix.com>";
lucas8 = "Luc Chabassier <luc.linux@mailoo.org>";
ludo = "Ludovic Courtès <ludo@gnu.org>";
luispedro = "Luis Pedro Coelho <luis@luispedro.org>";
lukasepple = "Lukas Epple <post@lukasepple.de>";
lukego = "Luke Gorrie <luke@snabb.co>";
lw = "Sergey Sofeychuk <lw@fmap.me>";
madjar = "Georges Dubus <georges.dubus@compiletoi.net>";
@ -240,25 +279,32 @@
martingms = "Martin Gammelsæter <martin@mg.am>";
matejc = "Matej Cotman <cotman.matej@gmail.com>";
mathnerd314 = "Mathnerd314 <mathnerd314.gph+hs@gmail.com>";
matthewbauer = "Matthew Bauer <mjbauer95@gmail.com>";
matthiasbeyer = "Matthias Beyer <mail@beyermatthias.de>";
maurer = "Matthew Maurer <matthew.r.maurer+nix@gmail.com>";
mbakke = "Marius Bakke <mbakke@fastmail.com>";
matthewbauer = "Matthew Bauer <mjbauer95@gmail.com>";
mbbx6spp = "Susan Potter <me@susanpotter.net>";
mbe = "Brandon Edens <brandonedens@gmail.com>";
mboes = "Mathieu Boespflug <mboes@tweag.net>";
mcmtroffaes = "Matthias C. M. Troffaes <matthias.troffaes@gmail.com>";
mdaiter = "Matthew S. Daiter <mdaiter8121@gmail.com>";
meditans = "Carlo Nucera <meditans@gmail.com>";
meisternu = "Matt Miemiec <meister@krutt.org>";
mguentner = "Maximilian Güntner <code@klandest.in>";
mic92 = "Jörg Thalheim <joerg@higgsboson.tk>";
michaelpj = "Michael Peyton Jones <michaelpj@gmail.com>";
michalrus = "Michal Rus <m@michalrus.com>";
michelk = "Michel Kuhlmann <michel@kuhlmanns.info>";
mikefaille = "Michaël Faille <michael@faille.io>";
mimadrid = "Miguel Madrid <mimadrid@ucm.es>";
mingchuan = "Ming Chuan <ming@culpring.com>";
mirdhyn = "Merlin Gaillard <mirdhyn@gmail.com>";
mirrexagon = "Andrew Abbott <mirrexagon@mirrexagon.com>";
mjanczyk = "Marcin Janczyk <m@dragonvr.pl>";
mlieberman85 = "Michael Lieberman <mlieberman85@gmail.com>";
modulistic = "Pablo Costa <modulistic@gmail.com>";
mog = "Matthew O'Gorman <mog-lists@rldn.net>";
montag451 = "montag451 <montag451@laposte.net>";
moosingin3space = "Nathan Moos <moosingin3space@gmail.com>";
moretea = "Maarten Hoogendoorn <maarten@moretea.nl>";
mornfall = "Petr Ročkai <me@mornfall.net>";
@ -266,6 +312,7 @@
mounium = "Katona László <muoniurn@gmail.com>";
MP2E = "Cray Elliott <MP2E@archlinux.us>";
mpscholten = "Marc Scholten <marc@mpscholten.de>";
mpsyco = "Francis St-Amour <fr.st-amour@gmail.com>";
msackman = "Matthew Sackman <matthew@wellquite.org>";
mschristiansen = "Mikkel Christiansen <mikkel@rheosystems.com>";
msteen = "Matthijs Steen <emailmatthijs@gmail.com>";
@ -273,23 +320,29 @@
mudri = "James Wood <lamudri@gmail.com>";
muflax = "Stefan Dorn <mail@muflax.com>";
myrl = "Myrl Hex <myrl.0xf@gmail.com>";
namore = "Roman Naumann <namor@hemio.de>";
nand0p = "Fernando Jose Pando <nando@hex7.com>";
nathan-gs = "Nathan Bijnens <nathan@nathan.gs>";
Nate-Devv = "Nathan Moore <natedevv@gmail.com>";
nathan-gs = "Nathan Bijnens <nathan@nathan.gs>";
nckx = "Tobias Geerinckx-Rice <tobias.geerinckx.rice@gmail.com>";
nequissimus = "Tim Steinbach <tim@nequissimus.com>";
nfjinjing = "Jinjing Wang <nfjinjing@gmail.com>";
nhooyr = "Anmol Sethi <anmol@aubble.com>";
nicknovitski = "Nick Novitski <nixpkgs@nicknovitski.com>";
nico202 = "Nicolò Balzarotti <anothersms@gmail.com>";
notthemessiah = "Brian Cohen <brian.cohen.88@gmail.com>";
NikolaMandic = "Ratko Mladic <nikola@mandic.email>";
notthemessiah = "Brian Cohen <brian.cohen.88@gmail.com>";
np = "Nicolas Pouillard <np.nix@nicolaspouillard.fr>";
nslqqq = "Nikita Mikhailov <nslqqq@gmail.com>";
obadz = "obadz <obadz-nixos@obadz.com>";
ocharles = "Oliver Charles <ollie@ocharles.org.uk>";
odi = "Oliver Dunkl <oliver.dunkl@gmail.com>";
offline = "Jaka Hudoklin <jakahudoklin@gmail.com>";
oida = "oida <oida@posteo.de>";
okasu = "Okasu <oka.sux@gmail.com>";
olcai = "Erik Timan <dev@timan.info>";
olejorgenb = "Ole Jørgen Brønner <olejorgenb@yahoo.no>";
orbekk = "KJ Ørbekk <kjetil.orbekk@gmail.com>";
orbitz = "Malcolm Matalka <mmatalka@gmail.com>";
osener = "Ozan Sener <ozan@ozansener.com>";
otwieracz = "Slawomir Gonet <slawek@otwiera.cz>";
@ -300,6 +353,7 @@
palo = "Ingolf Wanger <palipalo9@googlemail.com>";
pashev = "Igor Pashev <pashev.igor@gmail.com>";
pawelpacana = "Paweł Pacana <pawel.pacana@gmail.com>";
periklis = "theopompos@gmail.com";
pesterhazy = "Paulus Esterhazy <pesterhazy@gmail.com>";
peterhoeg = "Peter Hoeg <peter@hoeg.com>";
peti = "Peter Simons <simons@cryp.to>";
@ -316,11 +370,14 @@
plcplc = "Philip Lykke Carlsen <plcplc@gmail.com>";
pmahoney = "Patrick Mahoney <pat@polycrystal.org>";
pmiddend = "Philipp Middendorf <pmidden@secure.mailbox.org>";
polyrod = "Maurizio Di Pietro <dc1mdp@gmail.com>";
prikhi = "Pavan Rikhi <pavan.rikhi@gmail.com>";
primeos = "Michael Weiss <dev.primeos@gmail.com>";
profpatsch = "Profpatsch <mail@profpatsch.de>";
proglodyte = "Proglodyte <proglodyte23@gmail.com>";
pshendry = "Paul Hendry <paul@pshendry.com>";
psibi = "Sibi <sibi@psibi.in>";
pstn = "Philipp Steinpaß <philipp@xndr.de>";
pSub = "Pascal Wittmann <mail@pascal-wittmann.de>";
puffnfresh = "Brian McKenna <brian@brianmckenna.org>";
pxc = "Patrick Callahan <patrick.callahan@latitudeengineering.com>";
@ -331,14 +388,17 @@
rardiol = "Ricardo Ardissone <ricardo.ardissone@gmail.com>";
rasendubi = "Alexey Shmalko <rasen.dubi@gmail.com>";
raskin = "Michael Raskin <7c6f434c@mail.ru>";
rbasso = "Rafael Basso <rbasso@sharpgeeks.net>";
redbaron = "Maxim Ivanov <ivanov.maxim@gmail.com>";
redvers = "Redvers Davies <red@infect.me>";
refnil = "Martin Lavoie <broemartino@gmail.com>";
regnat = "Théophane Hufschmitt <regnat@regnat.ovh>";
relrod = "Ricky Elrod <ricky@elrod.me>";
renzo = "Renzo Carbonara <renzocarbonara@gmail.com>";
retrry = "Tadas Barzdžius <retrry@gmail.com>";
rick68 = "Wei-Ming Yang <rick68@gmail.com>";
rickynils = "Rickard Nilsson <rickynils@gmail.com>";
rlupton20 = "Richard Lupton <richard.lupton@gmail.com>";
rnhmjoj = "Michele Guerini Rocco <micheleguerinirocco@me.com>";
rob = "Rob Vermaas <rob.vermaas@gmail.com>";
robberer = "Longrin Wischnewski <robberer@freakmail.de>";
@ -347,19 +407,21 @@
roblabla = "Robin Lambertz <robinlambertz+dev@gmail.com>";
roconnor = "Russell O'Connor <roconnor@theorem.ca>";
romildo = "José Romildo Malaquias <malaquias@gmail.com>";
ronny = "Ronny Pfannschmidt <nixos@ronnypfannschmidt.de>";
rszibele = "Richard Szibele <richard_szibele@hotmail.com>";
rushmorem = "Rushmore Mushambi <rushmore@webenchanter.com>";
rvl = "Rodney Lorrimar <dev+nix@rodney.id.au>";
rvlander = "Gaëtan André <rvlander@gaetanandre.eu>";
ryanartecona = "Ryan Artecona <ryanartecona@gmail.com>";
ryantm = "Ryan Mulligan <ryan@ryantm.com>";
ryansydnor = "Ryan Sydnor <ryan.t.sydnor@gmail.com>";
ryantm = "Ryan Mulligan <ryan@ryantm.com>";
rycee = "Robert Helgesson <robert@rycee.net>";
ryneeverett = "Ryne Everett <ryneeverett@gmail.com>";
s1lvester = "Markus Silvester <s1lvester@bockhacker.me>";
samuelrivas = "Samuel Rivas <samuelrivas@gmail.com>";
sander = "Sander van der Burg <s.vanderburg@tudelft.nl>";
schmitthenner = "Fabian Schmitthenner <development@schmitthenner.eu>";
schneefux = "schneefux <schneefux+nixos_pkg@schneefux.xyz>";
schristo = "Scott Christopher <schristopher@konputa.com>";
scolobb = "Sergiu Ivanov <sivanov@colimite.fr>";
sepi = "Raffael Mancini <raffael@mancini.lu>";
@ -376,15 +438,17 @@
skeidel = "Sven Keidel <svenkeidel@gmail.com>";
skrzyp = "Jakub Skrzypnik <jot.skrzyp@gmail.com>";
sleexyz = "Sean Lee <freshdried@gmail.com>";
smironov = "Sergey Mironov <ierton@gmail.com>";
smironov = "Sergey Mironov <grrwlf@gmail.com>";
solson = "Scott Olson <scott@solson.me>";
spacefrogg = "Michael Raitza <spacefrogg-nixos@meterriblecrew.net>";
spencerjanssen = "Spencer Janssen <spencerjanssen@gmail.com>";
spinus = "Tomasz Czyż <tomasz.czyz@gmail.com>";
sprock = "Roger Mason <rmason@mun.ca>";
spwhitt = "Spencer Whitt <sw@swhitt.me>";
srhb = "Sarah Brofeldt <sbrofeldt@gmail.com>";
SShrike = "Severen Redwood <severen@shrike.me>";
stephenmw = "Stephen Weinberg <stephen@q5comm.com>";
sternenseemann = "Lukas Epple <post@lukasepple.de>";
steveej = "Stefan Junker <mail@stefanjunker.de>";
swarren83 = "Shawn Warren <shawn.w.warren@gmail.com>";
swistak35 = "Rafał Łasocha <me@swistak35.com>";
@ -412,18 +476,25 @@
ttuegel = "Thomas Tuegel <ttuegel@gmail.com>";
tv = "Tomislav Viljetić <tv@shackspace.de>";
tvestelind = "Tomas Vestelind <tomas.vestelind@fripost.org>";
tvorog = "Marsel Zaripov <marszaripov@gmail.com>";
twey = "James Twey Kay <twey@twey.co.uk>";
uralbash = "Svintsov Dmitry <root@uralbash.ru>";
urkud = "Yury G. Kudryashov <urkud+nix@ya.ru>";
uwap = "uwap <me@uwap.name>";
vandenoever = "Jos van den Oever <jos@vandenoever.info>";
vanzef = "Ivan Solyankin <vanzef@gmail.com>";
vbgl = "Vincent Laporte <Vincent.Laporte@gmail.com>";
vbmithr = "Vincent Bernardoff <vb@luminar.eu.org>";
vcunat = "Vladimír Čunát <vcunat@gmail.com>";
vdemeester = "Vincent Demeester <vincent@sbr.pm>";
veprbl = "Dmitry Kalinkin <veprbl@gmail.com>";
viric = "Lluís Batlle i Rossell <viric@viric.name>";
vizanto = "Danny Wilson <danny@prime.vc>";
vklquevs = "vklquevs <vklquevs@gmail.com>";
vlstill = "Vladimír Štill <xstill@fi.muni.cz>";
vmandela = "Venkateswara Rao Mandela <venkat.mandela@gmail.com>";
volhovm = "Mikhail Volkhov <volhovm.cs@gmail.com>";
volth = "Jaroslavas Pocepko <jaroslavas@volth.com>";
vozz = "Oliver Hunt <oliver.huntuk@gmail.com>";
vrthra = "Rahul Gopinath <rahul@gopinath.org>";
wedens = "wedens <kirill.wedens@gmail.com>";
@ -437,12 +508,14 @@
wscott = "Wayne Scott <wsc9tt@gmail.com>";
wyvie = "Elijah Rum <elijahrum@gmail.com>";
yarr = "Dmitry V. <savraz@gmail.com>";
yochai = "Yochai <yochai@titat.info>";
yurrriq = "Eric Bailey <eric@ericb.me>";
z77z = "Marco Maggesi <maggesi@math.unifi.it>";
zagy = "Christian Zagrodnick <cz@flyingcircus.io>";
zauberpony = "Elmar Athmer <elmar@athmer.org>";
zef = "Zef Hemel <zef@zef.me>";
zimbatm = "zimbatm <zimbatm@zimbatm.com>";
zohl = "Al Zohali <zohl@fmap.me>";
zoomulator = "Kim Simmons <zoomulator@gmail.com>";
amiloradovsky = "Andrew Miloradovsky <miloradovsky@gmail.com>";
zraexy = "David Mell <zraexy@gmail.com>";
}

View File

@ -1,4 +1,5 @@
with import ./lists.nix;
with import ./strings.nix;
with import ./trivial.nix;
with import ./attrsets.nix;
with import ./options.nix;
@ -230,12 +231,20 @@ rec {
correspond to the definition of 'loc' in 'opt.file'. */
mergeOptionDecls = loc: opts:
foldl' (res: opt:
if opt.options ? default && res ? default ||
opt.options ? example && res ? example ||
opt.options ? description && res ? description ||
opt.options ? apply && res ? apply ||
# Accept to merge options which have identical types.
opt.options ? type && res ? type && opt.options.type.name != res.type.name
let t = res.type;
t' = opt.options.type;
mergedType = t.typeMerge t'.functor;
typesMergeable = mergedType != null;
typeSet = if (bothHave "type") && typesMergeable
then { type = mergedType; }
else {};
bothHave = k: opt.options ? ${k} && res ? ${k};
in
if bothHave "default" ||
bothHave "example" ||
bothHave "description" ||
bothHave "apply" ||
(bothHave "type" && (! typesMergeable))
then
throw "The option `${showOption loc}' in `${opt.file}' is already declared in ${showFiles res.declarations}."
else
@ -257,7 +266,7 @@ rec {
in opt.options // res //
{ declarations = res.declarations ++ [opt.file];
options = submodules;
}
} // typeSet
) { inherit loc; declarations = []; options = []; } opts;
/* Merge all the definitions of an option to produce the final
@ -366,10 +375,13 @@ rec {
if def._type or "" == "merge" then
concatMap dischargeProperties def.contents
else if def._type or "" == "if" then
if def.condition then
dischargeProperties def.content
if isBool def.condition then
if def.condition then
dischargeProperties def.content
else
[ ]
else
[ ]
throw "mkIf called with a non-Boolean condition"
else
[ def ];
@ -421,12 +433,14 @@ rec {
options = opt.options or
(throw "Option `${showOption loc'}' has type optionSet but has no option attribute, in ${showFiles opt.declarations}.");
f = tp:
let optionSetIn = type: (tp.name == type) && (tp.functor.wrapped.name == "optionSet");
in
if tp.name == "option set" || tp.name == "submodule" then
throw "The option ${showOption loc} uses submodules without a wrapping type, in ${showFiles opt.declarations}."
else if tp.name == "attribute set of option sets" then types.attrsOf (types.submodule options)
else if tp.name == "list or attribute set of option sets" then types.loaOf (types.submodule options)
else if tp.name == "list of option sets" then types.listOf (types.submodule options)
else if tp.name == "null or option set" then types.nullOr (types.submodule options)
else if optionSetIn "attrsOf" then types.attrsOf (types.submodule options)
else if optionSetIn "loaOf" then types.loaOf (types.submodule options)
else if optionSetIn "listOf" then types.listOf (types.submodule options)
else if optionSetIn "nullOr" then types.nullOr (types.submodule options)
else tp;
in
if opt.type.getSubModules or null == null
@ -545,6 +559,84 @@ rec {
use = builtins.trace "Obsolete option `${showOption from}' is used. It was renamed to `${showOption to}'.";
};
/* Return a module that causes a warning to be shown if any of the "from"
options is defined; the defined values can be used in the "mergeFn" to set
the "to" value.
This function can be used to merge multiple options into one that has a
different type.
"mergeFn" takes the module "config" as a parameter and must return a value
of "to" option type.
mkMergedOptionModule
[ [ "a" "b" "c" ]
[ "d" "e" "f" ] ]
[ "x" "y" "z" ]
(config:
let value = p: getAttrFromPath p config;
in
if (value [ "a" "b" "c" ]) == true then "foo"
else if (value [ "d" "e" "f" ]) == true then "bar"
else "baz")
- options.a.b.c is a removed boolean option
- options.d.e.f is a removed boolean option
- options.x.y.z is a new str option that combines a.b.c and d.e.f
functionality
This shows a warning if either a.b.c or d.e.f is set, and sets the value of
x.y.z to the result of the merge function
*/
mkMergedOptionModule = from: to: mergeFn:
{ config, options, ... }:
{
options = foldl recursiveUpdate {} (map (path: setAttrByPath path (mkOption {
visible = false;
# To use the value in mergeFn without triggering errors
default = "_mkMergedOptionModule";
})) from);
config = {
warnings = filter (x: x != "") (map (f:
let val = getAttrFromPath f config;
opt = getAttrFromPath f options;
in
optionalString
(val != "_mkMergedOptionModule")
"The option `${showOption f}' defined in ${showFiles opt.files} has been changed to `${showOption to}' that has a different type. Please read `${showOption to}' documentation and update your configuration accordingly."
) from);
} // setAttrByPath to (mkMerge
(optional
(any (f: (getAttrFromPath f config) != "_mkMergedOptionModule") from)
(mergeFn config)));
};
/* Single "from" version of mkMergedOptionModule.
Return a module that causes a warning to be shown if the "from" option is
defined; the defined value can be used in the "mergeFn" to set the "to"
value.
This function can be used to change an option into another that has a
different type.
"mergeFn" takes the module "config" as a parameter and must return a value of
"to" option type.
mkChangedOptionModule [ "a" "b" "c" ] [ "x" "y" "z" ]
(config:
let value = getAttrFromPath [ "a" "b" "c" ] config;
in
if value > 100 then "high"
else "normal")
- options.a.b.c is a removed int option
- options.x.y.z is a new str option that supersedes a.b.c
This shows a warning if a.b.c is set, and sets the value of x.y.z to the
result of the change function
*/
mkChangedOptionModule = from: to: changeFn:
mkMergedOptionModule [ from ] to changeFn;
/* Like mkRenamedOptionModule, but doesn't show a warning. */
mkAliasOptionModule = from: to: doRename {
inherit from to;

View File

@ -92,7 +92,7 @@ rec {
internal = opt.internal or false;
visible = opt.visible or true;
readOnly = opt.readOnly or false;
type = opt.type.name or null;
type = opt.type.description or null;
}
// (if opt ? example then { example = scrubOptionValue opt.example; } else {})
// (if opt ? default then { default = scrubOptionValue opt.default; } else {})

View File

@ -12,19 +12,19 @@ rec {
# Bring in a path as a source, filtering out all Subversion and CVS
# directories, as well as backup files (*~).
cleanSource =
let filter = name: type: let baseName = baseNameOf (toString name); in ! (
# Filter out Subversion and CVS directories.
(type == "directory" && (baseName == ".git" || baseName == ".svn" || baseName == "CVS" || baseName == ".hg")) ||
# Filter out backup files.
lib.hasSuffix "~" baseName ||
# Filter out generated files.
lib.hasSuffix ".o" baseName ||
lib.hasSuffix ".so" baseName ||
# Filter out nix-build result symlinks
(type == "symlink" && lib.hasPrefix "result" baseName)
);
in src: builtins.filterSource filter src;
cleanSourceFilter = name: type: let baseName = baseNameOf (toString name); in ! (
# Filter out Subversion and CVS directories.
(type == "directory" && (baseName == ".git" || baseName == ".svn" || baseName == "CVS" || baseName == ".hg")) ||
# Filter out backup files.
lib.hasSuffix "~" baseName ||
# Filter out generated files.
lib.hasSuffix ".o" baseName ||
lib.hasSuffix ".so" baseName ||
# Filter out nix-build result symlinks
(type == "symlink" && lib.hasPrefix "result" baseName)
);
cleanSource = builtins.filterSource cleanSourceFilter;
# Get all files ending with the specified suffices from the given

View File

@ -130,4 +130,94 @@ runTests {
expected = false;
};
/* Generator tests */
# these tests assume attributes are converted to lists
# in alphabetical order
testMkKeyValueDefault = {
expr = generators.mkKeyValueDefault ":" "f:oo" "bar";
expected = ''f\:oo:bar'';
};
testToKeyValue = {
expr = generators.toKeyValue {} {
key = "value";
"other=key" = "baz";
};
expected = ''
key=value
other\=key=baz
'';
};
testToINIEmpty = {
expr = generators.toINI {} {};
expected = "";
};
testToINIEmptySection = {
expr = generators.toINI {} { foo = {}; bar = {}; };
expected = ''
[bar]
[foo]
'';
};
testToINIDefaultEscapes = {
expr = generators.toINI {} {
"no [ and ] allowed unescaped" = {
"and also no = in keys" = 42;
};
};
expected = ''
[no \[ and \] allowed unescaped]
and also no \= in keys=42
'';
};
testToINIDefaultFull = {
expr = generators.toINI {} {
"section 1" = {
attribute1 = 5;
x = "Me-se JarJar Binx";
};
"foo[]" = {
"he\\h=he" = "this is okay";
};
};
expected = ''
[foo\[\]]
he\h\=he=this is okay
[section 1]
attribute1=5
x=Me-se JarJar Binx
'';
};
/* right now only invocation check */
testToJSONSimple =
let val = {
foobar = [ "baz" 1 2 3 ];
};
in {
expr = generators.toJSON {} val;
# trivial implementation
expected = builtins.toJSON val;
};
/* right now only invocation check */
testToYAMLSimple =
let val = {
list = [ { one = 1; } { two = 2; } ];
all = 42;
};
in {
expr = generators.toYAML {} val;
# trival implementation
expected = builtins.toJSON val;
};
}

View File

@ -53,6 +53,31 @@ rec {
# argument, but it's nice this way if several uses of `extends` are cascaded.
extends = f: rattrs: self: let super = rattrs self; in super // f self super;
# Create an overridable, recursive attribute set. For example:
#
# nix-repl> obj = makeExtensible (self: { })
#
# nix-repl> obj
# { __unfix__ = «lambda»; extend = «lambda»; }
#
# nix-repl> obj = obj.extend (self: super: { foo = "foo"; })
#
# nix-repl> obj
# { __unfix__ = «lambda»; extend = «lambda»; foo = "foo"; }
#
# nix-repl> obj = obj.extend (self: super: { foo = super.foo + " + "; bar = "bar"; foobar = self.foo + self.bar; })
#
# nix-repl> obj
# { __unfix__ = «lambda»; bar = "bar"; extend = «lambda»; foo = "foo + "; foobar = "foo + bar"; }
makeExtensible = makeExtensibleWithCustomName "extend";
# Same as `makeExtensible` but the name of the extending attribute is
# customized.
makeExtensibleWithCustomName = extenderName: rattrs:
fix' rattrs // {
${extenderName} = f: makeExtensibleWithCustomName extenderName (extends f rattrs);
};
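# Illustrative sketch (not part of the change): the extending attribute is
# named by the caller, e.g.
#
#   nix-repl> obj = makeExtensibleWithCustomName "extendPkgs" (self: { })
#
#   nix-repl> obj = obj.extendPkgs (self: super: { foo = "foo"; })
#
#   nix-repl> obj.foo
#   "foo"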
# Flip the order of the arguments of a binary function.
flip = f: a: b: f b a;

View File

@ -17,10 +17,43 @@ rec {
};
# Default type merging function
# takes two type functors and returns the merged type
defaultTypeMerge = f: f':
let wrapped = f.wrapped.typeMerge f'.wrapped.functor;
payload = f.binOp f.payload f'.payload;
in
# cannot merge different types
if f.name != f'.name
then null
# simple types
else if (f.wrapped == null && f'.wrapped == null)
&& (f.payload == null && f'.payload == null)
then f.type
# composed types
else if (f.wrapped != null && f'.wrapped != null) && (wrapped != null)
then f.type wrapped
# value types
else if (f.payload != null && f'.payload != null) && (payload != null)
then f.type payload
else null;
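# Illustrative sketch (not part of the change): merging two enum functors
# combines their payloads via binOp, e.g.
#   (types.enum [ "a" ]).typeMerge (types.enum [ "b" ]).functor
# yields a type equivalent to types.enum [ "a" "b" ], while merging types
# with different names (say enum and listOf) returns null.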
# Default type functor
defaultFunctor = name: {
inherit name;
type = types."${name}" or null;
wrapped = null;
payload = null;
binOp = a: b: null;
};
isOptionType = isType "option-type";
mkOptionType =
{ # Human-readable representation of the type.
{ # Human-readable representation of the type, should be equivalent to
# the type function name.
name
, # Description of the type, defined recursively by embedding the wrapped type, if any.
description ? null
, # Function applied to each definition that should return true if
# it's type-correct, false otherwise.
check ? (x: true)
@ -36,12 +69,26 @@ rec {
getSubOptions ? prefix: {}
, # List of modules if any, or null if none.
getSubModules ? null
, # Function for building the same option type with a different list of
, # Function for building the same option type with a different list of
# modules.
substSubModules ? m: null
, # Function that merges type declarations.
# internal, takes a functor as argument and returns the merged type.
# returning null means the type is not mergeable
typeMerge ? defaultTypeMerge functor
, # The type functor.
# internal, representation of the type as an attribute set.
# name: name of the type
# type: type function.
# wrapped: the type wrapped in case of compound types.
# payload: values of the type, two payloads of the same type must be
# combinable with the binOp binary operation.
# binOp: binary operation that merges two payloads of the same type.
functor ? defaultFunctor name
}:
{ _type = "option-type";
inherit name check merge getSubOptions getSubModules substSubModules;
inherit name check merge getSubOptions getSubModules substSubModules typeMerge functor;
description = if description == null then name else description;
};
@ -52,29 +99,39 @@ rec {
};
bool = mkOptionType {
name = "boolean";
name = "bool";
description = "boolean";
check = isBool;
merge = mergeEqualOption;
};
int = mkOptionType {
name = "integer";
int = mkOptionType rec {
name = "int";
description = "integer";
check = isInt;
merge = mergeOneOption;
};
str = mkOptionType {
name = "string";
name = "str";
description = "string";
check = isString;
merge = mergeOneOption;
};
# Merge multiple definitions by concatenating them (with the given
# separator between the values).
separatedString = sep: mkOptionType {
name = "string";
separatedString = sep: mkOptionType rec {
name = "separatedString";
description = "string";
check = isString;
merge = loc: defs: concatStringsSep sep (getValues defs);
functor = (defaultFunctor name) // {
payload = sep;
binOp = sepLhs: sepRhs:
if sepLhs == sepRhs then sepLhs
else null;
};
};
lines = separatedString "\n";
@ -86,7 +143,8 @@ rec {
string = separatedString "";
attrs = mkOptionType {
name = "attribute set";
name = "attrs";
description = "attribute set";
check = isAttrs;
merge = loc: foldl' (res: def: mergeAttrs res def.value) {};
};
@ -114,8 +172,9 @@ rec {
# drop this in the future:
list = builtins.trace "`types.list' is deprecated; use `types.listOf' instead" types.listOf;
listOf = elemType: mkOptionType {
name = "list of ${elemType.name}s";
listOf = elemType: mkOptionType rec {
name = "listOf";
description = "list of ${elemType.description}s";
check = isList;
merge = loc: defs:
map (x: x.value) (filter (x: x ? value) (concatLists (imap (n: def:
@ -132,10 +191,12 @@ rec {
getSubOptions = prefix: elemType.getSubOptions (prefix ++ ["*"]);
getSubModules = elemType.getSubModules;
substSubModules = m: listOf (elemType.substSubModules m);
functor = (defaultFunctor name) // { wrapped = elemType; };
};
attrsOf = elemType: mkOptionType {
name = "attribute set of ${elemType.name}s";
attrsOf = elemType: mkOptionType rec {
name = "attrsOf";
description = "attribute set of ${elemType.description}s";
check = isAttrs;
merge = loc: defs:
mapAttrs (n: v: v.value) (filterAttrs (n: v: v ? value) (zipAttrsWith (name: defs:
@ -147,6 +208,7 @@ rec {
getSubOptions = prefix: elemType.getSubOptions (prefix ++ ["<name>"]);
getSubModules = elemType.getSubModules;
substSubModules = m: attrsOf (elemType.substSubModules m);
functor = (defaultFunctor name) // { wrapped = elemType; };
};
# List or attribute set of ...
@ -165,18 +227,21 @@ rec {
def;
listOnly = listOf elemType;
attrOnly = attrsOf elemType;
in mkOptionType {
name = "list or attribute set of ${elemType.name}s";
in mkOptionType rec {
name = "loaOf";
description = "list or attribute set of ${elemType.description}s";
check = x: isList x || isAttrs x;
merge = loc: defs: attrOnly.merge loc (imap convertIfList defs);
getSubOptions = prefix: elemType.getSubOptions (prefix ++ ["<name?>"]);
getSubModules = elemType.getSubModules;
substSubModules = m: loaOf (elemType.substSubModules m);
functor = (defaultFunctor name) // { wrapped = elemType; };
};
# List or element of ...
loeOf = elemType: mkOptionType {
name = "element or list of ${elemType.name}s";
loeOf = elemType: mkOptionType rec {
name = "loeOf";
description = "element or list of ${elemType.description}s";
check = x: isList x || elemType.check x;
merge = loc: defs:
let
@ -189,18 +254,22 @@ rec {
else if !isString res then
throw "The option `${showOption loc}' does not have a string value, in ${showFiles (getFiles defs)}."
else res;
functor = (defaultFunctor name) // { wrapped = elemType; };
};
uniq = elemType: mkOptionType {
inherit (elemType) name check;
uniq = elemType: mkOptionType rec {
name = "uniq";
inherit (elemType) description check;
merge = mergeOneOption;
getSubOptions = elemType.getSubOptions;
getSubModules = elemType.getSubModules;
substSubModules = m: uniq (elemType.substSubModules m);
functor = (defaultFunctor name) // { wrapped = elemType; };
};
nullOr = elemType: mkOptionType {
name = "null or ${elemType.name}";
nullOr = elemType: mkOptionType rec {
name = "nullOr";
description = "null or ${elemType.description}";
check = x: x == null || elemType.check x;
merge = loc: defs:
let nrNulls = count (def: def.value == null) defs; in
@ -211,6 +280,7 @@ rec {
getSubOptions = elemType.getSubOptions;
getSubModules = elemType.getSubModules;
substSubModules = m: nullOr (elemType.substSubModules m);
functor = (defaultFunctor name) // { wrapped = elemType; };
};
submodule = opts:
@ -236,6 +306,12 @@ rec {
args = { name = ""; }; }).options;
getSubModules = opts';
substSubModules = m: submodule m;
functor = (defaultFunctor name) // {
# Merging of submodules is done as part of mergeOptionDecls, as we have to annotate
# each submodule with its location.
payload = [];
binOp = lhs: rhs: [];
};
};
enum = values:
@ -245,23 +321,43 @@ rec {
else if builtins.isInt v then builtins.toString v
else ''<${builtins.typeOf v}>'';
in
mkOptionType {
name = "one of ${concatMapStringsSep ", " show values}";
mkOptionType rec {
name = "enum";
description = "one of ${concatMapStringsSep ", " show values}";
check = flip elem values;
merge = mergeOneOption;
functor = (defaultFunctor name) // { payload = values; binOp = a: b: unique (a ++ b); };
};
either = t1: t2: mkOptionType {
name = "${t1.name} or ${t2.name}";
either = t1: t2: mkOptionType rec {
name = "either";
description = "${t1.description} or ${t2.description}";
check = x: t1.check x || t2.check x;
merge = mergeOneOption;
merge = loc: defs:
let
defList = map (d: d.value) defs;
in
if all (x: t1.check x) defList
then t1.merge loc defs
else if all (x: t2.check x) defList
then t2.merge loc defs
else mergeOneOption loc defs;
typeMerge = f':
let mt1 = t1.typeMerge (elemAt f'.wrapped 0).functor;
mt2 = t2.typeMerge (elemAt f'.wrapped 1).functor;
in
if (name == f'.name) && (mt1 != null) && (mt2 != null)
then functor.type mt1 mt2
else null;
functor = (defaultFunctor name) // { wrapped = [ t1 t2 ]; };
};
# Obsolete alternative to configOf. It takes its option
# declarations from the options attribute of containing option
# declaration.
optionSet = mkOptionType {
name = /* builtins.trace "types.optionSet is deprecated; use types.submodule instead" */ "option set";
name = builtins.trace "types.optionSet is deprecated; use types.submodule instead" "optionSet";
description = "option set";
};
# Augment the given type with an additional type check function.

View File

@ -1,6 +1,8 @@
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p coreutils findutils gnused nix wget
set -efuo pipefail
SRCS=
if [ -d "$1" ]; then
SRCS="$(pwd)/$1/srcs.nix"

View File

@ -38,6 +38,12 @@ while test -n "$1"; do
nix-build $TRAVIS_BUILD_DIR/pkgs/top-level/release.nix --attr tarball --show-trace
;;
nixpkgs-unstable)
echo "=== Checking nixpkgs unstable job"
nix-instantiate $TRAVIS_BUILD_DIR/pkgs/top-level/release.nix --attr unstable --show-trace
;;
nixpkgs-lint)
echo "=== Checking nixpkgs lint"

131
maintainers/scripts/update.nix Executable file
View File

@ -0,0 +1,131 @@
{ package ? null
, maintainer ? null
}:
# TODO: add assert statements
let
pkgs = import ./../../default.nix { };
packagesWith = cond: return: set:
pkgs.lib.flatten
(pkgs.lib.mapAttrsToList
(name: pkg:
let
result = builtins.tryEval (
if pkgs.lib.isDerivation pkg && cond name pkg
then [(return name pkg)]
else if pkg.recurseForDerivations or false || pkg.recurseForRelease or false
then packagesWith cond return pkg
else []
);
in
if result.success then result.value
else []
)
set
);
packagesWithUpdateScriptAndMaintainer = maintainer':
let
maintainer =
if ! builtins.hasAttr maintainer' pkgs.lib.maintainers then
builtins.throw "Maintainer with name `${maintainer'} does not exist in `lib/maintainers.nix`."
else
builtins.getAttr maintainer' pkgs.lib.maintainers;
in
packagesWith (name: pkg: builtins.hasAttr "updateScript" pkg &&
(if builtins.hasAttr "maintainers" pkg.meta
then (if builtins.isList pkg.meta.maintainers
then builtins.elem maintainer pkg.meta.maintainers
else maintainer == pkg.meta.maintainers
)
else false
)
)
(name: pkg: pkg)
pkgs;
packageByName = name:
let
package = pkgs.lib.attrByPath (pkgs.lib.splitString "." name) null pkgs;
in
if package == null then
builtins.throw "Package with an attribute name `${name}` does not exists."
else if ! builtins.hasAttr "updateScript" package then
builtins.throw "Package with an attribute name `${name}` does have an `passthru.updateScript` defined."
else
package;
packages =
if package != null then
[ (packageByName package) ]
else if maintainer != null then
packagesWithUpdateScriptAndMaintainer maintainer
else
builtins.throw "No arguments provided.\n\n${helpText}";
helpText = ''
Please run:
% nix-shell maintainers/scripts/update.nix --argstr maintainer garbas
to run all update scripts for all packages that list \`garbas\` as a maintainer
and have \`updateScript\` defined, or:
% nix-shell maintainers/scripts/update.nix --argstr package <package-name>
to run the update script for a specific package.
'';
runUpdateScript = package: ''
echo -ne " - ${package.name}: UPDATING ..."\\r
${package.updateScript} &> ${(builtins.parseDrvName package.name).name}.log
CODE=$?
if [ "$CODE" != "0" ]; then
echo " - ${package.name}: ERROR "
echo ""
echo "--- SHOWING ERROR LOG FOR ${package.name} ----------------------"
echo ""
cat ${(builtins.parseDrvName package.name).name}.log
echo ""
echo "--- SHOWING ERROR LOG FOR ${package.name} ----------------------"
exit $CODE
else
rm ${(builtins.parseDrvName package.name).name}.log
fi
echo " - ${package.name}: DONE. "
'';
in pkgs.stdenv.mkDerivation {
name = "nixpkgs-update-script";
buildCommand = ''
echo ""
echo "----------------------------------------------------------------"
echo ""
echo "Not possible to update packages using \`nix-build\`"
echo ""
echo "${helpText}"
echo "----------------------------------------------------------------"
exit 1
'';
shellHook = ''
echo ""
echo "Going to be running update for following packages:"
echo "${builtins.concatStringsSep "\n" (map (x: " - ${x.name}") packages)}"
echo ""
read -n1 -r -p "Press space to continue..." confirm
if [ "$confirm" = "" ]; then
echo ""
echo "Running update for:"
${builtins.concatStringsSep "\n" (map runUpdateScript packages)}
echo ""
echo "Packages updated!"
exit 0
else
echo "Aborting!"
exit 1
fi
'';
}

View File

@ -101,15 +101,15 @@ cleaner_script="$(echo "$name_list_canonical" | denormalize_name |
# Add github usernames
if [ -n "$NIXPKGS_GITHUB_NAME_CACHE" ]; then
github_adder_script="$(echo "$github_name_list" |
github_adder_script="$(mktemp)"
echo "$github_name_list" |
grep -E "$(echo "$name_list_canonical" | cut -f 2 |
tr '\n' '|' )" |
sort | uniq |
sed -re 's/(.*)\t(.*)/s| \1$| \1\t\2|g;/' |
denormalize_name
)"
denormalize_name > "$github_adder_script"
else
github_adder_script=''
github_adder_script='/dev/null'
fi
echo "$name_list" | denormalize_name
@ -118,5 +118,5 @@ echo
echo "$git_data" | cut -f 1 |
sed -e "$cleaner_script" |
sort | uniq -c | sort -k1n | sed -re "$github_adder_script" |
sort | uniq -c | sort -k1n | sed -rf "$github_adder_script" |
sed -re 's/^ *([0-9]+) /\1\t/'

View File

@ -47,4 +47,12 @@ where <literal>eth0</literal> should be replaced with the desired
external interface. Note that <literal>ve-+</literal> is a wildcard
that matches all container interfaces.</para>
<para>If you are using Network Manager, you need to explicitly prevent
it from managing container interfaces:
<programlisting>
networking.networkmanager.unmanaged = [ "interface-name:ve-*" ];
</programlisting>
</para>
</section>

View File

@ -42,29 +42,30 @@ construction, so without them,
elements.)</para>
<para>Even greater customisation is possible using the function
<varname>overrideDerivation</varname>. While the
<varname>overrideAttrs</varname>. While the
<varname>override</varname> mechanism above overrides the arguments of
a package function, <varname>overrideDerivation</varname> allows
changing the <emphasis>result</emphasis> of the function. This
permits changing any aspect of the package, such as the source code.
a package function, <varname>overrideAttrs</varname> allows
changing the <emphasis>attributes</emphasis> passed to <literal>mkDerivation</literal>.
This permits changing any aspect of the package, such as the source code.
For instance, if you want to override the source code of Emacs, you
can say:
<programlisting>
environment.systemPackages =
[ (pkgs.lib.overrideDerivation pkgs.emacs (attrs: {
name = "emacs-25.0-pre";
src = /path/to/my/emacs/tree;
}))
];
environment.systemPackages = [
(pkgs.emacs.overrideAttrs (oldAttrs: {
name = "emacs-25.0-pre";
src = /path/to/my/emacs/tree;
}))
];
</programlisting>
Here, <varname>overrideDerivation</varname> takes the Nix derivation
Here, <varname>overrideAttrs</varname> takes the Nix derivation
specified by <varname>pkgs.emacs</varname> and produces a new
derivation in which the original's <literal>name</literal> and
<literal>src</literal> attributes have been replaced by the given
values. The original attributes are accessible via
<varname>attrs</varname>.</para>
values by re-calling <literal>stdenv.mkDerivation</literal>.
The original attributes are accessible via the function argument,
which is conventionally named <varname>oldAttrs</varname>.</para>
<para>The overrides shown above are not global. They do not affect
the original package; other packages in Nixpkgs continue to depend on

View File

@ -12,8 +12,15 @@ can disable IPv6 support globally by setting:
<programlisting>
networking.enableIPv6 = false;
</programlisting>
</programlisting></para>
<para>You can disable IPv6 on a single interface using a normal sysctl (in this
example, we use interface <varname>eth0</varname>):
<programlisting>
boot.kernel.sysctl."net.ipv6.conf.eth0.disable_ipv6" = true;
</programlisting>
</para>
</section>

View File

@ -129,7 +129,7 @@ default; run <literal>nix-env -i nix-repl</literal> to get it. A
typical use:
<screen>
$ nix-repl '&lt;nixos>'
$ nix-repl '&lt;nixpkgs/nixos>'
nix-repl> config.networking.hostName
"mandark"

View File

@ -16,12 +16,22 @@ networking.networkmanager.enable = true;
some desktop managers (e.g., GNOME) enable NetworkManager
automatically for you.</para>
<para>All users that should have permission to change network settings
must belong to the <code>networkmanager</code> group.</para>
<para>All users that should have permission to change network settings must
belong to the <code>networkmanager</code> group:
<programlisting>
users.extraUsers.youruser.extraGroups = [ "networkmanager" ];
</programlisting>
</para>
<para>NetworkManager is controlled using either <command>nmcli</command> or
<command>nmtui</command> (curses-based terminal user interface). See their
manual pages for details on their usage. Some desktop environments (GNOME, KDE)
have their own configuration tools for NetworkManager.</para>
<note><para><code>networking.networkmanager</code> and
<code>networking.wireless</code> can not be enabled at the same time:
you can still connect to the wireless networks using
<code>networking.wireless</code> (WPA Supplicant) cannot be enabled at the same
time: you can still connect to the wireless networks using
NetworkManager.</para></note>
</section>

View File

@ -94,14 +94,11 @@ let
"--stringparam chunk.toc ${toc}"
];
olinkDB = stdenv.mkDerivation {
name = "manual-olinkdb";
inherit sources;
buildInputs = [ libxml2 libxslt ];
buildCommand = ''
olinkDB = runCommand "manual-olinkdb"
{ inherit sources;
buildInputs = [ libxml2 libxslt ];
}
''
${copySources}
xsltproc \
@ -133,15 +130,14 @@ let
</targetset>
EOF
'';
};
in rec {
# The NixOS options in JSON format.
optionsJSON = stdenv.mkDerivation {
name = "options-json";
buildCommand = ''
optionsJSON = runCommand "options-json"
{ meta.description = "List of NixOS options in JSON format";
}
''
# Export list of options in different format.
dst=$out/share/doc/nixos
mkdir -p $dst
@ -154,18 +150,14 @@ in rec {
echo "file json $dst/options.json" >> $out/nix-support/hydra-build-products
''; # */
meta.description = "List of NixOS options in JSON format";
};
# Generate the NixOS manual.
manual = stdenv.mkDerivation {
name = "nixos-manual";
inherit sources;
buildInputs = [ libxml2 libxslt ];
buildCommand = ''
manual = runCommand "nixos-manual"
{ inherit sources;
buildInputs = [ libxml2 libxslt ];
meta.description = "The NixOS manual in HTML format";
allowedReferences = ["out"];
}
''
${copySources}
# Check the validity of the manual sources.
@ -192,20 +184,12 @@ in rec {
echo "doc manual $dst" >> $out/nix-support/hydra-build-products
''; # */
meta.description = "The NixOS manual in HTML format";
allowedReferences = ["out"];
};
manualEpub = stdenv.mkDerivation {
name = "nixos-manual-epub";
inherit sources;
buildInputs = [ libxml2 libxslt zip ];
buildCommand = ''
manualEpub = runCommand "nixos-manual-epub"
{ inherit sources;
buildInputs = [ libxml2 libxslt zip ];
}
''
${copySources}
# Check the validity of the manual sources.
@ -234,17 +218,15 @@ in rec {
mkdir -p $out/nix-support
echo "doc-epub manual $manual" >> $out/nix-support/hydra-build-products
'';
};
# Generate the NixOS manpages.
manpages = stdenv.mkDerivation {
name = "nixos-manpages";
inherit sources;
buildInputs = [ libxml2 libxslt ];
buildCommand = ''
manpages = runCommand "nixos-manpages"
{ inherit sources;
buildInputs = [ libxml2 libxslt ];
allowedReferences = ["out"];
}
''
${copySources}
# Check the validity of the man pages sources.
@ -264,7 +246,4 @@ in rec {
./man-pages.xml
'';
allowedReferences = ["out"];
};
}

View File

@ -14,8 +14,10 @@ NixOS.</para>
<xi:include href="sources.xml" />
<xi:include href="writing-modules.xml" />
<xi:include href="building-parts.xml" />
<xi:include href="writing-documentation.xml" />
<xi:include href="building-nixos.xml" />
<xi:include href="nixos-tests.xml" />
<xi:include href="testing-installer.xml" />
<xi:include href="releases.xml" />
</part>

View File

@ -31,9 +31,9 @@ options = {
<varlistentry>
<term><varname>type</varname></term>
<listitem>
<para>The type of the option (see below). It may be omitted,
but that’s not advisable since it may lead to errors that are
hard to diagnose.</para>
<para>The type of the option (see <xref linkend='sec-option-types' />).
It may be omitted, but that’s not advisable since it may lead to errors
that are hard to diagnose.</para>
</listitem>
</varlistentry>
@ -65,86 +65,92 @@ options = {
</para>
<para>Here is a non-exhaustive list of option types:
<section xml:id="sec-option-declarations-eot"><title>Extensible Option
Types</title>
<variablelist>
<para>Extensible option types is a feature that allows extending certain type
declarations through multiple module files.
This feature only works with a restricted set of types, namely
<literal>enum</literal> and <literal>submodules</literal> and any composed
forms of them.</para>
<varlistentry>
<term><varname>types.bool</varname></term>
<listitem>
<para>A Boolean.</para>
<para>Extensible option types can be used for <literal>enum</literal> options
that affect multiple modules, or as an alternative to related
<literal>enable</literal> options.</para>
<para>As an example, we will take the case of display managers. There is a
central display manager module for generic display manager options and a
module file per display manager backend (slim, kdm, gdm ...).
</para>
<para>There are two approaches to this module structure:
<itemizedlist>
<listitem><para>Managing the display managers independently by adding an
enable option to every display manager module backend. (NixOS)</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>types.int</varname></term>
<listitem>
<para>An integer.</para>
<listitem><para>Managing the display managers in the central module by
adding an option to select which display manager backend to use.</para>
</listitem>
</varlistentry>
</itemizedlist>
</para>
<varlistentry>
<term><varname>types.str</varname></term>
<listitem>
<para>A string.</para>
</listitem>
</varlistentry>
<para>Both approaches have problems.</para>
<para>Making backends independent can quickly become hard to manage. For
display managers, there can be only one enabled at a time, but the type
system cannot enforce this restriction as there is no relation between
each backend's <literal>enable</literal> option. As a result, this restriction
has to be done explicitly by adding assertions in each display manager
backend module.</para>
<varlistentry>
<term><varname>types.lines</varname></term>
<listitem>
<para>A string. If there are multiple definitions, they are
concatenated, with newline characters in between.</para>
</listitem>
</varlistentry>
<para>On the other hand, managing the display manager backends in the
central module requires changing the central module option every time
a new backend is added or removed.</para>
<varlistentry>
<term><varname>types.path</varname></term>
<listitem>
<para>A path, defined as anything that, when coerced to a
string, starts with a slash. This includes derivations.</para>
</listitem>
</varlistentry>
<para>By using extensible option types, it is possible to create a placeholder
option in the central module (<xref linkend='ex-option-declaration-eot-service'
/>), and to extend it in each backend module (<xref
linkend='ex-option-declaration-eot-backend-slim' />, <xref
linkend='ex-option-declaration-eot-backend-kdm' />).</para>
<para>As a result, <literal>displayManager.enable</literal> option values can
be added without changing the main service module file and the type system
automatically enforces that there can only be a single display manager
enabled.</para>
<varlistentry>
<term><varname>types.package</varname></term>
<listitem>
<para>A derivation (such as <literal>pkgs.hello</literal>) or a
store path (such as
<filename>/nix/store/1ifi1cfbfs5iajmvwgrbmrnrw3a147h9-hello-2.10</filename>).</para>
</listitem>
</varlistentry>
<example xml:id='ex-option-declaration-eot-service'><title>Extensible type
placeholder in the service module</title>
<screen>
services.xserver.displayManager.enable = mkOption {
description = "Display manager to use";
type = with types; nullOr (enum [ ]);
};</screen></example>
<varlistentry>
<term><varname>types.listOf</varname> <replaceable>t</replaceable></term>
<listitem>
<para>A list of elements of type <replaceable>t</replaceable>
(e.g., <literal>types.listOf types.str</literal> is a list of
strings). Multiple definitions are concatenated together.</para>
</listitem>
</varlistentry>
<example xml:id='ex-option-declaration-eot-backend-slim'><title>Extending
<literal>services.xserver.displayManager.enable</literal> in the
<literal>slim</literal> module</title>
<screen>
services.xserver.displayManager.enable = mkOption {
type = with types; nullOr (enum [ "slim" ]);
};</screen></example>
<varlistentry>
<term><varname>types.attrsOf</varname> <replaceable>t</replaceable></term>
<listitem>
<para>A set of elements of type <replaceable>t</replaceable>
(e.g., <literal>types.attrsOf types.int</literal> is a set of
name/value pairs, the values being integers).</para>
</listitem>
</varlistentry>
<example xml:id='ex-option-declaration-eot-backend-kdm'><title>Extending
<literal>services.xserver.displayManager.enable</literal> in the <literal>kdm</literal>
module</title>
<screen>
services.xserver.displayManager.enable = mkOption {
type = with types; nullOr (enum [ "kdm" ]);
};</screen></example>
<varlistentry>
<term><varname>types.nullOr</varname> <replaceable>t</replaceable></term>
<listitem>
<para>Either the value <literal>null</literal> or something of
type <replaceable>t</replaceable>.</para>
</listitem>
</varlistentry>
<para>The placeholder declaration is a standard <literal>mkOption</literal>
declaration, but it is important that extensible option declarations only use
the <literal>type</literal> argument.</para>
</variablelist>
You can also create new types using the function
<varname>mkOptionType</varname>. See
<filename>lib/types.nix</filename> in Nixpkgs for details.</para>
<para>Extensible option types work with any of the composed variants of
<literal>enum</literal> such as
<literal>with types; nullOr (enum [ "foo" "bar" ])</literal>
or <literal>with types; listOf (enum [ "foo" "bar" ])</literal>.</para>
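<para>As a sketch of how such an option is then defined (the value shown is
only illustrative), a configuration simply picks one of the values contributed
by the backend modules:
<screen>
services.xserver.displayManager.enable = "slim";</screen></para>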
</section>
</section>

View File

@ -0,0 +1,446 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-option-types">
<title>Options Types</title>
<para>Option types are a way to put constraints on the values a module option
can take.
Types are also responsible for how values are merged in case of multiple
value definitions.</para>
<section><title>Basic Types</title>
<para>Basic types are the simplest available types in the module system.
Basic types include multiple string types that mainly differ in how
definition merging is handled.</para>
<variablelist>
<varlistentry>
<term><varname>types.bool</varname></term>
<listitem><para>A boolean; its values can be <literal>true</literal> or
<literal>false</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.int</varname></term>
<listitem><para>An integer.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.path</varname></term>
<listitem><para>A filesystem path, defined as anything that when coerced to
a string starts with a slash. Even though derivations can be considered
paths, the more specific <literal>types.package</literal> should be
preferred.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.package</varname></term>
<listitem><para>A derivation or a store path.</para></listitem>
</varlistentry>
</variablelist>
<para>String related types:</para>
<variablelist>
<varlistentry>
<term><varname>types.str</varname></term>
<listitem><para>A string. Multiple definitions cannot be
merged.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.lines</varname></term>
<listitem><para>A string. Multiple definitions are concatenated with a
newline <literal>"\n"</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.commas</varname></term>
<listitem><para>A string. Multiple definitions are concatenated with a comma
<literal>","</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.envVar</varname></term>
<listitem><para>A string. Multiple definitions are concatenated with a
collon <literal>":"</literal>.</para></listitem>
</varlistentry>
</variablelist>
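<para>As an illustration (a minimal sketch; the option names below are made
up for the example), basic types are used directly as the
<literal>type</literal> argument of <literal>mkOption</literal>:
<screen>
port = mkOption {
  type = types.int;
  default = 8080;
  description = "Port to listen on.";
};
banner = mkOption {
  type = types.lines;
  default = "";
  description = "Message of the day; definitions are joined with newlines.";
};</screen></para>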
</section>
<section><title>Value Types</title>
<para>Value types are types that take a value parameter. The value types
in the library are <literal>enum</literal>, <literal>separatedString</literal>
and <literal>submodule</literal>.</para>
<variablelist>
<varlistentry>
<term><varname>types.enum</varname> <replaceable>l</replaceable></term>
<listitem><para>One element of the list <replaceable>l</replaceable>, e.g.
<literal>types.enum [ "left" "right" ]</literal>. Multiple definitions
cannot be merged.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.separatedString</varname>
<replaceable>sep</replaceable></term>
<listitem><para>A string with a custom separator
<replaceable>sep</replaceable>, e.g. <literal>types.separatedString
"|"</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.submodule</varname> <replaceable>o</replaceable></term>
<listitem><para>A set of sub options <replaceable>o</replaceable>.
<replaceable>o</replaceable> can be an attribute set or a function
returning an attribute set. Submodules are used in composed types to
create modular options. Submodules are detailed in <xref
linkend='section-option-types-submodule' />.</para></listitem>
</varlistentry>
</variablelist>
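<para>A short sketch (the option name is made up for the example) of an
<literal>enum</literal> option, which only accepts the listed values:
<screen>
logLevel = mkOption {
  type = types.enum [ "debug" "info" "warn" "error" ];
  default = "info";
  description = "Verbosity of the log output.";
};</screen></para>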
</section>
<section><title>Composed Types</title>
<para>Composed types are types that take a type as parameter. <literal>listOf
int</literal> and <literal>either int str</literal> are examples of
composed types.</para>
<variablelist>
<varlistentry>
<term><varname>types.listOf</varname> <replaceable>t</replaceable></term>
<listitem><para>A list of elements of type <replaceable>t</replaceable>, e.g.
<literal>types.listOf int</literal>. Multiple definitions are merged
with list concatenation.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.attrsOf</varname> <replaceable>t</replaceable></term>
<listitem><para>An attribute set where all the values are of
type <replaceable>t</replaceable>. Multiple definitions result in the
joined attribute set.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.loaOf</varname> <replaceable>t</replaceable></term>
<listitem><para>An attribute set or a list of elements of type
<replaceable>t</replaceable>. Multiple definitions are merged according to the
value.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.nullOr</varname> <replaceable>t</replaceable></term>
<listitem><para><literal>null</literal> or type
<replaceable>t</replaceable>. Multiple definitions are merged according
to type <replaceable>t</replaceable>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.uniq</varname> <replaceable>t</replaceable></term>
<listitem><para>Ensures that type <replaceable>t</replaceable> cannot be
merged. It is used to ensure option definitions are declared only
once.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>types.either</varname> <replaceable>t1</replaceable>
<replaceable>t2</replaceable></term>
<listitem><para>Type <replaceable>t1</replaceable> or type
<replaceable>t2</replaceable>, e.g. <literal>with types; either int
str</literal>. Multiple definitions cannot be
merged.</para></listitem>
</varlistentry>
</variablelist>
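<para>A brief sketch combining composed types (the option names are made up
for the example):
<screen>
extraHosts = mkOption {
  type = with types; attrsOf (listOf str);
  default = {};
  description = "Host names keyed by address; lists are concatenated on merge.";
};
socketPath = mkOption {
  type = with types; nullOr path;
  default = null;
  description = "Optional path to a UNIX socket.";
};</screen></para>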
</section>
<section xml:id='section-option-types-submodule'><title>Submodule</title>
<para>Submodule is a very powerful type that defines a set of sub-options that
are handled like a separate module.
It is especially interesting when used with composed types like
<literal>attrsOf</literal> or <literal>listOf</literal>.</para>
<para>The submodule type takes a parameter <replaceable>o</replaceable>, which
should be a set, or a function returning a set with an
<literal>options</literal> key defining the sub-options.
The option set can be defined directly (<xref linkend='ex-submodule-direct'
/>) or as reference (<xref linkend='ex-submodule-reference' />).</para>
<para>Submodule option definitions are type-checked according to the option
declarations. It is possible to declare submodule options inside a submodule's
sub-options for even higher modularity.</para>
<example xml:id='ex-submodule-direct'><title>Directly defined submodule</title>
<screen>
options.mod = mkOption {
name = "mod";
description = "submodule example";
type = with types; listOf (submodule {
options = {
foo = mkOption {
type = int;
};
bar = mkOption {
type = str;
};
};
});
};</screen></example>
<example xml:id='ex-submodule-reference'><title>Submodule defined as a
reference</title>
<screen>
let
modOptions = {
options = {
foo = mkOption {
type = int;
};
bar = mkOption {
type = int;
};
};
};
in
options.mod = mkOption {
description = "submodule example";
type = with types; listOf (submodule modOptions);
};</screen></example>
<section><title>Composed with <literal>listOf</literal></title>
<para>When composed with <literal>listOf</literal>, submodule allows multiple
definitions of the submodule option set.</para>
<example xml:id='ex-submodule-listof-declaration'><title>Declaration of a list
of submodules</title>
<screen>
options.mod = mkOption {
description = "submodule example";
type = with types; listOf (submodule {
options = {
foo = mkOption {
type = int;
};
bar = mkOption {
type = str;
};
};
});
};</screen></example>
<example xml:id='ex-submodule-listof-definition'><title>Definition of a list of
submodules</title>
<screen>
config.mod = [
{ foo = 1; bar = "one"; }
{ foo = 2; bar = "two"; }
];</screen></example>
</section>
<section><title>Composed with <literal>attrsOf</literal></title>
<para>When composed with <literal>attrsOf</literal>, submodule allows multiple
named definitions of the submodule option set.</para>
<example xml:id='ex-submodule-attrsof-declaration'><title>Declaration of
attribute sets of submodules</title>
<screen>
options.mod = mkOption {
description = "submodule example";
type = with types; attrsOf (submodule {
options = {
foo = mkOption {
type = int;
};
bar = mkOption {
type = str;
};
};
});
};</screen></example>
<example xml:id='ex-submodule-attrsof-definition'><title>Definition of
attribute sets of submodules</title>
<screen>
config.mod.one = { foo = 1; bar = "one"; };
config.mod.two = { foo = 2; bar = "two"; };</screen></example>
</section>
</section>
<section><title>Extending types</title>
<para>Types are mainly characterized by their <literal>check</literal> and
<literal>merge</literal> functions.</para>
<variablelist>
<varlistentry>
<term><varname>check</varname></term>
<listitem><para>The function to type check the value. Takes a value as
parameter and returns a boolean.
It is possible to extend a type check with the
<literal>addCheck</literal> function (<xref
linkend='ex-extending-type-check-1' />), or to fully override the
check function (<xref linkend='ex-extending-type-check-2' />).</para>
<example xml:id='ex-extending-type-check-1'><title>Adding a type check</title>
<screen>
byte = mkOption {
description = "An integer between 0 and 255.";
type = addCheck (x: x &gt;= 0 &amp;&amp; x &lt;= 255) types.int;
};</screen></example>
<example xml:id='ex-extending-type-check-2'><title>Overriding a type
check</title>
<screen>
nixThings = mkOption {
description = "words that start with 'nix'";
type = types.str // {
check = (x: lib.hasPrefix "nix" x)
};
};</screen></example>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>merge</varname></term>
<listitem><para>Function to merge the option values when multiple values
are set.
The function takes two parameters: <literal>loc</literal>, the option path as a
list of strings, and <literal>defs</literal>, the list of defined values.
It is possible to override a type merge function for custom
needs.</para></listitem>
</varlistentry>
</variablelist>
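<para>As a sketch (illustrative only, not taken from an existing module), a
merge function can be overridden in the same way as a check function, for
instance to join string definitions with a space instead of refusing to merge
them:
<screen>
spaceSeparated = types.str // {
  merge = loc: defs: lib.concatStringsSep " " (map (d: d.value) defs);
};</screen></para>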
</section>
<section><title>Custom Types</title>
<para>Custom types can be created with the <literal>mkOptionType</literal>
function.
As type creation includes some more complex topics such as submodule handling,
it is recommended to get familiar with <filename
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/types.nix">types.nix</filename>
code before creating a new type.</para>
<para>The only required parameter is <literal>name</literal>.</para>
<variablelist>
<varlistentry>
<term><varname>name</varname></term>
<listitem><para>A string representation of the type function
name.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>description</varname></term>
<listitem><para>Description of the type used in documentation. Gives
information about the type and any of its arguments.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>check</varname></term>
<listitem><para>A function to type check the definition value. Takes the
definition value as a parameter and returns a boolean indicating the
type check result, <literal>true</literal> for success and
<literal>false</literal> for failure.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>merge</varname></term>
<listitem><para>A function to merge multiple definition values. Takes two
parameters:</para>
<variablelist>
<varlistentry>
<term><replaceable>loc</replaceable></term>
<listitem><para>The option path as a list of strings, e.g.
<literal>["boot" "loader "grub"
"enable"]</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><replaceable>defs</replaceable></term>
<listitem><para>The list of sets of defined <literal>value</literal>
and <literal>file</literal> where the value was defined, e.g.
<literal>[ { file = "/foo.nix"; value = 1; } { file = "/bar.nix";
value = 2; } ]</literal>. The <literal>merge</literal> function
should return the merged value or throw an error in case the
values are impossible or not meant to be merged.</para></listitem>
</varlistentry>
</variablelist>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>getSubOptions</varname></term>
<listitem><para>For composed types that can take a submodule as type
parameter, this function generates sub-options documentation. It takes
the current option prefix as a list and returns the set of sub-options.
Usually defined in a recursive manner by adding a term to the prefix,
e.g. <literal>prefix: elemType.getSubOptions (prefix ++
[<replaceable>"prefix"</replaceable>])</literal> where
<replaceable>"prefix"</replaceable> is the newly added
prefix.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>getSubModules</varname></term>
<listitem><para>For composed types that can take a submodule as type
parameter, this function should return the type parameter's submodules.
If the type parameter is called <literal>elemType</literal>, the
function should just recursively look into submodules by returning
<literal>elemType.getSubModules</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>substSubModules</varname></term>
<listitem><para>For composed types that can take a submodule as type
parameter, this function can be used to substitute the parameter of a
submodule type. It takes a module as parameter and returns the type with
the submodule options substituted. It is usually defined as a type
function call with a recursive call to
<literal>substSubModules</literal>, e.g. for a type
<literal>composedType</literal> that takes an <literal>elemType</literal>
type parameter, this function should be defined as <literal>m:
composedType (elemType.substSubModules m)</literal>.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>typeMerge</varname></term>
<listitem><para>A function to merge multiple type declarations. Takes the
<literal>functor</literal> of the type to merge as parameter. A
<literal>null</literal> return value means that the types cannot be
merged.</para>
<variablelist>
<varlistentry>
<term><replaceable>f</replaceable></term>
<listitem><para>The <literal>functor</literal> of the type to
merge.</para></listitem>
</varlistentry>
</variablelist>
<para>Note: There is a generic <literal>defaultTypeMerge</literal> that
works with most value and composed types.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>functor</varname></term>
<listitem><para>An attribute set representing the type. It is used for type
operations and has the following keys:</para>
<variablelist>
<varlistentry>
<term><varname>type</varname></term>
<listitem><para>The type function.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>wrapped</varname></term>
<listitem><para>Holds the type parameter for composed types.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><varname>payload</varname></term>
<listitem><para>Holds the value parameter for value types.
The types that have a <literal>payload</literal> are the
<literal>enum</literal>, <literal>separatedString</literal> and
<literal>submodule</literal> types.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>binOp</varname></term>
<listitem><para>A binary operation that can merge the payloads of two
types of the same kind. Defined as a function that takes two payloads as
parameters and returns the merged payload.</para></listitem>
</varlistentry>
</variablelist>
</listitem>
</varlistentry>
</variablelist>
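<para>Putting these together, a minimal sketch of a custom type (the name
<literal>positiveInt</literal> is made up for the example; only
<literal>name</literal> is strictly required):
<screen>
positiveInt = mkOptionType {
  name = "positiveInt";
  description = "positive integer";
  check = x: builtins.isInt x &amp;&amp; x &gt; 0;
  merge = mergeOneOption;
};</screen></para>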
</section>
</section>

View File

@ -0,0 +1,241 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="ch-releases">
<title>Releases</title>
<section xml:id="release-process">
<title>Release process</title>
<para>
Going through an example of releasing NixOS 15.09:
</para>
<section xml:id="one-month-before-the-beta">
<title>One month before the beta</title>
<itemizedlist spacing="compact">
<listitem>
<para>
Send an email to the nix-dev mailing list as a warning about the upcoming beta "feature freeze" in a month.
</para>
</listitem>
<listitem>
<para>
Discuss with Eelco Dolstra and the community (via IRC, ML) about what will reach the deadline.
Any issue or Pull Request targeting the release should have assigned milestone.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="at-beta-release-time">
<title>At beta release time</title>
<itemizedlist spacing="compact">
<listitem>
<para>
Rename <literal>rl-unstable.xml</literal> -&gt;
<literal>rl-1509.xml</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>git tag -a -m &quot;Release 15.09-beta&quot; 15.09-beta &amp;&amp; git push --tags</literal>
</para>
</listitem>
<listitem>
<para>
From the master branch run <literal>git checkout -B release-15.09</literal>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixos-org-configurations/pull/18">
Make sure channel is created at http://nixos.org/channels/.
</link>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/settings/branches">
Lock the branch on github (so developers can’t force push)
</link>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/compare/bdf161ed8d21...6b63c4616790">bump
<literal>system.defaultChannel</literal> attribute in
<literal>nixos/modules/misc/version.nix</literal></link>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/commit/d6b08acd1ccac0d9d502c4b635e00b04d3387f06">update
<literal>versionSuffix</literal> in
<literal>nixos/release.nix</literal></link>, use
<literal>git log --format=%an|wc -l</literal> to get commit
count
</para>
</listitem>
<listitem>
<para>
<literal>echo -n &quot;16.03&quot; &gt; .version</literal> in
master.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/commit/b8a4095003e27659092892a4708bb3698231a842">pick
a new name for unstable branch.</link>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/13559">Create
an issue for tracking Zero Hydra Failures progress. ZHF is an effort
to get build failures down to zero.</link>
</para>
</listitem>
<listitem>
<para>
Use https://lwn.net/Vulnerabilities/ and
<link xlink:href="https://github.com/NixOS/nixpkgs/search?utf8=%E2%9C%93&amp;q=vulnerabilities&amp;type=Issues">triage vulnerabilities in an issue</link>.
</para>
</listitem>
<listitem>
<para>
Create two Hydra jobsets: release-15.09 and release-15.09-small with <literal>stableBranch</literal> set to false
</para>
</listitem>
<listitem>
<para>
Edit changelog at
<literal>nixos/doc/manual/release-notes/rl-1509.xml</literal>
(double check desktop versions are noted)
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
Get all new NixOS modules
<literal>git diff release-14.12..release-15.09 nixos/modules/module-list.nix|grep ^+</literal>
</para>
</listitem>
<listitem>
<para>
Note systemd, kernel, glibc and Nix upgrades.
</para>
</listitem>
</itemizedlist>
</listitem>
</itemizedlist>
</section>
<section xml:id="before-the-final-release">
<title>Before the final release</title>
<itemizedlist spacing="compact">
<listitem>
<para>
Release Nix (currently only Eelco Dolstra can do that).
<link xlink:href="https://github.com/NixOS/nixpkgs/commit/53710c752a85f00658882531bc90a23a3d1287e4">
Make sure fallback is updated.
</link>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/commit/40fd9ae3ac8048758abdcfc7d28a78b5f22fe97e">
Update README.md with new stable NixOS version information.
</link>
</para>
</listitem>
<listitem>
<para>
Change <literal>stableBranch</literal> to true and wait for channel to update.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="at-final-release-time">
<title>At final release time</title>
<itemizedlist spacing="compact">
<listitem>
<para>
<literal>git tag -s -a -m &quot;Release 15.09&quot; 15.09</literal>
</para>
</listitem>
<listitem>
<para>
Update http://nixos.org/nixos/download.html and http://nixos.org/nixos/manual in https://github.com/NixOS/nixos-org-configurations
</para>
</listitem>
<listitem>
<para>
Get number of commits for the release:
<literal>git log release-14.04..release-14.12 --format=%an|wc -l</literal>
</para>
</listitem>
<listitem>
<para>
Commits by contributor:
<literal>git log release-14.04..release-14.12 --format=%an|sort|uniq -c|sort -rn</literal>
</para>
</listitem>
<listitem>
<para>
Send an email to nix-dev to announce the release with the above information. Best to check how the previous email was formulated
to see what needs to be included.
</para>
</listitem>
</itemizedlist>
</section>
</section>
<section xml:id="release-schedule">
<title>Release schedule</title>
<informaltable>
<tgroup cols="2">
<colspec align="left" />
<colspec align="left" />
<thead>
<row>
<entry>
Date
</entry>
<entry>
Event
</entry>
</row>
</thead>
<tbody>
<row>
<entry>
2016-07-25
</entry>
<entry>
Send email to nix-dev about upcoming branch-off
</entry>
</row>
<row>
<entry>
2016-09-01
</entry>
<entry>
<literal>release-16.09</literal> branch and corresponding jobsets are created,
change freeze
</entry>
</row>
<row>
<entry>
2016-09-30
</entry>
<entry>
NixOS 16.09 released
</entry>
</row>
</tbody>
</tgroup>
</informaltable>
</section>
</chapter>

View File

@ -0,0 +1,147 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-writing-documentation">
<title>Writing NixOS Documentation</title>
<para>
As NixOS grows, so too does the need for a catalogue and explanation
of its extensive functionality. Collecting pertinent information
from disparate sources and presenting it in an accessible style
would be a worthy contribution to the project.
</para>
<section>
<title>Building the Manual</title>
<para>
The DocBook sources of the <xref linkend="book-nixos-manual"/> are in the
<link xlink:href="https://github.com/NixOS/nixpkgs/tree/master/nixos/doc/manual"><filename>nixos/doc/manual</filename></link>
subdirectory of the Nixpkgs repository. If you make modifications to
the manual, it's important to build it before committing. You can do
that as follows:
<screen>nix-build nixos/release.nix -A manual.x86_64-linux</screen>
</para>
<para>
When this command successfully finishes, it will tell you where the
manual got generated. The HTML will be accessible through the
<filename>result</filename> symlink at
<filename>./result/share/doc/nixos/index.html</filename>.
</para>
</section>
<section>
<title>Editing DocBook XML</title>
<para>
For general information on how to write in DocBook, see
<link xlink:href="http://www.docbook.org/tdg5/en/html/docbook.html">
DocBook 5: The Definitive Guide</link>.
</para>
<para>
Emacs nXML Mode is very helpful for editing DocBook XML because it
validates the document as you write, and precisely locates
errors. To use it, see <xref linkend="sec-emacs-docbook-xml"/>.
</para>
<para>
<link xlink:href="http://pandoc.org">Pandoc</link> can generate
DocBook XML from a multitude of formats, which makes a good starting
point.
<example xml:id="ex-pandoc-xml-conv">
<title>Pandoc invocation to convert GitHub-Flavoured MarkDown to DocBook 5 XML</title>
<screen>pandoc -f markdown_github -t docbook5 docs.md -o my-section.xml</screen>
</example>
Pandoc can also quickly convert a single
<filename>section.xml</filename> to HTML, which is helpful when
drafting.
</para>
<para>
Sometimes writing valid DocBook is simply too difficult. In this
case, submit your documentation updates in a <link
xlink:href="https://github.com/NixOS/nixpkgs/issues/new">GitHub
Issue</link> and someone will handle the conversion to XML for you.
</para>
</section>
<section>
<title>Creating a Topic</title>
<para>
You can use an existing topic as a basis for the new topic or create a topic from scratch.
</para>
<para>
Keep the following guidelines in mind when you create and add a topic:
<itemizedlist>
<listitem><para>
The NixOS <link xlink:href="http://www.docbook.org/tdg5/en/html/book.html"><tag>book</tag></link>
element is in <filename>nixos/doc/manual/manual.xml</filename>.
It includes several
<link xlink:href="http://www.docbook.org/tdg5/en/html/book.html"><tag>part</tag>s</link>
which are in subdirectories.
</para></listitem>
<listitem><para>
Store the topic file in the same directory as the <tag>part</tag>
to which it belongs. If your topic is about configuring a NixOS
module, then the XML file can be stored alongside the module
definition <filename>nix</filename> file.
</para></listitem>
<listitem><para>
If you include multiple words in the file name, separate the words
with a dash. For example: <filename>ipv6-config.xml</filename>.
</para></listitem>
<listitem><para>
Make sure that the <tag>xml:id</tag> value is unique. You can use
abbreviations if the ID is too long. For example:
<varname>nixos-config</varname>.
</para></listitem>
<listitem><para>
Determine whether your topic is a chapter or a section. If you are
unsure, open an existing topic file and check whether the main
element is chapter or section.
</para></listitem>
</itemizedlist>
</para>
</section>
<section>
<title>Adding a Topic to the Book</title>
<para>
Open the parent XML file and add an <varname>xi:include</varname>
element to the list of chapters with the file name of the topic that
you created. If you created a <tag>section</tag>, you add the file to
the <tag>chapter</tag> file. If you created a <tag>chapter</tag>, you
add the file to the <tag>part</tag> file.
</para>
<para>
If the topic is about configuring a NixOS module, it can be
automatically included in the manual by using the
<varname>meta.doc</varname> attribute. See <xref
linkend="sec-meta-attributes"/> for an explanation.
</para>
</section>
</chapter>

View File

@ -176,6 +176,7 @@ in {
</example>
<xi:include href="option-declarations.xml" />
<xi:include href="option-types.xml" />
<xi:include href="option-def.xml" />
<xi:include href="meta-attributes.xml" />

View File

@ -32,7 +32,7 @@ running NixOS system through several other means:
<listitem>
<para>Using AMIs for Amazons EC2. To find one for your region
and instance type, please refer to the <link
xlink:href="https://github.com/NixOS/nixops/blob/master/nix/ec2-amis.nix">list
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/virtualisation/ec2-amis.nix">list
of most recent AMIs</link>.</para>
</listitem>
<listitem>

View File

@ -101,6 +101,11 @@ channel by running
which is equivalent to the more verbose <literal>nix-channel --update
nixos; nixos-rebuild switch</literal>.</para>
<note><para>Channels are set per user. This means that running <literal>
nix-channel --add</literal> as a non-root user (or without sudo) will not
affect configuration in <literal>/etc/nixos/configuration.nix</literal>
</para></note>
<warning><para>It is generally safe to switch back and forth between
channels. The only exception is that a newer NixOS may also have a
newer Nix version, which may involve an upgrade of Nixs database

View File

@ -68,7 +68,7 @@ desired operation. It must be one of the following:
<listitem>
<para>Build and activate the new configuration, and make it the
boot default. That is, the configuration is added to the GRUB
boot menu as the default meny entry, so that subsequent reboots
boot menu as the default menu entry, so that subsequent reboots
will boot the system into the new configuration. Previous
configurations activated with <command>nixos-rebuild
switch</command> or <command>nixos-rebuild boot</command> remain

View File

@ -9,6 +9,7 @@
<para>This section lists the release notes for each stable version of NixOS
and current unstable revision.</para>
<xi:include href="rl-1703.xml" />
<xi:include href="rl-1609.xml" />
<xi:include href="rl-1603.xml" />
<xi:include href="rl-1509.xml" />

View File

@ -471,7 +471,7 @@ in
<listitem> <para>
A newly packaged TeX Live 2015 is provided in <literal>pkgs.texlive</literal>,
split into 6500 nix packages. For basic user documentation see
<link xlink:href="https://github.com/NixOS/nixpkgs/blob/release-15.09/pkgs/tools/typesetting/tex/texlive-new/default.nix#L1"
<link xlink:href="https://github.com/NixOS/nixpkgs/blob/release-15.09/pkgs/tools/typesetting/tex/texlive/default.nix#L1"
>the source</link>.
Beware of <link xlink:href="https://github.com/NixOS/nixpkgs/issues/9757"
>an issue</link> when installing a too large package set.

View File

@ -4,7 +4,7 @@
version="5.0"
xml:id="sec-release-16.09">
<title>Release 16.09 (“Flounder”, 2016/09/??)</title>
<title>Release 16.09 (“Flounder”, 2016/09/30)</title>
<para>In addition to numerous new and upgraded packages, this release
has the following highlights: </para>
@ -12,22 +12,45 @@ has the following highlights: </para>
<itemizedlist>
<listitem>
<para>PXE "netboot" media has landed in <link xlink:href="https://github.com/NixOS/nixpkgs/pull/14740" />.
See <xref linkend="sec-booting-from-pxe" /> for documentation.</para>
<para>Many NixOS configurations and Nix packages now use
significantly less disk space, thanks to the <link
xlink:href="https://github.com/NixOS/nixpkgs/issues/7117">extensive
work on closure size reduction</link>. For example, the closure
size of a minimal NixOS container went down from ~424 MiB in 16.03
to ~212 MiB in 16.09, while the closure size of Firefox went from
~651 MiB to ~259 MiB.</para>
</listitem>
<listitem>
<para>Xorg-server-1.18.*. If you choose <literal>"ati_unfree"</literal> driver,
1.17.* is still used due to ABI incompatibility.</para>
<para>To improve security, packages are now <link
xlink:href="https://github.com/NixOS/nixpkgs/pull/12895">built
using various hardening features</link>. See the Nixpkgs manual
for more information.</para>
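<para>
If a package does not build with one of the hardening flags, it can opt
out per derivation; a sketch (the package name and flag are chosen only
for illustration):
<programlisting>
stdenv.mkDerivation {
  name = "example-1.0";
  # disable only the "format" hardening flag for this build
  hardeningDisable = [ "format" ];
  # ...
}
</programlisting>
</para>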
</listitem>
<listitem>
<para>Support for PXE netboot. See <xref
linkend="sec-booting-from-pxe" /> for documentation.</para>
</listitem>
<listitem>
<para>X.org server 1.18. If you use the
<literal>ati_unfree</literal> driver, 1.17 is still used due to an
ABI incompatibility.</para>
</listitem>
<listitem>
<para>This release is based on Glibc 2.24, GCC 5.4.0 and systemd
231. The default Linux kernel remains 4.4.</para>
</listitem>
</itemizedlist>
<para>The following new services were added since the last release:</para>
<itemizedlist>
<listitem><para><literal>(this will get automatically generated at release time)</literal></para></listitem>
</itemizedlist>
<itemizedlist>
<listitem><para><literal>(this will get automatically generated at release time)</literal></para></listitem>
</itemizedlist>
<para>When upgrading from a previous release, please be aware of the
following incompatible changes:</para>
@ -36,7 +59,8 @@ following incompatible changes:</para>
<listitem>
<para>A large number of packages have been converted to use the multiple outputs feature
of Nix to greatly reduce the amount of required disk space. This may require changes
of Nix to greatly reduce the amount of required disk space, as
mentioned above. This may require changes
to any custom packages to make them build again; see the relevant chapter in the
Nixpkgs manual for more information. (Additional caveat to packagers: some packaging conventions
related to multiple-output packages
@ -45,6 +69,25 @@ following incompatible changes:</para>
</para>
</listitem>
<listitem>
<para>Previous versions of Nixpkgs had support for all versions of the LTS
Haskell package set. That support has been dropped. The previously provided
<literal>haskell.packages.lts-x_y</literal> package sets still exist in
name to avoid breaking user code, but these package sets don't actually
contain the versions mandated by the corresponding LTS release. Instead,
our package set is loosely based on the latest available LTS release, i.e.
LTS 7.x at the time of this writing. New releases of NixOS and Nixpkgs will
drop those old names entirely. <link
xlink:href="http://lists.science.uu.nl/pipermail/nix-dev/2016-June/020585.html">The
motivation for this change</link> has been discussed at length on the
<literal>nix-dev</literal> mailing list and in <link
xlink:href="https://github.com/NixOS/nixpkgs/issues/14897">Github issue
#14897</link>. Development strategies for Haskell hackers who want to rely
on Nix and NixOS have been described in <link
xlink:href="http://lists.science.uu.nl/pipermail/nix-dev/2016-June/020642.html">another
nix-dev article</link>.</para>
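<para>
For example, instead of picking packages from a pinned
<literal>lts-x_y</literal> set, a development environment can be built
from the current package set; a sketch (the chosen libraries are
illustrative):
<programlisting>
environment.systemPackages = [
  (pkgs.haskellPackages.ghcWithPackages (ps: [ ps.lens ps.mtl ]))
];
</programlisting>
</para>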
</listitem>
<listitem>
<para>Shell aliases for systemd sub-commands
<link xlink:href="https://github.com/NixOS/nixpkgs/pull/15598">were dropped</link>:
@ -58,16 +101,20 @@ following incompatible changes:</para>
</listitem>
<listitem>
<para>/var/setuid-wrappers/
<link xlink:href="https://github.com/NixOS/nixpkgs/pull/18124">is now a symlink so
it can be atomically updated</link>
and it's not mounted as tmpfs anymore since setuid binaries are located on /run/ as tmpfs.
<para>
<literal>/var/empty</literal> is now immutable. The activation script runs <command>chattr +i</command>
to forbid any modifications inside the folder. See <link xlink:href="https://github.com/NixOS/nixpkgs/pull/18365">
the pull request</link> for what bugs this caused.
</para>
</listitem>
<listitem>
<para>Gitlab's maintainence script gitlab-runner was removed and split up into the more clearer
gitlab-run and gitlab-rake scripts because gitlab-runner is a component of Gitlab CI.</para>
<para>Gitlab's maintenance script
<command>gitlab-runner</command> was removed and split up into the
clearer <command>gitlab-run</command> and
<command>gitlab-rake</command> scripts, because
<command>gitlab-runner</command> is a component of Gitlab
CI.</para>
</listitem>
<listitem>
@ -80,16 +127,43 @@ following incompatible changes:</para>
<listitem>
<para><literal>fonts.fontconfig.ultimate.rendering</literal> was removed
because our presets were obsolete for some time. New presets are hardcoded
into freetype; one selects a preset via <literal>fonts.fontconfig.ultimate.preset</literal>.
into FreeType; you can select a preset via <literal>fonts.fontconfig.ultimate.preset</literal>.
You can customize those presets via ordinary environment variables, using
<literal>environment.variables</literal>.</para>
</listitem>
<listitem>
<para>The <literal>audit</literal> service is no longer enabled by default.
Use <literal>security.audit.enable = true;</literal> to explicitly enable it.</para>
Use <literal>security.audit.enable = true</literal> to explicitly enable it.</para>
</listitem>
<listitem>
<para>
<literal>pkgs.linuxPackages.virtualbox</literal> now contains only the
kernel modules instead of the VirtualBox user space binaries.
If you want to reference the user space binaries, you have to use the new
<literal>pkgs.virtualbox</literal> instead.
</para>
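<para>
For example, to make the user space tools available system-wide (a
minimal sketch):
<programlisting>
environment.systemPackages = [ pkgs.virtualbox ];
</programlisting>
</para>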
</listitem>
<listitem>
<para><literal>goPackages</literal> was replaced with separate Go
applications in appropriate <literal>nixpkgs</literal>
categories. Each Go package uses its own dependency set. There's
also a new <literal>go2nix</literal> tool introduced to generate a
Go package definition from its Go source automatically.</para>
</listitem>
<listitem>
<para><literal>services.mongodb.extraConfig</literal> configuration format
was changed to YAML.</para>
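<para>
A sketch of the new format (the MongoDB keys shown are only an example;
consult the MongoDB documentation for valid settings):
<programlisting>
services.mongodb.extraConfig = ''
  systemLog:
    quiet: true
'';
</programlisting>
</para>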
</listitem>
<listitem>
<para>
PHP has been upgraded to 7.0.
</para>
</listitem>
</itemizedlist>
@ -105,6 +179,60 @@ following incompatible changes:</para>
functionality. See <xref linkend="sec-grsecurity" /> for documentation
</para></listitem>
<listitem><para>Special filesystems, like <literal>/proc</literal>,
<literal>/run</literal> and others, now have the same mount options
as recommended by systemd and are unified across different places in
NixOS. Mount options are updated during <command>nixos-rebuild
switch</command> if possible. One benefit from this is improved
security — most such filesystems are now mounted with
<literal>noexec</literal>, <literal>nodev</literal> and/or
<literal>nosuid</literal> options.</para></listitem>
<listitem><para>The reverse path filter was interfering with DHCPv4 server
operation in the past. An exception for DHCPv4 and a new option to log
packets that were dropped due to the reverse path filter was added
(<literal>networking.firewall.logReversePathDrops</literal>) for easier
debugging.</para></listitem>
<listitem><para>Containers configuration within
<literal>containers.&lt;name&gt;.config</literal> is <link
xlink:href="https://github.com/NixOS/nixpkgs/pull/17365">now
properly typed and checked</link>. In particular, partial
configurations are merged correctly.</para></listitem>
<listitem>
<para>The directory containing setuid wrapper programs,
<filename>/var/setuid-wrappers</filename>, <link
xlink:href="https://github.com/NixOS/nixpkgs/pull/18124">is now
updated atomically to prevent failures if the switch to a new
configuration is interrupted.</link></para>
</listitem>
<listitem>
<para><literal>services.xserver.startGnuPGAgent</literal>
has been removed due to GnuPG 2.1.x bump. See <link
xlink:href="https://github.com/NixOS/nixpkgs/commit/5391882ebd781149e213e8817fba6ac3c503740c">
how to achieve similar behavior</link>. You might need to
<literal>pkill gpg-agent</literal> after the upgrade
to prevent a stale agent being in the way.
</para>
</listitem>
<listitem><para>
<link xlink:href="https://github.com/NixOS/nixpkgs/commit/e561edc322d275c3687fec431935095cfc717147">
Declarative users could share the same uid due to a bug in
the script handling conflict resolution.
</link>
</para></listitem>
<listitem><para>
Gummi Boot has been replaced by systemd-boot.
</para></listitem>
<listitem><para>
The Hydra package and NixOS module were added for convenience.
</para></listitem>
</itemizedlist>

View File

@ -0,0 +1,106 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-17.03">
<title>Release 17.03 (“XXX”, 2017/03/??)</title>
<para>In addition to numerous new and upgraded packages, this release
has the following highlights: </para>
<itemizedlist>
<listitem>
<para></para>
</listitem>
</itemizedlist>
<para>The following new services were added since the last release:</para>
<itemizedlist>
<listitem>
<para></para>
</listitem>
</itemizedlist>
<para>When upgrading from a previous release, please be aware of the
following incompatible changes:</para>
<itemizedlist>
<listitem>
<para>
The <literal>gnome</literal> alias has been removed along with
<literal>gtk</literal>, <literal>gtkmm</literal> and several others.
Now you need to use versioned attributes, like <literal>gnome3</literal>.
</para>
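<para>
For example, a configuration that referenced <literal>pkgs.gtk</literal>
now has to use the versioned attribute (a minimal sketch):
<programlisting>
environment.systemPackages = [ pkgs.gtk3 ];  # previously pkgs.gtk
</programlisting>
</para>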
</listitem>
<listitem>
<para>
The attribute name of the Radicale daemon has been changed from
<literal>pythonPackages.radicale</literal> to
<literal>radicale</literal>.
</para>
</listitem>
<listitem>
<para>
The Yama LSM is now enabled by default in the kernel,
which prevents ptracing non-child processes.
This means you will not be able to attach gdb to an existing process,
but will need to start that process from gdb (so it is a child).
</para>
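<para>
If you rely on attaching to already running processes, the old behaviour
can presumably be restored with a sysctl setting along these lines:
<programlisting>
boot.kernel.sysctl."kernel.yama.ptrace_scope" = 0;
</programlisting>
</para>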
</listitem>
<listitem>
<para>
The <literal>stripHash</literal> bash function in <literal>stdenv</literal>
changed according to its documentation; it now outputs the stripped name to
<literal>stdout</literal> instead of putting it in the variable
<literal>strippedName</literal>.
</para>
</listitem>
<listitem>
<para>PHP now scans for extra configuration .ini files in /etc/php.d
instead of /etc. This prevents accidentally loading non-PHP .ini files
that may be in /etc.
</para>
</listitem>
<listitem>
<para>
The Parsoid service now uses the YAML configuration format.
<literal>services.parsoid.interwikis</literal> is now called
<literal>services.parsoid.wikis</literal> and is a list of either API URLs
or attribute sets as specified in parsoid's documentation.
</para>
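<para>
A minimal example of the new option (the URL is illustrative):
<programlisting>
services.parsoid.wikis = [ "https://wiki.example.org/w/api.php" ];
</programlisting>
</para>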
</listitem>
<listitem>
<para>
<literal>Ntpd</literal> was replaced by
<literal>systemd-timesyncd</literal> as the default service to synchronize
system time with a remote NTP server. The old behavior can be restored by
setting <literal>services.ntp.enable</literal> to <literal>true</literal>.
Upstream time servers for all NTP implementations are now configured using
<literal>networking.timeServers</literal>.
</para>
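<para>
For example, to keep the classic ntpd and point all implementations at
custom servers (the server names are illustrative):
<programlisting>
services.ntp.enable = true;
networking.timeServers = [ "0.pool.ntp.org" "1.pool.ntp.org" ];
</programlisting>
</para>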
</listitem>
</itemizedlist>
<para>Other notable improvements:</para>
<itemizedlist>
<listitem>
<para>The module type system has a new extensible option types feature that
allows extending certain types, such as enum, through multiple option
declarations of the same option across multiple modules.
</para>
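<para>
A sketch of what this allows (the option name is hypothetical): two
modules may declare the same <literal>enum</literal> option and the sets
of accepted values are merged:
<programlisting>
# module A
options.services.example.mode = lib.mkOption {
  type = lib.types.enum [ "simple" ];
};

# module B
options.services.example.mode = lib.mkOption {
  type = lib.types.enum [ "advanced" ];
};

# the merged option now accepts either "simple" or "advanced"
</programlisting>
</para>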
</listitem>
</itemizedlist>
</section>

View File

@ -9,6 +9,8 @@ rec {
inherit pkgs;
qemu = pkgs.qemu_test;
# Build a virtual network from an attribute set `{ machine1 =
# config1; ... machineN = configN; }', where `machineX' is the
@ -27,6 +29,7 @@ rec {
[ ../modules/virtualisation/qemu-vm.nix
../modules/testing/test-instrumentation.nix # !!! should only get added for automated test runs
{ key = "no-manual"; services.nixosManual.enable = false; }
{ key = "qemu"; system.build.qemu = qemu; }
] ++ optional minimal ../modules/testing/minimal-kernel.nix;
extraArgs = { inherit nodes; };
};

View File

@ -1,4 +1,4 @@
# From an end-user configuration file (`configuration'), build a NixOS
# From an end-user configuration file (`configuration.nix'), build a NixOS
# configuration object (`config') from which we can retrieve option
# values.

View File

@ -27,6 +27,10 @@
, name ? "nixos-disk-image"
# This prevents errors while checking nix-store validity, see
# https://github.com/NixOS/nix/issues/1134
, fixValidity ? true
, format ? "raw"
}:
@ -61,9 +65,6 @@ pkgs.vmTools.runInLinuxVM (
# Create an empty filesystem and mount it.
mkfs.${fsType} -L nixos $rootDisk
${optionalString (fsType == "ext4") ''
tune2fs -c 0 -i 0 $rootDisk
''}
mkdir /mnt
mount $rootDisk /mnt
@ -71,9 +72,11 @@ pkgs.vmTools.runInLinuxVM (
printRegistration=1 perl ${pkgs.pathsFromGraph} /tmp/xchg/closure | \
${config.nix.package.out}/bin/nix-store --load-db --option build-users-group ""
# Add missing size/hash fields to the database. FIXME:
# exportReferencesGraph should provide these directly.
${config.nix.package.out}/bin/nix-store --verify --check-contents --option build-users-group ""
${if fixValidity then ''
# Add missing size/hash fields to the database. FIXME:
# exportReferencesGraph should provide these directly.
${config.nix.package.out}/bin/nix-store --verify --check-contents --option build-users-group ""
'' else ""}
# In case the bootloader tries to write to /dev/sda…
ln -s vda /dev/xvda
@ -97,7 +100,9 @@ pkgs.vmTools.runInLinuxVM (
umount /mnt
# Do a fsck to make sure resize2fs works.
fsck.${fsType} -f -y $rootDisk
# Make sure resize2fs works
${optionalString (fsType == "ext4") ''
tune2fs -c 0 -i 0 $rootDisk
''}
''
)

View File

@ -25,6 +25,6 @@ stdenv.mkDerivation {
# Generate the squashfs image.
mksquashfs nix-path-registration $storePaths $out \
-keep-as-directory -all-root
-keep-as-directory -all-root -b 1048576 -comp xz -Xdict-size 100%
'';
}

View File

@ -52,9 +52,10 @@ $extraCommands
mkdir -p $out/tarball
tar cvJf $out/tarball/$fileName.tar.xz * $extraArgs
rm env-vars
tar --sort=name --mtime='@1' --owner=0 --group=0 --numeric-owner -cvJf $out/tarball/$fileName.tar.xz * $extraArgs
mkdir -p $out/nix-support
echo $system > $out/nix-support/system
echo "file system-tarball $out/tarball/$fileName.tar.xz" > $out/nix-support/hydra-build-products

View File

@ -504,6 +504,31 @@ sub screenshot {
}, { image => $name } );
}
# Get the text of TTY<n>
sub getTTYText {
my ($self, $tty) = @_;
my ($status, $out) = $self->execute("fold -w 80 /dev/vcs${tty}");
return $out;
}
# Wait until TTY<n>'s text matches a particular regular expression
sub waitUntilTTYMatches {
my ($self, $tty, $regexp) = @_;
$self->nest("waiting for $regexp to appear on tty $tty", sub {
retry sub {
return 1 if $self->getTTYText($tty) =~ /$regexp/;
}
});
}
# Debugging: Dump the contents of the TTY<n>
sub dumpTTYContents {
my ($self, $tty) = @_;
$self->execute("fold -w 80 /dev/vcs${tty} | systemd-cat");
}
# Take a screenshot and return the result as text using optical character
# recognition.
@ -586,11 +611,37 @@ sub copyFileFromHost {
}
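# Map printable ASCII characters to QEMU "sendkey" names; shifted
# characters are written as "shift-" plus the scancode of the unshifted key.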
my %charToKey = (
'!' => "shift-0x02",
'@' => "shift-0x03",
'#' => "shift-0x04",
'$' => "shift-0x05",
'%' => "shift-0x06",
'^' => "shift-0x07",
'&' => "shift-0x08",
'*' => "shift-0x09",
'(' => "shift-0x0A",
')' => "shift-0x0B",
'-' => "0x0C", '_' => "shift-0x0C",
'=' => "0x0D", '+' => "shift-0x0D",
'[' => "0x1A", '{' => "shift-0x1A",
']' => "0x1B", '}' => "shift-0x1B",
';' => "0x27", ':' => "shift-0x27",
'\'' => "0x28", '"' => "shift-0x28",
'`' => "0x29", '~' => "shift-0x29",
'\\' => "0x2B", '|' => "shift-0x2B",
',' => "0x33", '<' => "shift-0x33",
'.' => "0x34", '>' => "shift-0x34",
'/' => "0x35", '?' => "shift-0x35",
' ' => "spc",
"\n" => "ret",
);
sub sendKeys {
my ($self, @keys) = @_;
foreach my $key (@keys) {
$key = "spc" if $key eq " ";
$key = "ret" if $key eq "\n";
$key = $charToKey{$key} if exists $charToKey{$key};
$self->sendMonitorCommand("sendkey $key");
}
}

View File

@ -8,6 +8,7 @@ use IO::Pty;
use Logger;
use Cwd;
use POSIX qw(_exit dup2);
use Time::HiRes qw(clock_gettime CLOCK_MONOTONIC);
$SIG{PIPE} = 'IGNORE'; # because Unix domain sockets may die unexpectedly
@ -179,7 +180,12 @@ END {
$log->close();
}
my $now1 = clock_gettime(CLOCK_MONOTONIC);
runTests;
my $now2 = clock_gettime(CLOCK_MONOTONIC);
printf STDERR "test script finished in %.2fs\n", $now2 - $now1;
exit ($nrSucceeded < $nrTests ? 1 : 0);

View File

@ -29,7 +29,7 @@ rec {
cp ${./test-driver/Logger.pm} $libDir/Logger.pm
wrapProgram $out/bin/nixos-test-driver \
--prefix PATH : "${lib.makeBinPath [ qemu_kvm vde2 netpbm coreutils ]}" \
--prefix PATH : "${lib.makeBinPath [ qemu vde2 netpbm coreutils ]}" \
--prefix PERL5LIB : "${with perlPackages; lib.makePerlPath [ TermReadLineGnu XMLWriter IOTty FileSlurp ]}:$out/lib/perl5/site_perl"
'';
};
@ -93,7 +93,7 @@ rec {
vms = map (m: m.config.system.build.vm) (lib.attrValues nodes);
ocrProg = tesseract.override { enableLanguages = [ "eng" ]; };
ocrProg = tesseract;
# Generate convenience wrappers for running the test driver
# interactively with the specified network, and for starting the
@ -157,9 +157,7 @@ rec {
${coreutils}/bin/mkdir -p $TMPDIR
cd $TMPDIR
$origBuilder $origArgs
exit $?
exec $origBuilder $origArgs
'';
testScript = ''
@ -172,9 +170,22 @@ rec {
'';
vmRunCommand = writeText "vm-run" ''
xchg=vm-state-client/xchg
${coreutils}/bin/mkdir $out
${coreutils}/bin/mkdir -p vm-state-client/xchg
export > vm-state-client/xchg/saved-env
${coreutils}/bin/mkdir -p $xchg
for i in $passAsFile; do
i2=''${i}Path
_basename=$(${coreutils}/bin/basename ''${!i2})
${coreutils}/bin/cp ''${!i2} $xchg/$_basename
eval $i2=/tmp/xchg/$_basename
${coreutils}/bin/ls -la $xchg
done
unset i i2 _basename
export | ${gnugrep}/bin/grep -v '^xchg=' > $xchg/saved-env
unset xchg
export tests='${testScript}'
${testDriver}/bin/nixos-test-driver ${vm.config.system.build.vm}/bin/run-*-vm
''; # */

View File

@ -1,4 +1,8 @@
#! /bin/sh -e
#!/usr/bin/env nix-shell
#! nix-shell -i bash -p qemu ec2_ami_tools jq ec2_api_tools awscli
# To start with do: nix-shell -p awscli --run "aws configure"
set -o pipefail
#set -x
@ -15,7 +19,7 @@ rm -f ec2-amis.nix
types="hvm pv"
stores="ebs s3"
regions="eu-west-1 eu-central-1 us-east-1 us-west-1 us-west-2 ap-southeast-1 ap-southeast-2 ap-northeast-1 ap-northeast-2 sa-east-1 ap-south-1"
regions="eu-west-1 eu-central-1 us-east-1 us-east-2 us-west-1 us-west-2 ap-southeast-1 ap-southeast-2 ap-northeast-1 ap-northeast-2 sa-east-1 ap-south-1"
for type in $types; do
link=$stateDir/$type
@ -57,7 +61,7 @@ for type in $types; do
ami=$(aws ec2 copy-image \
--region "$region" \
--source-region "$prevRegion" --source-image-id "$prevAmi" \
--name "$name" --description "$description" | json -q .ImageId)
--name "$name" --description "$description" | jq -r '.ImageId')
if [ "$ami" = null ]; then break; fi
else

View File

@ -17,12 +17,10 @@ with lib;
where tools such as <command>gdb</command> can find them.
If you need debug symbols for a package that doesn't
provide them by default, you can enable them as follows:
<!-- FIXME: ugly, see #10721 -->
<programlisting>
nixpkgs.config.packageOverrides = pkgs: {
hello = overrideDerivation pkgs.hello (attrs: {
outputs = attrs.outputs or ["out"] ++ ["debug"];
buildInputs = attrs.buildInputs ++ [&lt;nixpkgs/pkgs/build-support/setup-hooks/separate-debug-info.sh>];
hello = pkgs.hello.overrideAttrs (oldAttrs: {
separateDebugInfo = true;
});
};
</programlisting>

View File

@ -301,9 +301,7 @@ in
};
style = mkOption {
type = types.str // {
check = flip elem ["none" "slight" "medium" "full"];
};
type = types.enum ["none" "slight" "medium" "full"];
default = "full";
description = ''
TrueType hinting style, one of <literal>none</literal>,
@ -329,9 +327,7 @@ in
default = "rgb";
type = types.enum ["rgb" "bgr" "vrgb" "vbgr" "none"];
description = ''
Subpixel order, one of <literal>none</literal>,
<literal>rgb</literal>, <literal>bgr</literal>,
<literal>vrgb</literal>, or <literal>vbgr</literal>.
Subpixel order.
'';
};
@ -339,9 +335,7 @@ in
default = "default";
type = types.enum ["none" "default" "light" "legacy"];
description = ''
FreeType LCD filter, one of <literal>none</literal>,
<literal>default</literal>, <literal>light</literal>, or
<literal>legacy</literal>.
FreeType LCD filter.
'';
};

View File

@ -11,17 +11,27 @@ with lib;
# TODO: find another name for it.
fonts = mkOption {
type = types.listOf types.path;
default = [];
example = literalExample "[ pkgs.dejavu_fonts ]";
description = "List of primary font paths.";
};
enableDefaultFonts = mkOption {
type = types.bool;
default = false;
description = ''
Enable a basic set of fonts providing several font styles
and families and reasonable coverage of Unicode.
'';
};
};
};
config = {
fonts.fonts =
fonts.fonts = mkIf config.fonts.enableDefaultFonts
[
pkgs.xorg.fontbhlucidatypewriter100dpi
pkgs.xorg.fontbhlucidatypewriter75dpi

View File

@ -7,11 +7,11 @@ with lib;
gnu = mkOption {
type = types.bool;
default = false;
description =
'' When enabled, GNU software is chosen by default whenever a there is
a choice between GNU and non-GNU software (e.g., GNU lsh
vs. OpenSSH).
'';
description = ''
When enabled, GNU software is chosen by default whenever there is
a choice between GNU and non-GNU software (e.g., GNU lsh
vs. OpenSSH).
'';
};
};

View File

@ -44,8 +44,9 @@ in
consolePackages = mkOption {
type = types.listOf types.package;
default = with pkgs.kbdKeymaps; [ dvp neo ];
defaultText = ''with pkgs.kbdKeymaps; [ dvp neo ]'';
description = ''
List of additional packages that provide console fonts, keymaps and
List of additional packages that provide console fonts, keymaps and
other resources.
'';
};

View File

@ -29,6 +29,19 @@ in
'';
};
networking.hostConf = lib.mkOption {
type = types.lines;
default = "multi on";
example = ''
multi on
reorder on
trim lan
'';
description = ''
The contents of <filename>/etc/host.conf</filename>. See also <citerefentry><refentrytitle>host.conf</refentrytitle><manvolnum>5</manvolnum></citerefentry>.
'';
};
networking.dnsSingleRequest = lib.mkOption {
type = types.bool;
default = false;
@ -44,7 +57,7 @@ in
networking.dnsExtensionMechanism = lib.mkOption {
type = types.bool;
default = false;
default = true;
description = ''
Enable the <code>edns0</code> option in <filename>resolv.conf</filename>. With
that option set, <code>glibc</code> supports use of the extension mechanisms for
@ -71,6 +84,18 @@ in
'';
};
networking.timeServers = mkOption {
default = [
"0.nixos.pool.ntp.org"
"1.nixos.pool.ntp.org"
"2.nixos.pool.ntp.org"
"3.nixos.pool.ntp.org"
];
description = ''
The set of NTP servers from which to synchronise.
'';
};
networking.proxy = {
default = lib.mkOption {
@ -171,6 +196,9 @@ in
${cfg.extraHosts}
'';
# /etc/host.conf: resolver configuration file
"host.conf".text = cfg.hostConf;
# /etc/resolvconf.conf: Configuration for openresolv.
"resolvconf.conf".text =
''
@ -223,16 +251,16 @@ in
# Install the proxy environment variables
environment.sessionVariables = cfg.proxy.envVars;
# The ip-up target is started when we have IP connectivity. So
# services that depend on IP connectivity (like ntpd) should be
# pulled in by this target.
systemd.targets.ip-up.description = "Services Requiring IP Connectivity";
# The ip-up target is kept for backwards compatibility.
# New services should use systemd upstream targets:
# See https://www.freedesktop.org/wiki/Software/systemd/NetworkTarget/
systemd.targets.ip-up.description = "Services Requiring IP Connectivity (deprecated)";
# This is needed when /etc/resolv.conf is being overridden by networkd
# and other configurations. If the file is destroyed by an environment
# activation then it must be rebuilt so that applications which interface
# with /etc/resolv.conf directly don't break.
system.activationScripts.resolvconf = stringAfter [ "etc" "tmpfs" "var" ]
system.activationScripts.resolvconf = stringAfter [ "etc" "specialfs" "var" ]
''
# Systemd resolved controls its own resolv.conf
rm -f /run/resolvconf/interfaces/systemd

View File

@ -9,10 +9,28 @@ let
inherit (config.services.avahi) nssmdns;
inherit (config.services.samba) nsswins;
ldap = (config.users.ldap.enable && config.users.ldap.nsswitch);
sssd = config.services.sssd.enable;
in
hostArray = [ "files" "mymachines" ]
++ optionals nssmdns [ "mdns_minimal [!UNAVAIL=return]" ]
++ optionals nsswins [ "wins" ]
++ [ "dns" ]
++ optionals nssmdns [ "mdns" ]
++ ["myhostname" ];
{
passwdArray = [ "files" ]
++ optional sssd "sss"
++ optionals ldap [ "ldap" ]
++ [ "mymachines" ];
shadowArray = [ "files" ]
++ optional sssd "sss"
++ optionals ldap [ "ldap" ];
servicesArray = [ "files" ]
++ optional sssd "sss";
in {
options = {
# NSS modules. Hacky!
@ -39,24 +57,26 @@ in
# Name Service Switch configuration file. Required by the C
# library. !!! Factor out the mdns stuff. The avahi module
# should define an option used by this module.
environment.etc."nsswitch.conf".text =
''
passwd: files ${optionalString ldap "ldap"}
group: files ${optionalString ldap "ldap"}
shadow: files ${optionalString ldap "ldap"}
hosts: files ${optionalString nssmdns "mdns_minimal [NOTFOUND=return]"} dns ${optionalString nssmdns "mdns"} ${optionalString nsswins "wins"} myhostname mymachines
networks: files dns
ethers: files
services: files
protocols: files
'';
environment.etc."nsswitch.conf".text = ''
passwd: ${concatStringsSep " " passwdArray}
group: ${concatStringsSep " " passwdArray}
shadow: ${concatStringsSep " " shadowArray}
hosts: ${concatStringsSep " " hostArray}
networks: files
ethers: files
services: ${concatStringsSep " " servicesArray}
protocols: files
rpc: files
'';
# Systemd provides nss-myhostname to ensure that our hostname
# always resolves to a valid IP address. It returns all locally
# configured IP addresses, or ::1 and 127.0.0.2 as
# fallbacks. Systemd also provides nss-mymachines to return IP
# addresses of local containers.
system.nssModules = [ config.systemd.package ];
system.nssModules = [ config.systemd.package.out ];
};
}

View File

@ -69,7 +69,7 @@ in
config = mkIf cfg.enable {
# FIXME: Implement powersave governor for sandy bridge or later Intel CPUs
# Leftover for old setups, should be set by nixos-generate-config now
powerManagement.cpuFreqGovernor = mkDefault "ondemand";
systemd.targets.post-resume = {

View File

@ -212,6 +212,7 @@ in {
# Allow PulseAudio to get realtime priority using rtkit.
security.rtkit.enable = true;
systemd.packages = [ cfg.package ];
})
(mkIf hasZeroconf {
@ -227,31 +228,14 @@ in {
target = "pulse/default.pa";
source = myConfigFile;
};
systemd.user = {
services.pulseaudio = {
description = "PulseAudio Server";
# NixOS doesn't support "Also" so we bring it in manually
wantedBy = [ "default.target" ];
serviceConfig = {
Type = "notify";
ExecStart = binaryNoDaemon;
Restart = "on-failure";
RestartSec = "500ms";
};
environment = { DISPLAY = ":${toString config.services.xserver.display}"; };
restartIfChanged = true;
};
sockets.pulseaudio = {
description = "PulseAudio Socket";
wantedBy = [ "sockets.target" ];
socketConfig = {
Priority = 6;
Backlog = 5;
ListenStream = "%t/pulse/native";
};
};
};
})

View File

@ -41,7 +41,7 @@ in
strings. The latter is concatenated, interspersed with colon
characters.
'';
type = types.attrsOf (types.loeOf types.str);
type = with types; attrsOf (either str (listOf str));
apply = mapAttrs (n: v: if isList v then concatStringsSep ":" v else v);
};

View File

@ -23,7 +23,7 @@ in
strings. The latter is concatenated, interspersed with colon
characters.
'';
type = types.attrsOf (types.loeOf types.str);
type = with types; attrsOf (either str (listOf str));
apply = mapAttrs (n: v: if isList v then concatStringsSep ":" v else v);
};

View File

@ -34,10 +34,10 @@ let
config.programs.ssh.package
pkgs.perl
pkgs.procps
pkgs.rsync
pkgs.strace
pkgs.su
pkgs.time
pkgs.texinfoInteractive
pkgs.utillinux
pkgs.which # 88K size
];
@ -76,7 +76,7 @@ in
extraOutputsToInstall = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "doc" "info" "docdev" ];
example = [ "doc" "info" "devdoc" ];
description = "List of additional package outputs to be symlinked into <filename>/run/current-system/sw</filename>.";
};
@ -104,7 +104,6 @@ in
"/etc/xdg"
"/etc/gtk-2.0"
"/etc/gtk-3.0"
"/info"
"/lib" # FIXME: remove and update debug-info.nix
"/sbin"
"/share/applications"
@ -112,7 +111,6 @@ in
"/share/doc"
"/share/emacs"
"/share/icons"
"/share/info"
"/share/menus"
"/share/mime"
"/share/nano"
@ -120,6 +118,7 @@ in
"/share/terminfo"
"/share/themes"
"/share/vim-plugins"
"/share/vulkan"
];
system.path = pkgs.buildEnv {

View File

@ -37,14 +37,15 @@ in
environment.sessionVariables.TZDIR = "/etc/zoneinfo";
# This way services are restarted when tzdata changes.
systemd.globalEnvironment.TZDIR = tzdir;
environment.etc.localtime =
{ source = "${tzdir}/${config.time.timeZone}";
{ source = "/etc/zoneinfo/${config.time.timeZone}";
mode = "direct-symlink";
};
environment.etc.zoneinfo.source = "${pkgs.tzdata}/share/zoneinfo";
environment.etc.zoneinfo.source = tzdir;
};

View File

@ -131,13 +131,12 @@ let
};
subUidRanges = mkOption {
type = types.listOf types.optionSet;
type = with types; listOf (submodule subordinateUidRange);
default = [];
example = [
{ startUid = 1000; count = 1; }
{ startUid = 100001; count = 65534; }
];
options = [ subordinateUidRange ];
description = ''
Subordinate user ids that user is allowed to use.
They are set into <filename>/etc/subuid</filename> and are used
@ -146,13 +145,12 @@ let
};
subGidRanges = mkOption {
type = types.listOf types.optionSet;
type = with types; listOf (submodule subordinateGidRange);
default = [];
example = [
{ startGid = 100; count = 1; }
{ startGid = 1001; count = 999; }
];
options = [ subordinateGidRange ];
description = ''
Subordinate group ids that user is allowed to use.
They are set into <filename>/etc/subgid</filename> and are used
@ -310,32 +308,36 @@ let
};
subordinateUidRange = {
startUid = mkOption {
type = types.int;
description = ''
Start of the range of subordinate user ids that user is
allowed to use.
'';
};
count = mkOption {
type = types.int;
default = 1;
description = ''Count of subordinate user ids'';
options = {
startUid = mkOption {
type = types.int;
description = ''
Start of the range of subordinate user ids that user is
allowed to use.
'';
};
count = mkOption {
type = types.int;
default = 1;
description = ''Count of subordinate user ids'';
};
};
};
subordinateGidRange = {
startGid = mkOption {
type = types.int;
description = ''
Start of the range of subordinate group ids that user is
allowed to use.
'';
};
count = mkOption {
type = types.int;
default = 1;
description = ''Count of subordinate group ids'';
options = {
startGid = mkOption {
type = types.int;
description = ''
Start of the range of subordinate group ids that user is
allowed to use.
'';
};
count = mkOption {
type = types.int;
default = 1;
description = ''Count of subordinate group ids'';
};
};
};
@ -428,7 +430,7 @@ in {
users.users = mkOption {
default = {};
type = types.loaOf types.optionSet;
type = with types; loaOf (submodule userOpts);
example = {
alice = {
uid = 1234;
@ -444,7 +446,6 @@ in {
Additional user accounts to be created automatically by the system.
This can also be used to set options for root.
'';
options = [ userOpts ];
};
users.groups = mkOption {
@ -453,11 +454,10 @@ in {
{ students.gid = 1001;
hackers = { };
};
type = types.loaOf types.optionSet;
type = with types; loaOf (submodule groupOpts);
description = ''
Additional groups to be created automatically by the system.
'';
options = [ groupOpts ];
};
# FIXME: obsolete - will remove.

View File

@ -14,7 +14,7 @@ let
name = "mesa-drivers+txc-${p.mesa_drivers.version}";
paths =
[ p.mesa_drivers
p.mesa_noglu # mainly for libGL
p.mesa_drivers.out # mainly for libGL
(if cfg.s3tcSupport then p.libtxc_dxtn else p.libtxc_dxtn_s2tc)
];
};
@ -135,6 +135,12 @@ in
environment.sessionVariables.LD_LIBRARY_PATH =
[ "/run/opengl-driver/lib" "/run/opengl-driver-32/lib" ];
environment.extraInit = ''
export XDG_DATA_DIRS=$XDG_DATA_DIRS:/run/opengl-driver/share
'' + optionalString cfg.driSupport32Bit ''
export XDG_DATA_DIRS=$XDG_DATA_DIRS:/run/opengl-driver-32/share
'';
hardware.opengl.package = mkDefault (makePackage pkgs);
hardware.opengl.package32 = mkDefault (makePackage pkgs_i686);

View File

@ -0,0 +1,54 @@
# This module provides the proprietary AMDGPU-PRO drivers.
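# It is selected by adding "amdgpu-pro" to services.xserver.videoDrivers
# (see the `enabled` check below).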
{ config, lib, pkgs, pkgs_i686, ... }:
with lib;
let
drivers = config.services.xserver.videoDrivers;
enabled = elem "amdgpu-pro" drivers;
package = config.boot.kernelPackages.amdgpu-pro;
package32 = pkgs_i686.linuxPackages.amdgpu-pro.override { libsOnly = true; kernel = null; };
opengl = config.hardware.opengl;
in
{
config = mkIf enabled {
services.xserver.drivers = singleton
{ name = "amdgpu"; modules = [ package ]; libPath = [ package ]; };
hardware.opengl.package = package;
hardware.opengl.package32 = package32;
boot.extraModulePackages = [ package ];
boot.blacklistedKernelModules = [ "radeon" ];
hardware.firmware = [ package ];
system.activationScripts.setup-amdgpu-pro = ''
mkdir -p /run/lib
ln -sfn ${package}/lib ${package.libCompatDir}
'' + optionalString opengl.driSupport32Bit ''
ln -sfn ${package32}/lib ${package32.libCompatDir}
'';
environment.etc = {
"amd/amdrc".source = package + "/etc/amd/amdrc";
"amd/amdapfxx.blb".source = package + "/etc/amd/amdapfxx.blb";
"gbm/gbm.conf".source = package + "/etc/gbm/gbm.conf";
"OpenCL/vendors/amdocl64.icd".source = package + "/etc/OpenCL/vendors/amdocl64.icd";
} // optionalAttrs opengl.driSupport32Bit {
"OpenCL/vendors/amdocl32.icd".source = package32 + "/etc/OpenCL/vendors/amdocl32.icd";
};
};
}

View File

@ -13,6 +13,8 @@ let
useDisplayDevice = cfg.connectDisplay;
};
useBbswitch = cfg.pmMethod == "bbswitch";
primus = pkgs.primus.override {
inherit useNvidia;
};
@ -22,58 +24,69 @@ in
{
options = {
hardware.bumblebee.enable = mkOption {
default = false;
type = types.bool;
description = ''
Enable the bumblebee daemon to manage Optimus hybrid video cards.
This should power off secondary GPU until its use is requested
by running an application with optirun.
hardware.bumblebee = {
Only nvidia driver is supported so far.
'';
};
hardware.bumblebee.group = mkOption {
default = "wheel";
example = "video";
type = types.str;
description = ''Group for bumblebee socket'';
};
enable = mkOption {
default = false;
type = types.bool;
description = ''
Enable the bumblebee daemon to manage Optimus hybrid video cards.
This should power off secondary GPU until its use is requested
by running an application with optirun.
'';
};
hardware.bumblebee.connectDisplay = mkOption {
default = false;
type = types.bool;
description = ''
Set to true if you intend to connect your discrete card to a
monitor. This option will set up your Nvidia card for EDID
discovery and to turn on the monitor signal.
group = mkOption {
default = "wheel";
example = "video";
type = types.str;
description = ''Group for bumblebee socket'';
};
Only nvidia driver is supported so far.
'';
};
connectDisplay = mkOption {
default = false;
type = types.bool;
description = ''
Set to true if you intend to connect your discrete card to a
monitor. This option will set up your Nvidia card for EDID
discovery and to turn on the monitor signal.
Only nvidia driver is supported so far.
'';
};
driver = mkOption {
default = "nvidia";
type = types.enum [ "nvidia" "nouveau" ];
description = ''
Set driver used by bumblebeed. Supported are nouveau and nvidia.
'';
};
pmMethod = mkOption {
default = "auto";
type = types.enum [ "auto" "bbswitch" "nouveau" "switcheroo" "none" ];
description = ''
Set preferred power management method for unused card.
'';
};
hardware.bumblebee.driver = mkOption {
default = "nvidia";
type = types.enum [ "nvidia" "nouveau" ];
description = ''
Set driver used by bumblebeed. Supported are nouveau and nvidia.
'';
};
};
config = mkIf config.hardware.bumblebee.enable {
boot.blacklistedKernelModules = [ "nouveau" "nvidia" ];
boot.kernelModules = [ "bbswitch" ];
boot.extraModulePackages = [ kernel.bbswitch ] ++ optional useNvidia kernel.nvidia_x11;
config = mkIf cfg.enable {
boot.blacklistedKernelModules = [ "nvidia-drm" "nvidia" "nouveau" ];
boot.kernelModules = optional useBbswitch [ "bbswitch" ];
boot.extraModulePackages = optional useBbswitch kernel.bbswitch ++ optional useNvidia kernel.nvidia_x11;
environment.systemPackages = [ bumblebee primus ];
systemd.services.bumblebeed = {
description = "Bumblebee Hybrid Graphics Switcher";
wantedBy = [ "display-manager.service" ];
path = [ kernel.bbswitch bumblebee ];
wantedBy = [ "multi-user.target" ];
before = [ "display-manager.service" ];
serviceConfig = {
ExecStart = "${bumblebee}/bin/bumblebeed --use-syslog -g ${cfg.group} --driver ${cfg.driver}";
ExecStart = "${bumblebee}/bin/bumblebeed --use-syslog -g ${cfg.group} --driver ${cfg.driver} --pm-method ${cfg.pmMethod}";
};
};
};

View File

@ -0,0 +1,61 @@
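# This module provides the proprietary DisplayLink (evdi) driver; it is
# selected by adding "displaylink" to services.xserver.videoDrivers.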
{ config, lib, ... }:
with lib;
let
enabled = elem "displaylink" config.services.xserver.videoDrivers;
displaylink = config.boot.kernelPackages.displaylink;
in
{
config = mkIf enabled {
boot.extraModulePackages = [ displaylink ];
boot.kernelModules = [ "evdi" ];
# Those are taken from displaylink-installer.sh and from Arch Linux AUR package.
services.udev.extraRules = ''
ACTION=="add", SUBSYSTEM=="usb", ATTR{idVendor}=="17e9", ATTR{bNumInterfaces}=="*5", TAG+="uaccess"
'';
powerManagement.powerDownCommands = ''
#flush any bytes in pipe
while read -n 1 -t 1 SUSPEND_RESULT < /tmp/PmMessagesPort_out; do : ; done;
#suspend DisplayLinkManager
echo "S" > /tmp/PmMessagesPort_in
#wait until suspend of DisplayLinkManager finish
read -n 1 -t 10 SUSPEND_RESULT < /tmp/PmMessagesPort_out
'';
powerManagement.resumeCommands = ''
#resume DisplayLinkManager
echo "R" > /tmp/PmMessagesPort_in
'';
systemd.services.displaylink = {
description = "DisplayLink Manager Service";
after = [ "display-manager.service" ];
wantedBy = [ "graphical.target" ];
serviceConfig = {
ExecStart = "${displaylink}/bin/DisplayLinkManager";
Restart = "always";
RestartSec = 5;
};
preStart = ''
mkdir -p /var/log/displaylink
'';
};
};
}

View File

@ -3,26 +3,27 @@
with lib;
let
cfg = config.i18n.inputMethod;
gtk2_cache = pkgs.stdenv.mkDerivation {
preferLocalBuild = true;
allowSubstitutes = false;
name = "gtk2-immodule.cache";
buildInputs = [ pkgs.gtk cfg.package ];
buildCommand = ''
gtk2_cache = pkgs.runCommand "gtk2-immodule.cache"
{ preferLocalBuild = true;
allowSubstitutes = false;
buildInputs = [ pkgs.gtk2 cfg.package ];
}
''
mkdir -p $out/etc/gtk-2.0/
GTK_PATH=${cfg.package}/lib/gtk-2.0/ gtk-query-immodules-2.0 > $out/etc/gtk-2.0/immodules.cache
'';
};
gtk3_cache = pkgs.stdenv.mkDerivation {
preferLocalBuild = true;
allowSubstitutes = false;
name = "gtk3-immodule.cache";
buildInputs = [ pkgs.gtk3 cfg.package ];
buildCommand = ''
gtk3_cache = pkgs.runCommand "gtk3-immodule.cache"
{ preferLocalBuild = true;
allowSubstitutes = false;
buildInputs = [ pkgs.gtk3 cfg.package ];
}
''
mkdir -p $out/etc/gtk-3.0/
GTK_PATH=${cfg.package}/lib/gtk-3.0/ gtk-query-immodules-3.0 > $out/etc/gtk-3.0/immodules.cache
'';
};
in
{
options.i18n = {

View File

@ -56,8 +56,18 @@ i18n.inputMethod = {
<listitem><para>Table (<literal>ibus-engines.table</literal>): An input method
that loads tables of input methods.</para></listitem>
<listitem><para>table-others (<literal>ibus-engines.table-others</literal>):
Various table-based input methods.</para></listitem>
Various table-based input methods. To use this, and any other table-based
input methods, it must appear in the list of engines along with
<literal>table</literal>. For example:
<programlisting>
ibus.engines = with pkgs.ibus-engines; [ table table-others ];
</programlisting>
</para></listitem>
</itemizedlist>
<para>To use any input method, the package must be added in the configuration,
as shown above, and also (after running <literal>nixos-rebuild</literal>) the
input method must be added from IBus' preference dialog.</para>
</section>
<section><title>Fcitx</title>

View File

@ -20,10 +20,9 @@ in
example = literalExample "with pkgs.fcitx-engines; [ mozc hangul ]";
description =
let
engines =
lib.concatStringsSep ", "
(map (name: "<literal>${name}</literal>")
(lib.attrNames pkgs.fcitx-engines));
enginesDrv = filterAttrs (const isDerivation) pkgs.fcitx-engines;
engines = concatStringsSep ", "
(map (name: "<literal>${name}</literal>") (attrNames enginesDrv));
in
"Enabled Fcitx engines. Available engines are: ${engines}.";
};

View File

@ -17,7 +17,7 @@ let
[Desktop Entry]
Name=IBus
Type=Application
Exec=${ibusPackage}/bin/ibus-daemon --daemonize --xim --cache=refresh
Exec=${ibusPackage}/bin/ibus-daemon --daemonize --xim
'';
};
in
@ -30,10 +30,9 @@ in
example = literalExample "with pkgs.ibus-engines; [ mozc hangul ]";
description =
let
engines =
lib.concatStringsSep ", "
(map (name: "<literal>${name}</literal>")
(lib.attrNames pkgs.ibus-engines));
enginesDrv = filterAttrs (const isDerivation) pkgs.ibus-engines;
engines = concatStringsSep ", "
(map (name: "<literal>${name}</literal>") (attrNames enginesDrv));
in
"Enabled IBus engines. Available engines are: ${engines}.";
};

View File

@ -1,20 +1,41 @@
# This module defines a NixOS installation CD that contains X11 and
# KDE 4.
# KDE 5.
{ config, lib, pkgs, ... }:
with lib;
{
imports = [ ./installation-cd-base.nix ../../profiles/graphical.nix ];
imports = [ ./installation-cd-base.nix ];
# Provide wicd for easy wireless configuration.
#networking.wicd.enable = true;
services.xserver = {
enable = true;
# Automatically login as root.
displayManager.slim = {
enable = true;
defaultUser = "root";
autoLogin = true;
};
desktopManager.kde5 = {
enable = true;
enableQt4Support = false;
};
# Enable touchpad support for many laptops.
synaptics.enable = true;
};
environment.systemPackages =
[ # Include gparted for partitioning disks.
[ pkgs.glxinfo
# Include gparted for partitioning disks.
pkgs.gparted
# Firefox for reading the manual.
pkgs.firefox
# Include some editors.
pkgs.vim
pkgs.bvi # binary editor
@ -32,79 +53,21 @@ with lib;
# Don't start the X server by default.
services.xserver.autorun = mkForce false;
# Auto-login as root.
services.xserver.displayManager.kdm.extraConfig =
''
[X-*-Core]
AllowRootLogin=true
AutoLoginEnable=true
AutoLoginUser=root
AutoLoginPass=""
'';
# Custom kde-workspace adding some icons on the desktop
system.activationScripts.installerDesktop = let
openManual = pkgs.writeScript "nixos-manual.sh" ''
#!${pkgs.stdenv.shell}
cd ${config.system.build.manual.manual}/share/doc/nixos/
konqueror ./index.html
'';
desktopFile = pkgs.writeText "nixos-manual.desktop" ''
[Desktop Entry]
Version=1.0
Type=Application
Name=NixOS Manual
Exec=${openManual}
Icon=konqueror
Exec=firefox ${config.system.build.manual.manual}/share/doc/nixos/index.html
Icon=text-html
'';
in ''
mkdir -p /root/Desktop
ln -sfT ${desktopFile} /root/Desktop/nixos-manual.desktop
ln -sfT ${pkgs.kde4.konsole}/share/applications/kde4/konsole.desktop /root/Desktop/konsole.desktop
ln -sfT ${pkgs.kde5.konsole}/share/applications/org.kde.konsole.desktop /root/Desktop/org.kde.konsole.desktop
ln -sfT ${pkgs.gparted}/share/applications/gparted.desktop /root/Desktop/gparted.desktop
'';
services.xserver.desktopManager.kde4.kdeWorkspacePackage = let
pkg = pkgs.kde4.kde_workspace;
plasmaInit = pkgs.writeText "00-defaultLayout.js" ''
loadTemplate("org.kde.plasma-desktop.defaultPanel")
for (var i = 0; i < screenCount; ++i) {
var desktop = new Activity
desktop.name = i18n("Desktop")
desktop.screen = i
desktop.wallpaperPlugin = 'image'
desktop.wallpaperMode = 'SingleImage'
var folderview = desktop.addWidget("folderview");
folderview.writeConfig("url", "desktop:/");
//Create more panels for other screens
if (i > 0){
var panel = new Panel
panel.screen = i
panel.location = 'bottom'
panel.height = screenGeometry(i).height > 1024 ? 35 : 27
var tasks = panel.addWidget("tasks")
tasks.writeConfig("showOnlyCurrentScreen", true);
}
}
'';
in
pkgs.stdenv.mkDerivation {
inherit (pkg) name meta;
buildCommand = ''
mkdir -p $out
cp -prf ${pkg}/* $out/
chmod a+w $out/share/apps/plasma-desktop/init
cp -f ${plasmaInit} $out/share/apps/plasma-desktop/init/00-defaultLayout.js
'';
};
}

View File

@ -1,11 +1,15 @@
# This module defines a small NixOS installation CD. It does not
# contain any graphical stuff.
{ config, lib, ... }:
{ config, lib, pkgs, ... }:
{
imports =
[ ./installation-cd-base.nix
../../profiles/minimal.nix
];
environment.systemPackages =
[
pkgs.vim
];
}

View File

@ -178,7 +178,7 @@ in
image. It significantly increases image size. Use that when
you want to be able to keep all the sources needed to build your
system or when you are going to install the system on a computer
with slow on non-existent network connection.
with slow or non-existent network connection.
'';
};
@ -232,8 +232,6 @@ in
system.boot.loader.kernelFile = "bzImage";
environment.systemPackages = [ pkgs.grub2 pkgs.grub2_efi pkgs.syslinux ];
boot.consoleLogLevel = mkDefault 7;
# In stage 1 of the boot, mount the CD as the root FS by label so
# that we don't need to know its device. We pass the label of the
# root filesystem on the kernel command line, rather than in

View File

@ -27,7 +27,6 @@ in
boot.kernelPackages = pkgs.linuxPackages_latest;
boot.kernelParams = ["console=ttyS0,115200n8" "console=ttymxc0,115200n8" "console=ttyAMA0,115200n8" "console=ttyO0,115200n8" "console=tty0"];
boot.consoleLogLevel = 7;
# FIXME: this probably should be in installation-device.nix
users.extraUsers.root.initialHashedPassword = "";

View File

@ -26,7 +26,6 @@ in
boot.loader.generic-extlinux-compatible.enable = true;
boot.kernelPackages = pkgs.linuxPackages_rpi;
boot.consoleLogLevel = 7;
# FIXME: this probably should be in installation-device.nix
users.extraUsers.root.initialHashedPassword = "";

View File

@ -61,7 +61,7 @@ in
pkgs.cryptsetup # needed for dm-crypt volumes
# Some networking tools.
pkgs.sshfsFuse
pkgs.sshfs-fuse
pkgs.socat
pkgs.screen
pkgs.wpa_supplicant # !!! should use the wpa module

View File

@ -55,7 +55,7 @@ in
pkgs.cryptsetup # needed for dm-crypt volumes
# Some networking tools.
pkgs.sshfsFuse
pkgs.sshfs-fuse
pkgs.socat
pkgs.screen
pkgs.wpa_supplicant # !!! should use the wpa module

View File

@ -26,29 +26,43 @@ with lib;
# here and it causes a cyclic dependency.
boot.loader.grub.enable = false;
boot.initrd.postMountCommands = ''
mkdir -p /mnt-root/nix/store
mount -t squashfs /nix-store.squashfs /mnt-root/nix/store
'';
# !!! Hack - attributes expected by other modules.
system.boot.loader.kernelFile = "bzImage";
environment.systemPackages = [ pkgs.grub2 pkgs.grub2_efi pkgs.syslinux ];
boot.consoleLogLevel = mkDefault 7;
fileSystems."/" =
{ fsType = "tmpfs";
options = [ "mode=0755" ];
};
# In stage 1, mount a tmpfs on top of /nix/store (the squashfs
# image) to make this a live CD.
fileSystems."/nix/.ro-store" =
{ fsType = "squashfs";
device = "../nix-store.squashfs";
options = [ "loop" ];
neededForBoot = true;
};
fileSystems."/nix/.rw-store" =
{ fsType = "tmpfs";
options = [ "mode=0755" ];
neededForBoot = true;
};
fileSystems."/nix/store" =
{ fsType = "unionfs-fuse";
device = "unionfs";
options = [ "allow_other" "cow" "nonempty" "chroot=/mnt-root" "max_files=32768" "hide_meta_files" "dirs=/nix/.rw-store=rw:/nix/.ro-store=ro" ];
};
boot.initrd.availableKernelModules = [ "squashfs" ];
boot.initrd.kernelModules = [ "loop" ];
# Closures to be copied to the Nix store, namely the init
# script and the top-level system configuration directory.
netboot.storeContents =
netboot.storeContents =
[ config.system.build.toplevel ];
# Create the squashfs image that contains the Nix store.

View File

@ -84,7 +84,7 @@ let cfg = config.system.autoUpgrade; in
${config.system.build.nixos-rebuild}/bin/nixos-rebuild switch ${toString cfg.flags}
'';
startAt = optionalString cfg.enable cfg.dates;
startAt = optional cfg.enable cfg.dates;
};
};

View File

@ -0,0 +1,5 @@
{
x86_64-linux = "/nix/store/m8z91vpfxyszhjpq4wl8m1zwlqik4fkn-nix-1.11.5";
i686-linux = "/nix/store/vk71likl32igqg6apqsj52ln3vhkq1pa-nix-1.11.5";
x86_64-darwin = "/nix/store/qfwm0b5qkr8v8gsv9dh2z3arky9p1myg-nix-1.11.5";
}

View File

@ -94,6 +94,21 @@ sub hasCPUFeature {
my $cpus = scalar (grep {/^processor\s*:/} (split '\n', $cpuinfo));
# Determine CPU governor to use
if (-e "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors") {
my $governors = read_file("/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors");
# ondemand governor is not available on sandy bridge or later Intel CPUs
my @desired_governors = ("ondemand", "powersave");
my $e;
foreach $e (@desired_governors) {
if (index($governors, $e) != -1) {
last if (push @attrs, "powerManagement.cpuFreqGovernor = \"$e\";");
}
}
}
# Virtualization support?
push @kernelModules, "kvm-intel" if hasCPUFeature "vmx";
push @kernelModules, "kvm-amd" if hasCPUFeature "svm";
@ -527,8 +542,11 @@ EOF
# Use the GRUB 2 boot loader.
boot.loader.grub.enable = true;
boot.loader.grub.version = 2;
# boot.loader.grub.efiSupport = true;
# boot.loader.grub.efiInstallAsRemovable = true;
# boot.loader.efi.efiSysMountPoint = "/boot/efi";
# Define on which hard drive you want to install Grub.
# boot.loader.grub.device = "/dev/sda";
# boot.loader.grub.device = "/dev/sda"; # or "nodev" for efi only
EOF
}

Some files were not shown because too many files have changed in this diff.