Merge branch 'master' into textadept-11.1

Commit d946d15796

.github/CODEOWNERS (vendored, 10 lines changed)
@@ -79,11 +79,11 @@

/pkgs/development/tools/poetry2nix @adisbladis

# Haskell
-/pkgs/development/compilers/ghc @cdepillabout
-/pkgs/development/haskell-modules @cdepillabout
-/pkgs/development/haskell-modules/default.nix @cdepillabout
-/pkgs/development/haskell-modules/generic-builder.nix @cdepillabout
-/pkgs/development/haskell-modules/hoogle.nix @cdepillabout
+/pkgs/development/compilers/ghc @cdepillabout @sternenseemann
+/pkgs/development/haskell-modules @cdepillabout @sternenseemann
+/pkgs/development/haskell-modules/default.nix @cdepillabout @sternenseemann
+/pkgs/development/haskell-modules/generic-builder.nix @cdepillabout @sternenseemann
+/pkgs/development/haskell-modules/hoogle.nix @cdepillabout @sternenseemann

# Perl
/pkgs/development/interpreters/perl @volth @stigtsp
.github/labeler.yml (vendored, new file, 146 lines)

@@ -0,0 +1,146 @@

"6.topic: agda":
- doc/languages-frameworks/agda.section.md
- nixos/tests/agda.nix
- pkgs/build-support/agda/**/*
- pkgs/development/libraries/agda/**/*
- pkgs/top-level/agda-packages.nix

"6.topic: cinnamon":
- pkgs/desktops/cinnamon/**/*

"6.topic: emacs":
- nixos/modules/services/editors/emacs.nix
- nixos/modules/services/editors/emacs.xml
- nixos/tests/emacs-daemon.nix
- pkgs/applications/editors/emacs-modes/**/*
- pkgs/applications/editors/emacs/**/*
- pkgs/build-support/emacs/**/*
- pkgs/top-level/emacs-packages.nix

"6.topic: erlang":
- doc/languages-frameworks/beam.section.md
- pkgs/development/beam-modules/**/*
- pkgs/development/interpreters/elixir/**/*
- pkgs/development/interpreters/erlang/**/*
- pkgs/development/tools/build-managers/rebar/**/*
- pkgs/development/tools/build-managers/rebar3/**/*
- pkgs/development/tools/erlang/**/*
- pkgs/top-level/beam-packages.nix

"6.topic: fetch":
- pkgs/build-support/fetch*/**/*

"6.topic: GNOME":
- doc/languages-frameworks/gnome.section.md
- nixos/modules/services/desktops/gnome3/**/*
- nixos/modules/services/x11/desktop-managers/gnome3.nix
- nixos/tests/gnome3-xorg.nix
- nixos/tests/gnome3.nix
- pkgs/desktops/gnome-3/**/*

"6.topic: golang":
- doc/languages-frameworks/go.section.md
- pkgs/development/compilers/go/**/*
- pkgs/development/go-modules/**/*
- pkgs/development/go-packages/**/*

"6.topic: haskell":
- doc/languages-frameworks/haskell.section.md
- pkgs/development/compilers/ghc/**/*
- pkgs/development/haskell-modules/**/*
- pkgs/development/tools/haskell/**/*
- pkgs/top-level/haskell-packages.nix

"6.topic: kernel":
- pkgs/build-support/kernel/**/*

"6.topic: lua":
- pkgs/development/interpreters/lua-5/**/*
- pkgs/development/interpreters/luajit/**/*
- pkgs/development/lua-modules/**/*
- pkgs/top-level/lua-packages.nix

"6.topic: nixos":
- nixos/**/*

"6.topic: ocaml":
- doc/languages-frameworks/ocaml.section.md
- pkgs/development/compilers/ocaml/**/*
- pkgs/development/compilers/reason/**/*
- pkgs/development/ocaml-modules/**/*
- pkgs/development/tools/ocaml/**/*
- pkgs/top-level/ocaml-packages.nix

"6.topic: pantheon":
- nixos/modules/services/desktops/pantheon/**/*
- nixos/modules/services/x11/desktop-managers/pantheon.nix
- nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix
- nixos/tests/pantheon.nix
- pkgs/desktops/pantheon/**/*

"6.topic: policy discussion":
- .github/**/*

"6.topic: printing":
- nixos/modules/services/printing/cupsd.nix
- pkgs/misc/cups/**/*

"6.topic: python":
- doc/languages-frameworks/python.section.md
- pkgs/development/interpreters/python/**/*
- pkgs/development/python-modules/**/*
- pkgs/top-level/python-packages.nix

"6.topic: qt/kde":
- doc/languages-frameworks/qt.section.md
- nixos/modules/services/x11/desktop-managers/plasma5.nix
- nixos/tests/plasma5.nix
- pkgs/applications/kde/**/*
- pkgs/desktops/plasma-5/**/*
- pkgs/development/libraries/kde-frameworks/**/*
- pkgs/development/libraries/qt-5/**/*

"6.topic: ruby":
- doc/languages-frameworks/ruby.section.md
- pkgs/development/interpreters/ruby/**/*
- pkgs/development/ruby-modules/**/*

"6.topic: rust":
- doc/languages-frameworks/rust.section.md
- pkgs/build-support/rust/**/*
- pkgs/development/compilers/rust/**/*

"6.topic: stdenv":
- pkgs/stdenv/**/*

"6.topic: steam":
- pkgs/games/steam/**/*

"6.topic: systemd":
- pkgs/os-specific/linux/systemd/**/*
- nixos/modules/system/boot/systemd*/**/*

"6.topic: TeX":
- doc/languages-frameworks/texlive.section.md
- pkgs/tools/typesetting/tex/**/*

"6.topic: vim":
- doc/languages-frameworks/vim.section.md
- pkgs/applications/editors/vim/**/*
- pkgs/misc/vim-plugins/**/*

"6.topic: xfce":
- nixos/doc/manual/configuration/xfce.xml
- nixos/modules/services/x11/desktop-managers/xfce.nix
- nixos/tests/xfce.nix
- pkgs/desktops/xfce/**/*

"8.has: changelog":
- nixos/doc/manual/release-notes/**/*

"8.has: documentation":
- doc/**/*
- nixos/doc/**/*

"8.has: module (update)":
- nixos/modules/**/*
.github/workflows/labels.yml (vendored, new file, 14 lines)

@@ -0,0 +1,14 @@

name: "Label PR"

on:
  pull_request_target:

jobs:
  labels:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'NixOS'
    steps:
      - uses: actions/labeler@v3
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          sync-labels: true
@@ -18,7 +18,7 @@ stdenv.mkDerivation {

The main difference between `fetchurl` and `fetchzip` is in how they store the contents. `fetchurl` will store the unaltered contents of the URL within the Nix store. `fetchzip` on the other hand will decompress the archive for you, making files and directories directly accessible in the future. `fetchzip` can only be used with archives. Despite the name, `fetchzip` is not limited to .zip files and can also be used with any tarball.
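For illustration only (the URL and hash are placeholders, not taken from this commit), a typical `fetchzip` call looks like this:

```nix
# fetchzip hashes the unpacked tree rather than the compressed file,
# so the result is a directory that can be used directly.
fetchzip {
  url = "https://example.org/releases/foo-1.0.tar.gz";
  sha256 = "0000000000000000000000000000000000000000000000000000";
}
```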

-`fetchpatch` works very similarly to `fetchurl` with the same arguments expected. It expects patch files as a source and and performs normalization on them before computing the checksum. For example it will remove comments or other unstable parts that are sometimes added by version control systems and can change over time.
+`fetchpatch` works very similarly to `fetchurl` with the same arguments expected. It expects patch files as a source and performs normalization on them before computing the checksum. For example it will remove comments or other unstable parts that are sometimes added by version control systems and can change over time.

Other fetcher functions allow you to add source code directly from a VCS such as subversion or git. These are mostly straightforward nambes based on the name of the command used with the VCS system. Because they give you a working repository, they act most like `fetchzip`.
@ -111,6 +111,12 @@ Create a Docker image with many of the store paths being on their own layer to i

*Default:* the output path's hash

`fromImage` _optional_

: The repository tarball containing the base image. It must be a valid Docker image, such as one exported by `docker save`.

*Default:* `null`, which can be seen as equivalent to `FROM scratch` of a `Dockerfile`.

`contents` _optional_

: Top level paths in the container. Either a single derivation, or a list of derivations.
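The following sketch shows how these arguments might fit together, assuming the `dockerTools.buildLayeredImage` function that this section appears to document (the image name and contents are placeholders):

```nix
dockerTools.buildLayeredImage {
  name = "hello";            # placeholder image name
  contents = [ pkgs.hello ]; # top level paths in the container
  # Leaving fromImage unset (default null) is the equivalent of `FROM scratch`.
}
```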

@@ -16,7 +16,7 @@

</para>

<para>
- The <parameter>base</parameter> should not be be specified, as <function>makeSnap</function> will force set it.
+ The <parameter>base</parameter> should not be specified, as <function>makeSnap</function> will force set it.
</para>

<para>
doc/builders/packages/fuse.section.md (new file, 19 lines)

@@ -0,0 +1,19 @@

# FUSE {#sec-fuse}

Some packages rely on
[FUSE](https://www.kernel.org/doc/html/latest/filesystems/fuse.html) to provide
support for additional filesystems not supported by the kernel.

In general, FUSE software are primarily developed for Linux but many of them can
also run on macOS. Nixpkgs supports FUSE packages on macOS, but it requires
[macFUSE](https://osxfuse.github.io) to be installed outside of Nix. macFUSE
currently isn't packaged in Nixpkgs mainly because it includes a kernel
extension, which isn't supported by Nix outside of NixOS.

If a package fails to run on macOS with an error message similar to the
following, it's a likely sign that you need to have macFUSE installed.

    dyld: Library not loaded: /usr/local/lib/libfuse.2.dylib
    Referenced from: /nix/store/w8bi72bssv0bnxhwfw3xr1mvn7myf37x-sshfs-fuse-2.10/bin/sshfs
    Reason: image not found
    [1] 92299 abort /nix/store/w8bi72bssv0bnxhwfw3xr1mvn7myf37x-sshfs-fuse-2.10/bin/sshfs
doc/builders/packages/ibus.section.md (new file, 38 lines)

@@ -0,0 +1,38 @@

# ibus-engines.typing-booster {#sec-ibus-typing-booster}

This package is an ibus-based completion method to speed up typing.

## Activating the engine {#sec-ibus-typing-booster-activate}

IBus needs to be configured accordingly to activate `typing-booster`. The configuration depends on the desktop manager in use. For detailed instructions, please refer to the [upstream docs](https://mike-fabian.github.io/ibus-typing-booster/documentation.html).

On NixOS you need to explicitly enable `ibus` with given engines before customizing your desktop to use `typing-booster`. This can be achieved using the `ibus` module:

```nix
{ pkgs, ... }: {
  i18n.inputMethod = {
    enabled = "ibus";
    ibus.engines = with pkgs.ibus-engines; [ typing-booster ];
  };
}
```

## Using custom hunspell dictionaries {#sec-ibus-typing-booster-customize-hunspell}

The IBus engine is based on `hunspell` to support completion in many languages. By default the dictionaries `de-de`, `en-us`, `fr-moderne` `es-es`, `it-it`, `sv-se` and `sv-fi` are in use. To add another dictionary, the package can be overridden like this:

```nix
ibus-engines.typing-booster.override { langs = [ "de-at" "en-gb" ]; }
```

_Note: each language passed to `langs` must be an attribute name in `pkgs.hunspellDicts`._

## Built-in emoji picker {#sec-ibus-typing-booster-emoji-picker}

The `ibus-engines.typing-booster` package contains a program named `emoji-picker`. To display all emojis correctly, a special font such as `noto-fonts-emoji` is needed:

On NixOS it can be installed using the following expression:

```nix
{ pkgs, ... }: { fonts.fonts = with pkgs; [ noto-fonts-emoji ]; }
```
@@ -1,57 +0,0 @@

<section xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xml:id="sec-ibus-typing-booster">
 <title>ibus-engines.typing-booster</title>

 <para>
  This package is an ibus-based completion method to speed up typing.
 </para>

 <section xml:id="sec-ibus-typing-booster-activate">
  <title>Activating the engine</title>

  <para>
   IBus needs to be configured accordingly to activate <literal>typing-booster</literal>. The configuration depends on the desktop manager in use. For detailed instructions, please refer to the <link xlink:href="https://mike-fabian.github.io/ibus-typing-booster/documentation.html">upstream docs</link>.
  </para>

  <para>
   On NixOS you need to explicitly enable <literal>ibus</literal> with given engines before customizing your desktop to use <literal>typing-booster</literal>. This can be achieved using the <literal>ibus</literal> module:
<programlisting>{ pkgs, ... }: {
  i18n.inputMethod = {
    enabled = "ibus";
    ibus.engines = with pkgs.ibus-engines; [ typing-booster ];
  };
}</programlisting>
  </para>
 </section>

 <section xml:id="sec-ibus-typing-booster-customize-hunspell">
  <title>Using custom hunspell dictionaries</title>

  <para>
   The IBus engine is based on <literal>hunspell</literal> to support completion in many languages. By default the dictionaries <literal>de-de</literal>, <literal>en-us</literal>, <literal>fr-moderne</literal> <literal>es-es</literal>, <literal>it-it</literal>, <literal>sv-se</literal> and <literal>sv-fi</literal> are in use. To add another dictionary, the package can be overridden like this:
<programlisting>ibus-engines.typing-booster.override {
  langs = [ "de-at" "en-gb" ];
}</programlisting>
  </para>

  <para>
   <emphasis>Note: each language passed to <literal>langs</literal> must be an attribute name in <literal>pkgs.hunspellDicts</literal>.</emphasis>
  </para>
 </section>

 <section xml:id="sec-ibus-typing-booster-emoji-picker">
  <title>Built-in emoji picker</title>

  <para>
   The <literal>ibus-engines.typing-booster</literal> package contains a program named <literal>emoji-picker</literal>. To display all emojis correctly, a special font such as <literal>noto-fonts-emoji</literal> is needed:
  </para>

  <para>
   On NixOS it can be installed using the following expression:
<programlisting>{ pkgs, ... }: {
  fonts.fonts = with pkgs; [ noto-fonts-emoji ];
}</programlisting>
  </para>
 </section>
</section>
@@ -12,7 +12,8 @@

 <xi:include href="emacs.section.xml" />
 <xi:include href="firefox.section.xml" />
 <xi:include href="fish.section.xml" />
-<xi:include href="ibus.xml" />
+<xi:include href="fuse.section.xml" />
+<xi:include href="ibus.section.xml" />
 <xi:include href="kakoune.section.xml" />
 <xi:include href="linux.section.xml" />
 <xi:include href="locales.section.xml" />
@@ -68,15 +68,16 @@

Security fixes are submitted in the same way as other changes and thus the same guidelines apply.

-If the security fix comes in the form of a patch and a CVE is available, then the name of the patch should be the CVE identifier, so e.g. `CVE-2019-13636.patch` in the case of a patch that is included in the Nixpkgs tree. If a patch is fetched the name needs to be set as well, e.g.:
-
-```nix
-(fetchpatch {
-  name = "CVE-2019-11068.patch";
-  url = "https://gitlab.gnome.org/GNOME/libxslt/commit/e03553605b45c88f0b4b2980adfbbb8f6fca2fd6.patch";
-  sha256 = "0pkpb4837km15zgg6h57bncp66d5lwrlvkr73h0lanywq7zrwhj8";
-})
-```
+- If a new version fixing the vulnerability has been released, update the package;
+- If the security fix comes in the form of a patch and a CVE is available, then add the patch to the Nixpkgs tree, and apply it to the package.
+  The name of the patch should be the CVE identifier, so e.g. `CVE-2019-13636.patch`; If a patch is fetched the name needs to be set as well, e.g.:
+  ```nix
+  (fetchpatch {
+    name = "CVE-2019-11068.patch";
+    url = "https://gitlab.gnome.org/GNOME/libxslt/commit/e03553605b45c88f0b4b2980adfbbb8f6fca2fd6.patch";
+    sha256 = "0pkpb4837km15zgg6h57bncp66d5lwrlvkr73h0lanywq7zrwhj8";
+  })
+  ```

If a security fix applies to both master and a stable release then, similar to regular changes, they are preferably delivered via master first and cherry-picked to the release branch.
doc/contributing/vulnerability-roundup.chapter.md (new file, 45 lines)

@@ -0,0 +1,45 @@

# Vulnerability Roundup {#chap-vulnerability-roundup}

## Issues {#vulnerability-roundup-issues}

Vulnerable packages in Nixpkgs are managed using issues.
Currently opened ones can be found using the following:

[github.com/NixOS/nixpkgs/issues?q=is:issue+is:open+"Vulnerability+roundup"](https://github.com/NixOS/nixpkgs/issues?q=is%3Aissue+is%3Aopen+%22Vulnerability+roundup%22)

Each issue correspond to a vulnerable version of a package; As a consequence:

- One issue can contain several CVEs;
- One CVE can be shared across several issues;
- A single package can be concerned by several issues.

A "Vulnerability roundup" issue usually respects the following format:

```txt
<link to relevant package search on search.nix.gsc.io>, <link to relevant files in Nixpkgs on GitHub>

<list of related CVEs, their CVSS score, and the impacted NixOS version>

<list of the scanned Nixpkgs versions>

<list of relevant contributors>
```

Note that there can be an extra comment containing links to previously reported (and still open) issues for the same package.

## Triaging and Fixing {#vulnerability-roundup-triaging-and-fixing}

**Note**: An issue can be a "false positive" (i.e. automatically opened, but without the package it refers to being actually vulnerable).
If you find such a "false positive", comment on the issue an explanation of why it falls into this category, linking as much information as the necessary to help maintainers double check.

If you are investigating a "true positive":

- Find the earliest patched version or a code patch in the CVE details;
- Is the issue already patched (version up-to-date or patch applied manually) in Nixpkgs's `master` branch?
  - **No**:
    - [Submit a security fix](#submitting-changes-submitting-security-fixes);
    - Once the fix is merged into `master`, [submit the change to the vulnerable release branch(es)](https://nixos.org/manual/nixpkgs/stable/#submitting-changes-stable-release-branches);
  - **Yes**: [Backport the change to the vulnerable release branch(es)](https://nixos.org/manual/nixpkgs/stable/#submitting-changes-stable-release-branches).
- When the patch has made it into all the relevant branches (`master`, and the vulnerable releases), close the relevant issue(s).
@@ -7,7 +7,7 @@

<section xml:id="function-library-lib.attrsets.attrByPath">
 <title><function>lib.attrset.attrByPath</function></title>

- <subtitle><literal>attrByPath :: [String] -> Any -> AttrSet</literal>
+ <subtitle><literal>attrByPath :: [String] -> Any -> AttrSet -> Any</literal>
 </subtitle>

 <xi:include href="./locations.xml" xpointer="lib.attrsets.attrByPath" />
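As a quick illustration of the corrected signature (values chosen only for illustration):

```nix
# attrByPath :: [String] -> Any -> AttrSet -> Any
lib.attrsets.attrByPath [ "a" "b" ] 0 { a.b = 3; }   # => 3
lib.attrsets.attrByPath [ "a" "c" ] 0 { a.b = 3; }   # => 0 (the default)
```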

@@ -80,7 +80,7 @@ Most of the function arguments have reasonable default settings.

You can specify license names:

-* `extraLicenses` is a list of of license names.
+* `extraLicenses` is a list of license names.
  You can get these names from repo.json or `querypackages.sh licenses`. The SDK
  license (`android-sdk-license`) is accepted for you if you set accept_license
  to true. If you are doing something like working with preview SDKs, you will
@@ -64,7 +64,7 @@ $ dotnet --info

The `dotnetCorePackages.sdk_X_Y` is preferred over the old dotnet-sdk as both major and minor version are very important for a dotnet environment. If a given minor version isn't present (or was changed), then this will likely break your ability to build a project.

-## dotnetCorePackages.sdk vs vs dotnetCorePackages.net vs dotnetCorePackages.netcore vs dotnetCorePackages.aspnetcore
+## dotnetCorePackages.sdk vs dotnetCorePackages.net vs dotnetCorePackages.netcore vs dotnetCorePackages.aspnetcore

The `dotnetCorePackages.sdk` contains both a runtime and the full sdk of a given version. The `net`, `netcore` and `aspnetcore` packages are meant to serve as minimal runtimes to deploy alongside already built applications. For runtime versions >= .NET 5 `net` is used while `netcore` is used for older .NET Core runtime version.
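As an illustrative sketch, a development shell pinned to one SDK line might look like this (the attribute `sdk_5_0` is an assumed example; use whichever version attribute your Nixpkgs provides):

```nix
{ pkgs ? import <nixpkgs> {} }:

pkgs.mkShell {
  # Pin the SDK to a specific major.minor so minor-version changes
  # cannot silently break the project.
  buildInputs = [ pkgs.dotnetCorePackages.sdk_5_0 ];
}
```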

@@ -50,7 +50,7 @@ and install it in your profile with

```shell
nix-env -iA nixpkgs.myLuaEnv
```
-The environment is is installed by referring to the attribute, and considering
+The environment is installed by referring to the attribute, and considering
the `nixpkgs` channel was used.

#### Lua environment defined in `/etc/nixos/configuration.nix`

@@ -129,7 +129,7 @@ the whitelist maintainers/scripts/luarocks-packages.csv and updated by running m

[luarocks2nix](https://github.com/nix-community/luarocks) is a tool capable of generating nix derivations from both rockspec and src.rock (and favors the src.rock).
The automation only goes so far though and some packages need to be customized.
These customizations go in `pkgs/development/lua-modules/overrides.nix`.
-For instance if the rockspec defines `external_dependencies`, these need to be manually added in in its rockspec file then it won't work.
+For instance if the rockspec defines `external_dependencies`, these need to be manually added in its rockspec file then it won't work.

You can try converting luarocks packages to nix packages with the command `nix-shell -p luarocks-nix` and then `luarocks nix PKG_NAME`.
Nix rely on luarocks to install lua packages, basically it runs:
@@ -334,7 +334,7 @@ Above, we were mostly just focused on use cases and what to do to get started

creating working Python environments in nix.

Now that you know the basics to be up and running, it is time to take a step
-back and take a deeper look at at how Python packages are packaged on Nix. Then,
+back and take a deeper look at how Python packages are packaged on Nix. Then,
we will look at how you can use development mode with your code.

#### Python library packages in Nixpkgs

@@ -638,7 +638,7 @@ are disabled.

#### Using pythonImportsCheck

-Although unit tests are highly prefered to valid correctness of a package. Not
+Although unit tests are highly prefered to validate correctness of a package, not
all packages have test suites that can be ran easily, and some have none at all.
To help ensure the package still works, `pythonImportsCheck` can attempt to import
the listed modules.
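A minimal sketch of how the attribute is typically used (package name, module and hash are placeholders):

```nix
buildPythonPackage rec {
  pname = "example";
  version = "1.0.0";
  src = fetchPypi {
    inherit pname version;
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };

  # The build fails if `import example` raises, even when no test suite runs.
  pythonImportsCheck = [ "example" ];
}
```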

@@ -918,7 +918,7 @@ because their behaviour is different:

* `nativeBuildInputs ? []`: Build-time only dependencies. Typically executables
  as well as the items listed in `setup_requires`.
-* `buildInputs ? []`: Build and/or run-time dependencies that need to be be
+* `buildInputs ? []`: Build and/or run-time dependencies that need to be
  compiled for the host machine. Typically non-Python libraries which are being
  linked.
* `checkInputs ? []`: Dependencies needed for running the `checkPhase`. These
|
||||
|
||||
### Contributing guidelines
|
||||
|
||||
Following rules are desired to be respected:
|
||||
The following rules are desired to be respected:
|
||||
|
||||
* Python libraries are called from `python-packages.nix` and packaged with
|
||||
`buildPythonPackage`. The expression of a library should be in
|
||||
`pkgs/development/python-modules/<name>/default.nix`.
|
||||
* Libraries in `pkgs/top-level/python-packages.nix` are sorted
|
||||
alphanumerically to avoid merge conflicts and ease locating attributes.
|
||||
* Python applications live outside of `python-packages.nix` and are packaged
|
||||
with `buildPythonApplication`.
|
||||
* Make sure libraries build for all Python interpreters.
|
||||
@ -1567,8 +1565,11 @@ Following rules are desired to be respected:
|
||||
case, when you disable tests, leave a comment explaining why.
|
||||
* Commit names of Python libraries should reflect that they are Python
|
||||
libraries, so write for example `pythonPackages.numpy: 1.11 -> 1.12`.
|
||||
* Attribute names in `python-packages.nix` should be normalized according to
|
||||
[PEP 0503](https://www.python.org/dev/peps/pep-0503/#normalized-names). This
|
||||
means that characters should be converted to lowercase and `.` and `_` should
|
||||
be replaced by a single `-` (foo-bar-baz instead of Foo__Bar.baz )
|
||||
* Attribute names in `python-packages.nix` should be sorted alphanumerically.
|
||||
* Attribute names in `python-packages.nix` as well as `pname`s should match the
|
||||
library's name on PyPI, but be normalized according to [PEP
|
||||
0503](https://www.python.org/dev/peps/pep-0503/#normalized-names). This means
|
||||
that characters should be converted to lowercase and `.` and `_` should be
|
||||
replaced by a single `-` (foo-bar-baz instead of Foo__Bar.baz).
|
||||
If necessary, `pname` has to be given a different value within `fetchPypi`.
|
||||
* Attribute names in `python-packages.nix` should be sorted alphanumerically to
|
||||
avoid merge conflicts and ease locating attributes.
|
||||
|
@@ -229,7 +229,7 @@ end

If you want to package a specific version, you can use the standard Gemfile syntax for that, e.g. `gem 'mdl', '0.5.0'`, but if you want the latest stable version anyway, it's easier to update by simply running the `bundle lock` and `bundix` steps again.

-Now you can also also make a `default.nix` that looks like this:
+Now you can also make a `default.nix` that looks like this:

```nix
{ bundlerApp }:
@@ -72,8 +72,8 @@ For `cargoHash` you can use:

Per the instructions in the [Cargo Book](https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html)
best practices guide, Rust applications should always commit the `Cargo.lock`
file in git to ensure a reproducible build. However, a few packages do not, and
-Nix depends on this file, so if it missing you can use `cargoPatches` to apply
-it in the `patchPhase`. Consider sending a PR upstream with a note to the
+Nix depends on this file, so if it is missing you can use `cargoPatches` to
+apply it in the `patchPhase`. Consider sending a PR upstream with a note to the
maintainer describing why it's important to include in the application.
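As a sketch of what that looks like in practice (package name, patch file and hash are placeholders):

```nix
rustPlatform.buildRustPackage rec {
  pname = "example";   # hypothetical package
  version = "0.1.0";
  src = ./.;

  # Hypothetical patch adding the Cargo.lock missing from the upstream source.
  cargoPatches = [ ./add-Cargo.lock.patch ];

  cargoSha256 = "0000000000000000000000000000000000000000000000000000";
}
```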

The fetcher will verify that the `Cargo.lock` file is in sync with the `src`

@@ -146,6 +146,7 @@ where they are known to differ. But there are ways to customize the argument:

    rustc.platform = { foo = ""; bar = ""; };
  };
}
```
will result in:
```shell
--target /nix/store/asdfasdfsadf-thumb-crazy.json # contains {"foo":"","bar":""}

@@ -156,7 +157,7 @@ path) can be passed directly to `buildRustPackage`:

```nix
pkgs.rustPlatform.buildRustPackage {
-  (...)
+  /* ... */
  target = "x86_64-fortanix-unknown-sgx";
}
```

@@ -191,6 +192,13 @@ rustPlatform.buildRustPackage {

Please note that the code will be compiled twice here: once in `release` mode
for the `buildPhase`, and again in `debug` mode for the `checkPhase`.

Test flags, e.g., `--features xxx/yyy`, can be passed to `cargo test` via the
`cargoTestFlags` attribute.

Another attribute, called `checkFlags`, is used to pass arguments to the test
binary itself, as stated
(here)[https://doc.rust-lang.org/cargo/commands/cargo-test.html].
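A brief sketch combining the two attributes (the feature and test names are invented for illustration):

```nix
rustPlatform.buildRustPackage {
  # ...
  # Forwarded to `cargo test` itself:
  cargoTestFlags = [ "--features" "foo" ];
  # Forwarded to the compiled test binary:
  checkFlags = [ "--skip" "integration::needs_network" ];
}
```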

#### Tests relying on the structure of the `target/` directory

Some tests may rely on the structure of the `target/` directory. Those tests

@@ -320,9 +328,10 @@ attributes can also be used:

  variable `buildAndTestSubdir` can be used to build a crate in a
  Cargo workspace. Additional maturin flags can be passed through
  `maturinBuildFlags`.
-* `cargoCheckHook`: run tests using Cargo. Additional flags can be
-  passed to Cargo using `checkFlags` and `checkFlagsArray`. By
-  default, tests are run in parallel. This can be disabled by setting
+* `cargoCheckHook`: run tests using Cargo. The build type for checks
+  can be set using `cargoCheckType`. Additional flags can be passed to
+  the tests using `checkFlags` and `checkFlagsArray`. By default,
+  tests are run in parallel. This can be disabled by setting
  `dontUseCargoParallelTests`.
* `cargoInstallHook`: install binaries and static/shared libraries
  that were built using `cargoBuildHook`.

@@ -737,7 +746,7 @@ with import "${src.out}/rust-overlay.nix" pkgs pkgs;

stdenv.mkDerivation {
  name = "rust-env";
  buildInputs = [
-    # Note: to use use stable, just replace `nightly` with `stable`
+    # Note: to use stable, just replace `nightly` with `stable`
    latest.rustChannels.nightly.rust

    # Add some extra dependencies from `pkgs`
@@ -15,9 +15,9 @@

 </part>
 <part>
  <title>Standard environment</title>
- <xi:include href="stdenv/stdenv.xml" />
- <xi:include href="stdenv/meta.xml" />
- <xi:include href="stdenv/multiple-output.xml" />
+ <xi:include href="stdenv/stdenv.chapter.xml" />
+ <xi:include href="stdenv/meta.chapter.xml" />
+ <xi:include href="stdenv/multiple-output.chapter.xml" />
  <xi:include href="stdenv/cross-compilation.chapter.xml" />
  <xi:include href="stdenv/platform-notes.chapter.xml" />
 </part>

@@ -35,6 +35,7 @@

 <xi:include href="contributing/quick-start.xml" />
 <xi:include href="contributing/coding-conventions.xml" />
 <xi:include href="contributing/submitting-changes.chapter.xml" />
+<xi:include href="contributing/vulnerability-roundup.chapter.xml" />
 <xi:include href="contributing/reviewing-contributions.xml" />
 <xi:include href="contributing/contributing-to-documentation.xml" />
 </part>
doc/stdenv/meta.chapter.md (new file, 194 lines)

@@ -0,0 +1,194 @@

# Meta-attributes {#chap-meta}

Nix packages can declare *meta-attributes* that contain information about a package such as a description, its homepage, its license, and so on. For instance, the GNU Hello package has a `meta` declaration like this:

```nix
meta = with lib; {
  description = "A program that produces a familiar, friendly greeting";
  longDescription = ''
    GNU Hello is a program that prints "Hello, world!" when you run it.
    It is fully customizable.
  '';
  homepage = "https://www.gnu.org/software/hello/manual/";
  license = licenses.gpl3Plus;
  maintainers = [ maintainers.eelco ];
  platforms = platforms.all;
};
```

Meta-attributes are not passed to the builder of the package. Thus, a change to a meta-attribute doesn’t trigger a recompilation of the package. The value of a meta-attribute must be a string.

The meta-attributes of a package can be queried from the command-line using `nix-env`:

```ShellSession
$ nix-env -qa hello --json
{
  "hello": {
    "meta": {
      "description": "A program that produces a familiar, friendly greeting",
      "homepage": "https://www.gnu.org/software/hello/manual/",
      "license": {
        "fullName": "GNU General Public License version 3 or later",
        "shortName": "GPLv3+",
        "url": "http://www.fsf.org/licensing/licenses/gpl.html"
      },
      "longDescription": "GNU Hello is a program that prints \"Hello, world!\" when you run it.\nIt is fully customizable.\n",
      "maintainers": [
        "Ludovic Court\u00e8s <ludo@gnu.org>"
      ],
      "platforms": [
        "i686-linux",
        "x86_64-linux",
        "armv5tel-linux",
        "armv7l-linux",
        "mips32-linux",
        "x86_64-darwin",
        "i686-cygwin",
        "i686-freebsd",
        "x86_64-freebsd",
        "i686-openbsd",
        "x86_64-openbsd"
      ],
      "position": "/home/user/dev/nixpkgs/pkgs/applications/misc/hello/default.nix:14"
    },
    "name": "hello-2.9",
    "system": "x86_64-linux"
  }
}
```

`nix-env` knows about the `description` field specifically:

```ShellSession
$ nix-env -qa hello --description
hello-2.3 A program that produces a familiar, friendly greeting
```

## Standard meta-attributes {#sec-standard-meta-attributes}

It is expected that each meta-attribute is one of the following:

### `description` {#var-meta-description}

A short (one-line) description of the package. This is shown by `nix-env -q --description` and also on the Nixpkgs release pages.

Don’t include a period at the end. Don’t include newline characters. Capitalise the first character. For brevity, don’t repeat the name of package --- just describe what it does.

Wrong: `"libpng is a library that allows you to decode PNG images."`

Right: `"A library for decoding PNG images"`

### `longDescription` {#var-meta-longDescription}

An arbitrarily long description of the package.

### `branch` {#var-meta-branch}

Release branch. Used to specify that a package is not going to receive updates that are not in this branch; for example, Linux kernel 3.0 is supposed to be updated to 3.0.X, not 3.1.

### `homepage` {#var-meta-homepage}

The package’s homepage. Example: `https://www.gnu.org/software/hello/manual/`

### `downloadPage` {#var-meta-downloadPage}

The page where a link to the current version can be found. Example: `https://ftp.gnu.org/gnu/hello/`

### `changelog` {#var-meta-changelog}

A link or a list of links to the location of Changelog for a package. A link may use expansion to refer to the correct changelog version. Example: `"https://git.savannah.gnu.org/cgit/hello.git/plain/NEWS?h=v${version}"`

### `license` {#var-meta-license}

The license, or licenses, for the package. One from the attribute set defined in [`nixpkgs/lib/licenses.nix`](https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix). At this moment using both a list of licenses and a single license is valid. If the license field is in the form of a list representation, then it means that parts of the package are licensed differently. Each license should preferably be referenced by their attribute. The non-list attribute value can also be a space delimited string representation of the contained attribute `shortNames` or `spdxIds`. The following are all valid examples:

- Single license referenced by attribute (preferred) `lib.licenses.gpl3Only`.
- Single license referenced by its attribute shortName (frowned upon) `"gpl3Only"`.
- Single license referenced by its attribute spdxId (frowned upon) `"GPL-3.0-only"`.
- Multiple licenses referenced by attribute (preferred) `with lib.licenses; [ asl20 free ofl ]`.
- Multiple licenses referenced as a space delimited string of attribute shortNames (frowned upon) `"asl20 free ofl"`.

For details, see [Licenses](#sec-meta-license).
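For instance, a package expression might set (values chosen for illustration):

```nix
meta.license = lib.licenses.gpl3Plus;
# or, when parts of the package are licensed differently:
# meta.license = with lib.licenses; [ asl20 free ofl ];
```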

### `maintainers` {#var-meta-maintainers}

A list of the maintainers of this Nix expression. Maintainers are defined in [`nixpkgs/maintainers/maintainer-list.nix`](https://github.com/NixOS/nixpkgs/blob/master/maintainers/maintainer-list.nix). There is no restriction to becoming a maintainer, just add yourself to that list in a separate commit titled “maintainers: add alice”, and reference maintainers with `maintainers = with lib.maintainers; [ alice bob ]`.

### `priority` {#var-meta-priority}

The *priority* of the package, used by `nix-env` to resolve file name conflicts between packages. See the Nix manual page for `nix-env` for details. Example: `"10"` (a low-priority package).

### `platforms` {#var-meta-platforms}

The list of Nix platform types on which the package is supported. Hydra builds packages according to the platform specified. If no platform is specified, the package does not have prebuilt binaries. An example is:

```nix
meta.platforms = lib.platforms.linux;
```

Attribute Set `lib.platforms` defines [various common lists](https://github.com/NixOS/nixpkgs/blob/master/lib/systems/doubles.nix) of platforms types.

### `tests` {#var-meta-tests}

::: warning
This attribute is special in that it is not actually under the `meta` attribute set but rather under the `passthru` attribute set. This is due to how `meta` attributes work, and the fact that they are supposed to contain only metadata, not derivations.
:::

An attribute set with as values tests. A test is a derivation, which builds successfully when the test passes, and fails to build otherwise. A derivation that is a test needs to have `meta.timeout` defined.

The NixOS tests are available as `nixosTests` in parameters of derivations. For instance, the OpenSMTPD derivation includes lines similar to:

```nix
{ /* ... */, nixosTests }:
{
  # ...
  passthru.tests = {
    basic-functionality-and-dovecot-integration = nixosTests.opensmtpd;
  };
}
```

### `timeout` {#var-meta-timeout}

A timeout (in seconds) for building the derivation. If the derivation takes longer than this time to build, it can fail due to breaking the timeout. However, all computers do not have the same computing power, hence some builders may decide to apply a multiplicative factor to this value. When filling this value in, try to keep it approximately consistent with other values already present in `nixpkgs`.

### `hydraPlatforms` {#var-meta-hydraPlatforms}

The list of Nix platform types for which the Hydra instance at `hydra.nixos.org` will build the package. (Hydra is the Nix-based continuous build system.) It defaults to the value of `meta.platforms`. Thus, the only reason to set `meta.hydraPlatforms` is if you want `hydra.nixos.org` to build the package on a subset of `meta.platforms`, or not at all, e.g.

```nix
meta.platforms = lib.platforms.linux;
meta.hydraPlatforms = [];
```

### `broken` {#var-meta-broken}

If set to `true`, the package is marked as "broken", meaning that it won’t show up in `nix-env -qa`, and cannot be built or installed. Such packages should be removed from Nixpkgs eventually unless they are fixed.
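A common pattern (shown as an illustrative sketch rather than taken from this commit) is to set it conditionally instead of to a literal `true`:

```nix
# Mark the package broken only on platforms where it is known not to work.
meta.broken = stdenv.isDarwin;
```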

### `updateWalker` {#var-meta-updateWalker}

If set to `true`, the package is tested to be updated correctly by the `update-walker.sh` script without additional settings. Such packages have `meta.version` set and their homepage (or the page specified by `meta.downloadPage`) contains a direct link to the package tarball.

## Licenses {#sec-meta-license}

The `meta.license` attribute should preferably contain a value from `lib.licenses` defined in [`nixpkgs/lib/licenses.nix`](https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix), or in-place license description of the same format if the license is unlikely to be useful in another expression.

Although it’s typically better to indicate the specific license, a few generic options are available:

### `lib.licenses.free`, `"free"`

Catch-all for free software licenses not listed above.

### `lib.licenses.unfreeRedistributable`, `"unfree-redistributable"`

Unfree package that can be redistributed in binary form. That is, it’s legal to redistribute the *output* of the derivation. This means that the package can be included in the Nixpkgs channel.

Sometimes proprietary software can only be redistributed unmodified. Make sure the builder doesn’t actually modify the original binaries; otherwise we’re breaking the license. For instance, the NVIDIA X11 drivers can be redistributed unmodified, but our builder applies `patchelf` to make them work. Thus, its license is `"unfree"` and it cannot be included in the Nixpkgs channel.

### `lib.licenses.unfree`, `"unfree"`

Unfree package that cannot be redistributed. You can build it yourself, but you cannot redistribute the output of the derivation. Thus it cannot be included in the Nixpkgs channel.

### `lib.licenses.unfreeRedistributableFirmware`, `"unfree-redistributable-firmware"`

This package supplies unfree, redistributable firmware. This is a separate value from `unfree-redistributable` because not everybody cares whether firmware is free.
@ -1,349 +0,0 @@
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xml:id="chap-meta">
|
||||
<title>Meta-attributes</title>
|
||||
<para>
|
||||
Nix packages can declare <emphasis>meta-attributes</emphasis> that contain information about a package such as a description, its homepage, its license, and so on. For instance, the GNU Hello package has a <varname>meta</varname> declaration like this:
|
||||
<programlisting>
|
||||
meta = with lib; {
|
||||
description = "A program that produces a familiar, friendly greeting";
|
||||
longDescription = ''
|
||||
GNU Hello is a program that prints "Hello, world!" when you run it.
|
||||
It is fully customizable.
|
||||
'';
|
||||
homepage = "https://www.gnu.org/software/hello/manual/";
|
||||
license = licenses.gpl3Plus;
|
||||
maintainers = [ maintainers.eelco ];
|
||||
platforms = platforms.all;
|
||||
};
|
||||
</programlisting>
|
||||
</para>
|
||||
<para>
|
||||
Meta-attributes are not passed to the builder of the package. Thus, a change to a meta-attribute doesn’t trigger a recompilation of the package. The value of a meta-attribute must be a string.
|
||||
</para>
|
||||
<para>
|
||||
The meta-attributes of a package can be queried from the command-line using <command>nix-env</command>:
|
||||
<screen>
|
||||
<prompt>$ </prompt>nix-env -qa hello --json
|
||||
{
|
||||
"hello": {
|
||||
"meta": {
|
||||
"description": "A program that produces a familiar, friendly greeting",
|
||||
"homepage": "https://www.gnu.org/software/hello/manual/",
|
||||
"license": {
|
||||
"fullName": "GNU General Public License version 3 or later",
|
||||
"shortName": "GPLv3+",
|
||||
"url": "http://www.fsf.org/licensing/licenses/gpl.html"
|
||||
},
|
||||
"longDescription": "GNU Hello is a program that prints \"Hello, world!\" when you run it.\nIt is fully customizable.\n",
|
||||
"maintainers": [
|
||||
"Ludovic Court\u00e8s <ludo@gnu.org>"
|
||||
],
|
||||
"platforms": [
|
||||
"i686-linux",
|
||||
"x86_64-linux",
|
||||
"armv5tel-linux",
|
||||
"armv7l-linux",
|
||||
"mips32-linux",
|
||||
"x86_64-darwin",
|
||||
"i686-cygwin",
|
||||
"i686-freebsd",
|
||||
"x86_64-freebsd",
|
||||
"i686-openbsd",
|
||||
"x86_64-openbsd"
|
||||
],
|
||||
"position": "/home/user/dev/nixpkgs/pkgs/applications/misc/hello/default.nix:14"
|
||||
},
|
||||
"name": "hello-2.9",
|
||||
"system": "x86_64-linux"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
</screen>
|
||||
<command>nix-env</command> knows about the <varname>description</varname> field specifically:
|
||||
<screen>
|
||||
<prompt>$ </prompt>nix-env -qa hello --description
|
||||
hello-2.3 A program that produces a familiar, friendly greeting
|
||||
</screen>
|
||||
</para>
|
||||
<section xml:id="sec-standard-meta-attributes">
|
||||
<title>Standard meta-attributes</title>
|
||||
|
||||
<para>
|
||||
It is expected that each meta-attribute is one of the following:
|
||||
</para>
|
||||
|
||||
<variablelist>
|
||||
<varlistentry xml:id="var-meta-description">
|
||||
<term>
|
||||
<varname>description</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
A short (one-line) description of the package. This is shown by <command>nix-env -q --description</command> and also on the Nixpkgs release pages.
|
||||
</para>
|
||||
<para>
|
||||
Don’t include a period at the end. Don’t include newline characters. Capitalise the first character. For brevity, don’t repeat the name of package — just describe what it does.
|
||||
</para>
|
||||
<para>
|
||||
Wrong: <literal>"libpng is a library that allows you to decode PNG images."</literal>
|
||||
</para>
|
||||
<para>
|
||||
Right: <literal>"A library for decoding PNG images"</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-longDescription">
|
||||
<term>
|
||||
<varname>longDescription</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
An arbitrarily long description of the package.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-branch">
|
||||
<term>
|
||||
<varname>branch</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Release branch. Used to specify that a package is not going to receive updates that are not in this branch; for example, Linux kernel 3.0 is supposed to be updated to 3.0.X, not 3.1.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-homepage">
|
||||
<term>
|
||||
<varname>homepage</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
The package’s homepage. Example: <literal>https://www.gnu.org/software/hello/manual/</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-downloadPage">
|
||||
<term>
|
||||
<varname>downloadPage</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
The page where a link to the current version can be found. Example: <literal>https://ftp.gnu.org/gnu/hello/</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-changelog">
|
||||
<term>
|
||||
<varname>changelog</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
A link or a list of links to the location of Changelog for a package. A link may use expansion to refer to the correct changelog version. Example: <literal>"https://git.savannah.gnu.org/cgit/hello.git/plain/NEWS?h=v${version}"</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-license">
|
||||
<term>
|
||||
<varname>license</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
The license, or licenses, for the package. One from the attribute set defined in <link
|
||||
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix"> <filename>nixpkgs/lib/licenses.nix</filename></link>. At this moment using both a list of licenses and a single license is valid. If the license field is in the form of a list representation, then it means that parts of the package are licensed differently. Each license should preferably be referenced by their attribute. The non-list attribute value can also be a space delimited string representation of the contained attribute shortNames or spdxIds. The following are all valid examples:
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
Single license referenced by attribute (preferred) <literal>lib.licenses.gpl3Only</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Single license referenced by its attribute shortName (frowned upon) <literal>"gpl3Only"</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Single license referenced by its attribute spdxId (frowned upon) <literal>"GPL-3.0-only"</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Multiple licenses referenced by attribute (preferred) <literal>with lib.licenses; [ asl20 free ofl ]</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Multiple licenses referenced as a space delimited string of attribute shortNames (frowned upon) <literal>"asl20 free ofl"</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
For details, see <xref linkend='sec-meta-license'/>.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-maintainers">
|
||||
<term>
|
||||
<varname>maintainers</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
A list of the maintainers of this Nix expression. Maintainers are defined in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/maintainers/maintainer-list.nix"><filename>nixpkgs/maintainers/maintainer-list.nix</filename></link>. There is no restriction to becoming a maintainer, just add yourself to that list in a separate commit titled 'maintainers: add alice', and reference maintainers with <literal>maintainers = with lib.maintainers; [ alice bob ]</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-priority">
|
||||
<term>
|
||||
<varname>priority</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
The <emphasis>priority</emphasis> of the package, used by <command>nix-env</command> to resolve file name conflicts between packages. See the Nix manual page for <command>nix-env</command> for details. Example: <literal>"10"</literal> (a low-priority package).
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-platforms">
|
||||
<term>
|
||||
<varname>platforms</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
The list of Nix platform types on which the package is supported. Hydra builds packages according to the platform specified. If no platform is specified, the package does not have prebuilt binaries. An example is:
|
||||
<programlisting>
|
||||
meta.platforms = lib.platforms.linux;
|
||||
</programlisting>
|
||||
Attribute Set <varname>lib.platforms</varname> defines <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/systems/doubles.nix"> various common lists</link> of platforms types.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-tests">
|
||||
<term>
|
||||
<varname>tests</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<warning>
|
||||
<para>
|
||||
This attribute is special in that it is not actually under the <literal>meta</literal> attribute set but rather under the <literal>passthru</literal> attribute set. This is due to how <literal>meta</literal> attributes work, and the fact that they are supposed to contain only metadata, not derivations.
|
||||
</para>
|
||||
</warning>
|
||||
<para>
|
||||
An attribute set with as values tests. A test is a derivation, which builds successfully when the test passes, and fails to build otherwise. A derivation that is a test needs to have <literal>meta.timeout</literal> defined.
|
||||
</para>
|
||||
<para>
|
||||
The NixOS tests are available as <literal>nixosTests</literal> in parameters of derivations. For instance, the OpenSMTPD derivation includes lines similar to:
|
||||
<programlisting>
|
||||
{ /* ... */, nixosTests }:
|
||||
{
|
||||
# ...
|
||||
passthru.tests = {
|
||||
basic-functionality-and-dovecot-integration = nixosTests.opensmtpd;
|
||||
};
|
||||
}
|
||||
</programlisting>
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-timeout">
|
||||
<term>
|
||||
<varname>timeout</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
A timeout (in seconds) for building the derivation. If the derivation takes longer than this time to build, it can fail due to breaking the timeout. However, all computers do not have the same computing power, hence some builders may decide to apply a multiplicative factor to this value. When filling this value in, try to keep it approximately consistent with other values already present in <literal>nixpkgs</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-hydraPlatforms">
|
||||
<term>
|
||||
<varname>hydraPlatforms</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
The list of Nix platform types for which the Hydra instance at <literal>hydra.nixos.org</literal> will build the package. (Hydra is the Nix-based continuous build system.) It defaults to the value of <varname>meta.platforms</varname>. Thus, the only reason to set <varname>meta.hydraPlatforms</varname> is if you want <literal>hydra.nixos.org</literal> to build the package on a subset of <varname>meta.platforms</varname>, or not at all, e.g.
|
||||
<programlisting>
|
||||
meta.platforms = lib.platforms.linux;
|
||||
meta.hydraPlatforms = [];
|
||||
</programlisting>
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-broken">
|
||||
<term>
|
||||
<varname>broken</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
If set to <literal>true</literal>, the package is marked as “broken”, meaning that it won’t show up in <literal>nix-env -qa</literal>, and cannot be built or installed. Such packages should be removed from Nixpkgs eventually unless they are fixed.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry xml:id="var-meta-updateWalker">
|
||||
<term>
|
||||
<varname>updateWalker</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
If set to <literal>true</literal>, the package is tested to be updated correctly by the <literal>update-walker.sh</literal> script without additional settings. Such packages have <varname>meta.version</varname> set and their homepage (or the page specified by <varname>meta.downloadPage</varname>) contains a direct link to the package tarball.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
</section>
|
||||
<section xml:id="sec-meta-license">
|
||||
<title>Licenses</title>
|
||||
|
||||
<para>
|
||||
The <varname>meta.license</varname> attribute should preferrably contain a value from <varname>lib.licenses</varname> defined in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix"> <filename>nixpkgs/lib/licenses.nix</filename></link>, or in-place license description of the same format if the license is unlikely to be useful in another expression.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Although it's typically better to indicate the specific license, a few generic options are available:
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>lib.licenses.free</varname>, <varname>"free"</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Catch-all for free software licenses not listed above.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>lib.licenses.unfreeRedistributable</varname>, <varname>"unfree-redistributable"</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Unfree package that can be redistributed in binary form. That is, it’s legal to redistribute the <emphasis>output</emphasis> of the derivation. This means that the package can be included in the Nixpkgs channel.
|
||||
</para>
|
||||
<para>
|
||||
Sometimes proprietary software can only be redistributed unmodified. Make sure the builder doesn’t actually modify the original binaries; otherwise we’re breaking the license. For instance, the NVIDIA X11 drivers can be redistributed unmodified, but our builder applies <command>patchelf</command> to make them work. Thus, its license is <varname>"unfree"</varname> and it cannot be included in the Nixpkgs channel.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>lib.licenses.unfree</varname>, <varname>"unfree"</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Unfree package that cannot be redistributed. You can build it yourself, but you cannot redistribute the output of the derivation. Thus it cannot be included in the Nixpkgs channel.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname>lib.licenses.unfreeRedistributableFirmware</varname>, <varname>"unfree-redistributable-firmware"</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
This package supplies unfree, redistributable firmware. This is a separate value from <varname>unfree-redistributable</varname> because not everybody cares whether firmware is free.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
</para>
|
||||
</section>
|
||||
</chapter>
|
128
doc/stdenv/multiple-output.chapter.md
Normal file
@ -0,0 +1,128 @@
|
||||
# Multiple-output packages {#chap-multiple-output}
|
||||
|
||||
## Introduction {#sec-multiple-outputs-introduction}
|
||||
|
||||
The Nix language allows a derivation to produce multiple outputs, which is similar to what is utilized by other Linux distribution packaging systems. The outputs reside in separate Nix store paths, so they can be mostly handled independently of each other, including passing to build inputs, garbage collection or binary substitution. The exception is that building from source always produces all the outputs.
|
||||
|
||||
The main motivation is to save disk space by reducing runtime closure sizes; consequently, the sizes of substituted binaries are reduced as well. Splitting can be used to obtain more granular runtime dependencies; for example, the typical split moves development-only files into a separate output, as those are usually not needed at runtime. As a result, the closure sizes of many packages can be reduced to half or even much less.
|
||||
|
||||
::: note
|
||||
The reduction effects could instead be achieved by building the parts in completely separate derivations. That would often additionally reduce build-time closures, but such derivations tend to be much harder to write, as build systems typically assume all parts are built at once. This compromise approach of a single source package producing multiple binary packages is also often used by rpm and deb.
|
||||
:::
|
||||
|
||||
A number of attributes can be used to work with a derivation with multiple outputs. The attribute `outputs` is a list of strings, which are the names of the outputs. For each of these names, an identically named attribute is created, corresponding to that output. The attribute `meta.outputsToInstall` is used to determine the default set of outputs to install when using the derivation name unqualified.
|
||||
|
||||
## Installing a split package {#sec-multiple-outputs-installing}
|
||||
|
||||
When installing a package with multiple outputs, the package’s `meta.outputsToInstall` attribute determines which outputs are actually installed. `meta.outputsToInstall` is a list whose [default value installs binaries and the associated man pages](https://github.com/NixOS/nixpkgs/blob/f1680774340d5443a1409c3421ced84ac1163ba9/pkgs/stdenv/generic/make-derivation.nix#L310-L320). The following sections describe ways to install different outputs.
|
||||
|
||||
### Selecting outputs to install via NixOS {#sec-multiple-outputs-installing-nixos}
|
||||
|
||||
NixOS provides two ways to select the outputs to install for packages listed in `environment.systemPackages` (a combined sketch follows this list):
|
||||
|
||||
- The configuration option `environment.extraOutputsToInstall` is appended to each package’s `meta.outputsToInstall` attribute to determine the outputs to install. It can for example be used to install `info` documentation or debug symbols for all packages.
|
||||
|
||||
- The outputs can be listed as packages in `environment.systemPackages`. For example, the `"out"` and `"info"` outputs for the `coreutils` package can be installed by including `coreutils` and `coreutils.info` in `environment.systemPackages`.
|
||||
|
||||
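For illustration, a minimal NixOS configuration sketch combining both approaches (the package and output choices are arbitrary):

```nix
{ pkgs, ... }:

{
  # Additionally install the "info" output of every package listed below.
  environment.extraOutputsToInstall = [ "info" ];

  # coreutils plus its "info" output selected explicitly.
  environment.systemPackages = [ pkgs.coreutils pkgs.coreutils.info ];
}
```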
### Selecting outputs to install via `nix-env` {#sec-multiple-outputs-installing-nix-env}
|
||||
|
||||
`nix-env` lacks an easy way to select the outputs to install. When installing a package, `nix-env` always installs the outputs listed in `meta.outputsToInstall`, even when the user explicitly selects an output.
|
||||
|
||||
::: warning
|
||||
`nix-env` silently disregards the outputs selected by the user, and instead installs the outputs from `meta.outputsToInstall`. For example,
|
||||
|
||||
```ShellSession
|
||||
$ nix-env -iA nixpkgs.coreutils.info
|
||||
```
|
||||
|
||||
installs the `"out"` output (`coreutils.meta.outputsToInstall` is `[ "out" ]`) instead of the requested `"info"`.
|
||||
:::
|
||||
|
||||
The only recourse to select an output with `nix-env` is to override the package’s `meta.outputsToInstall`, using the functions described in <xref linkend="chap-overrides" />. For example, the following overlay adds the `"info"` output for the `coreutils` package:
|
||||
|
||||
```nix
|
||||
self: super:
|
||||
{
|
||||
coreutils = super.coreutils.overrideAttrs (oldAttrs: {
|
||||
meta = oldAttrs.meta // { outputsToInstall = oldAttrs.meta.outputsToInstall or [ "out" ] ++ [ "info" ]; };
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
## Using a split package {#sec-multiple-outputs-using-split-packages}
|
||||
|
||||
In the Nix language the individual outputs can be reached explicitly as attributes, e.g. `coreutils.info`, but the typical case is just using packages as build inputs.
|
||||
|
||||
When a multiple-output derivation gets into a build input of another derivation, the `dev` output is added if it exists, otherwise the first output is added. In addition to that, `propagatedBuildOutputs` of that package which by default contain `$outputBin` and `$outputLib` are also added. (See <xref linkend="multiple-output-file-type-groups" />.)
|
||||
|
||||
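As a hedged sketch of the default selection, assuming a hypothetical package that merely lists `curl` (a multiple-output package) as a build input:

```nix
stdenv.mkDerivation {
  pname = "uses-curl";
  version = "0.1";
  src = ./.;
  # curl has a "dev" output, so that output (headers, pkg-config files)
  # is what becomes available at build time here.
  buildInputs = [ curl ];
}
```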
In some cases it may be desirable to combine different outputs under a single store path. A function `symlinkJoin` can be used to do this. (Note that it may negate some closure size benefits of using a multiple-output package.)
|
||||
|
||||
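A sketch of such a combination, assuming `curl` with its usual outputs; the joined name is arbitrary:

```nix
pkgs.symlinkJoin {
  name = "curl-full";
  # Merge selected outputs of curl back into a single store path.
  paths = [ pkgs.curl.out pkgs.curl.dev pkgs.curl.man ];
}
```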
## Writing a split derivation {#sec-multiple-outputs-}
|
||||
|
||||
This section describes how to write a derivation that produces multiple outputs.
|
||||
|
||||
In nixpkgs there is a framework supporting multiple-output derivations. It tries to cover most cases with its default behavior. You can find the source in `<nixpkgs/pkgs/build-support/setup-hooks/multiple-outputs.sh>`; it is relatively readable. The whole machinery is triggered by defining the `outputs` attribute to contain the list of desired output names (strings).
|
||||
|
||||
```nix
|
||||
outputs = [ "bin" "dev" "out" "doc" ];
|
||||
```
|
||||
|
||||
Often such a single line is enough. For each output, an identically named environment variable is passed to the builder and contains the Nix store path for that output. Typically you also want to keep the main `out` output, as it catches any files that didn’t end up anywhere else.
|
||||
|
||||
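As a sketch of how these variables might be used in a custom install phase (the file names are hypothetical):

```nix
installPhase = ''
  runHook preInstall
  # $bin and $doc are set by the multiple-outputs machinery.
  install -Dm755 mytool $bin/bin/mytool
  mkdir -p $doc/share/doc/mytool
  cp -r manual $doc/share/doc/mytool
  runHook postInstall
'';
```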
::: note
|
||||
There is a special handling of the `debug` output, described at <xref linkend="stdenv-separateDebugInfo" />.
|
||||
:::
|
||||
|
||||
### “Binaries first” {#multiple-output-file-binaries-first-convention}
|
||||
|
||||
A commonly adopted convention in `nixpkgs` is that executables provided by the package are contained within its first output. This convention allows the dependent packages to reference the executables provided by packages in a uniform manner. For instance, provided with the knowledge that the `perl` package contains a `perl` executable it can be referenced as `${pkgs.perl}/bin/perl` within a Nix derivation that needs to execute a Perl script.
|
||||
|
||||
The `glibc` package is a deliberate single exception to the “binaries first” convention. `glibc` has `libs` as its first output, allowing the libraries provided by `glibc` to be referenced directly (e.g. `${stdenv.glibc}/lib/ld-linux-x86-64.so.2`). The executables provided by `glibc` can be accessed via its `bin` attribute (e.g. `${stdenv.glibc.bin}/bin/ldd`).
|
||||
|
||||
The reason `glibc` deviates from the convention is that referencing a library provided by `glibc` is a very common operation among Nix packages. For instance, third-party executables packaged by Nix are typically patched and relinked with the relevant version of the `glibc` libraries from Nix packages (please see the documentation on [patchelf](https://github.com/NixOS/patchelf/blob/master/README) for more details).
|
||||
|
||||
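A small sketch of the two access patterns described above:

```nix
{
  # Interpolating glibc itself yields its first output (the libraries),
  dynamicLinker = "${pkgs.glibc}/lib/ld-linux-x86-64.so.2";
  # while executables are reached through the bin output.
  lddPath = "${pkgs.glibc.bin}/bin/ldd";
}
```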
### File type groups {#multiple-output-file-type-groups}
|
||||
|
||||
The support code currently recognizes some particular kinds of outputs and either instructs the build system of the package to put files into their desired outputs or moves the files during the fixup phase. Each group of file types has an `outputFoo` variable specifying the output name where they should go. If that variable isn’t defined by the derivation writer, it is guessed: a default output name is chosen, falling back to other possibilities if that output isn’t defined.
|
||||
|
||||
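For example, a derivation can pin one of these groups to a particular output; a hedged sketch:

```nix
{
  outputs = [ "bin" "dev" "out" "doc" ];
  # Ship man pages together with the other documentation rather than
  # letting them fall back to $outputBin.
  outputMan = "doc";
}
```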
#### `$outputDev`
|
||||
|
||||
is for development-only files. These include C(++) headers (`include/`), pkg-config (`lib/pkgconfig/`), cmake (`lib/cmake/`) and aclocal files (`share/aclocal/`). They go to `dev` or `out` by default.
|
||||
|
||||
#### `$outputBin`
|
||||
|
||||
is meant for user-facing binaries, typically residing in `bin/`. They go to `bin` or `out` by default.
|
||||
|
||||
#### `$outputLib`
|
||||
|
||||
is meant for libraries, typically residing in `lib/` and `libexec/`. They go to `lib` or `out` by default.
|
||||
|
||||
#### `$outputDoc`
|
||||
|
||||
is for user documentation, typically residing in `share/doc/`. It goes to `doc` or `out` by default.
|
||||
|
||||
#### `$outputDevdoc`
|
||||
|
||||
is for _developer_ documentation. Currently this covers gtk-doc and devhelp books, typically residing in `share/gtk-doc/` and `share/devhelp/`. It goes to `devdoc` or is removed (!) by default, because e.g. gtk-doc tends to be rather large and completely unused by nixpkgs users.
|
||||
|
||||
#### `$outputMan`
|
||||
|
||||
is for man pages (except for section 3), typically residing in `share/man/man[0-9]/`. They go to `man` or `$outputBin` by default.
|
||||
|
||||
#### `$outputDevman`
|
||||
|
||||
is for section 3 man pages, typically residing in `share/man/man3/`. They go to `devman` or `$outputMan` by default.
|
||||
|
||||
#### `$outputInfo`
|
||||
|
||||
is for info pages, typically residing in `share/info/`. They go to `info` or `$outputBin` by default.
|
||||
|
||||
### Common caveats {#sec-multiple-outputs-caveats}
|
||||
|
||||
- Some configure scripts don’t like some of the parameters passed by default by the framework, e.g. `--docdir=/foo/bar`. You can disable this by setting `setOutputFlags = false;`.
|
||||
|
||||
- The outputs of a single derivation can retain references to each other, but note that circular references are not allowed. (And each strongly-connected component would act as a single output anyway.)
|
||||
|
||||
- Most split packages contain their core functionality in libraries. These libraries tend to refer to various kinds of data that typically end up in `out`, e.g. locale strings, so there is often no advantage in separating the libraries into `lib`, as keeping them in `out` is easier.
|
||||
|
||||
- Some packages have hidden assumptions on install paths, which complicates splitting.
|
@ -1,261 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE chapter [
|
||||
<!ENTITY ndash "–"> <!-- @vcunat likes to use this one ;-) -->
|
||||
]>
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xml:id="chap-multiple-output">
|
||||
<title>Multiple-output packages</title>
|
||||
<section xml:id="sec-multiple-outputs-introduction">
|
||||
<title>Introduction</title>
|
||||
|
||||
<para>
|
||||
The Nix language allows a derivation to produce multiple outputs, which is similar to what is utilized by other Linux distribution packaging systems. The outputs reside in separate Nix store paths, so they can be mostly handled independently of each other, including passing to build inputs, garbage collection or binary substitution. The exception is that building from source always produces all the outputs.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The main motivation is to save disk space by reducing runtime closure sizes; consequently also sizes of substituted binaries get reduced. Splitting can be used to have more granular runtime dependencies, for example the typical reduction is to split away development-only files, as those are typically not needed during runtime. As a result, closure sizes of many packages can get reduced to a half or even much less.
|
||||
</para>
|
||||
|
||||
<note>
|
||||
<para>
|
||||
The reduction effects could be instead achieved by building the parts in completely separate derivations. That would often additionally reduce build-time closures, but it tends to be much harder to write such derivations, as build systems typically assume all parts are being built at once. This compromise approach of single source package producing multiple binary packages is also utilized often by rpm and deb.
|
||||
</para>
|
||||
</note>
|
||||
|
||||
<para>
|
||||
A number of attributes can be used to work with a derivation with multiple outputs. The attribute <varname>outputs</varname> is a list of strings, which are the names of the outputs. For each of these names, an identically named attribute is created, corresponding to that output. The attribute <varname>meta.outputsToInstall</varname> is used to determine the default set of outputs to install when using the derivation name unqualified.
|
||||
</para>
|
||||
</section>
|
||||
<section xml:id="sec-multiple-outputs-installing">
|
||||
<title>Installing a split package</title>
|
||||
|
||||
<para>
|
||||
When installing a package with multiple outputs, the package's <varname>meta.outputsToInstall</varname> attribute determines which outputs are actually installed. <varname>meta.outputsToInstall</varname> is a list whose <link xlink:href="https://github.com/NixOS/nixpkgs/blob/f1680774340d5443a1409c3421ced84ac1163ba9/pkgs/stdenv/generic/make-derivation.nix#L310-L320">default installs binaries and the associated man pages</link>. The following sections describe ways to install different outputs.
|
||||
</para>
|
||||
|
||||
<section xml:id="sec-multiple-outputs-installing-nixos">
|
||||
<title>Selecting outputs to install via NixOS</title>
|
||||
|
||||
<para>
|
||||
NixOS provides two ways to select the outputs to install for packages listed in <varname>environment.systemPackages</varname>:
|
||||
</para>
|
||||
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
The configuration option <varname>environment.extraOutputsToInstall</varname> is appended to each package's <varname>meta.outputsToInstall</varname> attribute to determine the outputs to install. It can for example be used to install <literal>info</literal> documentation or debug symbols for all packages.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The outputs can be listed as packages in <varname>environment.systemPackages</varname>. For example, the <literal>"out"</literal> and <literal>"info"</literal> outputs for the <varname>coreutils</varname> package can be installed by including <varname>coreutils</varname> and <varname>coreutils.info</varname> in <varname>environment.systemPackages</varname>.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
|
||||
<section xml:id="sec-multiple-outputs-installing-nix-env">
|
||||
<title>Selecting outputs to install via <command>nix-env</command></title>
|
||||
|
||||
<para>
|
||||
<command>nix-env</command> lacks an easy way to select the outputs to install. When installing a package, <command>nix-env</command> always installs the outputs listed in <varname>meta.outputsToInstall</varname>, even when the user explicitly selects an output.
|
||||
</para>
|
||||
|
||||
<warning>
|
||||
<para>
|
||||
<command>nix-env</command> silenty disregards the outputs selected by the user, and instead installs the outputs from <varname>meta.outputsToInstall</varname>. For example,
|
||||
</para>
|
||||
<screen><prompt>$ </prompt>nix-env -iA nixpkgs.coreutils.info</screen>
|
||||
<para>
|
||||
installs the <literal>"out"</literal> output (<varname>coreutils.meta.outputsToInstall</varname> is <literal>[ "out" ]</literal>) instead of the requested <literal>"info"</literal>.
|
||||
</para>
|
||||
</warning>
|
||||
|
||||
<para>
|
||||
The only recourse to select an output with <command>nix-env</command> is to override the package's <varname>meta.outputsToInstall</varname>, using the functions described in <xref linkend="chap-overrides" />. For example, the following overlay adds the <literal>"info"</literal> output for the <varname>coreutils</varname> package:
|
||||
</para>
|
||||
|
||||
<programlisting>self: super:
|
||||
{
|
||||
coreutils = super.coreutils.overrideAttrs (oldAttrs: {
|
||||
meta = oldAttrs.meta // { outputsToInstall = oldAttrs.meta.outputsToInstall or [ "out" ] ++ [ "info" ]; };
|
||||
});
|
||||
}
|
||||
</programlisting>
|
||||
</section>
|
||||
</section>
|
||||
<section xml:id="sec-multiple-outputs-using-split-packages">
|
||||
<title>Using a split package</title>
|
||||
|
||||
<para>
|
||||
In the Nix language the individual outputs can be reached explicitly as attributes, e.g. <varname>coreutils.info</varname>, but the typical case is just using packages as build inputs.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
When a multiple-output derivation gets into a build input of another derivation, the <varname>dev</varname> output is added if it exists, otherwise the first output is added. In addition to that, <varname>propagatedBuildOutputs</varname> of that package which by default contain <varname>$outputBin</varname> and <varname>$outputLib</varname> are also added. (See <xref linkend="multiple-output-file-type-groups" />.)
|
||||
</para>
|
||||
|
||||
<para>
|
||||
In some cases it may be desirable to combine different outputs under a single store path. A function <literal>symlinkJoin</literal> can be used to do this. (Note that it may negate some closure size benefits of using a multiple-output package.)
|
||||
</para>
|
||||
</section>
|
||||
<section xml:id="sec-multiple-outputs-">
|
||||
<title>Writing a split derivation</title>
|
||||
|
||||
<para>
|
||||
Here you find how to write a derivation that produces multiple outputs.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
In nixpkgs there is a framework supporting multiple-output derivations. It tries to cover most cases by default behavior. You can find the source separated in <<filename>nixpkgs/pkgs/build-support/setup-hooks/multiple-outputs.sh</filename>>; it's relatively well-readable. The whole machinery is triggered by defining the <varname>outputs</varname> attribute to contain the list of desired output names (strings).
|
||||
</para>
|
||||
|
||||
<programlisting>outputs = [ "bin" "dev" "out" "doc" ];</programlisting>
|
||||
|
||||
<para>
|
||||
Often such a single line is enough. For each output an equally named environment variable is passed to the builder and contains the path in nix store for that output. Typically you also want to have the main <varname>out</varname> output, as it catches any files that didn't get elsewhere.
|
||||
</para>
|
||||
|
||||
<note>
|
||||
<para>
|
||||
There is a special handling of the <varname>debug</varname> output, described at <xref linkend="stdenv-separateDebugInfo" />.
|
||||
</para>
|
||||
</note>
|
||||
|
||||
<section xml:id="multiple-output-file-binaries-first-convention">
|
||||
<title><quote>Binaries first</quote></title>
|
||||
|
||||
<para>
|
||||
A commonly adopted convention in <literal>nixpkgs</literal> is that executables provided by the package are contained within its first output. This convention allows the dependent packages to reference the executables provided by packages in a uniform manner. For instance, provided with the knowledge that the <literal>perl</literal> package contains a <literal>perl</literal> executable it can be referenced as <literal>${pkgs.perl}/bin/perl</literal> within a Nix derivation that needs to execute a Perl script.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The <literal>glibc</literal> package is a deliberate single exception to the <quote>binaries first</quote> convention. The <literal>glibc</literal> has <literal>libs</literal> as its first output allowing the libraries provided by <literal>glibc</literal> to be referenced directly (e.g. <literal>${stdenv.glibc}/lib/ld-linux-x86-64.so.2</literal>). The executables provided by <literal>glibc</literal> can be accessed via its <literal>bin</literal> attribute (e.g. <literal>${stdenv.glibc.bin}/bin/ldd</literal>).
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The reason for why <literal>glibc</literal> deviates from the convention is because referencing a library provided by <literal>glibc</literal> is a very common operation among Nix packages. For instance, third-party executables packaged by Nix are typically patched and relinked with the relevant version of <literal>glibc</literal> libraries from Nix packages (please see the documentation on <link xlink:href="https://github.com/NixOS/patchelf/blob/master/README">patchelf</link> for more details).
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section xml:id="multiple-output-file-type-groups">
|
||||
<title>File type groups</title>
|
||||
|
||||
<para>
|
||||
The support code currently recognizes some particular kinds of outputs and either instructs the build system of the package to put files into their desired outputs or it moves the files during the fixup phase. Each group of file types has an <varname>outputFoo</varname> variable specifying the output name where they should go. If that variable isn't defined by the derivation writer, it is guessed – a default output name is defined, falling back to other possibilities if the output isn't defined.
|
||||
</para>
|
||||
|
||||
<variablelist>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname> $outputDev</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
is for development-only files. These include C(++) headers (<filename>include/</filename>), pkg-config (<filename>lib/pkgconfig/</filename>), cmake (<filename>lib/cmake/</filename>) and aclocal files (<varname>share/aclocal/</varname>). They go to <varname>dev</varname> or <varname>out</varname> by default.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname> $outputBin</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
is meant for user-facing binaries, typically residing in <filename>bin/</filename>. They go to <varname>bin</varname> or <varname>out</varname> by default.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname> $outputLib</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
is meant for libraries, typically residing in <filename>lib/</filename> and <filename>libexec/</filename>. They go to <varname>lib</varname> or <varname>out</varname> by default.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname> $outputDoc</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
is for user documentation, typically residing in <filename>share/doc/</filename>. It goes to <varname>doc</varname> or <varname>out</varname> by default.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname> $outputDevdoc</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
is for <emphasis>developer</emphasis> documentation. Currently we count gtk-doc and devhelp books, typically residing in <filename>share/gtk-doc/</filename> and <filename>share/devhelp/</filename>, in there. It goes to <varname>devdoc</varname> or is removed (!) by default. This is because e.g. gtk-doc tends to be rather large and completely unused by nixpkgs users.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname> $outputMan</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
is for man pages (except for section 3), typically residing in <filename>share/man/man[0-9]/</filename>. They go to <varname>man</varname> or <varname>$outputBin</varname> by default.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname> $outputDevman</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
is for section 3 man pages, typically residing in <filename>share/man/man3/</filename>. They go to <varname>devman</varname> or <varname>$outputMan</varname> by default.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>
|
||||
<varname> $outputInfo</varname>
|
||||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
is for info pages, typically residing in <filename>share/info/</filename>. They go to <varname>info</varname> or <varname>$outputBin</varname> by default.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
</section>
|
||||
|
||||
<section xml:id="sec-multiple-outputs-caveats">
|
||||
<title>Common caveats</title>
|
||||
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
Some configure scripts don't like some of the parameters passed by default by the framework, e.g. <literal>--docdir=/foo/bar</literal>. You can disable this by setting <literal>setOutputFlags = false;</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The outputs of a single derivation can retain references to each other, but note that circular references are not allowed. (And each strongly-connected component would act as a single output anyway.)
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Most of split packages contain their core functionality in libraries. These libraries tend to refer to various kind of data that typically gets into <varname>out</varname>, e.g. locale strings, so there is often no advantage in separating the libraries into <varname>lib</varname>, as keeping them in <varname>out</varname> is easier.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Some packages have hidden assumptions on install paths, which complicates splitting.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
</section>
|
||||
<!--Writing a split derivation-->
|
||||
</chapter>
|
1215
doc/stdenv/stdenv.chapter.md
Normal file
File diff suppressed because it is too large
@ -230,7 +230,7 @@ self: super:
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For BLAS/LAPACK switching to work correctly, all packages must depend on <literal>blas</literal> or <literal>lapack</literal>. This ensures that only one BLAS/LAPACK library is used at one time. There are two versions versions of BLAS/LAPACK currently in the wild, <literal>LP64</literal> (integer size = 32 bits) and <literal>ILP64</literal> (integer size = 64 bits). Some software needs special flags or patches to work with <literal>ILP64</literal>. You can check if <literal>ILP64</literal> is used in Nixpkgs with <varname>blas.isILP64</varname> and <varname>lapack.isILP64</varname>. Some software does NOT work with <literal>ILP64</literal>, and derivations need to specify an assertion to prevent this. You can prevent <literal>ILP64</literal> from being used with the following:
|
||||
For BLAS/LAPACK switching to work correctly, all packages must depend on <literal>blas</literal> or <literal>lapack</literal>. This ensures that only one BLAS/LAPACK library is used at one time. There are two versions of BLAS/LAPACK currently in the wild, <literal>LP64</literal> (integer size = 32 bits) and <literal>ILP64</literal> (integer size = 64 bits). Some software needs special flags or patches to work with <literal>ILP64</literal>. You can check if <literal>ILP64</literal> is used in Nixpkgs with <varname>blas.isILP64</varname> and <varname>lapack.isILP64</varname>. Some software does NOT work with <literal>ILP64</literal>, and derivations need to specify an assertion to prevent this. You can prevent <literal>ILP64</literal> from being used with the following:
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
|
@ -603,6 +603,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) ({
|
||||
free = false;
|
||||
};
|
||||
|
||||
odbl = spdx {
|
||||
spdxId = "ODbL-1.0";
|
||||
fullName = "Open Data Commons Open Database License v1.0";
|
||||
};
|
||||
|
||||
ofl = spdx {
|
||||
spdxId = "OFL-1.1";
|
||||
fullName = "SIL Open Font License 1.1";
|
||||
|
@ -361,6 +361,17 @@ rec {
|
||||
*/
|
||||
byName = attr: f: modules:
|
||||
foldl' (acc: module:
|
||||
if !(builtins.isAttrs module.${attr}) then
|
||||
throw ''
|
||||
You're trying to declare a value of type `${builtins.typeOf module.${attr}}'
|
||||
rather than an attribute-set for the option
|
||||
`${builtins.concatStringsSep "." prefix}'!
|
||||
|
||||
This usually happens if `${builtins.concatStringsSep "." prefix}' has option
|
||||
definitions inside that are not matched. Please check how to properly define
|
||||
this option by e.g. referring to `man 5 configuration.nix'!
|
||||
''
|
||||
else
|
||||
acc // (mapAttrs (n: v:
|
||||
(acc.${n} or []) ++ f module v
|
||||
) module.${attr}
|
||||
|
@ -107,6 +107,11 @@ rec {
|
||||
powerpc64le = "ppc64le";
|
||||
}.${final.parsed.cpu.name} or final.parsed.cpu.name;
|
||||
|
||||
darwinArch = {
|
||||
armv7a = "armv7";
|
||||
aarch64 = "arm64";
|
||||
}.${final.parsed.cpu.name} or final.parsed.cpu.name;
|
||||
|
||||
emulator = pkgs: let
|
||||
qemu-user = pkgs.qemu.override {
|
||||
smartcardSupport = false;
|
||||
|
@ -303,14 +303,12 @@ rec {
|
||||
preferBuiltin = true;
|
||||
target = "zImage";
|
||||
extraConfig = ''
|
||||
# Serial port for Raspberry Pi 3. Upstream forgot to add it to the ARMv7 defconfig.
|
||||
# Serial port for Raspberry Pi 3. Wasn't included in ARMv7 defconfig
|
||||
# until 4.17.
|
||||
SERIAL_8250_BCM2835AUX y
|
||||
SERIAL_8250_EXTENDED y
|
||||
SERIAL_8250_SHARE_IRQ y
|
||||
|
||||
# Fix broken sunxi-sid nvmem driver.
|
||||
TI_CPTS y
|
||||
|
||||
# Hangs ODROID-XU4
|
||||
ARM_BIG_LITTLE_CPUIDLE n
|
||||
|
||||
|
@ -169,7 +169,7 @@ checkConfigOutput "foo" config.submodule.foo ./declare-submoduleWith-special.nix
|
||||
## shorthandOnlyDefines config behaves as expected
|
||||
checkConfigOutput "true" config.submodule.config ./declare-submoduleWith-shorthand.nix ./define-submoduleWith-shorthand.nix
|
||||
checkConfigError 'is not of type `boolean' config.submodule.config ./declare-submoduleWith-shorthand.nix ./define-submoduleWith-noshorthand.nix
|
||||
checkConfigError 'value is a boolean while a set was expected' config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-shorthand.nix
|
||||
checkConfigError "You're trying to declare a value of type \`bool'\nrather than an attribute-set for the option" config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-shorthand.nix
|
||||
checkConfigOutput "true" config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-noshorthand.nix
|
||||
|
||||
## submoduleWith should merge all modules in one swoop
|
||||
|
@ -746,6 +746,12 @@
|
||||
githubId = 1296771;
|
||||
name = "Anders Riutta";
|
||||
};
|
||||
armijnhemel = {
|
||||
email = "armijn@tjaldur.nl";
|
||||
github = "armijnhemel";
|
||||
githubId = 10587952;
|
||||
name = "Armijn Hemel";
|
||||
};
|
||||
arnarg = {
|
||||
email = "arnarg@fastmail.com";
|
||||
github = "arnarg";
|
||||
@ -1859,6 +1865,12 @@
|
||||
fingerprint = "68B8 0D57 B2E5 4AC3 EC1F 49B0 B37E 0F23 7101 6A4C";
|
||||
}];
|
||||
};
|
||||
collares = {
|
||||
email = "mauricio@collares.org";
|
||||
github = "collares";
|
||||
githubId = 244239;
|
||||
name = "Mauricio Collares";
|
||||
};
|
||||
copumpkin = {
|
||||
email = "pumpkingod@gmail.com";
|
||||
github = "copumpkin";
|
||||
@ -2889,6 +2901,12 @@
|
||||
githubId = 3787281;
|
||||
name = "Erik Rybakken";
|
||||
};
|
||||
erin = {
|
||||
name = "Erin van der Veen";
|
||||
email = "erin@erinvanderveen.nl";
|
||||
github = "ErinvanderVeen";
|
||||
githubId = 10973664;
|
||||
};
|
||||
erosennin = {
|
||||
email = "ag@sologoc.com";
|
||||
github = "erosennin";
|
||||
@ -2924,7 +2942,7 @@
|
||||
name = "Adam Copp";
|
||||
};
|
||||
ethancedwards8 = {
|
||||
email = "ethancarteredwards@gmail.com";
|
||||
email = "ethan@ethancedwards.com";
|
||||
github = "ethancedwards8";
|
||||
githubId = 60861925;
|
||||
name = "Ethan Carter Edwards";
|
||||
@ -3181,6 +3199,12 @@
|
||||
githubId = 19961516;
|
||||
name = "Felix Weilbach";
|
||||
};
|
||||
fliegendewurst = {
|
||||
email = "arne.keller@posteo.de";
|
||||
github = "FliegendeWurst";
|
||||
githubId = 12560461;
|
||||
name = "Arne Keller";
|
||||
};
|
||||
flokli = {
|
||||
email = "flokli@flokli.de";
|
||||
github = "flokli";
|
||||
@ -3615,6 +3639,12 @@
|
||||
githubId = 343415;
|
||||
name = "Greg Roodt";
|
||||
};
|
||||
gtrunsec = {
|
||||
email = "gtrunsec@hardenedlinux.org";
|
||||
github = "GTrunSec";
|
||||
githubId = 21156405;
|
||||
name = "GuangTao Zhang";
|
||||
};
|
||||
guibou = {
|
||||
email = "guillaum.bouchard@gmail.com";
|
||||
github = "guibou";
|
||||
@ -3991,6 +4021,12 @@
|
||||
githubId = 61913481;
|
||||
name = "Mat Marini";
|
||||
};
|
||||
illustris = {
|
||||
email = "me@illustris.tech";
|
||||
github = "illustris";
|
||||
githubId = 3948275;
|
||||
name = "Harikrishnan R";
|
||||
};
|
||||
ilya-fedin = {
|
||||
email = "fedin-ilja2010@ya.ru";
|
||||
github = "ilya-fedin";
|
||||
@ -4147,6 +4183,12 @@
|
||||
github = "j0hax";
|
||||
githubId = 3802620;
|
||||
};
|
||||
j4m3s = {
|
||||
name = "James Landrein";
|
||||
email = "github@j4m3s.eu";
|
||||
github = "j4m3s-s";
|
||||
githubId = 9413812;
|
||||
};
|
||||
jacg = {
|
||||
name = "Jacek Generowicz";
|
||||
email = "jacg@my-post-office.net";
|
||||
@ -4171,6 +4213,12 @@
|
||||
githubId = 175537;
|
||||
name = "Johannes Lötzsch";
|
||||
};
|
||||
jackgerrits = {
|
||||
email = "jack@jackgerrits.com";
|
||||
github = "jackgerrits";
|
||||
githubId = 7558482;
|
||||
name = "Jack Gerrits";
|
||||
};
|
||||
jagajaga = {
|
||||
email = "ars.seroka@gmail.com";
|
||||
github = "jagajaga";
|
||||
@ -5524,6 +5572,12 @@
|
||||
githubId = 7622248;
|
||||
name = "Sebastian Zivota";
|
||||
};
|
||||
locallycompact = {
|
||||
email = "dan.firth@homotopic.tech";
|
||||
github = "locallycompact";
|
||||
githubId = 1267527;
|
||||
name = "Daniel Firth";
|
||||
};
|
||||
lopsided98 = {
|
||||
email = "benwolsieffer@gmail.com";
|
||||
github = "lopsided98";
|
||||
@ -6289,6 +6343,12 @@
|
||||
githubId = 1776903;
|
||||
name = "Andrew Abbott";
|
||||
};
|
||||
mitchmindtree = {
|
||||
email = "mail@mitchellnordine.com";
|
||||
github = "mitchmindtree";
|
||||
githubId = 4587373;
|
||||
name = "Mitchell Nordine";
|
||||
};
|
||||
mjanczyk = {
|
||||
email = "m@dragonvr.pl";
|
||||
github = "mjanczyk";
|
||||
@ -6743,6 +6803,12 @@
|
||||
githubId = 1488603;
|
||||
name = "François Espinet";
|
||||
};
|
||||
neverbehave = {
|
||||
email = "i@never.pet";
|
||||
github = "NeverBehave";
|
||||
githubId = 17120571;
|
||||
name = "Xinhao Luo";
|
||||
};
|
||||
nikitavoloboev = {
|
||||
email = "nikita.voloboev@gmail.com";
|
||||
github = "nikitavoloboev";
|
||||
@ -6839,6 +6905,12 @@
|
||||
githubId = 16385648;
|
||||
name = "Niko Pavlinek";
|
||||
};
|
||||
nixinator = {
|
||||
email = "33lockdown33@protonmail.com";
|
||||
github = "nixinator";
|
||||
githubId = 66913205;
|
||||
name = "Rick Sanchez";
|
||||
};
|
||||
nixy = {
|
||||
email = "nixy@nixy.moe";
|
||||
github = "nixy";
|
||||
@ -6981,6 +7053,12 @@
|
||||
githubId = 3359345;
|
||||
name = "obadz";
|
||||
};
|
||||
obsidian-systems-maintenance = {
|
||||
name = "Obsidian Systems Maintenance";
|
||||
email = "maintainer@obsidian.systems";
|
||||
github = "obsidian-systems-maintenance";
|
||||
githubId = 80847921;
|
||||
};
|
||||
odi = {
|
||||
email = "oliver.dunkl@gmail.com";
|
||||
github = "odi";
|
||||
@ -9761,6 +9839,16 @@
|
||||
githubId = 16151097;
|
||||
name = "Valentin Gehrke";
|
||||
};
|
||||
tuxinaut = {
|
||||
email = "trash4you@tuxinaut.de";
|
||||
github = "tuxinaut";
|
||||
githubId = 722482;
|
||||
name = "Denny Schäfer";
|
||||
keys = [{
|
||||
longkeyid = "rsa4096/0xB057455D1E567270";
|
||||
fingerprint = "C752 0E49 4D92 1740 D263 C467 B057 455D 1E56 7270";
|
||||
}];
|
||||
};
|
||||
tv = {
|
||||
email = "tv@krebsco.de";
|
||||
github = "4z3";
|
||||
@ -10104,6 +10192,12 @@
|
||||
email = "vq@erq.se";
|
||||
name = "Daniel Nilsson";
|
||||
};
|
||||
vrinek = {
|
||||
email = "vrinek@hey.com";
|
||||
github = "vrinek";
|
||||
name = "Kostas Karachalios";
|
||||
githubId = 81346;
|
||||
};
|
||||
vrthra = {
|
||||
email = "rahul@gopinath.org";
|
||||
github = "vrthra";
|
||||
@ -10156,6 +10250,12 @@
|
||||
email = "kirill.wedens@gmail.com";
|
||||
name = "wedens";
|
||||
};
|
||||
weihua = {
|
||||
email = "luwh364@gmail.com";
|
||||
github = "weihua-lu";
|
||||
githubId = 9002575;
|
||||
name = "Weihua Lu";
|
||||
};
|
||||
wheelsandmetal = {
|
||||
email = "jakob@schmutz.co.uk";
|
||||
github = "wheelsandmetal";
|
||||
@ -10712,6 +10812,12 @@
|
||||
githubId = 3674056;
|
||||
name = "Asad Saeeduddin";
|
||||
};
|
||||
matthewcroughan = {
|
||||
email = "matt@croughan.sh";
|
||||
github = "matthewcroughan";
|
||||
githubId = 26458780;
|
||||
name = "Matthew Croughan";
|
||||
};
|
||||
ngerstle = {
|
||||
name = "Nicholas Gerstle";
|
||||
email = "ngerstle@gmail.com";
|
||||
|
526
maintainers/scripts/pluginupdate.py
Normal file
@ -0,0 +1,526 @@
|
||||
# Used by pkgs/misc/vim-plugins/update.py and pkgs/applications/editors/kakoune/plugins/update.py
|
||||
|
||||
# format:
|
||||
# $ nix run nixpkgs.python3Packages.black -c black update.py
|
||||
# type-check:
|
||||
# $ nix run nixpkgs.python3Packages.mypy -c mypy update.py
|
||||
# linted:
|
||||
# $ nix run nixpkgs.python3Packages.flake8 -c flake8 --ignore E501,E265 update.py
|
||||
|
||||
import argparse
|
||||
import functools
|
||||
import http
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
import urllib.error
|
||||
import urllib.parse
|
||||
import urllib.request
|
||||
import xml.etree.ElementTree as ET
|
||||
from datetime import datetime
|
||||
from functools import wraps
|
||||
from multiprocessing.dummy import Pool
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Tuple, Union, Any, Callable
|
||||
from urllib.parse import urljoin, urlparse
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
import git
|
||||
|
||||
ATOM_ENTRY = "{http://www.w3.org/2005/Atom}entry" # " vim gets confused here
|
||||
ATOM_LINK = "{http://www.w3.org/2005/Atom}link" # "
|
||||
ATOM_UPDATED = "{http://www.w3.org/2005/Atom}updated" # "
|
||||
|
||||
|
||||
def retry(ExceptionToCheck: Any, tries: int = 4, delay: float = 3, backoff: float = 2):
|
||||
"""Retry calling the decorated function using an exponential backoff.
|
||||
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
|
||||
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
|
||||
(BSD licensed)
|
||||
:param ExceptionToCheck: the exception on which to retry
|
||||
:param tries: number of times to try (not retry) before giving up
|
||||
:param delay: initial delay between retries in seconds
|
||||
:param backoff: backoff multiplier e.g. value of 2 will double the delay
|
||||
each retry
|
||||
"""
|
||||
|
||||
def deco_retry(f: Callable) -> Callable:
|
||||
@wraps(f)
|
||||
def f_retry(*args: Any, **kwargs: Any) -> Any:
|
||||
mtries, mdelay = tries, delay
|
||||
while mtries > 1:
|
||||
try:
|
||||
return f(*args, **kwargs)
|
||||
except ExceptionToCheck as e:
|
||||
print(f"{str(e)}, Retrying in {mdelay} seconds...")
|
||||
time.sleep(mdelay)
|
||||
mtries -= 1
|
||||
mdelay *= backoff
|
||||
return f(*args, **kwargs)
|
||||
|
||||
return f_retry # true decorator
|
||||
|
||||
return deco_retry
|
||||
|
||||
|
||||
def make_request(url: str) -> urllib.request.Request:
|
||||
token = os.getenv("GITHUB_API_TOKEN")
|
||||
headers = {}
|
||||
if token is not None:
|
||||
headers["Authorization"] = f"token {token}"
|
||||
return urllib.request.Request(url, headers=headers)
|
||||
|
||||
|
||||
class Repo:
|
||||
def __init__(
|
||||
self, owner: str, name: str, branch: str, alias: Optional[str]
|
||||
) -> None:
|
||||
self.owner = owner
|
||||
self.name = name
|
||||
self.branch = branch
|
||||
self.alias = alias
|
||||
self.redirect: Dict[str, str] = {}
|
||||
|
||||
def url(self, path: str) -> str:
|
||||
return urljoin(f"https://github.com/{self.owner}/{self.name}/", path)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"Repo({self.owner}, {self.name})"
|
||||
|
||||
@retry(urllib.error.URLError, tries=4, delay=3, backoff=2)
|
||||
def has_submodules(self) -> bool:
|
||||
try:
|
||||
req = make_request(self.url(f"blob/{self.branch}/.gitmodules"))
|
||||
urllib.request.urlopen(req, timeout=10).close()
|
||||
except urllib.error.HTTPError as e:
|
||||
if e.code == 404:
|
||||
return False
|
||||
else:
|
||||
raise
|
||||
return True
|
||||
|
||||
@retry(urllib.error.URLError, tries=4, delay=3, backoff=2)
|
||||
def latest_commit(self) -> Tuple[str, datetime]:
|
||||
commit_url = self.url(f"commits/{self.branch}.atom")
|
||||
commit_req = make_request(commit_url)
|
||||
with urllib.request.urlopen(commit_req, timeout=10) as req:
|
||||
self.check_for_redirect(commit_url, req)
|
||||
xml = req.read()
|
||||
root = ET.fromstring(xml)
|
||||
latest_entry = root.find(ATOM_ENTRY)
|
||||
assert latest_entry is not None, f"No commits found in repository {self}"
|
||||
commit_link = latest_entry.find(ATOM_LINK)
|
||||
assert commit_link is not None, f"No link tag found feed entry {xml}"
|
||||
url = urlparse(commit_link.get("href"))
|
||||
updated_tag = latest_entry.find(ATOM_UPDATED)
|
||||
assert (
|
||||
updated_tag is not None and updated_tag.text is not None
|
||||
), f"No updated tag found feed entry {xml}"
|
||||
updated = datetime.strptime(updated_tag.text, "%Y-%m-%dT%H:%M:%SZ")
|
||||
return Path(str(url.path)).name, updated
|
||||
|
||||
def check_for_redirect(self, url: str, req: http.client.HTTPResponse):
|
||||
response_url = req.geturl()
|
||||
if url != response_url:
|
||||
new_owner, new_name = (
|
||||
urllib.parse.urlsplit(response_url).path.strip("/").split("/")[:2]
|
||||
)
|
||||
end_line = "\n" if self.alias is None else f" as {self.alias}\n"
|
||||
plugin_line = "{owner}/{name}" + end_line
|
||||
|
||||
old_plugin = plugin_line.format(owner=self.owner, name=self.name)
|
||||
new_plugin = plugin_line.format(owner=new_owner, name=new_name)
|
||||
self.redirect[old_plugin] = new_plugin
|
||||
|
||||
def prefetch_git(self, ref: str) -> str:
|
||||
data = subprocess.check_output(
|
||||
["nix-prefetch-git", "--fetch-submodules", self.url(""), ref]
|
||||
)
|
||||
return json.loads(data)["sha256"]
|
||||
|
||||
def prefetch_github(self, ref: str) -> str:
|
||||
data = subprocess.check_output(
|
||||
["nix-prefetch-url", "--unpack", self.url(f"archive/{ref}.tar.gz")]
|
||||
)
|
||||
return data.strip().decode("utf-8")
|
||||
|
||||
|
||||
class Plugin:
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
commit: str,
|
||||
has_submodules: bool,
|
||||
sha256: str,
|
||||
date: Optional[datetime] = None,
|
||||
) -> None:
|
||||
self.name = name
|
||||
self.commit = commit
|
||||
self.has_submodules = has_submodules
|
||||
self.sha256 = sha256
|
||||
self.date = date
|
||||
|
||||
@property
|
||||
def normalized_name(self) -> str:
|
||||
return self.name.replace(".", "-")
|
||||
|
||||
@property
|
||||
def version(self) -> str:
|
||||
assert self.date is not None
|
||||
return self.date.strftime("%Y-%m-%d")
|
||||
|
||||
def as_json(self) -> Dict[str, str]:
|
||||
copy = self.__dict__.copy()
|
||||
del copy["date"]
|
||||
return copy
|
||||
|
||||
|
||||
class Editor:
|
||||
"""The configuration of the update script."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
root: Path,
|
||||
get_plugins: str,
|
||||
generate_nix: Callable[[List[Tuple[str, str, Plugin]], str], None],
|
||||
default_in: Optional[Path] = None,
|
||||
default_out: Optional[Path] = None,
|
||||
deprecated: Optional[Path] = None,
|
||||
cache_file: Optional[str] = None,
|
||||
):
|
||||
self.name = name
|
||||
self.root = root
|
||||
self.get_plugins = get_plugins
|
||||
self.generate_nix = generate_nix
|
||||
self.default_in = default_in or root.joinpath(f"{name}-plugin-names")
|
||||
self.default_out = default_out or root.joinpath("generated.nix")
|
||||
self.deprecated = deprecated or root.joinpath("deprecated.json")
|
||||
self.cache_file = cache_file or f"{name}-plugin-cache.json"
|
||||
|
||||
|
||||
class CleanEnvironment(object):
|
||||
def __enter__(self) -> None:
|
||||
self.old_environ = os.environ.copy()
|
||||
local_pkgs = str(Path(__file__).parent.parent.parent)
|
||||
os.environ["NIX_PATH"] = f"localpkgs={local_pkgs}"
|
||||
self.empty_config = NamedTemporaryFile()
|
||||
self.empty_config.write(b"{}")
|
||||
self.empty_config.flush()
|
||||
os.environ["NIXPKGS_CONFIG"] = self.empty_config.name
|
||||
|
||||
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
|
||||
os.environ.update(self.old_environ)
|
||||
self.empty_config.close()
|
||||
|
||||
|
||||
def get_current_plugins(editor: Editor) -> List[Plugin]:
|
||||
with CleanEnvironment():
|
||||
out = subprocess.check_output(["nix", "eval", "--json", editor.get_plugins])
|
||||
data = json.loads(out)
|
||||
plugins = []
|
||||
for name, attr in data.items():
|
||||
p = Plugin(name, attr["rev"], attr["submodules"], attr["sha256"])
|
||||
plugins.append(p)
|
||||
return plugins
|
||||
|
||||
|
||||
def prefetch_plugin(
|
||||
user: str,
|
||||
repo_name: str,
|
||||
branch: str,
|
||||
alias: Optional[str],
|
||||
cache: "Optional[Cache]" = None,
|
||||
) -> Tuple[Plugin, Dict[str, str]]:
|
||||
repo = Repo(user, repo_name, branch, alias)
|
||||
commit, date = repo.latest_commit()
|
||||
has_submodules = repo.has_submodules()
|
||||
cached_plugin = cache[commit] if cache else None
|
||||
if cached_plugin is not None:
|
||||
cached_plugin.name = alias or repo_name
|
||||
cached_plugin.date = date
|
||||
return cached_plugin, repo.redirect
|
||||
|
||||
print(f"prefetch {user}/{repo_name}")
|
||||
if has_submodules:
|
||||
sha256 = repo.prefetch_git(commit)
|
||||
else:
|
||||
sha256 = repo.prefetch_github(commit)
|
||||
|
||||
return (
|
||||
Plugin(alias or repo_name, commit, has_submodules, sha256, date=date),
|
||||
repo.redirect,
|
||||
)
|
||||
|
||||
|
||||
def fetch_plugin_from_pluginline(plugin_line: str) -> Plugin:
|
||||
plugin, _ = prefetch_plugin(*parse_plugin_line(plugin_line))
|
||||
return plugin
|
||||
|
||||
|
||||
def print_download_error(plugin: str, ex: Exception):
|
||||
print(f"{plugin}: {ex}", file=sys.stderr)
|
||||
ex_traceback = ex.__traceback__
|
||||
tb_lines = [
|
||||
line.rstrip("\n")
|
||||
for line in traceback.format_exception(ex.__class__, ex, ex_traceback)
|
||||
]
|
||||
print("\n".join(tb_lines))
|
||||
|
||||
|
||||
def check_results(
|
||||
results: List[Tuple[str, str, Union[Exception, Plugin], Dict[str, str]]]
|
||||
) -> Tuple[List[Tuple[str, str, Plugin]], Dict[str, str]]:
|
||||
failures: List[Tuple[str, Exception]] = []
|
||||
plugins = []
|
||||
redirects: Dict[str, str] = {}
|
||||
for (owner, name, result, redirect) in results:
|
||||
if isinstance(result, Exception):
|
||||
failures.append((name, result))
|
||||
else:
|
||||
plugins.append((owner, name, result))
|
||||
redirects.update(redirect)
|
||||
|
||||
print(f"{len(results) - len(failures)} plugins were checked", end="")
|
||||
if len(failures) == 0:
|
||||
print()
|
||||
return plugins, redirects
|
||||
else:
|
||||
print(f", {len(failures)} plugin(s) could not be downloaded:\n")
|
||||
|
||||
for (plugin, exception) in failures:
|
||||
print_download_error(plugin, exception)
|
||||
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def parse_plugin_line(line: str) -> Tuple[str, str, str, Optional[str]]:
|
||||
branch = "master"
|
||||
alias = None
|
||||
name, repo = line.split("/")
|
||||
if " as " in repo:
|
||||
repo, alias = repo.split(" as ")
|
||||
alias = alias.strip()
|
||||
if "@" in repo:
|
||||
repo, branch = repo.split("@")
|
||||
|
||||
return (name.strip(), repo.strip(), branch.strip(), alias)
|
||||
|
||||
|
||||
def load_plugin_spec(plugin_file: str) -> List[Tuple[str, str, str, Optional[str]]]:
|
||||
plugins = []
|
||||
with open(plugin_file) as f:
|
||||
for line in f:
|
||||
plugin = parse_plugin_line(line)
|
||||
if not plugin[0]:
|
||||
msg = f"Invalid repository {line}, must be in the format owner/repo[ as alias]"
|
||||
print(msg, file=sys.stderr)
|
||||
sys.exit(1)
|
||||
plugins.append(plugin)
|
||||
return plugins
|
||||
|
||||
|
||||
def get_cache_path(cache_file_name: str) -> Optional[Path]:
|
||||
xdg_cache = os.environ.get("XDG_CACHE_HOME", None)
|
||||
if xdg_cache is None:
|
||||
home = os.environ.get("HOME", None)
|
||||
if home is None:
|
||||
return None
|
||||
xdg_cache = str(Path(home, ".cache"))
|
||||
|
||||
return Path(xdg_cache, cache_file_name)
|
||||
|
||||
|
||||
class Cache:
|
||||
def __init__(self, initial_plugins: List[Plugin], cache_file_name: str) -> None:
|
||||
self.cache_file = get_cache_path(cache_file_name)
|
||||
|
||||
downloads = {}
|
||||
for plugin in initial_plugins:
|
||||
downloads[plugin.commit] = plugin
|
||||
downloads.update(self.load())
|
||||
self.downloads = downloads
|
||||
|
||||
def load(self) -> Dict[str, Plugin]:
|
||||
if self.cache_file is None or not self.cache_file.exists():
|
||||
return {}
|
||||
|
||||
downloads: Dict[str, Plugin] = {}
|
||||
with open(self.cache_file) as f:
|
||||
data = json.load(f)
|
||||
for attr in data.values():
|
||||
p = Plugin(
|
||||
attr["name"], attr["commit"], attr["has_submodules"], attr["sha256"]
|
||||
)
|
||||
downloads[attr["commit"]] = p
|
||||
return downloads
|
||||
|
||||
def store(self) -> None:
|
||||
if self.cache_file is None:
|
||||
return
|
||||
|
||||
os.makedirs(self.cache_file.parent, exist_ok=True)
|
||||
with open(self.cache_file, "w+") as f:
|
||||
data = {}
|
||||
for name, attr in self.downloads.items():
|
||||
data[name] = attr.as_json()
|
||||
json.dump(data, f, indent=4, sort_keys=True)
|
||||
|
||||
def __getitem__(self, key: str) -> Optional[Plugin]:
|
||||
return self.downloads.get(key, None)
|
||||
|
||||
def __setitem__(self, key: str, value: Plugin) -> None:
|
||||
self.downloads[key] = value
|
||||
|
||||
|
||||
def prefetch(
|
||||
args: Tuple[str, str, str, Optional[str]], cache: Cache
|
||||
) -> Tuple[str, str, Union[Exception, Plugin], dict]:
|
||||
assert len(args) == 4
|
||||
owner, repo, branch, alias = args
|
||||
try:
|
||||
plugin, redirect = prefetch_plugin(owner, repo, branch, alias, cache)
|
||||
cache[plugin.commit] = plugin
|
||||
return (owner, repo, plugin, redirect)
|
||||
except Exception as e:
|
||||
return (owner, repo, e, {})
|
||||
|
||||
|
||||
def rewrite_input(
|
||||
input_file: Path,
|
||||
deprecated: Path,
|
||||
redirects: Dict[str, str] = None,
|
||||
append: Tuple = (),
|
||||
):
|
||||
with open(input_file, "r") as f:
|
||||
lines = f.readlines()
|
||||
|
||||
lines.extend(append)
|
||||
|
||||
if redirects:
|
||||
lines = [redirects.get(line, line) for line in lines]
|
||||
|
||||
cur_date_iso = datetime.now().strftime("%Y-%m-%d")
|
||||
with open(deprecated, "r") as f:
|
||||
deprecations = json.load(f)
|
||||
for old, new in redirects.items():
|
||||
old_plugin = fetch_plugin_from_pluginline(old)
|
||||
new_plugin = fetch_plugin_from_pluginline(new)
|
||||
if old_plugin.normalized_name != new_plugin.normalized_name:
|
||||
deprecations[old_plugin.normalized_name] = {
|
||||
"new": new_plugin.normalized_name,
|
||||
"date": cur_date_iso,
|
||||
}
|
||||
with open(deprecated, "w") as f:
|
||||
json.dump(deprecations, f, indent=4, sort_keys=True)
|
||||
|
||||
lines = sorted(lines, key=str.casefold)
|
||||
|
||||
with open(input_file, "w") as f:
|
||||
f.writelines(lines)
|
||||
|
||||
|
||||
def parse_args(editor: Editor):
|
||||
parser = argparse.ArgumentParser(
|
||||
description=(
|
||||
f"Updates nix derivations for {editor.name} plugins"
|
||||
f"By default from {editor.default_in} to {editor.default_out}"
|
||||
)
|
||||
)
|
||||
parser.add_argument(
|
||||
"--add",
|
||||
dest="add_plugins",
|
||||
default=[],
|
||||
action="append",
|
||||
help=f"Plugin to add to {editor.name}Plugins from Github in the form owner/repo",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--input-names",
|
||||
"-i",
|
||||
dest="input_file",
|
||||
default=editor.default_in,
|
||||
help="A list of plugins in the form owner/repo",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--out",
|
||||
"-o",
|
||||
dest="outfile",
|
||||
default=editor.default_out,
|
||||
help="Filename to save generated nix code",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--proc",
|
||||
"-p",
|
||||
dest="proc",
|
||||
type=int,
|
||||
default=30,
|
||||
help="Number of concurrent processes to spawn.",
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def commit(repo: git.Repo, message: str, files: List[Path]) -> None:
|
||||
repo.index.add([str(f.resolve()) for f in files])
|
||||
|
||||
if repo.index.diff("HEAD"):
|
||||
print(f'committing to nixpkgs "{message}"')
|
||||
repo.index.commit(message)
|
||||
else:
|
||||
print("no changes in working tree to commit")
|
||||
|
||||
|
||||
def get_update(input_file: str, outfile: str, proc: int, editor: Editor):
|
||||
cache: Cache = Cache(get_current_plugins(editor), editor.cache_file)
|
||||
_prefetch = functools.partial(prefetch, cache=cache)
|
||||
|
||||
def update() -> dict:
|
||||
plugin_names = load_plugin_spec(input_file)
|
||||
|
||||
try:
|
||||
pool = Pool(processes=proc)
|
||||
results = pool.map(_prefetch, plugin_names)
|
||||
finally:
|
||||
cache.store()
|
||||
|
||||
plugins, redirects = check_results(results)
|
||||
|
||||
editor.generate_nix(plugins, outfile)
|
||||
|
||||
return redirects
|
||||
|
||||
return update
|
||||
|
||||
|
||||
def update_plugins(editor: Editor):
|
||||
"""The main entry function of this module. All input arguments are grouped in the `Editor`."""
|
||||
|
||||
args = parse_args(editor)
|
||||
nixpkgs_repo = git.Repo(editor.root, search_parent_directories=True)
|
||||
update = get_update(args.input_file, args.outfile, args.proc, editor)
|
||||
|
||||
redirects = update()
|
||||
rewrite_input(args.input_file, editor.deprecated, redirects)
|
||||
commit(nixpkgs_repo, f"{editor.name}Plugins: update", [args.outfile])
|
||||
|
||||
if redirects:
|
||||
update()
|
||||
commit(
|
||||
nixpkgs_repo,
|
||||
f"{editor.name}Plugins: resolve github repository redirects",
|
||||
[args.outfile, args.input_file, editor.deprecated],
|
||||
)
|
||||
|
||||
for plugin_line in args.add_plugins:
|
||||
rewrite_input(args.input_file, editor.deprecated, append=(plugin_line + "\n",))
|
||||
update()
|
||||
plugin = fetch_plugin_from_pluginline(plugin_line)
|
||||
commit(
|
||||
nixpkgs_repo,
|
||||
"{editor}Plugins.{name}: init at {version}".format(
|
||||
editor=editor.name, name=plugin.normalized_name, version=plugin.version
|
||||
),
|
||||
[args.outfile, args.input_file],
|
||||
)
|
@ -82,6 +82,7 @@ with lib.maintainers; {
|
||||
jtojnar
|
||||
worldofpeace
|
||||
dasj19
|
||||
maxeaubrey
|
||||
];
|
||||
scope = "Maintain GNOME desktop environment and platform.";
|
||||
};
|
||||
@ -96,6 +97,18 @@ with lib.maintainers; {
|
||||
scope = "Maintain Jitsi.";
|
||||
};
|
||||
|
||||
kodi = {
|
||||
members = [
|
||||
aanderse
|
||||
cpages
|
||||
edwtjo
|
||||
minijackson
|
||||
peterhoeg
|
||||
sephalon
|
||||
];
|
||||
scope = "Maintain Kodi and related packages.";
|
||||
};
|
||||
|
||||
matrix = {
|
||||
members = [
|
||||
ma27
|
||||
@ -134,6 +147,7 @@ with lib.maintainers; {
|
||||
timokau
|
||||
omasanori
|
||||
raskin
|
||||
collares
|
||||
];
|
||||
scope = "Maintain SageMath and the dependencies that are likely to break it.";
|
||||
};
|
||||
|
@ -1,5 +1,5 @@
|
||||
.PHONY: all
|
||||
all: manual-combined.xml format
|
||||
all: manual-combined.xml
|
||||
|
||||
.PHONY: debug
|
||||
debug: generated manual-combined.xml
|
||||
|
@ -26,7 +26,11 @@ Enter passphrase for /dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d: ***
|
||||
|
||||
<prompt># </prompt>mkfs.ext4 /dev/mapper/<replaceable>crypted</replaceable>
|
||||
</screen>
|
||||
To ensure that this file system is automatically mounted at boot time as
|
||||
The LUKS volume should be automatically picked up by
|
||||
<command>nixos-generate-config</command>, but you might want to verify that your
|
||||
<filename>hardware-configuration.nix</filename> looks correct.
|
||||
|
||||
To manually ensure that the system is automatically mounted at boot time as
|
||||
<filename>/</filename>, add the following to
|
||||
<filename>configuration.nix</filename>:
|
||||
<programlisting>
|
||||
|
@ -25,7 +25,8 @@
|
||||
|
||||
<screen>
|
||||
<prompt>$ </prompt>cd /path/to/nixpkgs/nixos/doc/manual
|
||||
<prompt>$ </prompt>make
|
||||
<prompt>$ </prompt>nix-shell
|
||||
<prompt>nix-shell$ </prompt>make
|
||||
</screen>
|
||||
|
||||
<para>
|
||||
|
@ -374,7 +374,7 @@
|
||||
You may want to look at the options starting with
|
||||
<option><link linkend="opt-boot.loader.efi.canTouchEfiVariables">boot.loader.efi</link></option>
|
||||
and
|
||||
<option><link linkend="opt-boot.loader.systemd-boot.enable">boot.loader.systemd</link></option>
|
||||
<option><link linkend="opt-boot.loader.systemd-boot.enable">boot.loader.systemd-boot</link></option>
|
||||
as well.
|
||||
</para>
|
||||
</listitem>
|
||||
|
@ -720,8 +720,8 @@ environment.systemPackages = [
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Aditionally to the much stricter runtime environmet the
|
||||
<literal>/dev/urandom</literal> mount lines we previously had in the code (that would
|
||||
Additionally to the much stricter runtime environment the
|
||||
<literal>/dev/urandom</literal> mount lines we previously had in the code (that
|
||||
randomly failed during the stop-phase) have been removed as systemd will take care of those for us.
|
||||
</para>
|
||||
|
||||
@ -809,7 +809,7 @@ environment.systemPackages = [
|
||||
<listitem>
|
||||
<para>
|
||||
Platforms, like <varname>stdenv.hostPlatform</varname>, no longer have a <varname>platform</varname> attribute.
|
||||
It has been (mostly) flattoned away:
|
||||
It has been (mostly) flattened away:
|
||||
</para>
|
||||
<itemizedlist>
|
||||
<listitem><para><varname>platform.gcc</varname> is now <varname>gcc</varname></para></listitem>
|
||||
|
@ -17,7 +17,19 @@ nix-build '<nixpkgs/nixos/lib/eval-config.nix>' \
|
||||
img_path=$(echo gce/*.tar.gz)
|
||||
img_name=${IMAGE_NAME:-$(basename "$img_path")}
|
||||
img_id=$(echo "$img_name" | sed 's|.raw.tar.gz$||;s|\.|-|g;s|_|-|g')
|
||||
img_family=$(echo "$img_id" | cut -d - -f1-4)
|
||||
|
||||
if ! gsutil ls "gs://${BUCKET_NAME}/$img_name"; then
|
||||
gsutil cp "$img_path" "gs://${BUCKET_NAME}/$img_name"
|
||||
gsutil acl ch -u AllUsers:R "gs://${BUCKET_NAME}/$img_name"
|
||||
|
||||
gcloud compute images create \
|
||||
"$img_id" \
|
||||
--source-uri "gs://${BUCKET_NAME}/$img_name" \
|
||||
--family="$img_family"
|
||||
|
||||
gcloud compute images add-iam-policy-binding \
|
||||
"$img_id" \
|
||||
--member='allAuthenticatedUsers' \
|
||||
--role='roles/compute.imageUser'
|
||||
fi
|
||||
|
@ -288,7 +288,7 @@ foreach my $u (values %usersOut) {
|
||||
push @shadowNew, join(":", $u->{name}, $hashedPassword, "1::::::") . "\n";
|
||||
}
|
||||
|
||||
updateFile("/etc/shadow", \@shadowNew, 0600);
|
||||
updateFile("/etc/shadow", \@shadowNew, 0640);
|
||||
{
|
||||
my $uid = getpwnam "root";
|
||||
my $gid = getgrnam "shadow";
|
||||
|
@ -98,7 +98,7 @@ in
|
||||
|
||||
See "Multiple-output packages" chapter in the nixpkgs manual for more info.
|
||||
'';
|
||||
# which is at ../../../doc/multiple-output.xml
|
||||
# which is at ../../../doc/multiple-output.chapter.md
|
||||
};
|
||||
|
||||
man.enable = mkOption {
|
||||
|
@ -47,9 +47,9 @@ in
|
||||
doc = mkOption {
|
||||
type = docFile;
|
||||
internal = true;
|
||||
example = "./meta.xml";
|
||||
example = "./meta.chapter.xml";
|
||||
description = ''
|
||||
Documentation prologe for the set of options of each module. This
|
||||
Documentation prologue for the set of options of each module. This
|
||||
option should be defined at most once per module.
|
||||
'';
|
||||
};
|
||||
|
@ -155,6 +155,7 @@
|
||||
./programs/nm-applet.nix
|
||||
./programs/npm.nix
|
||||
./programs/oblogout.nix
|
||||
./programs/partition-manager.nix
|
||||
./programs/plotinus.nix
|
||||
./programs/proxychains.nix
|
||||
./programs/qt5ct.nix
|
||||
@ -177,6 +178,7 @@
|
||||
./programs/tmux.nix
|
||||
./programs/traceroute.nix
|
||||
./programs/tsm-client.nix
|
||||
./programs/turbovnc.nix
|
||||
./programs/udevil.nix
|
||||
./programs/usbtop.nix
|
||||
./programs/vim.nix
|
||||
@ -680,6 +682,7 @@
|
||||
./services/networking/i2p.nix
|
||||
./services/networking/icecream/scheduler.nix
|
||||
./services/networking/icecream/daemon.nix
|
||||
./services/networking/inspircd.nix
|
||||
./services/networking/iodine.nix
|
||||
./services/networking/iperf3.nix
|
||||
./services/networking/ircd-hybrid/default.nix
|
||||
@ -882,6 +885,7 @@
|
||||
./services/web-apps/atlassian/confluence.nix
|
||||
./services/web-apps/atlassian/crowd.nix
|
||||
./services/web-apps/atlassian/jira.nix
|
||||
./services/web-apps/bookstack.nix
|
||||
./services/web-apps/convos.nix
|
||||
./services/web-apps/cryptpad.nix
|
||||
./services/web-apps/documize.nix
|
||||
|
@ -40,7 +40,7 @@ in
|
||||
configFile = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
example = literalExample "$${pkgs.my-configs}/lesskey";
|
||||
example = literalExample "\${pkgs.my-configs}/lesskey";
|
||||
description = ''
|
||||
Path to lesskey configuration file.
|
||||
|
||||
|
19
nixos/modules/programs/partition-manager.nix
Normal file
@ -0,0 +1,19 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
{
|
||||
meta.maintainers = [ maintainers.oxalica ];
|
||||
|
||||
###### interface
|
||||
options = {
|
||||
programs.partition-manager.enable = mkEnableOption "KDE Partition Manager";
|
||||
};
|
||||
|
||||
###### implementation
|
||||
config = mkIf config.programs.partition-manager.enable {
|
||||
services.dbus.packages = [ pkgs.libsForQt5.kpmcore ];
|
||||
# `kpmcore` needs to be installed to pull in polkit actions.
|
||||
environment.systemPackages = [ pkgs.libsForQt5.kpmcore pkgs.partition-manager ];
|
||||
};
|
||||
}
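A minimal usage sketch for this new module; the option name is taken from the definition above, everything else is just assumed NixOS configuration boilerplate:

{ ... }:
{
  programs.partition-manager.enable = true;
}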
|
@ -90,7 +90,7 @@ in {
|
||||
rxvt-unicode # For backward compatibility (old default terminal)
|
||||
];
|
||||
defaultText = literalExample ''
|
||||
with pkgs; [ swaylock swayidle xwayland rxvt-unicode dmenu ];
|
||||
with pkgs; [ swaylock swayidle rxvt-unicode alacritty dmenu ];
|
||||
'';
|
||||
example = literalExample ''
|
||||
with pkgs; [
|
||||
|
54
nixos/modules/programs/turbovnc.nix
Normal file
@ -0,0 +1,54 @@
|
||||
# Global configuration for TurboVNC.
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.programs.turbovnc;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
|
||||
programs.turbovnc = {
|
||||
|
||||
ensureHeadlessSoftwareOpenGL = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to set up NixOS such that TurboVNC's built-in software OpenGL
|
||||
implementation works.
|
||||
|
||||
This will enable <option>hardware.opengl.enable</option> so that OpenGL
|
||||
programs can find Mesa's llvmpipe drivers.
|
||||
|
||||
Setting this option to <code>false</code> does not mean that software
|
||||
OpenGL won't work; it may still work depending on your system
|
||||
configuration.
|
||||
|
||||
This option is also intended to generate warnings if you are using some
|
||||
configuration that's incompatible with using headless software OpenGL
|
||||
in TurboVNC.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = mkIf cfg.ensureHeadlessSoftwareOpenGL {
|
||||
|
||||
# TurboVNC has builtin support for Mesa llvmpipe's `swrast`
|
||||
# software rendering to implement GLX (OpenGL on Xorg).
|
||||
# However, just building TurboVNC with support for that is not enough
|
||||
# (it only takes care of the X server side part of OpenGL);
|
||||
# the individual applications (e.g. `glxgears`) also need to directly load
|
||||
# the OpenGL libs.
|
||||
# Thus, this creates `/run/opengl-driver` populated by Mesa so that the applications
|
||||
# can find the llvmpipe `swrast.so` software rendering DRI lib via `libglvnd`.
|
||||
# This comment exists to explain why `hardware.` is involved,
|
||||
# even though 100% software rendering is used.
|
||||
hardware.opengl.enable = true;
|
||||
|
||||
};
|
||||
}
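A usage sketch for the option described above, assuming a plain NixOS configuration and nothing beyond what this module defines:

{ ... }:
{
  programs.turbovnc.ensureHeadlessSoftwareOpenGL = true;
}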
|
@ -24,7 +24,7 @@ let
|
||||
Type = "oneshot";
|
||||
User = "acme";
|
||||
Group = mkDefault "acme";
|
||||
UMask = 0023;
|
||||
UMask = 0022;
|
||||
StateDirectoryMode = 750;
|
||||
ProtectSystem = "full";
|
||||
PrivateTmp = true;
|
||||
@ -235,7 +235,7 @@ let
|
||||
# https://github.com/NixOS/nixpkgs/pull/81371#issuecomment-605526099
|
||||
wantedBy = optionals (!config.boot.isContainer) [ "multi-user.target" ];
|
||||
|
||||
path = with pkgs; [ lego coreutils diffutils ];
|
||||
path = with pkgs; [ lego coreutils diffutils openssl ];
|
||||
|
||||
serviceConfig = commonServiceConfig // {
|
||||
Group = data.group;
|
||||
@ -274,10 +274,44 @@ let
|
||||
script = ''
|
||||
set -euxo pipefail
|
||||
|
||||
# This reimplements the expiration date check, but without querying
|
||||
# the acme server first. By doing this offline, we avoid errors
|
||||
# when the network or DNS are unavailable, which can happen during
|
||||
# nixos-rebuild switch.
|
||||
is_expiration_skippable() {
|
||||
pem=$1
|
||||
|
||||
# This function relies on set -e to exit early if any of the
|
||||
# conditions or programs fail.
|
||||
|
||||
[[ -e $pem ]]
|
||||
|
||||
expiration_line="$(
|
||||
set -euxo pipefail
|
||||
openssl x509 -noout -enddate <$pem \
|
||||
| grep notAfter \
|
||||
| sed -e 's/^notAfter=//'
|
||||
)"
|
||||
[[ -n "$expiration_line" ]]
|
||||
|
||||
expiration_date="$(date -d "$expiration_line" +%s)"
|
||||
now="$(date +%s)"
|
||||
expiration_s=$[expiration_date - now]
|
||||
expiration_days=$[expiration_s / (3600 * 24)] # rounds down
|
||||
|
||||
[[ $expiration_days -gt ${toString cfg.validMinDays} ]]
|
||||
}
|
||||
|
||||
${optionalString (data.webroot != null) ''
|
||||
# Ensure the webroot exists
|
||||
mkdir -p '${data.webroot}/.well-known/acme-challenge'
|
||||
chown 'acme:${data.group}' ${data.webroot}/{.well-known,.well-known/acme-challenge}
|
||||
# Ensure the webroot exists. Fixing group is required in case configuration was changed between runs.
|
||||
# Lego will fail if the webroot does not exist at all.
|
||||
(
|
||||
mkdir -p '${data.webroot}/.well-known/acme-challenge' \
|
||||
&& chgrp '${data.group}' ${data.webroot}/.well-known/acme-challenge
|
||||
) || (
|
||||
echo 'Please ensure ${data.webroot}/.well-known/acme-challenge exists and is writable by acme:${data.group}' \
|
||||
&& exit 1
|
||||
)
|
||||
''}
|
||||
|
||||
echo '${domainHash}' > domainhash.txt
|
||||
@ -288,8 +322,14 @@ let
|
||||
# When domains are updated, there's no need to do a full
|
||||
# Lego run, but it's likely renew won't work if days is too low.
|
||||
if [ -e certificates/domainhash.txt ] && cmp -s domainhash.txt certificates/domainhash.txt; then
|
||||
lego ${renewOpts} --days ${toString cfg.validMinDays}
|
||||
if is_expiration_skippable out/full.pem; then
|
||||
echo 1>&2 "nixos-acme: skipping renewal because expiration isn't within the coming ${toString cfg.validMinDays} days"
|
||||
else
|
||||
echo 1>&2 "nixos-acme: renewing now, because certificate expires within the configured ${toString cfg.validMinDays} days"
|
||||
lego ${renewOpts} --days ${toString cfg.validMinDays}
|
||||
fi
|
||||
else
|
||||
echo 1>&2 "certificate domain(s) have changed; will renew now"
|
||||
# Any number > 90 works, but this one is over 9000 ;-)
|
||||
lego ${renewOpts} --days 9001
|
||||
fi
|
||||
|
@ -93,10 +93,12 @@ in
|
||||
};
|
||||
|
||||
paths = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
type = types.nullOr (types.listOf types.str);
|
||||
default = null;
|
||||
description = ''
|
||||
Which paths to backup.
|
||||
Which paths to backup. If null or an empty array, no
|
||||
backup command will be run. This can be used to create a
|
||||
prune-only job.
|
||||
'';
|
||||
example = [
|
||||
"/var/lib/postgresql"
|
||||
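The description above mentions prune-only jobs; as a sketch, such a job could look roughly like the following (repository and passwordFile are assumed to be existing options of this restic module, and all values are placeholders):

services.restic.backups.prune-only = {
  repository = "/srv/restic-repo";                  # placeholder
  passwordFile = "/etc/nixos/secrets/restic-pass";  # placeholder
  paths = null;        # no backup command is run
  pruneOpts = [ "--keep-daily 7" "--keep-weekly 5" ];
};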
@ -217,7 +219,7 @@ in
|
||||
resticCmd = "${pkgs.restic}/bin/restic${extraOptions}";
|
||||
filesFromTmpFile = "/run/restic-backups-${name}/includes";
|
||||
backupPaths = if (backup.dynamicFilesFrom == null)
|
||||
then concatStringsSep " " backup.paths
|
||||
then if (backup.paths != null) then concatStringsSep " " backup.paths else ""
|
||||
else "--files-from ${filesFromTmpFile}";
|
||||
pruneCmd = optionals (builtins.length backup.pruneOpts > 0) [
|
||||
( resticCmd + " forget --prune " + (concatStringsSep " " backup.pruneOpts) )
|
||||
@ -243,7 +245,8 @@ in
|
||||
restartIfChanged = false;
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
ExecStart = [ "${resticCmd} backup --cache-dir=%C/restic-backups-${name} ${concatStringsSep " " backup.extraBackupArgs} ${backupPaths}" ] ++ pruneCmd;
|
||||
ExecStart = (optionals (backupPaths != "") [ "${resticCmd} backup --cache-dir=%C/restic-backups-${name} ${concatStringsSep " " backup.extraBackupArgs} ${backupPaths}" ])
|
||||
++ pruneCmd;
|
||||
User = backup.user;
|
||||
RuntimeDirectory = "restic-backups-${name}";
|
||||
CacheDirectory = "restic-backups-${name}";
|
||||
|
@ -276,9 +276,9 @@ in
|
||||
${concatMapStrings (img: ''
|
||||
echo "Seeding container image: ${img}"
|
||||
${if (lib.hasSuffix "gz" img) then
|
||||
''${pkgs.gzip}/bin/zcat "${img}" | ${pkgs.containerd}/bin/ctr -n k8s.io image import -''
|
||||
''${pkgs.gzip}/bin/zcat "${img}" | ${pkgs.containerd}/bin/ctr -n k8s.io image import --all-platforms -''
|
||||
else
|
||||
''${pkgs.coreutils}/bin/cat "${img}" | ${pkgs.containerd}/bin/ctr -n k8s.io image import -''
|
||||
''${pkgs.coreutils}/bin/cat "${img}" | ${pkgs.containerd}/bin/ctr -n k8s.io image import --all-platforms -''
|
||||
}
|
||||
'') cfg.seedDockerImages}
|
||||
|
||||
|
@ -274,6 +274,15 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
etcSlurm = mkOption {
|
||||
type = types.path;
|
||||
internal = true;
|
||||
default = etcSlurm;
|
||||
description = ''
|
||||
Path to directory with slurm config files. This option is set by default from the
|
||||
Slurm module and is meant to make the Slurm config file available to other modules.
|
||||
'';
|
||||
};
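As a sketch of what this internal option enables, another module could reference the generated config directory roughly like this (assuming the option lives at services.slurm.etcSlurm; the consuming option is only an example):

{ config, ... }:
{
  # Hypothetical consumer of the internal etcSlurm option
  environment.variables.SLURM_CONF =
    "${config.services.slurm.etcSlurm}/slurm.conf";
}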
|
||||
|
||||
};
|
||||
|
||||
@ -308,7 +317,7 @@ in
|
||||
#!/bin/sh
|
||||
if [ -z "$SLURM_CONF" ]
|
||||
then
|
||||
SLURM_CONF="${etcSlurm}/slurm.conf" "$EXE" "\$@"
|
||||
SLURM_CONF="${cfg.etcSlurm}/slurm.conf" "$EXE" "\$@"
|
||||
else
|
||||
"$EXE" "\$0"
|
||||
fi
|
||||
|
6
nixos/modules/services/desktops/pipewire/README.md
Normal file
@ -0,0 +1,6 @@
|
||||
# Updating
|
||||
|
||||
1. Update the version & hash in pkgs/development/libraries/pipewire/default.nix
|
||||
2. Run `nix build -f /path/to/nixpkgs/checkout pipewire pipewire.mediaSession`
|
||||
3. Copy all JSON files from result/etc/pipewire and result-mediaSession/etc/pipewire/media-session.d to this directory
|
||||
4. Add new files to the module config and passthru tests
|
@ -6,21 +6,34 @@
|
||||
"audio.convert.*": "audioconvert/libspa-audioconvert",
|
||||
"support.*": "support/libspa-support"
|
||||
},
|
||||
"context.modules": {
|
||||
"libpipewire-module-rtkit": {
|
||||
"context.modules": [
|
||||
{
|
||||
"name": "libpipewire-module-rtkit",
|
||||
"args": {},
|
||||
"flags": [
|
||||
"ifexists",
|
||||
"nofail"
|
||||
]
|
||||
},
|
||||
"libpipewire-module-protocol-native": null,
|
||||
"libpipewire-module-client-node": null,
|
||||
"libpipewire-module-client-device": null,
|
||||
"libpipewire-module-adapter": null,
|
||||
"libpipewire-module-metadata": null,
|
||||
"libpipewire-module-session-manager": null
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-protocol-native"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-client-node"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-client-device"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-adapter"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-metadata"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-session-manager"
|
||||
}
|
||||
],
|
||||
"filter.properties": {},
|
||||
"stream.properties": {}
|
||||
}
|
||||
|
@ -6,14 +6,26 @@
|
||||
"audio.convert.*": "audioconvert/libspa-audioconvert",
|
||||
"support.*": "support/libspa-support"
|
||||
},
|
||||
"context.modules": {
|
||||
"libpipewire-module-protocol-native": null,
|
||||
"libpipewire-module-client-node": null,
|
||||
"libpipewire-module-client-device": null,
|
||||
"libpipewire-module-adapter": null,
|
||||
"libpipewire-module-metadata": null,
|
||||
"libpipewire-module-session-manager": null
|
||||
},
|
||||
"context.modules": [
|
||||
{
|
||||
"name": "libpipewire-module-protocol-native"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-client-node"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-client-device"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-adapter"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-metadata"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-session-manager"
|
||||
}
|
||||
],
|
||||
"filter.properties": {},
|
||||
"stream.properties": {}
|
||||
}
|
||||
|
@ -5,17 +5,24 @@
|
||||
"context.spa-libs": {
|
||||
"support.*": "support/libspa-support"
|
||||
},
|
||||
"context.modules": {
|
||||
"libpipewire-module-rtkit": {
|
||||
"context.modules": [
|
||||
{
|
||||
"name": "libpipewire-module-rtkit",
|
||||
"args": {},
|
||||
"flags": [
|
||||
"ifexists",
|
||||
"nofail"
|
||||
]
|
||||
},
|
||||
"libpipewire-module-protocol-native": null,
|
||||
"libpipewire-module-client-node": null,
|
||||
"libpipewire-module-metadata": null
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-protocol-native"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-client-node"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-metadata"
|
||||
}
|
||||
],
|
||||
"jack.properties": {}
|
||||
}
|
||||
|
@ -6,21 +6,34 @@
|
||||
"api.v4l2.*": "v4l2/libspa-v4l2",
|
||||
"api.libcamera.*": "libcamera/libspa-libcamera"
|
||||
},
|
||||
"context.modules": {
|
||||
"libpipewire-module-rtkit": {
|
||||
"context.modules": [
|
||||
{
|
||||
"name": "libpipewire-module-rtkit",
|
||||
"args": {},
|
||||
"flags": [
|
||||
"ifexists",
|
||||
"nofail"
|
||||
]
|
||||
},
|
||||
"libpipewire-module-protocol-native": null,
|
||||
"libpipewire-module-client-node": null,
|
||||
"libpipewire-module-client-device": null,
|
||||
"libpipewire-module-adapter": null,
|
||||
"libpipewire-module-metadata": null,
|
||||
"libpipewire-module-session-manager": null
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-protocol-native"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-client-node"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-client-device"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-adapter"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-metadata"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-session-manager"
|
||||
}
|
||||
],
|
||||
"session.modules": {
|
||||
"default": [
|
||||
"flatpak",
|
||||
|
@ -9,21 +9,12 @@ let
|
||||
&& pkgs.stdenv.isx86_64
|
||||
&& pkgs.pkgsi686Linux.pipewire != null;
|
||||
|
||||
prioritizeNativeProtocol = {
|
||||
"context.modules" = {
|
||||
"libpipewire-module-protocol-native" = {
|
||||
_priority = -100;
|
||||
_content = null;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Use upstream config files passed through spa-json-dump as the base
|
||||
# Patched here as necessary for them to work with this module
|
||||
defaults = {
|
||||
alsa-monitor = (builtins.fromJSON (builtins.readFile ./alsa-monitor.conf.json));
|
||||
bluez-monitor = (builtins.fromJSON (builtins.readFile ./bluez-monitor.conf.json));
|
||||
media-session = recursiveUpdate (builtins.fromJSON (builtins.readFile ./media-session.conf.json)) prioritizeNativeProtocol;
|
||||
media-session = (builtins.fromJSON (builtins.readFile ./media-session.conf.json));
|
||||
v4l2-monitor = (builtins.fromJSON (builtins.readFile ./v4l2-monitor.conf.json));
|
||||
};
|
||||
# Helpers for generating the pipewire JSON config file
|
||||
|
@ -4,25 +4,35 @@
|
||||
"audio.convert.*": "audioconvert/libspa-audioconvert",
|
||||
"support.*": "support/libspa-support"
|
||||
},
|
||||
"context.modules": {
|
||||
"libpipewire-module-rtkit": {
|
||||
"context.modules": [
|
||||
{
|
||||
"name": "libpipewire-module-rtkit",
|
||||
"args": {},
|
||||
"flags": [
|
||||
"ifexists",
|
||||
"nofail"
|
||||
]
|
||||
},
|
||||
"libpipewire-module-protocol-native": null,
|
||||
"libpipewire-module-client-node": null,
|
||||
"libpipewire-module-adapter": null,
|
||||
"libpipewire-module-metadata": null,
|
||||
"libpipewire-module-protocol-pulse": {
|
||||
{
|
||||
"name": "libpipewire-module-protocol-native"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-client-node"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-adapter"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-metadata"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-protocol-pulse",
|
||||
"args": {
|
||||
"server.address": [
|
||||
"unix:native"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
],
|
||||
"stream.properties": {}
|
||||
}
|
||||
|
@ -14,42 +14,66 @@
|
||||
"api.jack.*": "jack/libspa-jack",
|
||||
"support.*": "support/libspa-support"
|
||||
},
|
||||
"context.modules": {
|
||||
"libpipewire-module-rtkit": {
|
||||
"context.modules": [
|
||||
{
|
||||
"name": "libpipewire-module-rtkit",
|
||||
"args": {},
|
||||
"flags": [
|
||||
"ifexists",
|
||||
"nofail"
|
||||
]
|
||||
},
|
||||
"libpipewire-module-protocol-native": null,
|
||||
"libpipewire-module-profiler": null,
|
||||
"libpipewire-module-metadata": null,
|
||||
"libpipewire-module-spa-device-factory": null,
|
||||
"libpipewire-module-spa-node-factory": null,
|
||||
"libpipewire-module-client-node": null,
|
||||
"libpipewire-module-client-device": null,
|
||||
"libpipewire-module-portal": {
|
||||
{
|
||||
"name": "libpipewire-module-protocol-native"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-profiler"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-metadata"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-spa-device-factory"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-spa-node-factory"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-client-node"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-client-device"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-portal",
|
||||
"flags": [
|
||||
"ifexists",
|
||||
"nofail"
|
||||
]
|
||||
},
|
||||
"libpipewire-module-access": {
|
||||
{
|
||||
"name": "libpipewire-module-access",
|
||||
"args": {}
|
||||
},
|
||||
"libpipewire-module-adapter": null,
|
||||
"libpipewire-module-link-factory": null,
|
||||
"libpipewire-module-session-manager": null
|
||||
},
|
||||
"context.objects": {
|
||||
"spa-node-factory": {
|
||||
{
|
||||
"name": "libpipewire-module-adapter"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-link-factory"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-session-manager"
|
||||
}
|
||||
],
|
||||
"context.objects": [
|
||||
{
|
||||
"factory": "spa-node-factory",
|
||||
"args": {
|
||||
"factory.name": "support.node.driver",
|
||||
"node.name": "Dummy-Driver",
|
||||
"priority.driver": 8000
|
||||
}
|
||||
}
|
||||
},
|
||||
"context.exec": {}
|
||||
],
|
||||
"context.exec": []
|
||||
}
|
||||
|
@ -18,45 +18,15 @@ let
|
||||
ln -s "${cfg.package.jack}/lib" "$out/lib/pipewire"
|
||||
'';
|
||||
|
||||
prioritizeNativeProtocol = {
|
||||
"context.modules" = {
|
||||
# Most other modules depend on this, so put it first
|
||||
"libpipewire-module-protocol-native" = {
|
||||
_priority = -100;
|
||||
_content = null;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
fixDaemonModulePriorities = {
|
||||
"context.modules" = {
|
||||
# Most other modules depend on this, so put it first
|
||||
"libpipewire-module-protocol-native" = {
|
||||
_priority = -100;
|
||||
_content = null;
|
||||
};
|
||||
# Needs to be before libpipewire-module-access
|
||||
"libpipewire-module-portal" = {
|
||||
_priority = -50;
|
||||
_content = {
|
||||
flags = [
|
||||
"ifexists"
|
||||
"nofail"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Use upstream config files passed through spa-json-dump as the base
|
||||
# Patched here as necessary for them to work with this module
|
||||
defaults = {
|
||||
client = recursiveUpdate (builtins.fromJSON (builtins.readFile ./client.conf.json)) prioritizeNativeProtocol;
|
||||
client-rt = recursiveUpdate (builtins.fromJSON (builtins.readFile ./client-rt.conf.json)) prioritizeNativeProtocol;
|
||||
jack = recursiveUpdate (builtins.fromJSON (builtins.readFile ./jack.conf.json)) prioritizeNativeProtocol;
|
||||
client = builtins.fromJSON (builtins.readFile ./client.conf.json);
|
||||
client-rt = builtins.fromJSON (builtins.readFile ./client-rt.conf.json);
|
||||
jack = builtins.fromJSON (builtins.readFile ./jack.conf.json);
|
||||
# Remove session manager invocation from the upstream generated file, it points to the wrong path
|
||||
pipewire = recursiveUpdate (builtins.fromJSON (builtins.readFile ./pipewire.conf.json)) fixDaemonModulePriorities;
|
||||
pipewire-pulse = recursiveUpdate (builtins.fromJSON (builtins.readFile ./pipewire-pulse.conf.json)) prioritizeNativeProtocol;
|
||||
pipewire = builtins.fromJSON (builtins.readFile ./pipewire.conf.json);
|
||||
pipewire-pulse = builtins.fromJSON (builtins.readFile ./pipewire-pulse.conf.json);
|
||||
};
|
||||
|
||||
# Helpers for generating the pipewire JSON config file
|
||||
|
@ -36,6 +36,7 @@ let
|
||||
only_admins_can_pause_the_game = true;
|
||||
autosave_only_on_server = true;
|
||||
admins = [];
|
||||
non_blocking_saving = cfg.nonBlockingSaving;
|
||||
} // cfg.extraSettings;
|
||||
serverSettingsFile = pkgs.writeText "server-settings.json" (builtins.toJSON (filterAttrsRecursive (n: v: v != null) serverSettings));
|
||||
modDir = pkgs.factorio-utils.mkModDirDrv cfg.mods;
|
||||
@ -193,6 +194,15 @@ in
|
||||
Autosave interval in minutes.
|
||||
'';
|
||||
};
|
||||
nonBlockingSaving = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Highly experimental feature, enable only at your own risk of losing your saves.
|
||||
On UNIX systems, the server will fork itself to create an autosave.
|
||||
Autosaving on connected Windows clients will be disabled regardless of the autosave_only_on_server option.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
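A hedged usage sketch for the new option; services.factorio.enable is assumed from the rest of the module:

services.factorio = {
  enable = true;
  # Experimental: fork the server process to write autosaves
  nonBlockingSaving = true;
};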
|
||||
|
||||
|
@ -4,9 +4,7 @@ with lib;
|
||||
|
||||
let
|
||||
|
||||
pkg = if config.hardware.sane.snapshot
|
||||
then pkgs.sane-backends-git
|
||||
else pkgs.sane-backends;
|
||||
pkg = pkgs.sane-backends;
|
||||
|
||||
sanedConf = pkgs.writeTextFile {
|
||||
name = "saned.conf";
|
||||
|
@ -196,6 +196,7 @@ let
|
||||
domain: "${cfg.smtp.domain}",
|
||||
${optionalString (cfg.smtp.authentication != null) "authentication: :${cfg.smtp.authentication},"}
|
||||
enable_starttls_auto: ${boolToString cfg.smtp.enableStartTLSAuto},
|
||||
tls: ${boolToString cfg.smtp.tls},
|
||||
ca_file: "/etc/ssl/certs/ca-certificates.crt",
|
||||
openssl_verify_mode: '${cfg.smtp.opensslVerifyMode}'
|
||||
}
|
||||
@ -463,6 +464,12 @@ in {
|
||||
description = "Whether to try to use StartTLS.";
|
||||
};
|
||||
|
||||
tls = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Whether to use TLS wrapper-mode.";
|
||||
};
|
||||
|
||||
opensslVerifyMode = mkOption {
|
||||
type = types.str;
|
||||
default = "peer";
|
||||
|
@ -28,7 +28,7 @@ let
|
||||
unpack = id: (name: source:
|
||||
pkgs.stdenv.mkDerivation {
|
||||
name = "redmine-${id}-${name}";
|
||||
buildInputs = [ pkgs.unzip ];
|
||||
nativeBuildInputs = [ pkgs.unzip ];
|
||||
buildCommand = ''
|
||||
mkdir -p $out
|
||||
cd $out
|
||||
|
@ -30,12 +30,49 @@ in
|
||||
Whether to run the exporter as the local 'postgres' super user.
|
||||
'';
|
||||
};
|
||||
|
||||
# TODO perhaps LoadCredential would be more appropriate
|
||||
environmentFile = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
example = "/root/prometheus-postgres-exporter.env";
|
||||
description = ''
|
||||
Environment file as defined in <citerefentry>
|
||||
<refentrytitle>systemd.exec</refentrytitle><manvolnum>5</manvolnum>
|
||||
</citerefentry>.
|
||||
|
||||
Secrets may be passed to the service without adding them to the
|
||||
world-readable Nix store, by specifying placeholder variables as
|
||||
the option value in Nix and setting these variables accordingly in the
|
||||
environment file.
|
||||
|
||||
Environment variables from this file will be interpolated into the
|
||||
config file using envsubst with this syntax:
|
||||
<literal>$ENVIRONMENT ''${VARIABLE}</literal>
|
||||
|
||||
The main use is to set the DATA_SOURCE_NAME that contains the
|
||||
postgres password.
|
||||
|
||||
note that contents from this file will override dataSourceName
|
||||
if you have set it from nix.
|
||||
|
||||
<programlisting>
|
||||
# Content of the environment file
|
||||
DATA_SOURCE_NAME=postgresql://username:password@localhost:5432/postgres?sslmode=disable
|
||||
</programlisting>
|
||||
|
||||
Note that this file needs to be available on the host on which
|
||||
this exporter is running.
|
||||
'';
|
||||
};
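A sketch of how the new option could be wired up; enable and runAsLocalSuperUser are taken from this module, while the file path is a placeholder:

services.prometheus.exporters.postgres = {
  enable = true;
  runAsLocalSuperUser = true;
  # Contains e.g. DATA_SOURCE_NAME=postgresql://...
  environmentFile = "/root/prometheus-postgres-exporter.env";
};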
|
||||
|
||||
};
|
||||
serviceOpts = {
|
||||
environment.DATA_SOURCE_NAME = cfg.dataSourceName;
|
||||
serviceConfig = {
|
||||
DynamicUser = false;
|
||||
User = mkIf cfg.runAsLocalSuperUser (mkForce "postgres");
|
||||
EnvironmentFile = mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-postgres-exporter}/bin/postgres_exporter \
|
||||
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
|
@ -1,7 +1,7 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
inherit (lib) mkEnableOption mkIf mkOption types;
|
||||
inherit (lib) mkEnableOption mkIf mkOption optionalString types;
|
||||
|
||||
generic = variant:
|
||||
let
|
||||
@ -26,6 +26,14 @@ let
|
||||
<link xlink:href='http://bird.network.cz/'/>
|
||||
'';
|
||||
};
|
||||
checkConfig = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether the config should be checked at build time.
|
||||
Disabling this might become necessary if the config includes files not present during build time.
|
||||
'';
|
||||
};
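A usage sketch, assuming the bird2 variant of this generic module; the include path is a placeholder for a file that only exists at runtime:

services.bird2 = {
  enable = true;
  config = ''
    include "/etc/bird/runtime-only.conf";
  '';
  # The include above only exists at runtime, so skip the build-time check
  checkConfig = false;
};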
|
||||
};
|
||||
};
|
||||
|
||||
@ -36,7 +44,7 @@ let
|
||||
environment.etc."bird/${variant}.conf".source = pkgs.writeTextFile {
|
||||
name = "${variant}.conf";
|
||||
text = cfg.config;
|
||||
checkPhase = ''
|
||||
checkPhase = optionalString cfg.checkConfig ''
|
||||
${pkg}/bin/${birdBin} -d -p -c $out
|
||||
'';
|
||||
};
|
||||
@ -50,7 +58,7 @@ let
|
||||
Type = "forking";
|
||||
Restart = "on-failure";
|
||||
ExecStart = "${pkg}/bin/${birdBin} -c /etc/bird/${variant}.conf -u ${variant} -g ${variant}";
|
||||
ExecReload = "${pkg}/bin/${birdc} configure";
|
||||
ExecReload = "/bin/sh -c '${pkg}/bin/${birdBin} -c /etc/bird/${variant}.conf -p && ${pkg}/bin/${birdc} configure'";
|
||||
ExecStop = "${pkg}/bin/${birdc} down";
|
||||
CapabilityBoundingSet = [ "CAP_CHOWN" "CAP_FOWNER" "CAP_DAC_OVERRIDE" "CAP_SETUID" "CAP_SETGID"
|
||||
# see bird/sysdep/linux/syspriv.h
|
||||
|
@ -245,7 +245,7 @@ in
|
||||
fi
|
||||
|
||||
if [ -z "$CJDNS_ADMIN_PASSWORD" ]; then
|
||||
echo "CJDNS_ADMIN_PASSWORD=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 96)" \
|
||||
echo "CJDNS_ADMIN_PASSWORD=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 32)" \
|
||||
>> /etc/cjdns.keys
|
||||
fi
|
||||
'';
|
||||
|
62
nixos/modules/services/networking/inspircd.nix
Normal file
@ -0,0 +1,62 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.inspircd;
|
||||
|
||||
configFile = pkgs.writeText "inspircd.conf" cfg.config;
|
||||
|
||||
in {
|
||||
meta = {
|
||||
maintainers = [ lib.maintainers.sternenseemann ];
|
||||
};
|
||||
|
||||
options = {
|
||||
services.inspircd = {
|
||||
enable = lib.mkEnableOption "InspIRCd";
|
||||
|
||||
package = lib.mkOption {
|
||||
type = lib.types.package;
|
||||
default = pkgs.inspircd;
|
||||
defaultText = lib.literalExample "pkgs.inspircd";
|
||||
example = lib.literalExample "pkgs.inspircdMinimal";
|
||||
description = ''
|
||||
The InspIRCd package to use. This is mainly useful
|
||||
to specify an overridden version of the
|
||||
<literal>pkgs.inspircd</literal> derivation, for
|
||||
example if you want to use a more minimal InspIRCd
|
||||
distribution with fewer modules enabled or with
|
||||
modules enabled which can't be distributed in binary
|
||||
form due to licensing issues.
|
||||
'';
|
||||
};
|
||||
|
||||
config = lib.mkOption {
|
||||
type = lib.types.lines;
|
||||
description = ''
|
||||
Verbatim <literal>inspircd.conf</literal> file.
|
||||
For a list of options, consult the
|
||||
<link xlink:href="https://docs.inspircd.org/3/configuration/">InspIRCd documentation</link>, the
|
||||
<link xlink:href="https://docs.inspircd.org/3/modules/">Module documentation</link>
|
||||
and the example configuration files distributed
|
||||
with <literal>pkgs.inspircd.doc</literal>.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
systemd.services.inspircd = {
|
||||
description = "InspIRCd - the stable, high-performance and modular Internet Relay Chat Daemon";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
requires = [ "network.target" ];
|
||||
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecStart = ''
|
||||
${lib.getBin cfg.package}/bin/inspircd start --config ${configFile} --nofork --nopid
|
||||
'';
|
||||
DynamicUser = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
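A minimal usage sketch for the new module; the configuration contents themselves are left to a separate file, per the linked InspIRCd documentation:

services.inspircd = {
  enable = true;
  # Verbatim inspircd.conf; consult the InspIRCd docs for actual settings
  config = builtins.readFile ./inspircd.conf;
};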
|
@ -238,7 +238,7 @@ let
|
||||
wantedBy = [ "wireguard-${name}.service" ];
|
||||
requiredBy = [ "wireguard-${name}.service" ];
|
||||
before = [ "wireguard-${name}.service" ];
|
||||
path = with pkgs; [ wireguard ];
|
||||
path = with pkgs; [ wireguard-tools ];
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
|
@ -104,7 +104,7 @@ let
|
||||
ignoreCollisions = true;
|
||||
};
|
||||
|
||||
filterGutenprint = pkgs: filter (pkg: pkg.meta.isGutenprint or false == true) pkgs;
|
||||
filterGutenprint = filter (pkg: pkg.meta.isGutenprint or false == true);
|
||||
containsGutenprint = pkgs: length (filterGutenprint pkgs) > 0;
|
||||
getGutenprint = pkgs: head (filterGutenprint pkgs);
|
||||
|
||||
|
@ -29,15 +29,14 @@ in {
|
||||
};
|
||||
};
|
||||
|
||||
# We use the 'out' output, since localtime has its 'bin' output
|
||||
# first, so that is what we get if we use the derivation bare.
|
||||
# Install the polkit rules.
|
||||
environment.systemPackages = [ pkgs.localtime.out ];
|
||||
environment.systemPackages = [ pkgs.localtime ];
|
||||
# Install the systemd unit.
|
||||
systemd.packages = [ pkgs.localtime.out ];
|
||||
systemd.packages = [ pkgs.localtime ];
|
||||
|
||||
users.users.localtimed = {
|
||||
description = "Taskserver user";
|
||||
description = "localtime daemon";
|
||||
isSystemUser = true;
|
||||
};
|
||||
|
||||
systemd.services.localtime = {
|
||||
|
365
nixos/modules/services/web-apps/bookstack.nix
Normal file
@ -0,0 +1,365 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.bookstack;
|
||||
bookstack = pkgs.bookstack.override {
|
||||
dataDir = cfg.dataDir;
|
||||
};
|
||||
db = cfg.database;
|
||||
mail = cfg.mail;
|
||||
|
||||
user = cfg.user;
|
||||
group = cfg.group;
|
||||
|
||||
# shell script for local administration
|
||||
artisan = pkgs.writeScriptBin "bookstack" ''
|
||||
#! ${pkgs.runtimeShell}
|
||||
cd ${bookstack}
|
||||
sudo=exec
|
||||
if [[ "$USER" != ${user} ]]; then
|
||||
sudo='exec /run/wrappers/bin/sudo -u ${user}'
|
||||
fi
|
||||
$sudo ${pkgs.php}/bin/php artisan "$@"
|
||||
'';
|
||||
|
||||
|
||||
in {
|
||||
options.services.bookstack = {
|
||||
|
||||
enable = mkEnableOption "BookStack";
|
||||
|
||||
user = mkOption {
|
||||
default = "bookstack";
|
||||
description = "User bookstack runs as.";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
default = "bookstack";
|
||||
description = "Group bookstack runs as.";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
appKeyFile = mkOption {
|
||||
description = ''
|
||||
A file containing the AppKey.
|
||||
Used for encryption where needed. Can be generated with <code>head -c 32 /dev/urandom | base64</code> and must be prefixed with <literal>base64:</literal>.
|
||||
'';
|
||||
example = "/run/keys/bookstack-appkey";
|
||||
type = types.path;
|
||||
};
|
||||
|
||||
appURL = mkOption {
|
||||
description = ''
|
||||
The root URL that you want to host BookStack on. All URLs in BookStack will be generated using this value.
|
||||
If you change this in the future you may need to run a command to update stored URLs in the database. Command example: <code>php artisan bookstack:update-url https://old.example.com https://new.example.com</code>
|
||||
'';
|
||||
example = "https://example.com";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
cacheDir = mkOption {
|
||||
description = "BookStack cache directory";
|
||||
default = "/var/cache/bookstack";
|
||||
type = types.path;
|
||||
};
|
||||
|
||||
dataDir = mkOption {
|
||||
description = "BookStack data directory";
|
||||
default = "/var/lib/bookstack";
|
||||
type = types.path;
|
||||
};
|
||||
|
||||
database = {
|
||||
host = mkOption {
|
||||
type = types.str;
|
||||
default = "localhost";
|
||||
description = "Database host address.";
|
||||
};
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 3306;
|
||||
description = "Database host port.";
|
||||
};
|
||||
name = mkOption {
|
||||
type = types.str;
|
||||
default = "bookstack";
|
||||
description = "Database name.";
|
||||
};
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = user;
|
||||
defaultText = "\${user}";
|
||||
description = "Database username.";
|
||||
};
|
||||
passwordFile = mkOption {
|
||||
type = with types; nullOr path;
|
||||
default = null;
|
||||
example = "/run/keys/bookstack-dbpassword";
|
||||
description = ''
|
||||
A file containing the password corresponding to
|
||||
<option>database.user</option>.
|
||||
'';
|
||||
};
|
||||
createLocally = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Create the database and database user locally.";
|
||||
};
|
||||
};
|
||||
|
||||
mail = {
|
||||
driver = mkOption {
|
||||
type = types.enum [ "smtp" "sendmail" ];
|
||||
default = "smtp";
|
||||
description = "Mail driver to use.";
|
||||
};
|
||||
host = mkOption {
|
||||
type = types.str;
|
||||
default = "localhost";
|
||||
description = "Mail host address.";
|
||||
};
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 1025;
|
||||
description = "Mail host port.";
|
||||
};
|
||||
fromName = mkOption {
|
||||
type = types.str;
|
||||
default = "BookStack";
|
||||
description = "Mail \"from\" name.";
|
||||
};
|
||||
from = mkOption {
|
||||
type = types.str;
|
||||
default = "mail@bookstackapp.com";
|
||||
description = "Mail \"from\" email.";
|
||||
};
|
||||
user = mkOption {
|
||||
type = with types; nullOr str;
|
||||
default = null;
|
||||
example = "bookstack";
|
||||
description = "Mail username.";
|
||||
};
|
||||
passwordFile = mkOption {
|
||||
type = with types; nullOr path;
|
||||
default = null;
|
||||
example = "/run/keys/bookstack-mailpassword";
|
||||
description = ''
|
||||
A file containing the password corresponding to
|
||||
<option>mail.user</option>.
|
||||
'';
|
||||
};
|
||||
encryption = mkOption {
|
||||
type = with types; nullOr (enum [ "tls" ]);
|
||||
default = null;
|
||||
description = "SMTP encryption mechanism to use.";
|
||||
};
|
||||
};
|
||||
|
||||
maxUploadSize = mkOption {
|
||||
type = types.str;
|
||||
default = "18M";
|
||||
example = "1G";
|
||||
description = "The maximum size for uploads (e.g. images).";
|
||||
};
|
||||
|
||||
poolConfig = mkOption {
|
||||
type = with types; attrsOf (oneOf [ str int bool ]);
|
||||
default = {
|
||||
"pm" = "dynamic";
|
||||
"pm.max_children" = 32;
|
||||
"pm.start_servers" = 2;
|
||||
"pm.min_spare_servers" = 2;
|
||||
"pm.max_spare_servers" = 4;
|
||||
"pm.max_requests" = 500;
|
||||
};
|
||||
description = ''
|
||||
Options for the bookstack PHP pool. See the documentation on <literal>php-fpm.conf</literal>
|
||||
for details on configuration directives.
|
||||
'';
|
||||
};
|
||||
|
||||
nginx = mkOption {
|
||||
type = types.submodule (
|
||||
recursiveUpdate
|
||||
(import ../web-servers/nginx/vhost-options.nix { inherit config lib; }) {}
|
||||
);
|
||||
default = {};
|
||||
example = {
|
||||
serverAliases = [
|
||||
"bookstack.\${config.networking.domain}"
|
||||
];
|
||||
# To enable encryption and let Let's Encrypt take care of certificates
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
};
|
||||
description = ''
|
||||
With this option, you can customize the nginx virtualHost settings.
|
||||
'';
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
type = types.nullOr types.lines;
|
||||
default = null;
|
||||
example = ''
|
||||
ALLOWED_IFRAME_HOSTS="https://example.com"
|
||||
WKHTMLTOPDF=/home/user/bins/wkhtmltopdf
|
||||
'';
|
||||
description = ''
|
||||
Lines to be appended verbatim to the BookStack configuration.
|
||||
Refer to <link xlink:href="https://www.bookstackapp.com/docs/"/> for details on supported values.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
assertions = [
|
||||
{ assertion = db.createLocally -> db.user == user;
|
||||
message = "services.bookstack.database.user must be set to ${user} if services.mediawiki.database.createLocally is set true.";
|
||||
}
|
||||
{ assertion = db.createLocally -> db.passwordFile == null;
|
||||
message = "services.bookstack.database.passwordFile cannot be specified if services.bookstack.database.createLocally is set to true.";
|
||||
}
|
||||
];
|
||||
|
||||
environment.systemPackages = [ artisan ];
|
||||
|
||||
services.mysql = mkIf db.createLocally {
|
||||
enable = true;
|
||||
package = mkDefault pkgs.mariadb;
|
||||
ensureDatabases = [ db.name ];
|
||||
ensureUsers = [
|
||||
{ name = db.user;
|
||||
ensurePermissions = { "${db.name}.*" = "ALL PRIVILEGES"; };
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
services.phpfpm.pools.bookstack = {
|
||||
inherit user;
|
||||
inherit group;
|
||||
phpOptions = ''
|
||||
log_errors = on
|
||||
post_max_size = ${cfg.maxUploadSize}
|
||||
upload_max_filesize = ${cfg.maxUploadSize}
|
||||
'';
|
||||
settings = {
|
||||
"listen.mode" = "0660";
|
||||
"listen.owner" = user;
|
||||
"listen.group" = group;
|
||||
} // cfg.poolConfig;
|
||||
};
|
||||
|
||||
services.nginx = {
|
||||
enable = mkDefault true;
|
||||
virtualHosts.bookstack = mkMerge [ cfg.nginx {
|
||||
root = mkForce "${bookstack}/public";
|
||||
extraConfig = optionalString (cfg.nginx.addSSL || cfg.nginx.forceSSL || cfg.nginx.onlySSL || cfg.nginx.enableACME) "fastcgi_param HTTPS on;";
|
||||
locations = {
|
||||
"/" = {
|
||||
index = "index.php";
|
||||
extraConfig = ''try_files $uri $uri/ /index.php?$query_string;'';
|
||||
};
|
||||
"~ \.php$" = {
|
||||
extraConfig = ''
|
||||
try_files $uri $uri/ /index.php?$query_string;
|
||||
include ${pkgs.nginx}/conf/fastcgi_params;
|
||||
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
|
||||
fastcgi_param REDIRECT_STATUS 200;
|
||||
fastcgi_pass unix:${config.services.phpfpm.pools."bookstack".socket};
|
||||
${optionalString (cfg.nginx.addSSL || cfg.nginx.forceSSL || cfg.nginx.onlySSL || cfg.nginx.enableACME) "fastcgi_param HTTPS on;"}
|
||||
'';
|
||||
};
|
||||
"~ \.(js|css|gif|png|ico|jpg|jpeg)$" = {
|
||||
extraConfig = "expires 365d;";
|
||||
};
|
||||
};
|
||||
}];
|
||||
};
|
||||
|
||||
systemd.services.bookstack-setup = {
|
||||
description = "Preperation tasks for BookStack";
|
||||
before = [ "phpfpm-bookstack.service" ];
|
||||
after = optional db.createLocally "mysql.service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = user;
|
||||
WorkingDirectory = "${bookstack}";
|
||||
};
|
||||
script = ''
|
||||
# create .env file
|
||||
echo "
|
||||
APP_KEY=base64:$(head -n1 ${cfg.appKeyFile})
|
||||
APP_URL=${cfg.appURL}
|
||||
DB_HOST=${db.host}
|
||||
DB_PORT=${toString db.port}
|
||||
DB_DATABASE=${db.name}
|
||||
DB_USERNAME=${db.user}
|
||||
MAIL_DRIVER=${mail.driver}
|
||||
MAIL_FROM_NAME=\"${mail.fromName}\"
|
||||
MAIL_FROM=${mail.from}
|
||||
MAIL_HOST=${mail.host}
|
||||
MAIL_PORT=${toString mail.port}
|
||||
${optionalString (mail.user != null) "MAIL_USERNAME=${mail.user};"}
|
||||
${optionalString (mail.encryption != null) "MAIL_ENCRYPTION=${mail.encryption};"}
|
||||
${optionalString (db.passwordFile != null) "DB_PASSWORD=$(head -n1 ${db.passwordFile})"}
|
||||
${optionalString (mail.passwordFile != null) "MAIL_PASSWORD=$(head -n1 ${mail.passwordFile})"}
|
||||
APP_SERVICES_CACHE=${cfg.cacheDir}/services.php
|
||||
APP_PACKAGES_CACHE=${cfg.cacheDir}/packages.php
|
||||
APP_CONFIG_CACHE=${cfg.cacheDir}/config.php
|
||||
APP_ROUTES_CACHE=${cfg.cacheDir}/routes-v7.php
|
||||
APP_EVENTS_CACHE=${cfg.cacheDir}/events.php
|
||||
${optionalString (cfg.nginx.addSSL || cfg.nginx.forceSSL || cfg.nginx.onlySSL || cfg.nginx.enableACME) "SESSION_SECURE_COOKIE=true"}
|
||||
${toString cfg.extraConfig}
|
||||
" > "${cfg.dataDir}/.env"
|
||||
# set permissions
|
||||
chmod 700 "${cfg.dataDir}/.env"
|
||||
|
||||
# migrate db
|
||||
${pkgs.php}/bin/php artisan migrate --force
|
||||
|
||||
# create caches
|
||||
${pkgs.php}/bin/php artisan config:cache
|
||||
${pkgs.php}/bin/php artisan route:cache
|
||||
${pkgs.php}/bin/php artisan view:cache
|
||||
'';
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${cfg.cacheDir} 0700 ${user} ${group} - -"
|
||||
"d ${cfg.dataDir} 0710 ${user} ${group} - -"
|
||||
"d ${cfg.dataDir}/public 0750 ${user} ${group} - -"
|
||||
"d ${cfg.dataDir}/public/uploads 0750 ${user} ${group} - -"
|
||||
"d ${cfg.dataDir}/storage 0700 ${user} ${group} - -"
|
||||
"d ${cfg.dataDir}/storage/app 0700 ${user} ${group} - -"
|
||||
"d ${cfg.dataDir}/storage/fonts 0700 ${user} ${group} - -"
|
||||
"d ${cfg.dataDir}/storage/framework 0700 ${user} ${group} - -"
|
||||
"d ${cfg.dataDir}/storage/framework/cache 0700 ${user} ${group} - -"
|
||||
"d ${cfg.dataDir}/storage/framework/sessions 0700 ${user} ${group} - -"
|
||||
"d ${cfg.dataDir}/storage/framework/views 0700 ${user} ${group} - -"
|
||||
"d ${cfg.dataDir}/storage/logs 0700 ${user} ${group} - -"
|
||||
"d ${cfg.dataDir}/storage/uploads 0700 ${user} ${group} - -"
|
||||
];
|
||||
|
||||
users = {
|
||||
users = mkIf (user == "bookstack") {
|
||||
bookstack = {
|
||||
inherit group;
|
||||
isSystemUser = true;
|
||||
};
|
||||
"${config.services.nginx.user}".extraGroups = [ group ];
|
||||
};
|
||||
groups = mkIf (group == "bookstack") {
|
||||
bookstack = {};
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
meta.maintainers = with maintainers; [ ymarkus ];
|
||||
}
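A hedged configuration sketch using only options defined above; hostnames and key paths are placeholders:

services.bookstack = {
  enable = true;
  appURL = "https://bookstack.example.com";
  appKeyFile = "/run/keys/bookstack-appkey";
  database.createLocally = true;
  nginx = {
    forceSSL = true;
    enableACME = true;
  };
};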
|
@ -193,7 +193,7 @@ let
|
||||
};
|
||||
sourceRoot = ".";
|
||||
# We need unzip to build this package
|
||||
buildInputs = [ pkgs.unzip ];
|
||||
nativeBuildInputs = [ pkgs.unzip ];
|
||||
# Installing simply means copying all files to the output directory
|
||||
installPhase = "mkdir -p $out; cp -R * $out/";
|
||||
};
|
||||
@ -220,7 +220,7 @@ let
|
||||
sha256 = "4de5ff31d54dd61bbccaf092c9e74c1af3a4c53e07aa59f60457a8f00cfb23a6";
|
||||
};
|
||||
# We need unzip to build this package
|
||||
buildInputs = [ pkgs.unzip ];
|
||||
nativeBuildInputs = [ pkgs.unzip ];
|
||||
# Installing simply means copying all files to the output directory
|
||||
installPhase = "mkdir -p $out; cp -R * $out/";
|
||||
};
|
||||
|
@ -133,8 +133,10 @@ in
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
preStart = ''
|
||||
install -m 700 -o '${cfg.user}' -g '${cfg.group}' ${cfg.certFile} ${cfg.dataDir}/cert.pem
|
||||
install -m 700 -o '${cfg.user}' -g '${cfg.group}' ${cfg.keyFile} ${cfg.dataDir}/key.pem
|
||||
${optionalString (cfg.insecure != true) ''
|
||||
install -m 700 -o '${cfg.user}' -g '${cfg.group}' ${cfg.certFile} ${cfg.dataDir}/cert.pem
|
||||
install -m 700 -o '${cfg.user}' -g '${cfg.group}' ${cfg.keyFile} ${cfg.dataDir}/key.pem
|
||||
''}
|
||||
'';
|
||||
|
||||
serviceConfig = mkMerge [
|
||||
|
@ -43,8 +43,32 @@ let
|
||||
LogsDirectoryMode = "0750";
|
||||
# Access write directories
|
||||
UMask = "0027";
|
||||
# Capabilities
|
||||
CapabilityBoundingSet = "";
|
||||
# Security
|
||||
NoNewPrivileges = true;
|
||||
# Sandboxing
|
||||
ProtectSystem = "strict";
|
||||
ProtectHome = true;
|
||||
PrivateTmp = true;
|
||||
PrivateDevices = true;
|
||||
PrivateUsers = true;
|
||||
ProtectClock = true;
|
||||
ProtectHostname = true;
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectControlGroups = true;
|
||||
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" ];
|
||||
RestrictNamespaces = true;
|
||||
LockPersonality = true;
|
||||
MemoryDenyWriteExecute = false;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
PrivateMounts = true;
|
||||
# System Call Filtering
|
||||
SystemCallArchitectures = "native";
|
||||
SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @module @mount @obsolete @reboot @resources @setuid @swap";
|
||||
};
|
||||
|
||||
envFile = pkgs.writeText "mastodon.env" (lib.concatMapStrings (s: s + "\n") (
|
||||
|
@ -109,7 +109,7 @@ let
|
||||
sha256 = "1rhba5h5fjlhy8p05zf0p14c9iagfh96y91r36ni0rmk6y891lyd";
|
||||
};
|
||||
# We need unzip to build this package
|
||||
buildInputs = [ pkgs.unzip ];
|
||||
nativeBuildInputs = [ pkgs.unzip ];
|
||||
# Installing simply means copying all files to the output directory
|
||||
installPhase = "mkdir -p $out; cp -R * $out/";
|
||||
};
|
||||
@ -136,7 +136,7 @@ let
|
||||
sha256 = "0rjwm811f4aa4q43r77zxlpklyb85q08f9c8ns2akcarrvj5ydx3";
|
||||
};
|
||||
# We need unzip to build this package
|
||||
buildInputs = [ pkgs.unzip ];
|
||||
nativeBuildInputs = [ pkgs.unzip ];
|
||||
# Installing simply means copying all files to the output directory
|
||||
installPhase = "mkdir -p $out; cp -R * $out/";
|
||||
};
|
||||
|
@ -804,7 +804,7 @@ in
|
||||
ProtectControlGroups = true;
|
||||
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
|
||||
LockPersonality = true;
|
||||
MemoryDenyWriteExecute = !(builtins.any (mod: (mod.allowMemoryWriteExecute or false)) cfg.package.modules);
|
||||
MemoryDenyWriteExecute = !(builtins.any (mod: (mod.allowMemoryWriteExecute or false)) (optionals (cfg.package ? modules) cfg.package.modules));
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
PrivateMounts = true;
|
||||
|
@ -15,12 +15,15 @@ import re
|
||||
import datetime
|
||||
import glob
|
||||
import os.path
|
||||
from typing import Tuple, List, Optional
|
||||
|
||||
def copy_if_not_exists(source, dest):
|
||||
|
||||
def copy_if_not_exists(source: str, dest: str) -> None:
|
||||
if not os.path.exists(dest):
|
||||
shutil.copyfile(source, dest)
|
||||
|
||||
def system_dir(profile, generation):
|
||||
|
||||
def system_dir(profile: Optional[str], generation: int) -> str:
|
||||
if profile:
|
||||
return "/nix/var/nix/profiles/system-profiles/%s-%d-link" % (profile, generation)
|
||||
else:
|
||||
@ -42,7 +45,8 @@ MEMTEST_BOOT_ENTRY = """title MemTest86
|
||||
efi /efi/memtest86/BOOTX64.efi
|
||||
"""
|
||||
|
||||
def write_loader_conf(profile, generation):
|
||||
|
||||
def write_loader_conf(profile: Optional[str], generation: int) -> None:
with open("@efiSysMountPoint@/loader/loader.conf.tmp", 'w') as f:
if "@timeout@" != "":
f.write("timeout @timeout@\n")
@ -55,10 +59,12 @@ def write_loader_conf(profile, generation):
f.write("console-mode @consoleMode@\n");
os.rename("@efiSysMountPoint@/loader/loader.conf.tmp", "@efiSysMountPoint@/loader/loader.conf")

def profile_path(profile, generation, name):

def profile_path(profile: Optional[str], generation: int, name: str) -> str:
return os.readlink("%s/%s" % (system_dir(profile, generation), name))

def copy_from_profile(profile, generation, name, dry_run=False):

def copy_from_profile(profile: Optional[str], generation: int, name: str, dry_run: bool = False) -> str:
store_file_path = profile_path(profile, generation, name)
suffix = os.path.basename(store_file_path)
store_dir = os.path.basename(os.path.dirname(store_file_path))
@ -67,7 +73,8 @@ def copy_from_profile(profile, generation, name, dry_run=False):
copy_if_not_exists(store_file_path, "@efiSysMountPoint@%s" % (efi_file_path))
return efi_file_path

def describe_generation(generation_dir):

def describe_generation(generation_dir: str) -> str:
try:
with open("%s/nixos-version" % generation_dir) as f:
nixos_version = f.read()
@ -87,7 +94,8 @@ def describe_generation(generation_dir):

return description

def write_entry(profile, generation, machine_id):

def write_entry(profile: Optional[str], generation: int, machine_id: str) -> None:
kernel = copy_from_profile(profile, generation, "kernel")
initrd = copy_from_profile(profile, generation, "initrd")
try:
@ -116,14 +124,16 @@ def write_entry(profile, generation, machine_id):
f.write("machine-id %s\n" % machine_id)
os.rename(tmp_path, entry_file)

def mkdir_p(path):

def mkdir_p(path: str) -> None:
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise

def get_generations(profile=None):

def get_generations(profile: Optional[str] = None) -> List[Tuple[Optional[str], int]]:
gen_list = subprocess.check_output([
"@nix@/bin/nix-env",
"--list-generations",
@ -137,7 +147,8 @@ def get_generations(profile=None):
configurationLimit = @configurationLimit@
return [ (profile, int(line.split()[0])) for line in gen_lines ][-configurationLimit:]

def remove_old_entries(gens):

def remove_old_entries(gens: List[Tuple[Optional[str], int]]) -> None:
rex_profile = re.compile("^@efiSysMountPoint@/loader/entries/nixos-(.*)-generation-.*\.conf$")
rex_generation = re.compile("^@efiSysMountPoint@/loader/entries/nixos.*-generation-(.*)\.conf$")
known_paths = []
@ -150,8 +161,8 @@ def remove_old_entries(gens):
prof = rex_profile.sub(r"\1", path)
else:
prof = "system"
gen = int(rex_generation.sub(r"\1", path))
if not (prof, gen) in gens:
gen_number = int(rex_generation.sub(r"\1", path))
if not (prof, gen_number) in gens:
os.unlink(path)
except ValueError:
pass
@ -159,7 +170,8 @@ def remove_old_entries(gens):
if not path in known_paths and not os.path.isdir(path):
os.unlink(path)

def get_profiles():

def get_profiles() -> List[str]:
if os.path.isdir("/nix/var/nix/profiles/system-profiles/"):
return [x
for x in os.listdir("/nix/var/nix/profiles/system-profiles/")
@ -167,7 +179,8 @@ def get_profiles():
else:
return []

def main():

def main() -> None:
parser = argparse.ArgumentParser(description='Update NixOS-related systemd-boot files')
parser.add_argument('default_config', metavar='DEFAULT-CONFIG', help='The default NixOS config to boot')
args = parser.parse_args()
@ -182,7 +195,9 @@ def main():
# be there on newly installed systems, so let's generate one so that
# bootctl can find it and we can also pass it to write_entry() later.
cmd = ["@systemd@/bin/systemd-machine-id-setup", "--print"]
machine_id = subprocess.check_output(cmd).rstrip()
machine_id = subprocess.run(
cmd, text=True, check=True, stdout=subprocess.PIPE
).stdout.rstrip()

if os.getenv("NIXOS_INSTALL_GRUB") == "1":
warnings.warn("NIXOS_INSTALL_GRUB env var deprecated, use NIXOS_INSTALL_BOOTLOADER", DeprecationWarning)
@ -213,7 +228,6 @@ def main():
print("updating systemd-boot from %s to %s" % (sdboot_version, systemd_version))
subprocess.check_call(["@systemd@/bin/bootctl", "--path=@efiSysMountPoint@", "update"])


mkdir_p("@efiSysMountPoint@/efi/nixos")
mkdir_p("@efiSysMountPoint@/loader/entries")

@ -252,5 +266,6 @@ def main():
if rc != 0:
print("could not sync @efiSysMountPoint@: {}".format(os.strerror(rc)), file=sys.stderr)


if __name__ == '__main__':
main()
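
For readers following the annotation hunks above, a standalone Python sketch (not the commit's code) of the same two patterns: Optional/List/Tuple signatures, and subprocess.run(text=True, check=True) in place of check_output().rstrip(). The helper names and the systemd-machine-id-setup path are illustrative assumptions.

# Minimal sketch of the typing + subprocess.run style adopted above.
import subprocess
from typing import List, Optional, Tuple


def run_stdout(cmd: List[str]) -> str:
    # text=True decodes stdout to str; check=True raises CalledProcessError on failure.
    return subprocess.run(cmd, text=True, check=True, stdout=subprocess.PIPE).stdout


def get_machine_id() -> str:
    # Plain command name here is an assumption; the real script substitutes @systemd@.
    return run_stdout(["systemd-machine-id-setup", "--print"]).rstrip()


def parse_generations(listing: str, profile: Optional[str] = None) -> List[Tuple[Optional[str], int]]:
    # Same shape as the annotated get_generations(): (profile, generation) tuples.
    return [(profile, int(line.split()[0])) for line in listing.splitlines() if line.strip()]


if __name__ == "__main__":
    print(parse_generations("  42   2021-03-10 12:00:00\n  43   2021-03-11 09:30:00"))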
@ -7,7 +7,7 @@ let

efi = config.boot.loader.efi;

gummibootBuilder = pkgs.substituteAll {
systemdBootBuilder = pkgs.substituteAll {
src = ./systemd-boot-builder.py;

isExecutable = true;
@ -30,6 +30,17 @@ let

memtest86 = if cfg.memtest86.enable then pkgs.memtest86-efi else "";
};

checkedSystemdBootBuilder = pkgs.runCommand "systemd-boot" {
nativeBuildInputs = [ pkgs.mypy ];
} ''
install -m755 ${systemdBootBuilder} $out
mypy \
--no-implicit-optional \
--disallow-untyped-calls \
--disallow-untyped-defs \
$out
'';
in {

imports =
@ -131,7 +142,7 @@ in {
boot.loader.supportsInitrdSecrets = true;

system = {
build.installBootLoader = gummibootBuilder;
build.installBootLoader = checkedSystemdBootBuilder;

boot.loader.id = "systemd-boot";
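
The checkedSystemdBootBuilder derivation above gates the boot-loader install script on a mypy pass with three strictness flags. As an illustration only, here is roughly the same check driven from Python via mypy's documented api module; the file name is a placeholder and the flags are copied from the derivation.

# Sketch: fail fast if a script does not type-check under the same flags.
import sys
from mypy import api  # assumes mypy is installed and exposes its mypy.api entry point

FLAGS = ["--no-implicit-optional", "--disallow-untyped-calls", "--disallow-untyped-defs"]


def check(path: str) -> None:
    stdout, stderr, exit_status = api.run(FLAGS + [path])
    if exit_status != 0:
        # Mirrors the derivation: a type error aborts instead of producing an output.
        sys.stderr.write(stdout + stderr)
        raise SystemExit(exit_status)


if __name__ == "__main__":
    check(sys.argv[1] if len(sys.argv) > 1 else "systemd-boot-builder.py")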
@ -167,6 +167,7 @@ exec {logOutFd}>&- {logErrFd}>&-

# Start systemd.
echo "starting systemd..."

PATH=/run/current-system/systemd/lib/systemd:@fsPackagesPath@ \
LOCALE_ARCHIVE=/run/current-system/sw/lib/locale/locale-archive \
LOCALE_ARCHIVE=/run/current-system/sw/lib/locale/locale-archive @systemdUnitPathEnvVar@ \
exec @systemdExecutable@
@ -10,7 +10,7 @@ let
src = ./stage-2-init.sh;
shellDebug = "${pkgs.bashInteractive}/bin/bash";
shell = "${pkgs.bash}/bin/bash";
inherit (config.boot) systemdExecutable;
inherit (config.boot) systemdExecutable extraSystemdUnitPaths;
isExecutable = true;
inherit (config.nix) readOnlyStore;
inherit useHostResolvConf;
@ -20,6 +20,10 @@ let
pkgs.util-linux
] ++ lib.optional useHostResolvConf pkgs.openresolv);
fsPackagesPath = lib.makeBinPath config.system.fsPackages;
systemdUnitPathEnvVar = lib.optionalString (config.boot.extraSystemdUnitPaths != [])
("SYSTEMD_UNIT_PATH="
+ builtins.concatStringsSep ":" config.boot.extraSystemdUnitPaths
+ ":"); # If SYSTEMD_UNIT_PATH ends with an empty component (":"), the usual unit load path will be appended to the contents of the variable
postBootCommands = pkgs.writeText "local-cmds"
''
${config.boot.postBootCommands}
@ -82,6 +86,15 @@ in
PATH.
'';
};

extraSystemdUnitPaths = mkOption {
default = [];
type = types.listOf types.str;
description = ''
Additional paths that get appended to the SYSTEMD_UNIT_PATH environment variable
that can contain mutable unit files.
'';
};
};

};
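
The systemdUnitPathEnvVar expression above joins the extra unit directories with ":" and deliberately ends with an empty component so systemd still appends its default unit load path. A small Python sketch of that value (not part of the module; the function name is made up):

from typing import List


def systemd_unit_path(extra_paths: List[str]) -> str:
    # No extra paths means no variable at all, matching lib.optionalString above.
    if not extra_paths:
        return ""
    # Trailing ":" = empty final component = "append the usual unit load path".
    return "SYSTEMD_UNIT_PATH=" + ":".join(extra_paths) + ":"


if __name__ == "__main__":
    # Example value for the systemd-unit-path test further below.
    print(systemd_unit_path(["/etc/systemd-rw/system"]))
    # -> SYSTEMD_UNIT_PATH=/etc/systemd-rw/system: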
@ -175,8 +175,10 @@ let
"timers.target.wants"
];

upstreamUserUnits =
[ "basic.target"
upstreamUserUnits = [
"app.slice"
"background.slice"
"basic.target"
"bluetooth.target"
"default.target"
"exit.target"
@ -184,6 +186,7 @@ let
"graphical-session.target"
"paths.target"
"printer.target"
"session.slice"
"shutdown.target"
"smartcard.target"
"sockets.target"
@ -193,6 +196,7 @@ let
"systemd-tmpfiles-clean.timer"
"systemd-tmpfiles-setup.service"
"timers.target"
"xdg-desktop-autostart.target"
];

makeJobScript = name: text:
@ -118,8 +118,9 @@ in
[network]
cni_plugin_dirs = ["${pkgs.cni-plugins}/bin/"]

${lib.optionalString (cfg.ociSeccompBpfHook.enable == true) ''
[engine]
init_path = "${pkgs.catatonit}/bin/catatonit"
${lib.optionalString (cfg.ociSeccompBpfHook.enable) ''
hooks_dir = [
"${config.boot.kernelPackages.oci-seccomp-bpf-hook}",
]
@ -5,5 +5,13 @@ let self = {
"17.03" = "gs://nixos-cloud-images/nixos-image-17.03.1082.4aab5c5798-x86_64-linux.raw.tar.gz";
"18.03" = "gs://nixos-cloud-images/nixos-image-18.03.132536.fdb5ba4cdf9-x86_64-linux.raw.tar.gz";
"18.09" = "gs://nixos-cloud-images/nixos-image-18.09.1228.a4c4cbb613c-x86_64-linux.raw.tar.gz";
latest = self."18.09";

# This format will be handled by the upcoming NixOPS 2.0 release.
# The old images based on a GS object are deprecated.
"20.09" = {
project = "nixos-cloud";
name = "nixos-image-20-09-3531-3858fbc08e6-x86-64-linux";
};

latest = self."20.09";
}; in self
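
The hunk above switches the "latest" alias to the 20.09 entry, which is a project/name pair rather than a gs:// URL. A tiny Python sketch of how such a map resolves (data copied from the hunk; the dict itself is only an illustration):

# Named releases plus a "latest" alias pointing at the newest entry.
images = {
    "18.09": "gs://nixos-cloud-images/nixos-image-18.09.1228.a4c4cbb613c-x86_64-linux.raw.tar.gz",
    "20.09": {"project": "nixos-cloud", "name": "nixos-image-20-09-3531-3858fbc08e6-x86-64-linux"},
}
images["latest"] = images["20.09"]

if __name__ == "__main__":
    print(images["latest"])  # the NixOPS-2.0-style entry, not a GS object URL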
@ -253,7 +253,7 @@ in import ./make-test-python.nix ({ lib, ... }: {


def check_connection(node, domain, retries=3):
assert retries >= 0
assert retries >= 0, f"Failed to connect to https://{domain}"

result = node.succeed(
"openssl s_client -brief -verify 2 -CAfile /tmp/ca.crt"
@ -262,12 +262,12 @@ in import ./make-test-python.nix ({ lib, ... }: {

for line in result.lower().split("\n"):
if "verification" in line and "error" in line:
time.sleep(1)
time.sleep(3)
return check_connection(node, domain, retries - 1)


def check_connection_key_bits(node, domain, bits, retries=3):
assert retries >= 0
assert retries >= 0, f"Did not find expected number of bits ({bits}) in key"

result = node.succeed(
"openssl s_client -CAfile /tmp/ca.crt"
@ -277,12 +277,12 @@ in import ./make-test-python.nix ({ lib, ... }: {
print("Key type:", result)

if bits not in result:
time.sleep(1)
time.sleep(3)
return check_connection_key_bits(node, domain, bits, retries - 1)


def check_stapling(node, domain, retries=3):
assert retries >= 0
assert retries >= 0, "OCSP Stapling check failed"

# Pebble doesn't provide a full OCSP responder, so just check the URL
result = node.succeed(
@ -293,10 +293,23 @@ in import ./make-test-python.nix ({ lib, ... }: {
print("OCSP Responder URL:", result)

if "${caDomain}:4002" not in result.lower():
time.sleep(1)
time.sleep(3)
return check_stapling(node, domain, retries - 1)


def download_ca_certs(node, retries=5):
assert retries >= 0, "Failed to connect to pebble to download root CA certs"

exit_code, _ = node.execute("curl https://${caDomain}:15000/roots/0 > /tmp/ca.crt")
exit_code_2, _ = node.execute(
"curl https://${caDomain}:15000/intermediate-keys/0 >> /tmp/ca.crt"
)

if exit_code + exit_code_2 > 0:
time.sleep(3)
return download_ca_certs(node, retries - 1)


client.start()
dnsserver.start()

@ -313,8 +326,7 @@ in import ./make-test-python.nix ({ lib, ... }: {
acme.wait_for_unit("network-online.target")
acme.wait_for_unit("pebble.service")

client.succeed("curl https://${caDomain}:15000/roots/0 > /tmp/ca.crt")
client.succeed("curl https://${caDomain}:15000/intermediate-keys/0 >> /tmp/ca.crt")
download_ca_certs(client)

with subtest("Can request certificate with HTTPS-01 challenge"):
webserver.wait_for_unit("acme-finished-a.example.test.target")
@ -322,6 +334,21 @@ in import ./make-test-python.nix ({ lib, ... }: {
check_issuer(webserver, "a.example.test", "pebble")
check_connection(client, "a.example.test")

with subtest("Certificates and accounts have safe + valid permissions"):
group = "${nodes.webserver.config.security.acme.certs."a.example.test".group}"
webserver.succeed(
f"test $(stat -L -c \"%a %U %G\" /var/lib/acme/a.example.test/* | tee /dev/stderr | grep '640 acme {group}' | wc -l) -eq 5"
)
webserver.succeed(
f"test $(stat -L -c \"%a %U %G\" /var/lib/acme/.lego/a.example.test/**/* | tee /dev/stderr | grep '640 acme {group}' | wc -l) -eq 5"
)
webserver.succeed(
f"test $(stat -L -c \"%a %U %G\" /var/lib/acme/a.example.test | tee /dev/stderr | grep '750 acme {group}' | wc -l) -eq 1"
)
webserver.succeed(
f"test $(find /var/lib/acme/accounts -type f -exec stat -L -c \"%a %U %G\" {{}} \\; | tee /dev/stderr | grep -v '600 acme {group}' | wc -l) -eq 0"
)

with subtest("Can generate valid selfsigned certs"):
webserver.succeed("systemctl clean acme-a.example.test.service --what=state")
webserver.succeed("systemctl start acme-selfsigned-a.example.test.service")
@ -375,8 +402,15 @@ in import ./make-test-python.nix ({ lib, ... }: {
assert keyhash_old == keyhash_new

with subtest("Can request certificates for vhost + aliases (apache-httpd)"):
switch_to(webserver, "httpd-aliases")
webserver.wait_for_unit("acme-finished-c.example.test.target")
try:
switch_to(webserver, "httpd-aliases")
webserver.wait_for_unit("acme-finished-c.example.test.target")
except Exception as err:
_, output = webserver.execute(
"cat /var/log/httpd/*.log && ls -al /var/lib/acme/acme-challenge"
)
print(output)
raise err
check_issuer(webserver, "c.example.test", "pebble")
check_connection(client, "c.example.test")
check_connection(client, "d.example.test")
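
All of the helpers touched above share one retry shape: assert the remaining budget, probe once, then sleep three seconds and recurse with one fewer retry. A generic local sketch of that shape (the probe callable is hypothetical; the real test probes VM nodes via node.succeed/node.execute):

import time
from typing import Callable


def retry(probe: Callable[[], bool], retries: int = 3, description: str = "check") -> None:
    # The assert fires once the budget is exhausted, matching the test's messages.
    assert retries >= 0, f"{description} failed"
    if probe():
        return
    time.sleep(3)
    retry(probe, retries - 1, description)


if __name__ == "__main__":
    attempts = []

    def flaky() -> bool:
        attempts.append(1)
        return len(attempts) >= 2  # succeeds on the second try

    retry(flaky, retries=3, description="flaky probe")
    print("succeeded after", len(attempts), "attempts")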
@ -176,6 +176,7 @@ in
initrd-network-ssh = handleTest ./initrd-network-ssh {};
initrdNetwork = handleTest ./initrd-network.nix {};
initrd-secrets = handleTest ./initrd-secrets.nix {};
inspircd = handleTest ./inspircd.nix {};
installer = handleTest ./installer.nix {};
iodine = handleTest ./iodine.nix {};
ipfs = handleTest ./ipfs.nix {};
@ -393,6 +394,7 @@ in
systemd-networkd-vrf = handleTest ./systemd-networkd-vrf.nix {};
systemd-nspawn = handleTest ./systemd-nspawn.nix {};
systemd-timesyncd = handleTest ./systemd-timesyncd.nix {};
systemd-unit-path = handleTest ./systemd-unit-path.nix {};
taskserver = handleTest ./taskserver.nix {};
telegraf = handleTest ./telegraf.nix {};
tiddlywiki = handleTest ./tiddlywiki.nix {};
@ -408,6 +410,7 @@ in
trickster = handleTest ./trickster.nix {};
trilium-server = handleTestOn ["x86_64-linux"] ./trilium-server.nix {};
tuptime = handleTest ./tuptime.nix {};
turbovnc-headless-server = handleTest ./turbovnc-headless-server.nix {};
ucg = handleTest ./ucg.nix {};
udisks2 = handleTest ./udisks2.nix {};
unbound = handleTest ./unbound.nix {};
@ -161,12 +161,18 @@ import ./make-test-python.nix ({ pkgs, ... }: {
"docker run --rm ${examples.layered-image.imageName} cat extraCommands",
)

with subtest("Ensure building an image on top of a layered Docker images work"):
with subtest("Ensure images built on top of layered Docker images work"):
docker.succeed(
"docker load --input='${examples.layered-on-top}'",
"docker run --rm ${examples.layered-on-top.imageName}",
)

with subtest("Ensure layered images built on top of layered Docker images work"):
docker.succeed(
"docker load --input='${examples.layered-on-top-layered}'",
"docker run --rm ${examples.layered-on-top-layered.imageName}",
)


def set_of_layers(image_name):
return set(
@ -205,6 +211,31 @@ import ./make-test-python.nix ({ pkgs, ... }: {
assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
assert "LAST_LAYER=child" in env, "envvars from the child should take priority"

with subtest("Ensure environment variables of layered images are correctly inherited"):
docker.succeed(
"docker load --input='${examples.environmentVariablesLayered}'"
)
out = docker.succeed("docker run --rm ${examples.environmentVariablesLayered.imageName} env")
env = out.splitlines()
assert "FROM_PARENT=true" in env, "envvars from the parent should be preserved"
assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
assert "LAST_LAYER=child" in env, "envvars from the child should take priority"

with subtest(
"Ensure inherited environment variables of layered images are correctly resolved"
):
# Read environment variables as stored in image config
config = docker.succeed(
"tar -xOf ${examples.environmentVariablesLayered} manifest.json | ${pkgs.jq}/bin/jq -r .[].Config"
).strip()
out = docker.succeed(
f"tar -xOf ${examples.environmentVariablesLayered} {config} | ${pkgs.jq}/bin/jq -r '.config.Env | .[]'"
)
env = out.splitlines()
assert (
sum(entry.startswith("LAST_LAYER") for entry in env) == 1
), "envvars overridden by child should be unique"

with subtest("Ensure image with only 2 layers can be loaded"):
docker.succeed(
"docker load --input='${examples.two-layered-image}'"
@ -219,6 +250,18 @@ import ./make-test-python.nix ({ pkgs, ... }: {
"docker run bulk-layer ls /bin/hello",
)

with subtest(
"Ensure the bulk layer with a base image respects the number of maxLayers"
):
docker.succeed(
"docker load --input='${pkgs.dockerTools.examples.layered-bulk-layer}'",
# Ensure the image runs correctly
"docker run layered-bulk-layer ls /bin/hello",
)

# Ensure the image has the correct number of layers
assert len(set_of_layers("layered-bulk-layer")) == 4

with subtest("Ensure correct behavior when no store is needed"):
# This check tests that buildLayeredImage can build images that don't need a store.
docker.succeed(
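
The two new environment-variable subtests above make the same set of assertions, first against `docker run ... env` output and then against the env list stored in the image config. For illustration, the checks as a standalone function over a plain string (the sample values are made up, not taken from the test images):

def check_layered_env(env_output: str) -> None:
    env = env_output.splitlines()
    assert "FROM_PARENT=true" in env, "envvars from the parent should be preserved"
    assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
    assert "LAST_LAYER=child" in env, "envvars from the child should take priority"
    # The "correctly resolved" subtest additionally requires the override to appear
    # exactly once in the stored image config.
    assert sum(entry.startswith("LAST_LAYER") for entry in env) == 1, \
        "envvars overridden by child should be unique"


if __name__ == "__main__":
    check_layered_env("FROM_PARENT=true\nFROM_CHILD=true\nLAST_LAYER=child")
    print("layered env checks passed")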
@ -9,7 +9,7 @@ let
sha256 = "4de5ff31d54dd61bbccaf092c9e74c1af3a4c53e07aa59f60457a8f00cfb23a6";
};
# We need unzip to build this package
buildInputs = [ pkgs.unzip ];
nativeBuildInputs = [ pkgs.unzip ];
# Installing simply means copying all files to the output directory
installPhase = "mkdir -p $out; cp -R * $out/";
};
@ -24,7 +24,7 @@ let
sha256 = "e40ed7dd6bbe7fe3363bbbecb4de481d5e42385b5a0f62f6a6ce6bf3a1f9dfa8";
};
# We need unzip to build this package
buildInputs = [ pkgs.unzip ];
nativeBuildInputs = [ pkgs.unzip ];
sourceRoot = ".";
# Installing simply means copying all files to the output directory
installPhase = "mkdir -p $out; cp -R * $out/";
93
nixos/tests/inspircd.nix
Normal file
@ -0,0 +1,93 @@
let
clients = [
"ircclient1"
"ircclient2"
];
server = "inspircd";
ircPort = 6667;
channel = "nixos-cat";
iiDir = "/tmp/irc";
in

import ./make-test-python.nix ({ pkgs, lib, ... }: {
name = "inspircd";
nodes = {
"${server}" = {
networking.firewall.allowedTCPPorts = [ ircPort ];
services.inspircd = {
enable = true;
package = pkgs.inspircdMinimal;
config = ''
<bind address="" port="${toString ircPort}" type="clients">
<connect name="main" allow="*" pingfreq="15">
'';
};
};
} // lib.listToAttrs (builtins.map (client: lib.nameValuePair client {
imports = [
./common/user-account.nix
];

systemd.services.ii = {
requires = [ "network.target" ];
wantedBy = [ "default.target" ];

serviceConfig = {
Type = "simple";
ExecPreStartPre = "mkdir -p ${iiDir}";
ExecStart = ''
${lib.getBin pkgs.ii}/bin/ii -n ${client} -s ${server} -i ${iiDir}
'';
User = "alice";
};
};
}) clients);

testScript =
let
msg = client: "Hello, my name is ${client}";
clientScript = client: [
''
${client}.wait_for_unit("network.target")
${client}.systemctl("start ii")
${client}.wait_for_unit("ii")
${client}.wait_for_file("${iiDir}/${server}/out")
''
# wait until first PING from server arrives before joining,
# so we don't try it too early
''
${client}.wait_until_succeeds("grep 'PING' ${iiDir}/${server}/out")
''
# join ${channel}
''
${client}.succeed("echo '/j #${channel}' > ${iiDir}/${server}/in")
${client}.wait_for_file("${iiDir}/${server}/#${channel}/in")
''
# send a greeting
''
${client}.succeed(
"echo '${msg client}' > ${iiDir}/${server}/#${channel}/in"
)
''
# check that all greetings arrived on all clients
] ++ builtins.map (other: ''
${client}.succeed(
"grep '${msg other}$' ${iiDir}/${server}/#${channel}/out"
)
'') clients;

# foldl', but requires a non-empty list instead of a start value
reduce = f: list:
builtins.foldl' f (builtins.head list) (builtins.tail list);
in ''
start_all()
${server}.wait_for_open_port(${toString ircPort})

# run clientScript for all clients so that every list
# entry is executed by every client before advancing
# to the next one.
'' + lib.concatStrings
(reduce
(lib.zipListsWith (cs: c: cs + c))
(builtins.map clientScript clients));
})
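
The reduce/zipListsWith combination at the end of the test above interleaves the per-client step lists so that every client finishes step N before any client starts step N+1. A Python sketch of the same interleaving, with illustrative client names and step strings rather than the test's real driver calls:

from typing import List


def client_script(client: str) -> List[str]:
    # Placeholder steps standing in for the clientScript snippets above.
    return [
        f"{client}: wait for ii",
        f"{client}: join #nixos-cat",
        f"{client}: send greeting",
    ]


def interleave(scripts: List[List[str]]) -> List[str]:
    # zip(*scripts) pairs up the Nth step of every client,
    # like lib.zipListsWith (cs: c: cs + c) folded over the list.
    return [step for round_ in zip(*scripts) for step in round_]


if __name__ == "__main__":
    for line in interleave([client_script(c) for c in ["ircclient1", "ircclient2"]]):
        print(line)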
@ -96,6 +96,15 @@ import ./make-test-python.nix (
podman.succeed(su_cmd("podman ps | grep sleeping"))
podman.succeed(su_cmd("podman stop sleeping"))
podman.succeed(su_cmd("podman rm sleeping"))

with subtest("Run container with init"):
podman.succeed(
"tar cv -C ${pkgs.pkgsStatic.busybox} . | podman import - busybox"
)
pid = podman.succeed("podman run --rm busybox readlink /proc/self").strip()
assert pid == "1"
pid = podman.succeed("podman run --rm --init busybox readlink /proc/self").strip()
assert pid == "2"
'';
}
)
@ -45,6 +45,10 @@ import ./make-test-python.nix (
'';
inherit passwordFile initialize paths pruneOpts;
};
remoteprune = {
inherit repository passwordFile;
pruneOpts = [ "--keep-last 1" ];
};
};

environment.sessionVariables.RCLONE_CONFIG_LOCAL_TYPE = "local";
@ -84,6 +88,8 @@ import ./make-test-python.nix (
"systemctl start restic-backups-rclonebackup.service",
'${pkgs.restic}/bin/restic -r ${repository} -p ${passwordFile} snapshots -c | grep -e "^4 snapshot"',
'${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots -c | grep -e "^4 snapshot"',
"systemctl start restic-backups-remoteprune.service",
'${pkgs.restic}/bin/restic -r ${repository} -p ${passwordFile} snapshots -c | grep -e "^1 snapshot"',
)
'';
}
@ -17,6 +17,6 @@ in
''
machine.wait_for_unit("multi-user.target")
output = machine.succeed("spike -m64 $(which pk) $(which hello)")
assert output == "Hello, world!\n"
assert "Hello, world!" in output
'';
})
47
nixos/tests/systemd-unit-path.nix
Normal file
@ -0,0 +1,47 @@
import ./make-test-python.nix ({ pkgs, ... }:

let
exampleScript = pkgs.writeTextFile {
name = "example.sh";
text = ''
#! ${pkgs.runtimeShell} -e

while true; do
echo "Example script running" >&2
${pkgs.coreutils}/bin/sleep 1
done
'';
executable = true;
};

unitFile = pkgs.writeTextFile {
name = "example.service";
text = ''
[Unit]
Description=Example systemd service unit file

[Service]
ExecStart=${exampleScript}

[Install]
WantedBy=multi-user.target
'';
};
in
{
name = "systemd-unit-path";

machine = { pkgs, lib, ... }: {
boot.extraSystemdUnitPaths = [ "/etc/systemd-rw/system" ];
};

testScript = ''
machine.wait_for_unit("multi-user.target")
machine.succeed("mkdir -p /etc/systemd-rw/system")
machine.succeed(
"cp ${unitFile} /etc/systemd-rw/system/example.service"
)
machine.succeed("systemctl start example.service")
machine.succeed("systemctl status example.service | grep 'Active: active'")
'';
})
171
nixos/tests/turbovnc-headless-server.nix
Normal file
@ -0,0 +1,171 @@
import ./make-test-python.nix ({ pkgs, lib, ... }: {
name = "turbovnc-headless-server";
meta = {
maintainers = with lib.maintainers; [ nh2 ];
};

machine = { pkgs, ... }: {

environment.systemPackages = with pkgs; [
glxinfo
procps # for `pkill`, `pidof` in the test
scrot # for screenshotting Xorg
turbovnc
];

programs.turbovnc.ensureHeadlessSoftwareOpenGL = true;

networking.firewall = {
# Reject instead of drop, for failures instead of hangs.
rejectPackets = true;
allowedTCPPorts = [
5900 # VNC :0, for seeing what's going on in the server
];
};

# So that we can ssh into the VM, see e.g.
# http://blog.patapon.info/nixos-local-vm/#accessing-the-vm-with-ssh
services.openssh.enable = true;
services.openssh.permitRootLogin = "yes";
users.extraUsers.root.password = "";
users.mutableUsers = false;
};

testScript = ''
def wait_until_terminated_or_succeeds(
termination_check_shell_command,
success_check_shell_command,
get_detail_message_fn,
retries=60,
retry_sleep=0.5,
):
def check_success():
command_exit_code, _output = machine.execute(success_check_shell_command)
return command_exit_code == 0

for _ in range(retries):
exit_check_exit_code, _output = machine.execute(termination_check_shell_command)
is_terminated = exit_check_exit_code != 0
if is_terminated:
if check_success():
return
else:
details = get_detail_message_fn()
raise Exception(
f"termination check ({termination_check_shell_command}) triggered without command succeeding ({success_check_shell_command}); details: {details}"
)
else:
if check_success():
return
time.sleep(retry_sleep)

if not check_success():
details = get_detail_message_fn()
raise Exception(
f"action timed out ({success_check_shell_command}); details: {details}"
)


# Below we use the pattern:
# (cmd | tee stdout.log) 3>&1 1>&2 2>&3 | tee stderr.log
# to capture both stderr and stdout while also teeing them, see:
# https://unix.stackexchange.com/questions/6430/how-to-redirect-stderr-and-stdout-to-different-files-and-also-display-in-termina/6431#6431


# Starts headless VNC server, backgrounding it.
def start_xvnc():
xvnc_command = " ".join(
[
"Xvnc",
":0",
"-iglx",
"-auth /root/.Xauthority",
"-geometry 1240x900",
"-depth 24",
"-rfbwait 5000",
"-deferupdate 1",
"-verbose",
"-securitytypes none",
# We don't enforce localhost listening such that we
# can connect from outside the VM using
# env QEMU_NET_OPTS=hostfwd=tcp::5900-:5900 $(nix-build nixos/tests/turbovnc-headless-server.nix -A driver)/bin/nixos-test-driver
# for testing purposes, and so that we can in the future
# add another test case that connects the TurboVNC client.
# "-localhost",
]
)
machine.execute(
# Note trailing & for backgrounding.
f"({xvnc_command} | tee /tmp/Xvnc.stdout) 3>&1 1>&2 2>&3 | tee /tmp/Xvnc.stderr &",
)


# Waits until the server log message that tells us that GLX is ready
# (requires `-verbose` above), avoiding screenshoting racing below.
def wait_until_xvnc_glx_ready():
machine.wait_until_succeeds("test -f /tmp/Xvnc.stderr")
wait_until_terminated_or_succeeds(
termination_check_shell_command="pidof Xvnc",
success_check_shell_command="grep 'GLX: Initialized DRISWRAST' /tmp/Xvnc.stderr",
get_detail_message_fn=lambda: "Contents of /tmp/Xvnc.stderr:\n"
+ machine.succeed("cat /tmp/Xvnc.stderr"),
)


# Checks that we detect glxgears failing when
# `LIBGL_DRIVERS_PATH=/nonexistent` is set
# (in which case software rendering should not work).
def test_glxgears_failing_with_bad_driver_path():
machine.execute(
# Note trailing & for backgrounding.
"(env DISPLAY=:0 LIBGL_DRIVERS_PATH=/nonexistent glxgears -info | tee /tmp/glxgears-should-fail.stdout) 3>&1 1>&2 2>&3 | tee /tmp/glxgears-should-fail.stderr &"
)
machine.wait_until_succeeds("test -f /tmp/glxgears-should-fail.stderr")
wait_until_terminated_or_succeeds(
termination_check_shell_command="pidof glxgears",
success_check_shell_command="grep 'libGL error: failed to load driver: swrast' /tmp/glxgears-should-fail.stderr",
get_detail_message_fn=lambda: "Contents of /tmp/glxgears-should-fail.stderr:\n"
+ machine.succeed("cat /tmp/glxgears-should-fail.stderr"),
)
machine.wait_until_fails("pidof glxgears")


# Starts glxgears, backgrounding it. Waits until it prints the `GL_RENDERER`.
# Does not quit glxgears.
def test_glxgears_prints_renderer():
machine.execute(
# Note trailing & for backgrounding.
"(env DISPLAY=:0 glxgears -info | tee /tmp/glxgears.stdout) 3>&1 1>&2 2>&3 | tee /tmp/glxgears.stderr &"
)
machine.wait_until_succeeds("test -f /tmp/glxgears.stderr")
wait_until_terminated_or_succeeds(
termination_check_shell_command="pidof glxgears",
success_check_shell_command="grep 'GL_RENDERER' /tmp/glxgears.stdout",
get_detail_message_fn=lambda: "Contents of /tmp/glxgears.stderr:\n"
+ machine.succeed("cat /tmp/glxgears.stderr"),
)


with subtest("Start Xvnc"):
start_xvnc()
wait_until_xvnc_glx_ready()

with subtest("Ensure bad driver path makes glxgears fail"):
test_glxgears_failing_with_bad_driver_path()

with subtest("Run 3D application (glxgears)"):
test_glxgears_prints_renderer()

# Take screenshot; should display the glxgears.
machine.succeed("scrot --display :0 /tmp/glxgears.png")

# Copy files down.
machine.copy_from_vm("/tmp/glxgears.png")
machine.copy_from_vm("/tmp/glxgears.stdout")
machine.copy_from_vm("/tmp/glxgears-should-fail.stdout")
machine.copy_from_vm("/tmp/glxgears-should-fail.stderr")
machine.copy_from_vm("/tmp/Xvnc.stdout")
machine.copy_from_vm("/tmp/Xvnc.stderr")
'';

})
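
The wait_until_terminated_or_succeeds helper in the test above polls two shell probes: "is the process still alive?" and "has the success condition appeared in its log?". A local sketch of the same control flow with the probes replaced by plain callables, so it can be read (and run) outside the NixOS test driver; all names here are illustrative:

import time
from typing import Callable


def wait_until_terminated_or_succeeds(
    still_running: Callable[[], bool],
    succeeded: Callable[[], bool],
    details: Callable[[], str],
    retries: int = 60,
    retry_sleep: float = 0.5,
) -> None:
    for _ in range(retries):
        if not still_running():
            # Process gone: either it already produced the success marker, or it failed.
            if succeeded():
                return
            raise Exception(f"process exited without succeeding; details: {details()}")
        if succeeded():
            return
        time.sleep(retry_sleep)
    if not succeeded():
        raise Exception(f"action timed out; details: {details()}")


if __name__ == "__main__":
    ticks = {"count": 0}

    def succeeded() -> bool:
        ticks["count"] += 1
        return ticks["count"] >= 3  # success appears on the third poll

    wait_until_terminated_or_succeeds(
        still_running=lambda: True,
        succeeded=succeeded,
        details=lambda: "no log output captured",
        retries=10,
        retry_sleep=0.01,
    )
    print("condition reached")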
@ -1,4 +1,5 @@
{ lib
{ stdenv
, lib
, fetchFromGitLab
, cairo
, dbus
@ -7,17 +8,17 @@
, glib
, gtk3
, libhandy_0
, libsass
, meson
, ninja
, pango
, pkg-config
, python3
, rustc
, rustPlatform
, wrapGAppsHook
}:

rustPlatform.buildRustPackage rec {
stdenv.mkDerivation rec {
pname = "contrast";
version = "0.0.3";

@ -30,7 +31,11 @@ rustPlatform.buildRustPackage rec {
sha256 = "0kk3mv7a6y258109xvgicmsi0lw0rcs00gfyivl5hdz7qh47iccy";
};

cargoSha256 = "0vi8nv4hkhsgqgz36xacwkk5cxirg6li44nbmk3x7vx7c64hzybq";
cargoDeps = rustPlatform.fetchCargoTarball {
inherit src;
name = "${pname}-${version}";
hash = "sha256-ePkPiWGn79PHrMsSEql5OXZW5uRMdTP+w0/DCcm2KG4=";
};

nativeBuildInputs = [
desktop-file-utils
@ -39,6 +44,9 @@ rustPlatform.buildRustPackage rec {
ninja
pkg-config
python3
rustPlatform.rust.cargo
rustPlatform.cargoSetupHook
rustPlatform.rust.rustc
wrapGAppsHook
glib # for glib-compile-resources
];
@ -49,6 +57,7 @@ rustPlatform.buildRustPackage rec {
glib
gtk3
libhandy_0
libsass
pango
];

@ -56,12 +65,6 @@ rustPlatform.buildRustPackage rec {
patchShebangs build-aux/meson_post_install.py
'';

# Don't use buildRustPackage phases, only use it for rust deps setup
configurePhase = null;
buildPhase = null;
checkPhase = null;
installPhase = null;

meta = with lib; {
description = "Checks whether the contrast between two colors meet the WCAG requirements";
homepage = "https://gitlab.gnome.org/World/design/contrast";
70
pkgs/applications/accessibility/squeekboard/default.nix
Normal file
@ -0,0 +1,70 @@
{ lib
, stdenv
, fetchFromGitLab
, meson
, ninja
, pkg-config
, gnome3
, glib
, gtk3
, wayland
, wayland-protocols
, libxml2
, libxkbcommon
, rustPlatform
, feedbackd
, wrapGAppsHook
}:

stdenv.mkDerivation rec {
pname = "squeekboard";
version = "unstable-2021-03-09";

src = fetchFromGitLab {
domain = "source.puri.sm";
owner = "Librem5";
repo = pname;
rev = "bffd212e102bf71a94c599aac0359a8d30d19008";
sha256 = "1j10zhyb8wyrcbryfj6f3drn9b0l9x0l7hnhy2imnjbfbnwwm4w7";
};

cargoDeps = rustPlatform.fetchCargoTarball {
inherit src;
cargoUpdateHook = ''
cat Cargo.toml.in Cargo.deps > Cargo.toml
'';
name = "${pname}-${version}";
sha256 = "1qaqiaxqc4x2x5bd31na4c49vbjwrmz5clmgli7733dv55rxxias";
};

nativeBuildInputs = [
meson
ninja
pkg-config
glib
wayland
wrapGAppsHook
] ++ (with rustPlatform; [
cargoSetupHook
rust.cargo
rust.rustc
]);

buildInputs = [
gtk3
gnome3.gnome-desktop
wayland
wayland-protocols
libxml2
libxkbcommon
feedbackd
];

meta = with lib; {
description = "A virtual keyboard supporting Wayland";
homepage = "https://source.puri.sm/Librem5/squeekboard";
license = licenses.gpl3Plus;
maintainers = with maintainers; [ artturin ];
platforms = platforms.linux;
};
}
@ -1,4 +1,4 @@
{ mkDerivation, fetchgit, lib
{ mkDerivation, fetchurl, lib
, extra-cmake-modules, kdoctools
, qca-qt5, qjson, qtquickcontrols2, qtscript, qtwebengine
, karchive, kcmutils, kconfig, kdnssd, kguiaddons, kinit, kirigami2, knewstuff, knotifyconfig, ktexteditor, kwindowsystem
@ -7,17 +7,12 @@
}:

mkDerivation rec {
pname = "amarok-unstable";
version = "2020-06-12";
pname = "amarok";
version = "2.9.71";

src = fetchgit {
# master has the Qt5 version as of April 2018 but a formal release has not
# yet been made so change this back to the proper upstream when such a
# release is out
url = "https://invent.kde.org/multimedia/amarok.git";
# url = "mirror://kde/stable/${pname}/${version}/src/${name}.tar.xz";
rev = "fece39b0e81db310b6a6e08f93d83b0d498cd02b";
sha256 = "12casnq6w5yp2jlvnr466pjpkn0vriry8jzfq2qkjl564y0vhy9x";
src = fetchurl {
url = "mirror://kde/unstable/${pname}/${version}/${pname}-${version}.tar.xz";
sha256 = "0kz8wixjmy4yxq2gk11ybswryxb6alfymd3bzcar9xinscllhh3a";
};

nativeBuildInputs = [ extra-cmake-modules kdoctools ];
@ -35,7 +30,7 @@ mkDerivation rec {
meta = with lib; {
homepage = "https://amarok.kde.org";
description = "A powerful music player with an intuitive interface";
license = licenses.gpl2;
license = licenses.gpl2Plus;
maintainers = with maintainers; [ peterhoeg ];
};
}
@ -1,12 +1,12 @@
{ appimageTools, lib, fetchurl }:
let
pname = "apple-music-electron";
version = "1.5.2";
version = "1.5.5";
name = "Apple.Music-${version}";

src = fetchurl {
url = "https://github.com/iiFir3z/Apple-Music-Electron/releases/download/${version}/${name}.AppImage";
sha256 = "1jl0wgwy6ajmfkzygwb7cm9m49nkhp3x6vd8kwmh6ccs3jy4ayp5";
url = "https://github.com/cryptofyre/Apple-Music-Electron/releases/download/v${version}/${name}.AppImage";
sha256 = "1gb6j3nvam9fcpsgiv56jccg9a4y14vzsyw11h3hckaigy90knpx";
};

appimageContents = appimageTools.extract { inherit name src; };