Merge staging into python-unstable
commit 5871da418f
@@ -17,4 +17,5 @@
<xi:include href="functions/shell.xml" />
<xi:include href="functions/dockertools.xml" />
<xi:include href="functions/prefer-remote-fetch.xml" />
<xi:include href="functions/nix-gitignore.xml" />
</chapter>
78  doc/functions/nix-gitignore.xml  Normal file
@@ -0,0 +1,78 @@
<section xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xmlns:xi="http://www.w3.org/2001/XInclude"
         xml:id="sec-pkgs-nix-gitignore">
<title>pkgs.nix-gitignore</title>

<para>
<function>pkgs.nix-gitignore</function> is a function that acts similarly to
<literal>builtins.filterSource</literal>, but also allows filtering with the
help of the gitignore format.
</para>

<section xml:id="sec-pkgs-nix-gitignore-usage">
<title>Usage</title>

<para>
<literal>pkgs.nix-gitignore</literal> exports a number of functions, but
you'll most likely need either <literal>gitignoreSource</literal> or
<literal>gitignoreSourcePure</literal>. As their first argument, they both
accept either 1. a file with gitignore lines, 2. a string with gitignore
lines, or 3. a list of either of the two. The elements are concatenated into
a single big string.
</para>
<programlisting><![CDATA[
{ pkgs ? import <nixpkgs> {} }:

 nix-gitignore.gitignoreSource [] ./source
   # Simplest version

 nix-gitignore.gitignoreSource "supplemental-ignores\n" ./source
   # This one reads ./source/.gitignore and concatenates the auxiliary ignores

 nix-gitignore.gitignoreSourcePure "ignore-this\nignore-that\n" ./source
   # Use this string as the gitignore; don't read ./source/.gitignore.

 nix-gitignore.gitignoreSourcePure [ "ignore-this\nignore-that\n" ~/.gitignore ] ./source
   # It also accepts a list (of strings and paths) that will be concatenated
   # once the paths are turned into strings via readFile.
]]></programlisting>
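<para>
As a minimal sketch (the package name and source layout are hypothetical),
the filtered source can be used directly as a derivation's
<literal>src</literal>:
</para>

<programlisting><![CDATA[
{ pkgs ? import <nixpkgs> {} }:

pkgs.stdenv.mkDerivation {
  name = "my-package";
  # exclude everything matched by ./source/.gitignore, plus any "result" symlink
  src = pkgs.nix-gitignore.gitignoreSource [ "result\n" ] ./source;
  installPhase = "cp -r . $out";
}
]]></programlisting>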
<para>
These functions are derived from the <literal>Filter</literal> functions
by setting the first filter argument to <literal>(_: _: true)</literal>:
</para>

<programlisting><![CDATA[
gitignoreSourcePure = gitignoreFilterSourcePure (_: _: true);
gitignoreSource = gitignoreFilterSource (_: _: true);
]]></programlisting>

<para>
Those filter functions accept the same arguments that the
<literal>builtins.filterSource</literal> function would pass to its filters;
thus <literal>fn: gitignoreFilterSourcePure fn ""</literal> should be
extensionally equivalent to <literal>filterSource</literal>. A file is
blacklisted iff it is blacklisted by either your filter or the
gitignoreFilter.
</para>
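<para>
For example (a sketch; the extra predicate is only an illustration), a
hand-written filter can be combined with the gitignore rules:
</para>

<programlisting><![CDATA[
# keep everything that .gitignore keeps, but additionally drop any "result" symlink
nix-gitignore.gitignoreFilterSource
  (path: type: baseNameOf (toString path) != "result")
  []
  ./source
]]></programlisting>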
<para>
If you want to make your own filter from scratch, you may use
</para>

<programlisting><![CDATA[
gitignoreFilter = ign: root: filterPattern (gitignoreToPatterns ign) root;
]]></programlisting>
</section>

<section xml:id="sec-pkgs-nix-gitignore-usage-recursive">
<title>gitignore files in subdirectories</title>

<para>
If you wish to use a filter that would search for .gitignore files in
subdirectories, just like git does by default, use this function:
</para>

<programlisting><![CDATA[
gitignoreFilterRecursiveSource = filter: patterns: root:
# OR
gitignoreRecursiveSource = gitignoreFilterSourcePure (_: _: true);
]]></programlisting>
</section>
</section>
|
@ -21,6 +21,7 @@ rec {
|
||||
isSparc = { cpu = { family = "sparc"; }; };
|
||||
isWasm = { cpu = { family = "wasm"; }; };
|
||||
isAvr = { cpu = { family = "avr"; }; };
|
||||
isAlpha = { cpu = { family = "alpha"; }; };
|
||||
|
||||
is32bit = { cpu = { bits = 32; }; };
|
||||
is64bit = { cpu = { bits = 64; }; };
|
||||
|
@ -401,6 +401,15 @@
|
||||
github = "shados";
|
||||
name = "Alexei Robyn";
|
||||
};
|
||||
artemist = {
|
||||
email = "me@artem.ist";
|
||||
github = "artemist";
|
||||
name = "Artemis Tosini";
|
||||
keys = [{
|
||||
longkeyid = "rsa4096/0x4FDC96F161E7BA8A";
|
||||
fingerprint = "3D2B B230 F9FA F0C5 1832 46DD 4FDC 96F1 61E7 BA8A";
|
||||
}];
|
||||
};
|
||||
artuuge = {
|
||||
email = "artuuge@gmail.com";
|
||||
github = "artuuge";
|
||||
@ -743,6 +752,11 @@
|
||||
github = "calbrecht";
|
||||
name = "Christian Albrecht";
|
||||
};
|
||||
callahad = {
|
||||
email = "dan.callahan@gmail.com";
|
||||
github = "callahad";
|
||||
name = "Dan Callahan";
|
||||
};
|
||||
calvertvl = {
|
||||
email = "calvertvl@gmail.com";
|
||||
github = "calvertvl";
|
||||
@ -793,6 +807,11 @@
|
||||
github = "caugner";
|
||||
name = "Claas Augner";
|
||||
};
|
||||
cbley = {
|
||||
email = "claudio.bley@gmail.com";
|
||||
github = "avdv";
|
||||
name = "Claudio Bley";
|
||||
};
|
||||
cdepillabout = {
|
||||
email = "cdep.illabout@gmail.com";
|
||||
github = "cdepillabout";
|
||||
@ -1565,6 +1584,11 @@
|
||||
github = "flokli";
|
||||
name = "Florian Klink";
|
||||
};
|
||||
FlorianFranzen = {
|
||||
email = "Florian.Franzen@gmail.com";
|
||||
github = "FlorianFranzen";
|
||||
name = "Florian Franzen";
|
||||
};
|
||||
florianjacob = {
|
||||
email = "projects+nixos@florianjacob.de";
|
||||
github = "florianjacob";
|
||||
@ -2925,6 +2949,11 @@
|
||||
email = "code@klandest.in";
|
||||
github = "mguentner";
|
||||
name = "Maximilian Güntner";
|
||||
};
|
||||
mhaselsteiner = {
|
||||
email = "magdalena.haselsteiner@gmx.at";
|
||||
github = "mhaselsteiner";
|
||||
name = "Magdalena Haselsteiner";
|
||||
};
|
||||
mic92 = {
|
||||
email = "joerg@thalheim.io";
|
||||
@ -3228,6 +3257,11 @@
|
||||
github = "nequissimus";
|
||||
name = "Tim Steinbach";
|
||||
};
|
||||
netixx = {
|
||||
email = "dev.espinetfrancois@gmail.com";
|
||||
github = "netixx";
|
||||
name = "François Espinet";
|
||||
};
|
||||
nikitavoloboev = {
|
||||
email = "nikita.voloboev@gmail.com";
|
||||
github = "nikitavoloboev";
|
||||
@ -3318,6 +3352,11 @@
|
||||
github = "np";
|
||||
name = "Nicolas Pouillard";
|
||||
};
|
||||
nphilou = {
|
||||
email = "nphilou@gmail.com";
|
||||
github = "nphilou";
|
||||
name = "Philippe Nguyen";
|
||||
};
|
||||
nslqqq = {
|
||||
email = "nslqqq@gmail.com";
|
||||
name = "Nikita Mikhailov";
|
||||
@ -3793,6 +3832,11 @@
|
||||
github = "rbasso";
|
||||
name = "Rafael Basso";
|
||||
};
|
||||
rbrewer = {
|
||||
email = "rwb123@gmail.com";
|
||||
github = "rbrewer123";
|
||||
name = "Rob Brewer";
|
||||
};
|
||||
rdnetto = {
|
||||
email = "rdnetto@gmail.com";
|
||||
github = "rdnetto";
|
||||
@ -3828,6 +3872,11 @@
|
||||
github = "relrod";
|
||||
name = "Ricky Elrod";
|
||||
};
|
||||
rembo10 = {
|
||||
email = "rembo10@users.noreply.github.com";
|
||||
github = "rembo10";
|
||||
name = "rembo10";
|
||||
};
|
||||
renatoGarcia = {
|
||||
email = "fgarcia.renato@gmail.com";
|
||||
github = "renatoGarcia";
|
||||
|
@@ -23,5 +23,6 @@
<xi:include href="linux-kernel.xml" />
<xi:include href="../generated/modules.xml" xpointer="xpointer(//section[@id='modules']/*)" />
<xi:include href="profiles.xml" />
<xi:include href="kubernetes.xml" />
<!-- Apache; libvirtd virtualisation -->
</part>
127  nixos/doc/manual/configuration/kubernetes.xml  Normal file
@@ -0,0 +1,127 @@
<chapter xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xmlns:xi="http://www.w3.org/2001/XInclude"
         version="5.0"
         xml:id="sec-kubernetes">
<title>Kubernetes</title>

<para>
The NixOS Kubernetes module is a collective term for a handful of individual
submodules implementing the Kubernetes cluster components.
</para>

<para>
There are generally two ways of enabling Kubernetes on NixOS.
One way is to enable and configure cluster components appropriately by hand:
<programlisting>
services.kubernetes = {
  apiserver.enable = true;
  controllerManager.enable = true;
  scheduler.enable = true;
  addonManager.enable = true;
  proxy.enable = true;
  flannel.enable = true;
};
</programlisting>
Another way is to assign cluster roles ("master" and/or "node") to the host.
This enables apiserver, controllerManager, scheduler, addonManager,
kube-proxy and etcd:
<programlisting>
<xref linkend="opt-services.kubernetes.roles"/> = [ "master" ];
</programlisting>
While this will enable the kubelet and kube-proxy only:
<programlisting>
<xref linkend="opt-services.kubernetes.roles"/> = [ "node" ];
</programlisting>
Assigning both the master and node roles is useful if you want a single-node
Kubernetes cluster for development or testing purposes:
<programlisting>
<xref linkend="opt-services.kubernetes.roles"/> = [ "master" "node" ];
</programlisting>
Note: Assigning either role will also default both
<xref linkend="opt-services.kubernetes.flannel.enable"/> and
<xref linkend="opt-services.kubernetes.easyCerts"/> to true.
This sets up flannel as the CNI plugin and activates automatic PKI bootstrapping.
</para>
<para>
As of Kubernetes 1.10.X, opening non-TLS-enabled ports on Kubernetes
components is deprecated. Therefore, as of NixOS 19.03, all plain HTTP ports
are disabled by default.
While opening insecure ports is still possible, it is recommended not to
bind them to interfaces other than loopback.

To re-enable the insecure port on the apiserver, see the options
<xref linkend="opt-services.kubernetes.apiserver.insecurePort"/>
and
<xref linkend="opt-services.kubernetes.apiserver.insecureBindAddress"/>,
as in the sketch below.
</para>
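<para>
For example, the following sketch re-enables the insecure apiserver port
while keeping it bound to loopback only (the port number is merely an
illustration):
<programlisting>
services.kubernetes.apiserver.insecurePort = 8080;
services.kubernetes.apiserver.insecureBindAddress = "127.0.0.1";
</programlisting>
</para>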
<note>
<para>
As of NixOS 19.03, it is mandatory to configure
<xref linkend="opt-services.kubernetes.masterAddress"/>.
The masterAddress must be resolvable and routable by all cluster nodes.
In single-node clusters, this can be set to <literal>localhost</literal>.
</para>
</note>
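<para>
Putting this together, a minimal single-node cluster for development could be
declared as follows (a sketch):
<programlisting>
services.kubernetes = {
  roles = [ "master" "node" ];
  masterAddress = "localhost";
};
</programlisting>
</para>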
<para>
Role-based access control (RBAC) authorization mode is enabled by default.
This means that anonymous requests to the apiserver secure port are expected
to fail with a permission denied error. All cluster components must therefore
be configured with x509 certificates for two-way TLS communication.
The x509 certificate subject section determines the roles and permissions
granted by the apiserver to perform clusterwide or namespaced operations.
See also:
<link
xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/rbac/">
Using RBAC Authorization</link>.
</para>
<para>
The NixOS kubernetes module provides an option for automatic certificate
bootstrapping and configuration,
<xref linkend="opt-services.kubernetes.easyCerts"/>.
The PKI bootstrapping process involves setting up a certificate authority
(CA) daemon (cfssl) on the kubernetes master node. cfssl generates a CA
certificate for the cluster and uses it to sign subordinate certificates
issued to each of the cluster components. Subsequently, the certmgr daemon
monitors active certificates and renews them when needed. For single-node
Kubernetes clusters, setting
<xref linkend="opt-services.kubernetes.easyCerts"/> = true is sufficient and
no further action is required. Joining extra node machines to an existing
cluster, on the other hand, requires establishing initial trust.
</para>
<para>
To add new nodes to the cluster: on any (non-master) cluster node where
<xref linkend="opt-services.kubernetes.easyCerts"/> is enabled, the helper
script <literal>nixos-kubernetes-node-join</literal> is available on the PATH.
Given a token on stdin, it copies the token to the kubernetes secrets
directory and restarts the certmgr service. As the requested certificates are
issued, the script restarts the kubernetes cluster components as needed for
them to pick up the new keypairs.
</para>
<note>
<para>
Multi-master (HA) clusters are not supported by the easyCerts module.
</para>
</note>

<para>
In order to interact with an RBAC-enabled cluster as an administrator, one
needs to have cluster-admin privileges. By default, when easyCerts is
enabled, a cluster-admin kubeconfig file is generated and linked into
<literal>/etc/kubernetes/cluster-admin.kubeconfig</literal> as determined by
<xref linkend="opt-services.kubernetes.pki.etcClusterAdminKubeconfig"/>.
<literal>export KUBECONFIG=/etc/kubernetes/cluster-admin.kubeconfig</literal>
will make kubectl use this kubeconfig to access and authenticate against the
cluster. The cluster-admin kubeconfig references an auto-generated keypair
owned by root. Thus, only root on the kubernetes master may obtain
cluster-admin rights by means of this file.
</para>

</chapter>
@ -29,7 +29,10 @@
|
||||
networks are set, it will default to using a configuration file at
|
||||
<literal>/etc/wpa_supplicant.conf</literal>. You should edit this file
|
||||
yourself to define wireless networks, WPA keys and so on (see
|
||||
wpa_supplicant.conf(5)).
|
||||
<citerefentry>
|
||||
<refentrytitle>wpa_supplicant.conf</refentrytitle>
|
||||
<manvolnum>5</manvolnum>
|
||||
</citerefentry>).
|
||||
</para>
|
||||
|
||||
<para>
|
||||
|
@@ -54,6 +54,13 @@
</itemizedlist>
<para>to <literal>false</literal> and enable your preferred display manager.</para>
</note>
<para>
A major refactoring of the Kubernetes module has been completed.
The refactoring primarily focuses on decoupling components and enhancing
security. Two-way TLS and RBAC have been enabled by default for all
components, which slightly changes the way the module is configured.
See <xref linkend="sec-kubernetes"/> for details.
</para>
</listitem>
</itemizedlist>
</section>
@@ -452,6 +459,23 @@
<option>services.matomo.package</option> which determines the used
Matomo version.
</para>
<para>
The Matomo module now also comes with the systemd service <literal>matomo-archive-processing.service</literal>
and a timer that automatically triggers archive processing every hour.
This means that you can safely
<link xlink:href="https://matomo.org/docs/setup-auto-archiving/#disable-browser-triggers-for-matomo-archiving-and-limit-matomo-reports-to-updating-every-hour">
disable browser triggers for Matomo archiving
</link> at <literal>Administration > System > General Settings</literal>.
</para>
<para>
Additionally, you can enable the option to
<link xlink:href="https://matomo.org/docs/privacy/#step-2-delete-old-visitors-logs">
delete old visitor logs
</link> at <literal>Administration > System > Privacy</literal>,
but make sure that you run <literal>systemctl start matomo-archive-processing.service</literal>
at least once without errors if you have already collected data before,
so that the reports get archived before the source data gets deleted.
</para>
</listitem>
<listitem>
<para>
@@ -555,6 +579,40 @@
provisioning.
</para>
</listitem>
<listitem>
<para>
The use of insecure ports on kubernetes has been deprecated.
Thus, the options
<varname>services.kubernetes.apiserver.port</varname> and
<varname>services.kubernetes.controllerManager.port</varname>
have been renamed to <varname>.insecurePort</varname>,
and the default of both options has changed to 0 (disabled).
</para>
</listitem>
<listitem>
<para>
Note that the default value of
<varname>services.kubernetes.apiserver.bindAddress</varname>
has changed from 127.0.0.1 to 0.0.0.0, allowing the apiserver to be
accessible from outside the master node itself.
If the apiserver insecurePort is enabled,
it is strongly recommended to bind it only on the loopback interface. See:
<varname>services.kubernetes.apiserver.insecureBindAddress</varname>.
</para>
</listitem>
<listitem>
<para>
The options <varname>services.kubernetes.apiserver.allowPrivileged</varname>
and <varname>services.kubernetes.kubelet.allowPrivileged</varname> now
default to false, disallowing privileged containers on the cluster.
</para>
</listitem>
<listitem>
<para>
The kubernetes module no longer adds the kubernetes package to
<varname>environment.systemPackages</varname> implicitly.
</para>
</listitem>
</itemizedlist>
</section>
</section>
@ -156,6 +156,7 @@ in
|
||||
environment.systemPackages = [ pkgs.man-db ];
|
||||
environment.pathsToLink = [ "/share/man" ];
|
||||
environment.extraOutputsToInstall = [ "man" ] ++ optional cfg.dev.enable "devman";
|
||||
environment.etc."man.conf".source = "${pkgs.man-db}/etc/man_db.conf";
|
||||
})
|
||||
|
||||
(mkIf cfg.info.enable {
|
||||
|
@ -290,7 +290,7 @@
|
||||
riak-cs = 263;
|
||||
infinoted = 264;
|
||||
sickbeard = 265;
|
||||
# glance = 266; # unused, removed 2017-12-13
|
||||
headphones = 266;
|
||||
couchpotato = 267;
|
||||
gogs = 268;
|
||||
pdns-recursor = 269;
|
||||
@ -590,7 +590,7 @@
|
||||
riak-cs = 263;
|
||||
infinoted = 264;
|
||||
sickbeard = 265;
|
||||
# glance = 266; # unused, removed 2017-12-13
|
||||
headphones = 266;
|
||||
couchpotato = 267;
|
||||
gogs = 268;
|
||||
kresd = 270;
|
||||
|
@ -82,6 +82,7 @@
|
||||
./misc/version.nix
|
||||
./programs/adb.nix
|
||||
./programs/atop.nix
|
||||
./programs/autojump.nix
|
||||
./programs/bash/bash.nix
|
||||
./programs/bcc.nix
|
||||
./programs/blcr.nix
|
||||
@ -195,9 +196,17 @@
|
||||
./services/backup/tarsnap.nix
|
||||
./services/backup/znapzend.nix
|
||||
./services/cluster/hadoop/default.nix
|
||||
./services/cluster/kubernetes/addons/dns.nix
|
||||
./services/cluster/kubernetes/addons/dashboard.nix
|
||||
./services/cluster/kubernetes/addon-manager.nix
|
||||
./services/cluster/kubernetes/apiserver.nix
|
||||
./services/cluster/kubernetes/controller-manager.nix
|
||||
./services/cluster/kubernetes/default.nix
|
||||
./services/cluster/kubernetes/dns.nix
|
||||
./services/cluster/kubernetes/dashboard.nix
|
||||
./services/cluster/kubernetes/flannel.nix
|
||||
./services/cluster/kubernetes/kubelet.nix
|
||||
./services/cluster/kubernetes/pki.nix
|
||||
./services/cluster/kubernetes/proxy.nix
|
||||
./services/cluster/kubernetes/scheduler.nix
|
||||
./services/computing/boinc/client.nix
|
||||
./services/computing/torque/server.nix
|
||||
./services/computing/torque/mom.nix
|
||||
@ -283,6 +292,7 @@
|
||||
./services/hardware/acpid.nix
|
||||
./services/hardware/actkbd.nix
|
||||
./services/hardware/bluetooth.nix
|
||||
./services/hardware/bolt.nix
|
||||
./services/hardware/brltty.nix
|
||||
./services/hardware/freefall.nix
|
||||
./services/hardware/fwupd.nix
|
||||
@ -383,6 +393,7 @@
|
||||
./services/misc/gogs.nix
|
||||
./services/misc/gollum.nix
|
||||
./services/misc/gpsd.nix
|
||||
./services/misc/headphones.nix
|
||||
./services/misc/home-assistant.nix
|
||||
./services/misc/ihaskell.nix
|
||||
./services/misc/irkerd.nix
|
||||
@ -719,6 +730,8 @@
|
||||
./services/web-apps/atlassian/jira.nix
|
||||
./services/web-apps/codimd.nix
|
||||
./services/web-apps/frab.nix
|
||||
./services/web-apps/icingaweb2/icingaweb2.nix
|
||||
./services/web-apps/icingaweb2/module-monitoring.nix
|
||||
./services/web-apps/mattermost.nix
|
||||
./services/web-apps/nextcloud.nix
|
||||
./services/web-apps/nexus.nix
|
||||
|
33  nixos/modules/programs/autojump.nix  Normal file
@@ -0,0 +1,33 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.programs.autojump;
|
||||
prg = config.programs;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
programs.autojump = {
|
||||
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to enable autojump.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
environment.pathsToLink = [ "/share/autojump" ];
|
||||
environment.systemPackages = [ pkgs.autojump ];
|
||||
|
||||
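# autojump has to be sourced by each interactive shell; bash is assumed to be
# enabled, while zsh and fish are only hooked up when their modules are enabled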
programs.bash.interactiveShellInit = "source ${pkgs.autojump}/share/autojump/autojump.bash";
|
||||
programs.zsh.interactiveShellInit = mkIf prg.zsh.enable "source ${pkgs.autojump}/share/autojump/autojump.zsh";
|
||||
programs.fish.interactiveShellInit = mkIf prg.fish.enable "source ${pkgs.autojump}/share/autojump/autojump.fish";
|
||||
};
|
||||
}
|
@ -3,18 +3,27 @@
|
||||
with lib;
|
||||
let
|
||||
cfg = config.programs.singularity;
|
||||
singularity = pkgs.singularity.overrideAttrs (attrs : {
|
||||
installPhase = attrs.installPhase + ''
|
||||
mv $bin/libexec/singularity/bin/starter-suid $bin/libexec/singularity/bin/starter-suid.orig
|
||||
ln -s /run/wrappers/bin/singularity-suid $bin/libexec/singularity/bin/starter-suid
|
||||
'';
|
||||
});
|
||||
in {
|
||||
options.programs.singularity = {
|
||||
enable = mkEnableOption "Singularity";
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
environment.systemPackages = [ pkgs.singularity ];
|
||||
systemd.tmpfiles.rules = [ "d /var/singularity/mnt/session 0770 root root -"
|
||||
environment.systemPackages = [ singularity ];
|
||||
security.wrappers.singularity-suid.source = "${singularity}/libexec/singularity/bin/starter-suid.orig";
|
||||
systemd.tmpfiles.rules = [
|
||||
"d /var/singularity/mnt/session 0770 root root -"
|
||||
"d /var/singularity/mnt/final 0770 root root -"
|
||||
"d /var/singularity/mnt/overlay 0770 root root -"
|
||||
"d /var/singularity/mnt/container 0770 root root -"
|
||||
"d /var/singularity/mnt/source 0770 root root -"];
|
||||
"d /var/singularity/mnt/source 0770 root root -"
|
||||
];
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -88,7 +88,8 @@ in
|
||||
type = types.lines;
|
||||
default = "";
|
||||
description = ''
|
||||
Extra configuration text appended to <filename>ssh_config</filename>.
|
||||
Extra configuration text prepended to <filename>ssh_config</filename>. Other generated
|
||||
options will be added after a <code>Host *</code> pattern.
|
||||
See <citerefentry><refentrytitle>ssh_config</refentrytitle><manvolnum>5</manvolnum></citerefentry>
|
||||
for help.
|
||||
'';
|
||||
@ -203,6 +204,11 @@ in
|
||||
# generation in the sshd service.
|
||||
environment.etc."ssh/ssh_config".text =
|
||||
''
|
||||
# Custom options from `extraConfig`, to override generated options
|
||||
${cfg.extraConfig}
|
||||
|
||||
# Generated options from other settings
|
||||
Host *
|
||||
AddressFamily ${if config.networking.enableIPv6 then "any" else "inet"}
|
||||
|
||||
${optionalString cfg.setXAuthLocation ''
|
||||
@ -213,8 +219,6 @@ in
|
||||
|
||||
${optionalString (cfg.pubkeyAcceptedKeyTypes != []) "PubkeyAcceptedKeyTypes ${concatStringsSep "," cfg.pubkeyAcceptedKeyTypes}"}
|
||||
${optionalString (cfg.hostKeyAlgorithms != []) "HostKeyAlgorithms ${concatStringsSep "," cfg.hostKeyAlgorithms}"}
|
||||
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
|
||||
environment.etc."ssh/ssh_known_hosts".text = knownHostsText;
|
||||
|
@ -40,9 +40,19 @@ with lib;
|
||||
(mkRenamedOptionModule [ "services" "kibana" "host" ] [ "services" "kibana" "listenAddress" ])
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ])
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "address" ] ["services" "kubernetes" "apiserver" "bindAddress"])
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "port" ] ["services" "kubernetes" "apiserver" "insecurePort"])
|
||||
(mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "publicAddress" ] "")
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "addons" "dashboard" "enableRBAC" ] [ "services" "kubernetes" "addons" "dashboard" "rbac" "enable" ])
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "address" ] ["services" "kubernetes" "controllerManager" "bindAddress"])
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "port" ] ["services" "kubernetes" "controllerManager" "insecurePort"])
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "etcd" "servers" ] [ "services" "kubernetes" "apiserver" "etcd" "servers" ])
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "etcd" "keyFile" ] [ "services" "kubernetes" "apiserver" "etcd" "keyFile" ])
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "etcd" "certFile" ] [ "services" "kubernetes" "apiserver" "etcd" "certFile" ])
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "etcd" "caFile" ] [ "services" "kubernetes" "apiserver" "etcd" "caFile" ])
|
||||
(mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "applyManifests" ] "")
|
||||
(mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "cadvisorPort" ] "")
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "proxy" "address" ] ["services" "kubernetes" "proxy" "bindAddress"])
|
||||
(mkRemovedOptionModule [ "services" "kubernetes" "verbose" ] "")
|
||||
(mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ])
|
||||
(mkRenamedOptionModule [ "services" "mpd" "network" "host" ] [ "services" "mpd" "network" "listenAddress" ])
|
||||
(mkRenamedOptionModule [ "services" "neo4j" "host" ] [ "services" "neo4j" "defaultListenAddress" ])
|
||||
@ -59,6 +69,7 @@ with lib;
|
||||
(mkRenamedOptionModule [ "services" "statsd" "host" ] [ "services" "statsd" "listenAddress" ])
|
||||
(mkRenamedOptionModule [ "services" "subsonic" "host" ] [ "services" "subsonic" "listenAddress" ])
|
||||
(mkRenamedOptionModule [ "services" "tor" "relay" "portSpec" ] [ "services" "tor" "relay" "port" ])
|
||||
(mkRenamedOptionModule [ "services" "vmwareGuest" ] [ "virtualisation" "vmware" "guest" ])
|
||||
(mkRenamedOptionModule [ "jobs" ] [ "systemd" "services" ])
|
||||
|
||||
(mkRenamedOptionModule [ "services" "gitlab" "stateDir" ] [ "services" "gitlab" "statePath" ])
|
||||
|
167  nixos/modules/services/cluster/kubernetes/addon-manager.nix  Normal file
@@ -0,0 +1,167 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
top = config.services.kubernetes;
|
||||
cfg = top.addonManager;
|
||||
|
||||
isRBACEnabled = elem "RBAC" top.apiserver.authorizationMode;
|
||||
|
||||
addons = pkgs.runCommand "kubernetes-addons" { } ''
|
||||
mkdir -p $out
|
||||
# since we are mounting the addons to the addon manager, they need to be copied
|
||||
${concatMapStringsSep ";" (a: "cp -v ${a}/* $out/") (mapAttrsToList (name: addon:
|
||||
pkgs.writeTextDir "${name}.json" (builtins.toJSON addon)
|
||||
) (cfg.addons))}
|
||||
'';
|
||||
in
|
||||
{
|
||||
###### interface
|
||||
options.services.kubernetes.addonManager = with lib.types; {
|
||||
|
||||
bootstrapAddons = mkOption {
|
||||
description = ''
|
||||
Bootstrap addons are like regular addons, but they are applied with cluster-admin rights.
|
||||
They are applied at addon-manager startup only.
|
||||
'';
|
||||
default = { };
|
||||
type = attrsOf attrs;
|
||||
example = literalExample ''
|
||||
{
|
||||
"my-service" = {
|
||||
"apiVersion" = "v1";
|
||||
"kind" = "Service";
|
||||
"metadata" = {
|
||||
"name" = "my-service";
|
||||
"namespace" = "default";
|
||||
};
|
||||
"spec" = { ... };
|
||||
};
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
addons = mkOption {
|
||||
description = "Kubernetes addons (any kind of Kubernetes resource can be an addon).";
|
||||
default = { };
|
||||
type = attrsOf (either attrs (listOf attrs));
|
||||
example = literalExample ''
|
||||
{
|
||||
"my-service" = {
|
||||
"apiVersion" = "v1";
|
||||
"kind" = "Service";
|
||||
"metadata" = {
|
||||
"name" = "my-service";
|
||||
"namespace" = "default";
|
||||
};
|
||||
"spec" = { ... };
|
||||
};
|
||||
}
|
||||
// import <nixpkgs/nixos/modules/services/cluster/kubernetes/dashboard.nix> { cfg = config.services.kubernetes; };
|
||||
'';
|
||||
};
|
||||
|
||||
enable = mkEnableOption "Whether to enable Kubernetes addon manager.";
|
||||
};
|
||||
|
||||
###### implementation
|
||||
config = mkIf cfg.enable {
|
||||
environment.etc."kubernetes/addons".source = "${addons}/";
|
||||
|
||||
systemd.services.kube-addon-manager = {
|
||||
description = "Kubernetes addon manager";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [ "kube-apiserver.service" ];
|
||||
environment.ADDON_PATH = "/etc/kubernetes/addons/";
|
||||
path = [ pkgs.gawk ];
|
||||
serviceConfig = {
|
||||
Slice = "kubernetes.slice";
|
||||
ExecStart = "${top.package}/bin/kube-addons";
|
||||
WorkingDirectory = top.dataDir;
|
||||
User = "kubernetes";
|
||||
Group = "kubernetes";
|
||||
Restart = "on-failure";
|
||||
RestartSec = 10;
|
||||
};
|
||||
};
|
||||
|
||||
services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
|
||||
(let
|
||||
name = "system:kube-addon-manager";
|
||||
namespace = "kube-system";
|
||||
in
|
||||
{
|
||||
|
||||
kube-addon-manager-r = {
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "Role";
|
||||
metadata = {
|
||||
inherit name namespace;
|
||||
};
|
||||
rules = [{
|
||||
apiGroups = ["*"];
|
||||
resources = ["*"];
|
||||
verbs = ["*"];
|
||||
}];
|
||||
};
|
||||
|
||||
kube-addon-manager-rb = {
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "RoleBinding";
|
||||
metadata = {
|
||||
inherit name namespace;
|
||||
};
|
||||
roleRef = {
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
kind = "Role";
|
||||
inherit name;
|
||||
};
|
||||
subjects = [{
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
kind = "User";
|
||||
inherit name;
|
||||
}];
|
||||
};
|
||||
|
||||
kube-addon-manager-cluster-lister-cr = {
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "ClusterRole";
|
||||
metadata = {
|
||||
name = "${name}:cluster-lister";
|
||||
};
|
||||
rules = [{
|
||||
apiGroups = ["*"];
|
||||
resources = ["*"];
|
||||
verbs = ["list"];
|
||||
}];
|
||||
};
|
||||
|
||||
kube-addon-manager-cluster-lister-crb = {
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "ClusterRoleBinding";
|
||||
metadata = {
|
||||
name = "${name}:cluster-lister";
|
||||
};
|
||||
roleRef = {
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
kind = "ClusterRole";
|
||||
name = "${name}:cluster-lister";
|
||||
};
|
||||
subjects = [{
|
||||
kind = "User";
|
||||
inherit name;
|
||||
}];
|
||||
};
|
||||
});
|
||||
|
||||
services.kubernetes.pki.certs = {
|
||||
addonManager = top.lib.mkCert {
|
||||
name = "kube-addon-manager";
|
||||
CN = "system:kube-addon-manager";
|
||||
action = "systemctl restart kube-addon-manager.service";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
}
|
@ -8,6 +8,13 @@ in {
|
||||
options.services.kubernetes.addons.dashboard = {
|
||||
enable = mkEnableOption "kubernetes dashboard addon";
|
||||
|
||||
extraArgs = mkOption {
|
||||
description = "Extra arguments to append to the dashboard cmdline";
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
example = ["--enable-skip-login"];
|
||||
};
|
||||
|
||||
rbac = mkOption {
|
||||
description = "Role-based access control (RBAC) options";
|
||||
default = {};
|
||||
@ -31,7 +38,7 @@ in {
|
||||
version = mkOption {
|
||||
description = "Which version of the kubernetes dashboard to deploy";
|
||||
type = types.str;
|
||||
default = "v1.8.3";
|
||||
default = "v1.10.1";
|
||||
};
|
||||
|
||||
image = mkOption {
|
||||
@ -39,9 +46,9 @@ in {
|
||||
type = types.attrs;
|
||||
default = {
|
||||
imageName = "k8s.gcr.io/kubernetes-dashboard-amd64";
|
||||
imageDigest = "sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0";
|
||||
imageDigest = "sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747";
|
||||
finalImageTag = cfg.version;
|
||||
sha256 = "18ajcg0q1vignfjk2sm4xj4wzphfz8wah69ps8dklqfvv0164mc8";
|
||||
sha256 = "01xrr4pwgr2hcjrjsi3d14ifpzdfbxzqpzxbk2fkbjb9zkv38zxy";
|
||||
};
|
||||
};
|
||||
};
|
||||
@ -99,7 +106,7 @@ in {
|
||||
memory = "100Mi";
|
||||
};
|
||||
};
|
||||
args = ["--auto-generate-certificates"];
|
||||
args = ["--auto-generate-certificates"] ++ cfg.extraArgs;
|
||||
volumeMounts = [{
|
||||
name = "tmp-volume";
|
||||
mountPath = "/tmp";
|
@ -3,7 +3,7 @@
|
||||
with lib;
|
||||
|
||||
let
|
||||
version = "1.2.5";
|
||||
version = "1.3.1";
|
||||
cfg = config.services.kubernetes.addons.dns;
|
||||
ports = {
|
||||
dns = 10053;
|
||||
@ -43,9 +43,9 @@ in {
|
||||
type = types.attrs;
|
||||
default = {
|
||||
imageName = "coredns/coredns";
|
||||
imageDigest = "sha256:33c8da20b887ae12433ec5c40bfddefbbfa233d5ce11fb067122e68af30291d6";
|
||||
imageDigest = "sha256:02382353821b12c21b062c59184e227e001079bb13ebd01f9d3270ba0fcbf1e4";
|
||||
finalImageTag = version;
|
||||
sha256 = "13q19rgwapv27xcs664dw502254yw4zw63insf6g2danidv2mg6i";
|
||||
sha256 = "0vbylgyxv2jm2mnzk6f28jbsj305zsxmx3jr6ngjq461czcl5fi5";
|
||||
};
|
||||
};
|
||||
};
|
||||
@ -54,21 +54,7 @@ in {
|
||||
services.kubernetes.kubelet.seedDockerImages =
|
||||
singleton (pkgs.dockerTools.pullImage cfg.coredns);
|
||||
|
||||
services.kubernetes.addonManager.addons = {
|
||||
coredns-sa = {
|
||||
apiVersion = "v1";
|
||||
kind = "ServiceAccount";
|
||||
metadata = {
|
||||
labels = {
|
||||
"addonmanager.kubernetes.io/mode" = "Reconcile";
|
||||
"k8s-app" = "kube-dns";
|
||||
"kubernetes.io/cluster-service" = "true";
|
||||
};
|
||||
name = "coredns";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
};
|
||||
|
||||
services.kubernetes.addonManager.bootstrapAddons = {
|
||||
coredns-cr = {
|
||||
apiVersion = "rbac.authorization.k8s.io/v1beta1";
|
||||
kind = "ClusterRole";
|
||||
@ -123,6 +109,22 @@ in {
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
services.kubernetes.addonManager.addons = {
|
||||
coredns-sa = {
|
||||
apiVersion = "v1";
|
||||
kind = "ServiceAccount";
|
||||
metadata = {
|
||||
labels = {
|
||||
"addonmanager.kubernetes.io/mode" = "Reconcile";
|
||||
"k8s-app" = "kube-dns";
|
||||
"kubernetes.io/cluster-service" = "true";
|
||||
};
|
||||
name = "coredns";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
};
|
||||
|
||||
coredns-cm = {
|
||||
apiVersion = "v1";
|
428  nixos/modules/services/cluster/kubernetes/apiserver.nix  Normal file
@@ -0,0 +1,428 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
top = config.services.kubernetes;
|
||||
cfg = top.apiserver;
|
||||
|
||||
isRBACEnabled = elem "RBAC" cfg.authorizationMode;
|
||||
|
||||
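# the first address in the configured service cluster IP range (e.g. 10.0.0.1
# for 10.0.0.0/24); it is added to the apiserver certificate's hosts below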
apiserverServiceIP = (concatStringsSep "." (
|
||||
take 3 (splitString "." cfg.serviceClusterIpRange
|
||||
)) + ".1");
|
||||
in
|
||||
{
|
||||
###### interface
|
||||
options.services.kubernetes.apiserver = with lib.types; {
|
||||
|
||||
advertiseAddress = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver IP address on which to advertise the apiserver
|
||||
to members of the cluster. This address must be reachable by the rest
|
||||
of the cluster.
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr str;
|
||||
};
|
||||
|
||||
allowPrivileged = mkOption {
|
||||
description = "Whether to allow privileged containers on Kubernetes.";
|
||||
default = false;
|
||||
type = bool;
|
||||
};
|
||||
|
||||
authorizationMode = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
|
||||
<link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
|
||||
'';
|
||||
default = ["RBAC" "Node"]; # Enabling RBAC by default, although kubernetes default is AllowAllow
|
||||
type = listOf (enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
|
||||
};
|
||||
|
||||
authorizationPolicy = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver authorization policy file. See
|
||||
<link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
|
||||
'';
|
||||
default = [];
|
||||
type = listOf attrs;
|
||||
};
|
||||
|
||||
basicAuthFile = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver basic authentication file. See
|
||||
<link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
bindAddress = mkOption {
|
||||
description = ''
|
||||
The IP address on which to listen for the --secure-port port.
|
||||
The associated interface(s) must be reachable by the rest
|
||||
of the cluster, and by CLI/web clients.
|
||||
'';
|
||||
default = "0.0.0.0";
|
||||
type = str;
|
||||
};
|
||||
|
||||
clientCaFile = mkOption {
|
||||
description = "Kubernetes apiserver CA file for client auth.";
|
||||
default = top.caFile;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
disableAdmissionPlugins = mkOption {
|
||||
description = ''
|
||||
Kubernetes admission control plugins to disable. See
|
||||
<link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
|
||||
'';
|
||||
default = [];
|
||||
type = listOf str;
|
||||
};
|
||||
|
||||
enable = mkEnableOption "Kubernetes apiserver";
|
||||
|
||||
enableAdmissionPlugins = mkOption {
|
||||
description = ''
|
||||
Kubernetes admission control plugins to enable. See
|
||||
<link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
|
||||
'';
|
||||
default = [
|
||||
"NamespaceLifecycle" "LimitRanger" "ServiceAccount"
|
||||
"ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds"
|
||||
"NodeRestriction"
|
||||
];
|
||||
example = [
|
||||
"NamespaceLifecycle" "NamespaceExists" "LimitRanger"
|
||||
"SecurityContextDeny" "ServiceAccount" "ResourceQuota"
|
||||
"PodSecurityPolicy" "NodeRestriction" "DefaultStorageClass"
|
||||
];
|
||||
type = listOf str;
|
||||
};
|
||||
|
||||
etcd = {
|
||||
servers = mkOption {
|
||||
description = "List of etcd servers.";
|
||||
default = ["http://127.0.0.1:2379"];
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
|
||||
keyFile = mkOption {
|
||||
description = "Etcd key file.";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
certFile = mkOption {
|
||||
description = "Etcd cert file.";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
caFile = mkOption {
|
||||
description = "Etcd ca file.";
|
||||
default = top.caFile;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes apiserver extra command line options.";
|
||||
default = "";
|
||||
type = str;
|
||||
};
|
||||
|
||||
extraSANs = mkOption {
|
||||
description = "Extra x509 Subject Alternative Names to be added to the kubernetes apiserver tls cert.";
|
||||
default = [];
|
||||
type = listOf str;
|
||||
};
|
||||
|
||||
featureGates = mkOption {
|
||||
description = "List set of feature gates";
|
||||
default = top.featureGates;
|
||||
type = listOf str;
|
||||
};
|
||||
|
||||
insecureBindAddress = mkOption {
|
||||
description = "The IP address on which to serve the --insecure-port.";
|
||||
default = "127.0.0.1";
|
||||
type = str;
|
||||
};
|
||||
|
||||
insecurePort = mkOption {
|
||||
description = "Kubernetes apiserver insecure listening port. (0 = disabled)";
|
||||
default = 0;
|
||||
type = int;
|
||||
};
|
||||
|
||||
kubeletClientCaFile = mkOption {
|
||||
description = "Path to a cert file for connecting to kubelet.";
|
||||
default = top.caFile;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
kubeletClientCertFile = mkOption {
|
||||
description = "Client certificate to use for connections to kubelet.";
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
kubeletClientKeyFile = mkOption {
|
||||
description = "Key to use for connections to kubelet.";
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
kubeletHttps = mkOption {
|
||||
description = "Whether to use https for connections to kubelet.";
|
||||
default = true;
|
||||
type = bool;
|
||||
};
|
||||
|
||||
runtimeConfig = mkOption {
|
||||
description = ''
|
||||
Api runtime configuration. See
|
||||
<link xlink:href="https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/"/>
|
||||
'';
|
||||
default = "authentication.k8s.io/v1beta1=true";
|
||||
example = "api/all=false,api/v1=true";
|
||||
type = str;
|
||||
};
|
||||
|
||||
storageBackend = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver storage backend.
|
||||
'';
|
||||
default = "etcd3";
|
||||
type = enum ["etcd2" "etcd3"];
|
||||
};
|
||||
|
||||
securePort = mkOption {
|
||||
description = "Kubernetes apiserver secure port.";
|
||||
default = 6443;
|
||||
type = int;
|
||||
};
|
||||
|
||||
serviceAccountKeyFile = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
|
||||
used to verify ServiceAccount tokens. By default tls private key file
|
||||
is used.
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
serviceClusterIpRange = mkOption {
|
||||
description = ''
|
||||
A CIDR notation IP range from which to assign service cluster IPs.
|
||||
This must not overlap with any IP ranges assigned to nodes for pods.
|
||||
'';
|
||||
default = "10.0.0.0/24";
|
||||
type = str;
|
||||
};
|
||||
|
||||
tlsCertFile = mkOption {
|
||||
description = "Kubernetes apiserver certificate file.";
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
tlsKeyFile = mkOption {
|
||||
description = "Kubernetes apiserver private key file.";
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
tokenAuthFile = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver token authentication file. See
|
||||
<link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
verbosity = mkOption {
|
||||
description = ''
|
||||
Optional glog verbosity level for logging statements. See
|
||||
<link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr int;
|
||||
};
|
||||
|
||||
webhookConfig = mkOption {
|
||||
description = ''
|
||||
Kubernetes apiserver Webhook config file. It uses the kubeconfig file format.
|
||||
See <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/webhook/"/>
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
config = mkMerge [
|
||||
|
||||
(mkIf cfg.enable {
|
||||
systemd.services.kube-apiserver = {
|
||||
description = "Kubernetes APIServer Service";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
Slice = "kubernetes.slice";
|
||||
ExecStart = ''${top.package}/bin/kube-apiserver \
|
||||
--allow-privileged=${boolToString cfg.allowPrivileged} \
|
||||
--authorization-mode=${concatStringsSep "," cfg.authorizationMode} \
|
||||
${optionalString (elem "ABAC" cfg.authorizationMode)
|
||||
"--authorization-policy-file=${
|
||||
pkgs.writeText "kube-auth-policy.jsonl"
|
||||
(concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.authorizationPolicy)
|
||||
}"
|
||||
} \
|
||||
${optionalString (elem "Webhook" cfg.authorizationMode)
|
||||
"--authorization-webhook-config-file=${cfg.webhookConfig}"
|
||||
} \
|
||||
--bind-address=${cfg.bindAddress} \
|
||||
${optionalString (cfg.advertiseAddress != null)
|
||||
"--advertise-address=${cfg.advertiseAddress}"} \
|
||||
${optionalString (cfg.clientCaFile != null)
|
||||
"--client-ca-file=${cfg.clientCaFile}"} \
|
||||
--disable-admission-plugins=${concatStringsSep "," cfg.disableAdmissionPlugins} \
|
||||
--enable-admission-plugins=${concatStringsSep "," cfg.enableAdmissionPlugins} \
|
||||
--etcd-servers=${concatStringsSep "," cfg.etcd.servers} \
|
||||
${optionalString (cfg.etcd.caFile != null)
|
||||
"--etcd-cafile=${cfg.etcd.caFile}"} \
|
||||
${optionalString (cfg.etcd.certFile != null)
|
||||
"--etcd-certfile=${cfg.etcd.certFile}"} \
|
||||
${optionalString (cfg.etcd.keyFile != null)
|
||||
"--etcd-keyfile=${cfg.etcd.keyFile}"} \
|
||||
${optionalString (cfg.featureGates != [])
|
||||
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
|
||||
${optionalString (cfg.basicAuthFile != null)
|
||||
"--basic-auth-file=${cfg.basicAuthFile}"} \
|
||||
--kubelet-https=${boolToString cfg.kubeletHttps} \
|
||||
${optionalString (cfg.kubeletClientCaFile != null)
|
||||
"--kubelet-certificate-authority=${cfg.kubeletClientCaFile}"} \
|
||||
${optionalString (cfg.kubeletClientCertFile != null)
|
||||
"--kubelet-client-certificate=${cfg.kubeletClientCertFile}"} \
|
||||
${optionalString (cfg.kubeletClientKeyFile != null)
|
||||
"--kubelet-client-key=${cfg.kubeletClientKeyFile}"} \
|
||||
--insecure-bind-address=${cfg.insecureBindAddress} \
|
||||
--insecure-port=${toString cfg.insecurePort} \
|
||||
${optionalString (cfg.runtimeConfig != "")
|
||||
"--runtime-config=${cfg.runtimeConfig}"} \
|
||||
--secure-port=${toString cfg.securePort} \
|
||||
${optionalString (cfg.serviceAccountKeyFile!=null)
|
||||
"--service-account-key-file=${cfg.serviceAccountKeyFile}"} \
|
||||
--service-cluster-ip-range=${cfg.serviceClusterIpRange} \
|
||||
--storage-backend=${cfg.storageBackend} \
|
||||
${optionalString (cfg.tlsCertFile != null)
|
||||
"--tls-cert-file=${cfg.tlsCertFile}"} \
|
||||
${optionalString (cfg.tlsKeyFile != null)
|
||||
"--tls-private-key-file=${cfg.tlsKeyFile}"} \
|
||||
${optionalString (cfg.tokenAuthFile != null)
|
||||
"--token-auth-file=${cfg.tokenAuthFile}"} \
|
||||
${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
|
||||
${cfg.extraOpts}
|
||||
'';
|
||||
WorkingDirectory = top.dataDir;
|
||||
User = "kubernetes";
|
||||
Group = "kubernetes";
|
||||
AmbientCapabilities = "cap_net_bind_service";
|
||||
Restart = "on-failure";
|
||||
RestartSec = 5;
|
||||
};
|
||||
};
|
||||
|
||||
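# enabling the apiserver also provides sane defaults for a local etcd instance,
# with TLS client/peer auth and URLs based on the cluster's masterAddress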
services.etcd = {
|
||||
clientCertAuth = mkDefault true;
|
||||
peerClientCertAuth = mkDefault true;
|
||||
listenClientUrls = mkDefault ["https://0.0.0.0:2379"];
|
||||
listenPeerUrls = mkDefault ["https://0.0.0.0:2380"];
|
||||
advertiseClientUrls = mkDefault ["https://${top.masterAddress}:2379"];
|
||||
initialCluster = mkDefault ["${top.masterAddress}=https://${top.masterAddress}:2380"];
|
||||
name = top.masterAddress;
|
||||
initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
|
||||
};
|
||||
|
||||
services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {
|
||||
|
||||
apiserver-kubelet-api-admin-crb = {
|
||||
apiVersion = "rbac.authorization.k8s.io/v1";
|
||||
kind = "ClusterRoleBinding";
|
||||
metadata = {
|
||||
name = "system:kube-apiserver:kubelet-api-admin";
|
||||
};
|
||||
roleRef = {
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
kind = "ClusterRole";
|
||||
name = "system:kubelet-api-admin";
|
||||
};
|
||||
subjects = [{
|
||||
kind = "User";
|
||||
name = "system:kube-apiserver";
|
||||
}];
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
services.kubernetes.pki.certs = with top.lib; {
|
||||
apiServer = mkCert {
|
||||
name = "kube-apiserver";
|
||||
CN = "kubernetes";
|
||||
hosts = [
|
||||
"kubernetes.default.svc"
|
||||
"kubernetes.default.svc.${top.addons.dns.clusterDomain}"
|
||||
cfg.advertiseAddress
|
||||
top.masterAddress
|
||||
apiserverServiceIP
|
||||
"127.0.0.1"
|
||||
] ++ cfg.extraSANs;
|
||||
action = "systemctl restart kube-apiserver.service";
|
||||
};
|
||||
apiserverKubeletClient = mkCert {
|
||||
name = "kube-apiserver-kubelet-client";
|
||||
CN = "system:kube-apiserver";
|
||||
action = "systemctl restart kube-apiserver.service";
|
||||
};
|
||||
apiserverEtcdClient = mkCert {
|
||||
name = "kube-apiserver-etcd-client";
|
||||
CN = "etcd-client";
|
||||
action = "systemctl restart kube-apiserver.service";
|
||||
};
|
||||
clusterAdmin = mkCert {
|
||||
name = "cluster-admin";
|
||||
CN = "cluster-admin";
|
||||
fields = {
|
||||
O = "system:masters";
|
||||
};
|
||||
privateKeyOwner = "root";
|
||||
};
|
||||
etcd = mkCert {
|
||||
name = "etcd";
|
||||
CN = top.masterAddress;
|
||||
hosts = [
|
||||
"etcd.local"
|
||||
"etcd.${top.addons.dns.clusterDomain}"
|
||||
top.masterAddress
|
||||
cfg.advertiseAddress
|
||||
];
|
||||
privateKeyOwner = "etcd";
|
||||
action = "systemctl restart etcd.service";
|
||||
};
|
||||
};
|
||||
|
||||
})
|
||||
|
||||
];
|
||||
|
||||
}
|
162  nixos/modules/services/cluster/kubernetes/controller-manager.nix  Normal file
@@ -0,0 +1,162 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
top = config.services.kubernetes;
|
||||
cfg = top.controllerManager;
|
||||
in
|
||||
{
|
||||
###### interface
|
||||
options.services.kubernetes.controllerManager = with lib.types; {
|
||||
|
||||
allocateNodeCIDRs = mkOption {
|
||||
description = "Whether to automatically allocate CIDR ranges for cluster nodes.";
|
||||
default = true;
|
||||
type = bool;
|
||||
};
|
||||
|
||||
bindAddress = mkOption {
|
||||
description = "Kubernetes controller manager listening address.";
|
||||
default = "127.0.0.1";
|
||||
type = str;
|
||||
};
|
||||
|
||||
clusterCidr = mkOption {
|
||||
description = "Kubernetes CIDR Range for Pods in cluster.";
|
||||
default = top.clusterCidr;
|
||||
type = str;
|
||||
};
|
||||
|
||||
enable = mkEnableOption "Kubernetes controller manager.";
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes controller manager extra command line options.";
|
||||
default = "";
|
||||
type = str;
|
||||
};
|
||||
|
||||
featureGates = mkOption {
|
||||
description = "List set of feature gates";
|
||||
default = top.featureGates;
|
||||
type = listOf str;
|
||||
};
|
||||
|
||||
insecurePort = mkOption {
|
||||
description = "Kubernetes controller manager insecure listening port.";
|
||||
default = 0;
|
||||
type = int;
|
||||
};
|
||||
|
||||
kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes controller manager";
|
||||
|
||||
leaderElect = mkOption {
|
||||
description = "Whether to start leader election before executing main loop.";
|
||||
type = bool;
|
||||
default = true;
|
||||
};
|
||||
|
||||
rootCaFile = mkOption {
|
||||
description = ''
|
||||
Kubernetes controller manager certificate authority file included in
|
||||
service account's token secret.
|
||||
'';
|
||||
default = top.caFile;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
securePort = mkOption {
|
||||
description = "Kubernetes controller manager secure listening port.";
|
||||
default = 10252;
|
||||
type = int;
|
||||
};
|
||||
|
||||
serviceAccountKeyFile = mkOption {
|
||||
description = ''
|
||||
Kubernetes controller manager PEM-encoded private RSA key file used to
|
||||
sign service account tokens
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
tlsCertFile = mkOption {
|
||||
description = "Kubernetes controller-manager certificate file.";
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
tlsKeyFile = mkOption {
|
||||
description = "Kubernetes controller-manager private key file.";
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
verbosity = mkOption {
|
||||
description = ''
|
||||
Optional glog verbosity level for logging statements. See
|
||||
<link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr int;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
###### implementation
|
||||
config = mkIf cfg.enable {
|
||||
systemd.services.kube-controller-manager = {
|
||||
description = "Kubernetes Controller Manager Service";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [ "kube-apiserver.service" ];
|
||||
serviceConfig = {
|
||||
RestartSec = "30s";
|
||||
Restart = "on-failure";
|
||||
Slice = "kubernetes.slice";
|
||||
ExecStart = ''${top.package}/bin/kube-controller-manager \
|
||||
--allocate-node-cidrs=${boolToString cfg.allocateNodeCIDRs} \
|
||||
--bind-address=${cfg.bindAddress} \
|
||||
${optionalString (cfg.clusterCidr!=null)
|
||||
"--cluster-cidr=${cfg.clusterCidr}"} \
|
||||
${optionalString (cfg.featureGates != [])
|
||||
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
|
||||
--kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
|
||||
--leader-elect=${boolToString cfg.leaderElect} \
|
||||
${optionalString (cfg.rootCaFile!=null)
|
||||
"--root-ca-file=${cfg.rootCaFile}"} \
|
||||
--port=${toString cfg.insecurePort} \
|
||||
--secure-port=${toString cfg.securePort} \
|
||||
${optionalString (cfg.serviceAccountKeyFile!=null)
|
||||
"--service-account-private-key-file=${cfg.serviceAccountKeyFile}"} \
|
||||
${optionalString (cfg.tlsCertFile!=null)
|
||||
"--tls-cert-file=${cfg.tlsCertFile}"} \
|
||||
${optionalString (cfg.tlsKeyFile!=null)
|
||||
"--tls-key-file=${cfg.tlsKeyFile}"} \
|
||||
${optionalString (elem "RBAC" top.apiserver.authorizationMode)
|
||||
"--use-service-account-credentials"} \
|
||||
${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
|
||||
${cfg.extraOpts}
|
||||
'';
|
||||
WorkingDirectory = top.dataDir;
|
||||
User = "kubernetes";
|
||||
Group = "kubernetes";
|
||||
};
|
||||
path = top.path;
|
||||
};
|
||||
|
||||
services.kubernetes.pki.certs = with top.lib; {
|
||||
controllerManager = mkCert {
|
||||
name = "kube-controller-manager";
|
||||
CN = "kube-controller-manager";
|
||||
action = "systemctl restart kube-controller-manager.service";
|
||||
};
|
||||
controllerManagerClient = mkCert {
|
||||
name = "kube-controller-manager-client";
|
||||
CN = "system:kube-controller-manager";
|
||||
action = "systemctl restart kube-controller-manager.service";
|
||||
};
|
||||
};
|
||||
|
||||
services.kubernetes.controllerManager.kubeconfig.server = mkDefault top.apiserverAddress;
|
||||
};
|
||||
}
|
File diff suppressed because it is too large
134  nixos/modules/services/cluster/kubernetes/flannel.nix  Normal file
@@ -0,0 +1,134 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
top = config.services.kubernetes;
|
||||
cfg = top.flannel;
|
||||
|
||||
# we want flannel to use kubernetes itself as configuration backend, not direct etcd
|
||||
storageBackend = "kubernetes";
|
||||
|
||||
# needed for flannel to pass options to docker
|
||||
mkDockerOpts = pkgs.runCommand "mk-docker-opts" {
|
||||
buildInputs = [ pkgs.makeWrapper ];
|
||||
} ''
|
||||
mkdir -p $out
|
||||
cp ${pkgs.kubernetes.src}/cluster/centos/node/bin/mk-docker-opts.sh $out/mk-docker-opts.sh
|
||||
|
||||
# bashInteractive needed for `compgen`
|
||||
makeWrapper ${pkgs.bashInteractive}/bin/bash $out/mk-docker-opts --add-flags "$out/mk-docker-opts.sh"
|
||||
'';
|
||||
in
|
||||
{
|
||||
###### interface
|
||||
options.services.kubernetes.flannel = {
|
||||
enable = mkEnableOption "enable flannel networking";
|
||||
};
|
||||
|
||||
###### implementation
|
||||
config = mkIf cfg.enable {
|
||||
services.flannel = {
|
||||
|
||||
enable = mkDefault true;
|
||||
network = mkDefault top.clusterCidr;
|
||||
inherit storageBackend;
|
||||
nodeName = config.services.kubernetes.kubelet.hostname;
|
||||
};
|
||||
|
||||
services.kubernetes.kubelet = {
|
||||
networkPlugin = mkDefault "cni";
|
||||
cni.config = mkDefault [{
|
||||
name = "mynet";
|
||||
type = "flannel";
|
||||
delegate = {
|
||||
isDefaultGateway = true;
|
||||
bridge = "docker0";
|
||||
};
|
||||
}];
|
||||
};
|
||||
|
||||
systemd.services."mk-docker-opts" = {
|
||||
description = "Pre-Docker Actions";
|
||||
path = with pkgs; [ gawk gnugrep ];
|
||||
script = ''
|
||||
${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
|
||||
systemctl restart docker
|
||||
'';
|
||||
serviceConfig.Type = "oneshot";
|
||||
};
|
||||
|
||||
systemd.paths."flannel-subnet-env" = {
|
||||
wantedBy = [ "flannel.service" ];
|
||||
pathConfig = {
|
||||
PathModified = "/run/flannel/subnet.env";
|
||||
Unit = "mk-docker-opts.service";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.docker = {
|
||||
environment.DOCKER_OPTS = "-b none";
|
||||
serviceConfig.EnvironmentFile = "-/run/flannel/docker";
|
||||
};
|
||||
|
||||
# read environment variables generated by mk-docker-opts
|
||||
virtualisation.docker.extraOptions = "$DOCKER_OPTS";
|
||||
|
||||
networking = {
|
||||
firewall.allowedUDPPorts = [
|
||||
8285 # flannel udp
|
||||
8472 # flannel vxlan
|
||||
];
|
||||
dhcpcd.denyInterfaces = [ "docker*" "flannel*" ];
|
||||
};
|
||||
|
||||
services.kubernetes.pki.certs = {
|
||||
flannelClient = top.lib.mkCert {
|
||||
name = "flannel-client";
|
||||
CN = "flannel-client";
|
||||
action = "systemctl restart flannel.service";
|
||||
};
|
||||
};
|
||||
|
||||
# give flannel some Kubernetes RBAC permissions if applicable
|
||||
services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {
|
||||
|
||||
flannel-cr = {
|
||||
apiVersion = "rbac.authorization.k8s.io/v1beta1";
|
||||
kind = "ClusterRole";
|
||||
metadata = { name = "flannel"; };
|
||||
rules = [{
|
||||
apiGroups = [ "" ];
|
||||
resources = [ "pods" ];
|
||||
verbs = [ "get" ];
|
||||
}
|
||||
{
|
||||
apiGroups = [ "" ];
|
||||
resources = [ "nodes" ];
|
||||
verbs = [ "list" "watch" ];
|
||||
}
|
||||
{
|
||||
apiGroups = [ "" ];
|
||||
resources = [ "nodes/status" ];
|
||||
verbs = [ "patch" ];
|
||||
}];
|
||||
};
|
||||
|
||||
flannel-crb = {
|
||||
apiVersion = "rbac.authorization.k8s.io/v1beta1";
|
||||
kind = "ClusterRoleBinding";
|
||||
metadata = { name = "flannel"; };
|
||||
roleRef = {
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
kind = "ClusterRole";
|
||||
name = "flannel";
|
||||
};
|
||||
subjects = [{
|
||||
kind = "User";
|
||||
name = "flannel-client";
|
||||
}];
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
}
|
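A rough usage sketch for the flannel integration above; the CIDR is an illustrative value, and clusterCidr is assumed to be the top-level option that flannel.network defaults to:

  {
    services.kubernetes.clusterCidr = "10.1.0.0/16";  # illustrative network
    services.kubernetes.flannel.enable = true;
  }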
358
nixos/modules/services/cluster/kubernetes/kubelet.nix
Normal file
@ -0,0 +1,358 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
top = config.services.kubernetes;
|
||||
cfg = top.kubelet;
|
||||
|
||||
cniConfig =
|
||||
if cfg.cni.config != [] && !(isNull cfg.cni.configDir) then
|
||||
throw "Verbatim CNI-config and CNI configDir cannot both be set."
|
||||
else if !(isNull cfg.cni.configDir) then
|
||||
cfg.cni.configDir
|
||||
else
|
||||
(pkgs.buildEnv {
|
||||
name = "kubernetes-cni-config";
|
||||
paths = imap (i: entry:
|
||||
pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
|
||||
) cfg.cni.config;
|
||||
});
|
||||
|
||||
infraContainer = pkgs.dockerTools.buildImage {
|
||||
name = "pause";
|
||||
tag = "latest";
|
||||
contents = top.package.pause;
|
||||
config.Cmd = "/bin/pause";
|
||||
};
|
||||
|
||||
kubeconfig = top.lib.mkKubeConfig "kubelet" cfg.kubeconfig;
|
||||
|
||||
manifests = pkgs.buildEnv {
|
||||
name = "kubernetes-manifests";
|
||||
paths = mapAttrsToList (name: manifest:
|
||||
pkgs.writeTextDir "${name}.json" (builtins.toJSON manifest)
|
||||
) cfg.manifests;
|
||||
};
|
||||
|
||||
manifestPath = "kubernetes/manifests";
|
||||
|
||||
taintOptions = with lib.types; { name, ... }: {
|
||||
options = {
|
||||
key = mkOption {
|
||||
description = "Key of taint.";
|
||||
default = name;
|
||||
type = str;
|
||||
};
|
||||
value = mkOption {
|
||||
description = "Value of taint.";
|
||||
type = str;
|
||||
};
|
||||
effect = mkOption {
|
||||
description = "Effect of taint.";
|
||||
example = "NoSchedule";
|
||||
type = enum ["NoSchedule" "PreferNoSchedule" "NoExecute"];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
taints = concatMapStringsSep "," (v: "${v.key}=${v.value}:${v.effect}") (mapAttrsToList (n: v: v) cfg.taints);
|
||||
in
|
||||
{
|
||||
###### interface
|
||||
options.services.kubernetes.kubelet = with lib.types; {
|
||||
|
||||
address = mkOption {
|
||||
description = "Kubernetes kubelet info server listening address.";
|
||||
default = "0.0.0.0";
|
||||
type = str;
|
||||
};
|
||||
|
||||
allowPrivileged = mkOption {
|
||||
description = "Whether to allow Kubernetes containers to request privileged mode.";
|
||||
default = false;
|
||||
type = bool;
|
||||
};
|
||||
|
||||
clusterDns = mkOption {
|
||||
description = "Use alternative DNS.";
|
||||
default = "10.1.0.1";
|
||||
type = str;
|
||||
};
|
||||
|
||||
clusterDomain = mkOption {
|
||||
description = "Use alternative domain.";
|
||||
default = config.services.kubernetes.addons.dns.clusterDomain;
|
||||
type = str;
|
||||
};
|
||||
|
||||
clientCaFile = mkOption {
|
||||
description = "Kubernetes apiserver CA file for client authentication.";
|
||||
default = top.caFile;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
cni = {
|
||||
packages = mkOption {
|
||||
description = "List of network plugin packages to install.";
|
||||
type = listOf package;
|
||||
default = [];
|
||||
};
|
||||
|
||||
config = mkOption {
|
||||
description = "Kubernetes CNI configuration.";
|
||||
type = listOf attrs;
|
||||
default = [];
|
||||
example = literalExample ''
|
||||
[{
|
||||
"cniVersion": "0.2.0",
|
||||
"name": "mynet",
|
||||
"type": "bridge",
|
||||
"bridge": "cni0",
|
||||
"isGateway": true,
|
||||
"ipMasq": true,
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"subnet": "10.22.0.0/16",
|
||||
"routes": [
|
||||
{ "dst": "0.0.0.0/0" }
|
||||
]
|
||||
}
|
||||
} {
|
||||
"cniVersion": "0.2.0",
|
||||
"type": "loopback"
|
||||
}]
|
||||
'';
|
||||
};
|
||||
|
||||
configDir = mkOption {
|
||||
description = "Path to Kubernetes CNI configuration directory.";
|
||||
type = nullOr path;
|
||||
default = null;
|
||||
};
|
||||
};
|
||||
|
||||
enable = mkEnableOption "Kubernetes kubelet.";
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes kubelet extra command line options.";
|
||||
default = "";
|
||||
type = str;
|
||||
};
|
||||
|
||||
featureGates = mkOption {
|
||||
description = "List set of feature gates";
|
||||
default = top.featureGates;
|
||||
type = listOf str;
|
||||
};
|
||||
|
||||
healthz = {
|
||||
bind = mkOption {
|
||||
description = "Kubernetes kubelet healthz listening address.";
|
||||
default = "127.0.0.1";
|
||||
type = str;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Kubernetes kubelet healthz port.";
|
||||
default = 10248;
|
||||
type = int;
|
||||
};
|
||||
};
|
||||
|
||||
hostname = mkOption {
|
||||
description = "Kubernetes kubelet hostname override.";
|
||||
default = config.networking.hostName;
|
||||
type = str;
|
||||
};
|
||||
|
||||
kubeconfig = top.lib.mkKubeConfigOptions "Kubelet";
|
||||
|
||||
manifests = mkOption {
|
||||
description = "List of manifests to bootstrap with kubelet (only pods can be created as manifest entry)";
|
||||
type = attrsOf attrs;
|
||||
default = {};
|
||||
};
|
||||
|
||||
networkPlugin = mkOption {
|
||||
description = "Network plugin to use by Kubernetes.";
|
||||
type = nullOr (enum ["cni" "kubenet"]);
|
||||
default = "kubenet";
|
||||
};
|
||||
|
||||
nodeIp = mkOption {
|
||||
description = "IP address of the node. If set, kubelet will use this IP address for the node.";
|
||||
default = null;
|
||||
type = nullOr str;
|
||||
};
|
||||
|
||||
registerNode = mkOption {
|
||||
description = "Whether to auto register kubelet with API server.";
|
||||
default = true;
|
||||
type = bool;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Kubernetes kubelet info server listening port.";
|
||||
default = 10250;
|
||||
type = int;
|
||||
};
|
||||
|
||||
seedDockerImages = mkOption {
|
||||
description = "List of docker images to preload on system";
|
||||
default = [];
|
||||
type = listOf package;
|
||||
};
|
||||
|
||||
taints = mkOption {
|
||||
description = "Node taints (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/).";
|
||||
default = {};
|
||||
type = attrsOf (submodule [ taintOptions ]);
|
||||
};
|
||||
|
||||
tlsCertFile = mkOption {
|
||||
description = "File containing x509 Certificate for HTTPS.";
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
tlsKeyFile = mkOption {
|
||||
description = "File containing x509 private key matching tlsCertFile.";
|
||||
default = null;
|
||||
type = nullOr path;
|
||||
};
|
||||
|
||||
unschedulable = mkOption {
|
||||
description = "Whether to set node taint to unschedulable=true as it is the case of node that has only master role.";
|
||||
default = false;
|
||||
type = bool;
|
||||
};
|
||||
|
||||
verbosity = mkOption {
|
||||
description = ''
|
||||
Optional glog verbosity level for logging statements. See
|
||||
<link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr int;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
###### implementation
|
||||
config = mkMerge [
|
||||
(mkIf cfg.enable {
|
||||
services.kubernetes.kubelet.seedDockerImages = [infraContainer];
|
||||
|
||||
systemd.services.kubelet = {
|
||||
description = "Kubernetes Kubelet Service";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [ "network.target" "docker.service" "kube-apiserver.service" ];
|
||||
path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ top.path;
|
||||
preStart = ''
|
||||
${concatMapStrings (img: ''
|
||||
echo "Seeding docker image: ${img}"
|
||||
docker load <${img}
|
||||
'') cfg.seedDockerImages}
|
||||
|
||||
rm /opt/cni/bin/* || true
|
||||
${concatMapStrings (package: ''
|
||||
echo "Linking cni package: ${package}"
|
||||
ln -fs ${package}/bin/* /opt/cni/bin
|
||||
'') cfg.cni.packages}
|
||||
'';
|
||||
serviceConfig = {
|
||||
Slice = "kubernetes.slice";
|
||||
CPUAccounting = true;
|
||||
MemoryAccounting = true;
|
||||
Restart = "on-failure";
|
||||
RestartSec = "1000ms";
|
||||
ExecStart = ''${top.package}/bin/kubelet \
|
||||
--address=${cfg.address} \
|
||||
--allow-privileged=${boolToString cfg.allowPrivileged} \
|
||||
--authentication-token-webhook \
|
||||
--authentication-token-webhook-cache-ttl="10s" \
|
||||
--authorization-mode=Webhook \
|
||||
${optionalString (cfg.clientCaFile != null)
|
||||
"--client-ca-file=${cfg.clientCaFile}"} \
|
||||
${optionalString (cfg.clusterDns != "")
|
||||
"--cluster-dns=${cfg.clusterDns}"} \
|
||||
${optionalString (cfg.clusterDomain != "")
|
||||
"--cluster-domain=${cfg.clusterDomain}"} \
|
||||
--cni-conf-dir=${cniConfig} \
|
||||
${optionalString (cfg.featureGates != [])
|
||||
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
|
||||
--hairpin-mode=hairpin-veth \
|
||||
--healthz-bind-address=${cfg.healthz.bind} \
|
||||
--healthz-port=${toString cfg.healthz.port} \
|
||||
--hostname-override=${cfg.hostname} \
|
||||
--kubeconfig=${kubeconfig} \
|
||||
${optionalString (cfg.networkPlugin != null)
|
||||
"--network-plugin=${cfg.networkPlugin}"} \
|
||||
${optionalString (cfg.nodeIp != null)
|
||||
"--node-ip=${cfg.nodeIp}"} \
|
||||
--pod-infra-container-image=pause \
|
||||
${optionalString (cfg.manifests != {})
|
||||
"--pod-manifest-path=/etc/${manifestPath}"} \
|
||||
--port=${toString cfg.port} \
|
||||
--register-node=${boolToString cfg.registerNode} \
|
||||
${optionalString (taints != "")
|
||||
"--register-with-taints=${taints}"} \
|
||||
--root-dir=${top.dataDir} \
|
||||
${optionalString (cfg.tlsCertFile != null)
|
||||
"--tls-cert-file=${cfg.tlsCertFile}"} \
|
||||
${optionalString (cfg.tlsKeyFile != null)
|
||||
"--tls-private-key-file=${cfg.tlsKeyFile}"} \
|
||||
${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
|
||||
${cfg.extraOpts}
|
||||
'';
|
||||
WorkingDirectory = top.dataDir;
|
||||
};
|
||||
};
|
||||
|
||||
# Always include CNI plugins
|
||||
services.kubernetes.kubelet.cni.packages = [pkgs.cni-plugins];
|
||||
|
||||
boot.kernelModules = ["br_netfilter"];
|
||||
|
||||
services.kubernetes.kubelet.hostname = with config.networking;
|
||||
mkDefault (hostName + optionalString (!isNull domain) ".${domain}");
|
||||
|
||||
services.kubernetes.pki.certs = with top.lib; {
|
||||
kubelet = mkCert {
|
||||
name = "kubelet";
|
||||
CN = top.kubelet.hostname;
|
||||
action = "systemctl restart kubelet.service";
|
||||
|
||||
};
|
||||
kubeletClient = mkCert {
|
||||
name = "kubelet-client";
|
||||
CN = "system:node:${top.kubelet.hostname}";
|
||||
fields = {
|
||||
O = "system:nodes";
|
||||
};
|
||||
action = "systemctl restart kubelet.service";
|
||||
};
|
||||
};
|
||||
|
||||
services.kubernetes.kubelet.kubeconfig.server = mkDefault top.apiserverAddress;
|
||||
})
|
||||
|
||||
(mkIf (cfg.enable && cfg.manifests != {}) {
|
||||
environment.etc = mapAttrs' (name: manifest:
|
||||
nameValuePair "${manifestPath}/${name}.json" {
|
||||
text = builtins.toJSON manifest;
|
||||
mode = "0755";
|
||||
}
|
||||
) cfg.manifests;
|
||||
})
|
||||
|
||||
(mkIf (cfg.unschedulable && cfg.enable) {
|
||||
services.kubernetes.kubelet.taints.unschedulable = {
|
||||
value = "true";
|
||||
effect = "NoSchedule";
|
||||
};
|
||||
})
|
||||
|
||||
];
|
||||
}
|
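A rough usage sketch for the kubelet options above, showing the taint submodule; all values are illustrative:

  {
    services.kubernetes.kubelet = {
      enable = true;
      unschedulable = false;
      taints.dedicated = {          # key defaults to the attribute name
        value = "infra";
        effect = "NoSchedule";
      };
    };
  }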
388
nixos/modules/services/cluster/kubernetes/pki.nix
Normal file
@ -0,0 +1,388 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
top = config.services.kubernetes;
|
||||
cfg = top.pki;
|
||||
|
||||
csrCA = pkgs.writeText "kube-pki-cacert-csr.json" (builtins.toJSON {
|
||||
key = {
|
||||
algo = "rsa";
|
||||
size = 2048;
|
||||
};
|
||||
names = singleton cfg.caSpec;
|
||||
});
|
||||
|
||||
csrCfssl = pkgs.writeText "kube-pki-cfssl-csr.json" (builtins.toJSON {
|
||||
key = {
|
||||
algo = "rsa";
|
||||
size = 2048;
|
||||
};
|
||||
CN = top.masterAddress;
|
||||
});
|
||||
|
||||
cfsslAPITokenBaseName = "apitoken.secret";
|
||||
cfsslAPITokenPath = "${config.services.cfssl.dataDir}/${cfsslAPITokenBaseName}";
|
||||
certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
|
||||
cfsslAPITokenLength = 32;
|
||||
|
||||
clusterAdminKubeconfig = with cfg.certs.clusterAdmin;
|
||||
top.lib.mkKubeConfig "cluster-admin" {
|
||||
server = top.apiserverAddress;
|
||||
certFile = cert;
|
||||
keyFile = key;
|
||||
};
|
||||
|
||||
remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
|
||||
in
|
||||
{
|
||||
###### interface
|
||||
options.services.kubernetes.pki = with lib.types; {
|
||||
|
||||
enable = mkEnableOption "Whether to enable easyCert issuer service.";
|
||||
|
||||
certs = mkOption {
|
||||
description = "List of certificate specs to feed to cert generator.";
|
||||
default = {};
|
||||
type = attrs;
|
||||
};
|
||||
|
||||
genCfsslCACert = mkOption {
|
||||
description = ''
|
||||
Whether to automatically generate cfssl CA certificate and key,
|
||||
if they don't exist.
|
||||
'';
|
||||
default = true;
|
||||
type = bool;
|
||||
};
|
||||
|
||||
genCfsslAPICerts = mkOption {
|
||||
description = ''
|
||||
Whether to automatically generate cfssl API webserver TLS cert and key,
|
||||
if they don't exist.
|
||||
'';
|
||||
default = true;
|
||||
type = bool;
|
||||
};
|
||||
|
||||
genCfsslAPIToken = mkOption {
|
||||
description = ''
|
||||
Whether to automatically generate cfssl API-token secret,
|
||||
if it doesn't exist.
|
||||
'';
|
||||
default = true;
|
||||
type = bool;
|
||||
};
|
||||
|
||||
pkiTrustOnBootstrap = mkOption {
|
||||
description = "Whether to always trust remote cfssl server upon initial PKI bootstrap.";
|
||||
default = true;
|
||||
type = bool;
|
||||
};
|
||||
|
||||
caCertPathPrefix = mkOption {
|
||||
description = ''
|
||||
Path prefix for the CA certificate to be used for cfssl signing.
|
||||
Suffixes ".pem" and "-key.pem" will be automatically appended for
|
||||
the public and private keys respectively.
|
||||
'';
|
||||
default = "${config.services.cfssl.dataDir}/ca";
|
||||
type = str;
|
||||
};
|
||||
|
||||
caSpec = mkOption {
|
||||
description = "Certificate specification for the auto-generated CAcert.";
|
||||
default = {
|
||||
CN = "kubernetes-cluster-ca";
|
||||
O = "NixOS";
|
||||
OU = "services.kubernetes.pki.caSpec";
|
||||
L = "auto-generated";
|
||||
};
|
||||
type = attrs;
|
||||
};
|
||||
|
||||
etcClusterAdminKubeconfig = mkOption {
|
||||
description = ''
|
||||
Symlink a kubeconfig with cluster-admin privileges to environment path
|
||||
(/etc/<path>).
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr str;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
###### implementation
|
||||
config = mkIf cfg.enable
|
||||
(let
|
||||
cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
|
||||
cfsslCert = "${cfsslCertPathPrefix}.pem";
|
||||
cfsslKey = "${cfsslCertPathPrefix}-key.pem";
|
||||
in
|
||||
{
|
||||
|
||||
services.cfssl = mkIf (top.apiserver.enable) {
|
||||
enable = true;
|
||||
address = "0.0.0.0";
|
||||
tlsCert = cfsslCert;
|
||||
tlsKey = cfsslKey;
|
||||
configFile = toString (pkgs.writeText "cfssl-config.json" (builtins.toJSON {
|
||||
signing = {
|
||||
profiles = {
|
||||
default = {
|
||||
usages = ["digital signature"];
|
||||
auth_key = "default";
|
||||
expiry = "720h";
|
||||
};
|
||||
};
|
||||
};
|
||||
auth_keys = {
|
||||
default = {
|
||||
type = "standard";
|
||||
key = "file:${cfsslAPITokenPath}";
|
||||
};
|
||||
};
|
||||
}));
|
||||
};
|
||||
|
||||
systemd.services.cfssl.preStart = with pkgs; with config.services.cfssl; mkIf (top.apiserver.enable)
|
||||
(concatStringsSep "\n" [
|
||||
"set -e"
|
||||
(optionalString cfg.genCfsslCACert ''
|
||||
if [ ! -f "${cfg.caCertPathPrefix}.pem" ]; then
|
||||
${cfssl}/bin/cfssl genkey -initca ${csrCA} | \
|
||||
${cfssl}/bin/cfssljson -bare ${cfg.caCertPathPrefix}
|
||||
fi
|
||||
'')
|
||||
(optionalString cfg.genCfsslAPICerts ''
|
||||
if [ ! -f "${dataDir}/cfssl.pem" ]; then
|
||||
${cfssl}/bin/cfssl gencert -ca "${cfg.caCertPathPrefix}.pem" -ca-key "${cfg.caCertPathPrefix}-key.pem" ${csrCfssl} | \
|
||||
${cfssl}/bin/cfssljson -bare ${cfsslCertPathPrefix}
|
||||
fi
|
||||
'')
|
||||
(optionalString cfg.genCfsslAPIToken ''
|
||||
if [ ! -f "${cfsslAPITokenPath}" ]; then
|
||||
head -c ${toString (cfsslAPITokenLength / 2)} /dev/urandom | od -An -t x | tr -d ' ' >"${cfsslAPITokenPath}"
|
||||
fi
|
||||
chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
|
||||
'')]);
|
||||
|
||||
systemd.services.kube-certmgr-bootstrap = {
|
||||
description = "Kubernetes certmgr bootstrapper";
|
||||
wantedBy = [ "certmgr.service" ];
|
||||
after = [ "cfssl.target" ];
|
||||
script = concatStringsSep "\n" [''
|
||||
set -e
|
||||
|
||||
# If there's a cfssl (cert issuer) running locally, then don't rely on user to
|
||||
# manually paste it in place. Just symlink.
|
||||
# Otherwise, create the target file, ready for the user to insert the token.
|
||||
|
||||
if [ -f "${cfsslAPITokenPath}" ]; then
|
||||
ln -fs "${cfsslAPITokenPath}" "${certmgrAPITokenPath}"
|
||||
else
|
||||
touch "${certmgrAPITokenPath}" && chmod 600 "${certmgrAPITokenPath}"
|
||||
fi
|
||||
''
|
||||
(optionalString (cfg.pkiTrustOnBootstrap) ''
|
||||
if [ ! -f "${top.caFile}" ] || [ $(cat "${top.caFile}" | wc -c) -lt 1 ]; then
|
||||
${pkgs.curl}/bin/curl --fail-early -f -kd '{}' ${remote}/api/v1/cfssl/info | \
|
||||
${pkgs.cfssl}/bin/cfssljson -stdout >${top.caFile}
|
||||
fi
|
||||
'')
|
||||
];
|
||||
serviceConfig = {
|
||||
RestartSec = "10s";
|
||||
Restart = "on-failure";
|
||||
};
|
||||
};
|
||||
|
||||
services.certmgr = {
|
||||
enable = true;
|
||||
package = pkgs.certmgr-selfsigned;
|
||||
svcManager = "command";
|
||||
specs =
|
||||
let
|
||||
mkSpec = _: cert: {
|
||||
inherit (cert) action;
|
||||
authority = {
|
||||
inherit remote;
|
||||
file.path = cert.caCert;
|
||||
root_ca = cert.caCert;
|
||||
profile = "default";
|
||||
auth_key_file = certmgrAPITokenPath;
|
||||
};
|
||||
certificate = {
|
||||
path = cert.cert;
|
||||
};
|
||||
private_key = cert.privateKeyOptions;
|
||||
request = {
|
||||
inherit (cert) CN hosts;
|
||||
key = {
|
||||
algo = "rsa";
|
||||
size = 2048;
|
||||
};
|
||||
names = [ cert.fields ];
|
||||
};
|
||||
};
|
||||
in
|
||||
mapAttrs mkSpec cfg.certs;
|
||||
};
|
||||
|
||||
#TODO: Get rid of kube-addon-manager in the future for the following reasons
|
||||
# - it is basically just a shell script wrapped around kubectl
|
||||
# - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
|
||||
# - it is designed to be used with k8s system components only
|
||||
# - it would be better with a more Nix-oriented way of managing addons
|
||||
systemd.services.kube-addon-manager = mkIf top.addonManager.enable (mkMerge [{
|
||||
environment.KUBECONFIG = with cfg.certs.addonManager;
|
||||
top.lib.mkKubeConfig "addon-manager" {
|
||||
server = top.apiserverAddress;
|
||||
certFile = cert;
|
||||
keyFile = key;
|
||||
};
|
||||
}
|
||||
|
||||
(optionalAttrs (top.addonManager.bootstrapAddons != {}) {
|
||||
serviceConfig.PermissionsStartOnly = true;
|
||||
preStart = with pkgs;
|
||||
let
|
||||
files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
|
||||
top.addonManager.bootstrapAddons;
|
||||
in
|
||||
''
|
||||
export KUBECONFIG=${clusterAdminKubeconfig}
|
||||
${kubectl}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
|
||||
'';
|
||||
})]);
|
||||
|
||||
environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig)
|
||||
clusterAdminKubeconfig;
|
||||
|
||||
environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [
|
||||
(pkgs.writeScriptBin "nixos-kubernetes-node-join" ''
|
||||
set -e
|
||||
exec 1>&2
|
||||
|
||||
if [ $# -gt 0 ]; then
|
||||
echo "Usage: $(basename $0)"
|
||||
echo ""
|
||||
echo "No args. Apitoken must be provided on stdin."
|
||||
echo "To get the apitoken, execute: 'sudo cat ${certmgrAPITokenPath}' on the master node."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ $(id -u) != 0 ]; then
|
||||
echo "Run as root please."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
read -r token
|
||||
if [ ''${#token} != ${toString cfsslAPITokenLength} ]; then
|
||||
echo "Token must be of length ${toString cfsslAPITokenLength}."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo $token > ${certmgrAPITokenPath}
|
||||
chmod 600 ${certmgrAPITokenPath}
|
||||
|
||||
echo "Restarting certmgr..." >&1
|
||||
systemctl restart certmgr
|
||||
|
||||
echo "Waiting for certs to appear..." >&1
|
||||
|
||||
${optionalString top.kubelet.enable ''
|
||||
while [ ! -f ${cfg.certs.kubelet.cert} ]; do sleep 1; done
|
||||
echo "Restarting kubelet..." >&1
|
||||
systemctl restart kubelet
|
||||
''}
|
||||
|
||||
${optionalString top.proxy.enable ''
|
||||
while [ ! -f ${cfg.certs.kubeProxyClient.cert} ]; do sleep 1; done
|
||||
echo "Restarting kube-proxy..." >&1
|
||||
systemctl restart kube-proxy
|
||||
''}
|
||||
|
||||
${optionalString top.flannel.enable ''
|
||||
while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done
|
||||
echo "Restarting flannel..." >&1
|
||||
systemctl restart flannel
|
||||
''}
|
||||
|
||||
echo "Node joined succesfully"
|
||||
'')];
|
||||
|
||||
# isolate etcd on loopback at the master node
|
||||
# easyCerts doesn't support multi-master clusters at the moment anyway.
|
||||
services.etcd = with cfg.certs.etcd; {
|
||||
listenClientUrls = ["https://127.0.0.1:2379"];
|
||||
listenPeerUrls = ["https://127.0.0.1:2380"];
|
||||
advertiseClientUrls = ["https://etcd.local:2379"];
|
||||
initialCluster = ["${top.masterAddress}=https://etcd.local:2380"];
|
||||
initialAdvertisePeerUrls = ["https://etcd.local:2380"];
|
||||
certFile = mkDefault cert;
|
||||
keyFile = mkDefault key;
|
||||
trustedCaFile = mkDefault caCert;
|
||||
};
|
||||
networking.extraHosts = mkIf (config.services.etcd.enable) ''
|
||||
127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
|
||||
'';
|
||||
|
||||
services.flannel = with cfg.certs.flannelClient; {
|
||||
kubeconfig = top.lib.mkKubeConfig "flannel" {
|
||||
server = top.apiserverAddress;
|
||||
certFile = cert;
|
||||
keyFile = key;
|
||||
};
|
||||
};
|
||||
|
||||
services.kubernetes = {
|
||||
|
||||
apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
|
||||
etcd = with cfg.certs.apiserverEtcdClient; {
|
||||
servers = ["https://etcd.local:2379"];
|
||||
certFile = mkDefault cert;
|
||||
keyFile = mkDefault key;
|
||||
caFile = mkDefault caCert;
|
||||
};
|
||||
clientCaFile = mkDefault caCert;
|
||||
tlsCertFile = mkDefault cert;
|
||||
tlsKeyFile = mkDefault key;
|
||||
serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.cert;
|
||||
kubeletClientCaFile = mkDefault caCert;
|
||||
kubeletClientCertFile = mkDefault cfg.certs.apiserverKubeletClient.cert;
|
||||
kubeletClientKeyFile = mkDefault cfg.certs.apiserverKubeletClient.key;
|
||||
});
|
||||
controllerManager = mkIf top.controllerManager.enable {
|
||||
serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
|
||||
rootCaFile = cfg.certs.controllerManagerClient.caCert;
|
||||
kubeconfig = with cfg.certs.controllerManagerClient; {
|
||||
certFile = mkDefault cert;
|
||||
keyFile = mkDefault key;
|
||||
};
|
||||
};
|
||||
scheduler = mkIf top.scheduler.enable {
|
||||
kubeconfig = with cfg.certs.schedulerClient; {
|
||||
certFile = mkDefault cert;
|
||||
keyFile = mkDefault key;
|
||||
};
|
||||
};
|
||||
kubelet = mkIf top.kubelet.enable {
|
||||
clientCaFile = mkDefault cfg.certs.kubelet.caCert;
|
||||
tlsCertFile = mkDefault cfg.certs.kubelet.cert;
|
||||
tlsKeyFile = mkDefault cfg.certs.kubelet.key;
|
||||
kubeconfig = with cfg.certs.kubeletClient; {
|
||||
certFile = mkDefault cert;
|
||||
keyFile = mkDefault key;
|
||||
};
|
||||
};
|
||||
proxy = mkIf top.proxy.enable {
|
||||
kubeconfig = with cfg.certs.kubeProxyClient; {
|
||||
certFile = mkDefault cert;
|
||||
keyFile = mkDefault key;
|
||||
};
|
||||
};
|
||||
};
|
||||
});
|
||||
}
|
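A rough usage sketch for the easyCert PKI module above; the kubeconfig path is illustrative and ends up under /etc/:

  {
    services.kubernetes.pki = {
      enable = true;
      etcClusterAdminKubeconfig = "kubernetes/cluster-admin.kubeconfig";
    };
  }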
80
nixos/modules/services/cluster/kubernetes/proxy.nix
Normal file
@ -0,0 +1,80 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
top = config.services.kubernetes;
|
||||
cfg = top.proxy;
|
||||
in
|
||||
{
|
||||
|
||||
###### interface
|
||||
options.services.kubernetes.proxy = with lib.types; {
|
||||
|
||||
bindAddress = mkOption {
|
||||
description = "Kubernetes proxy listening address.";
|
||||
default = "0.0.0.0";
|
||||
type = str;
|
||||
};
|
||||
|
||||
enable = mkEnableOption "Whether to enable Kubernetes proxy.";
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes proxy extra command line options.";
|
||||
default = "";
|
||||
type = str;
|
||||
};
|
||||
|
||||
featureGates = mkOption {
|
||||
description = "List set of feature gates";
|
||||
default = top.featureGates;
|
||||
type = listOf str;
|
||||
};
|
||||
|
||||
kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes proxy";
|
||||
|
||||
verbosity = mkOption {
|
||||
description = ''
|
||||
Optional glog verbosity level for logging statements. See
|
||||
<link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr int;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
###### implementation
|
||||
config = mkIf cfg.enable {
|
||||
systemd.services.kube-proxy = {
|
||||
description = "Kubernetes Proxy Service";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [ "kube-apiserver.service" ];
|
||||
path = with pkgs; [ iptables conntrack_tools ];
|
||||
serviceConfig = {
|
||||
Slice = "kubernetes.slice";
|
||||
ExecStart = ''${top.package}/bin/kube-proxy \
|
||||
--bind-address=${cfg.bindAddress} \
|
||||
${optionalString (top.clusterCidr!=null)
|
||||
"--cluster-cidr=${top.clusterCidr}"} \
|
||||
${optionalString (cfg.featureGates != [])
|
||||
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
|
||||
--kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \
|
||||
${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
|
||||
${cfg.extraOpts}
|
||||
'';
|
||||
WorkingDirectory = top.dataDir;
|
||||
};
|
||||
};
|
||||
|
||||
services.kubernetes.pki.certs = {
|
||||
kubeProxyClient = top.lib.mkCert {
|
||||
name = "kube-proxy-client";
|
||||
CN = "system:kube-proxy";
|
||||
action = "systemctl restart kube-proxy.service";
|
||||
};
|
||||
};
|
||||
|
||||
services.kubernetes.proxy.kubeconfig.server = mkDefault top.apiserverAddress;
|
||||
};
|
||||
}
|
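A rough usage sketch for the proxy module above, with illustrative values:

  {
    services.kubernetes.proxy = {
      enable = true;
      bindAddress = "0.0.0.0";
      verbosity = 2;
    };
  }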
92
nixos/modules/services/cluster/kubernetes/scheduler.nix
Normal file
@ -0,0 +1,92 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
top = config.services.kubernetes;
|
||||
cfg = top.scheduler;
|
||||
in
|
||||
{
|
||||
###### interface
|
||||
options.services.kubernetes.scheduler = with lib.types; {
|
||||
|
||||
address = mkOption {
|
||||
description = "Kubernetes scheduler listening address.";
|
||||
default = "127.0.0.1";
|
||||
type = str;
|
||||
};
|
||||
|
||||
enable = mkEnableOption "Whether to enable Kubernetes scheduler.";
|
||||
|
||||
extraOpts = mkOption {
|
||||
description = "Kubernetes scheduler extra command line options.";
|
||||
default = "";
|
||||
type = str;
|
||||
};
|
||||
|
||||
featureGates = mkOption {
|
||||
description = "List set of feature gates";
|
||||
default = top.featureGates;
|
||||
type = listOf str;
|
||||
};
|
||||
|
||||
kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes scheduler";
|
||||
|
||||
leaderElect = mkOption {
|
||||
description = "Whether to start leader election before executing main loop.";
|
||||
type = bool;
|
||||
default = true;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Kubernetes scheduler listening port.";
|
||||
default = 10251;
|
||||
type = int;
|
||||
};
|
||||
|
||||
verbosity = mkOption {
|
||||
description = ''
|
||||
Optional glog verbosity level for logging statements. See
|
||||
<link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
|
||||
'';
|
||||
default = null;
|
||||
type = nullOr int;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
###### implementation
|
||||
config = mkIf cfg.enable {
|
||||
systemd.services.kube-scheduler = {
|
||||
description = "Kubernetes Scheduler Service";
|
||||
wantedBy = [ "kubernetes.target" ];
|
||||
after = [ "kube-apiserver.service" ];
|
||||
serviceConfig = {
|
||||
Slice = "kubernetes.slice";
|
||||
ExecStart = ''${top.package}/bin/kube-scheduler \
|
||||
--address=${cfg.address} \
|
||||
${optionalString (cfg.featureGates != [])
|
||||
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
|
||||
--kubeconfig=${top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig} \
|
||||
--leader-elect=${boolToString cfg.leaderElect} \
|
||||
--port=${toString cfg.port} \
|
||||
${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
|
||||
${cfg.extraOpts}
|
||||
'';
|
||||
WorkingDirectory = top.dataDir;
|
||||
User = "kubernetes";
|
||||
Group = "kubernetes";
|
||||
};
|
||||
};
|
||||
|
||||
services.kubernetes.pki.certs = {
|
||||
schedulerClient = top.lib.mkCert {
|
||||
name = "kube-scheduler-client";
|
||||
CN = "system:kube-scheduler";
|
||||
action = "systemctl restart kube-scheduler.service";
|
||||
};
|
||||
};
|
||||
|
||||
services.kubernetes.scheduler.kubeconfig.server = mkDefault top.apiserverAddress;
|
||||
};
|
||||
}
|
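A rough usage sketch for the scheduler module above, with illustrative values:

  {
    services.kubernetes.scheduler = {
      enable = true;
      address = "127.0.0.1";
      leaderElect = true;
    };
  }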
34
nixos/modules/services/hardware/bolt.nix
Normal file
@ -0,0 +1,34 @@
|
||||
# Thunderbolt 3 device manager
|
||||
|
||||
{ config, lib, pkgs, ...}:
|
||||
|
||||
with lib;
|
||||
|
||||
{
|
||||
options = {
|
||||
|
||||
services.hardware.bolt = {
|
||||
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to enable Bolt, a userspace daemon to enable
|
||||
security levels for Thunderbolt 3 on GNU/Linux.
|
||||
|
||||
Bolt is used by GNOME 3 to handle Thunderbolt settings.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = mkIf config.services.hardware.bolt.enable {
|
||||
|
||||
environment.systemPackages = [ pkgs.bolt ];
|
||||
services.udev.packages = [ pkgs.bolt ];
|
||||
systemd.packages = [ pkgs.bolt ];
|
||||
|
||||
};
|
||||
}
|
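A rough usage sketch for the Bolt module above:

  {
    services.hardware.bolt.enable = true;
  }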
@ -22,7 +22,8 @@ let
|
||||
password = cfg.databasePassword;
|
||||
username = cfg.databaseUsername;
|
||||
encoding = "utf8";
|
||||
};
|
||||
pool = cfg.databasePool;
|
||||
} // cfg.extraDatabaseConfig;
|
||||
};
|
||||
|
||||
gitalyToml = pkgs.writeText "gitaly.toml" ''
|
||||
@ -253,6 +254,18 @@ in {
|
||||
description = "Gitlab database user.";
|
||||
};
|
||||
|
||||
databasePool = mkOption {
|
||||
type = types.int;
|
||||
default = 5;
|
||||
description = "Database connection pool size.";
|
||||
};
|
||||
|
||||
extraDatabaseConfig = mkOption {
|
||||
type = types.attrs;
|
||||
default = {};
|
||||
description = "Extra configuration in config/database.yml.";
|
||||
};
|
||||
|
||||
host = mkOption {
|
||||
type = types.str;
|
||||
default = config.networking.hostName;
|
||||
|
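A rough usage sketch for the two new GitLab database options above; the extra key shown is only an assumption about what one might pass through into config/database.yml:

  {
    services.gitlab.databasePool = 10;
    services.gitlab.extraDatabaseConfig = {
      prepared_statements = false;  # illustrative extra database.yml entry
    };
  }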
87
nixos/modules/services/misc/headphones.nix
Normal file
@ -0,0 +1,87 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
name = "headphones";
|
||||
|
||||
cfg = config.services.headphones;
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
|
||||
###### interface
|
||||
|
||||
options = {
|
||||
services.headphones = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Whether to enable the headphones server.";
|
||||
};
|
||||
dataDir = mkOption {
|
||||
type = types.path;
|
||||
default = "/var/lib/${name}";
|
||||
description = "Path where to store data files.";
|
||||
};
|
||||
configFile = mkOption {
|
||||
type = types.path;
|
||||
default = "${cfg.dataDir}/config.ini";
|
||||
description = "Path to config file.";
|
||||
};
|
||||
host = mkOption {
|
||||
type = types.str;
|
||||
default = "localhost";
|
||||
description = "Host to listen on.";
|
||||
};
|
||||
port = mkOption {
|
||||
type = types.ints.u16;
|
||||
default = 8181;
|
||||
description = "Port to bind to.";
|
||||
};
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = name;
|
||||
description = "User to run the service as";
|
||||
};
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
default = name;
|
||||
description = "Group to run the service as";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
users.users = optionalAttrs (cfg.user == name) (singleton {
|
||||
name = name;
|
||||
uid = config.ids.uids.headphones;
|
||||
group = cfg.group;
|
||||
description = "headphones user";
|
||||
home = cfg.dataDir;
|
||||
createHome = true;
|
||||
});
|
||||
|
||||
users.groups = optionalAttrs (cfg.group == name) (singleton {
|
||||
name = name;
|
||||
gid = config.ids.gids.headphones;
|
||||
});
|
||||
|
||||
systemd.services.headphones = {
|
||||
description = "Headphones Server";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
User = cfg.user;
|
||||
Group = cfg.group;
|
||||
ExecStart = "${pkgs.headphones}/bin/headphones --datadir ${cfg.dataDir} --config ${cfg.configFile} --host ${cfg.host} --port ${toString cfg.port}";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
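A rough usage sketch for the headphones module above, with illustrative values:

  {
    services.headphones = {
      enable = true;
      host = "0.0.0.0";
      port = 8181;
    };
  }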
@ -4,11 +4,36 @@ with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.jackett;
|
||||
|
||||
in
|
||||
{
|
||||
options = {
|
||||
services.jackett = {
|
||||
enable = mkEnableOption "Jackett";
|
||||
|
||||
dataDir = mkOption {
|
||||
type = types.str;
|
||||
default = "/var/lib/jackett/.config/Jackett";
|
||||
description = "The directory where Jackett stores its data files.";
|
||||
};
|
||||
|
||||
openFirewall = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Open ports in the firewall for the Jackett web interface.";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "jackett";
|
||||
description = "User account under which Jackett runs.";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
default = "jackett";
|
||||
description = "Group under which Jackett runs.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@ -18,30 +43,38 @@ in
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
preStart = ''
|
||||
test -d /var/lib/jackett/ || {
|
||||
echo "Creating jackett data directory in /var/lib/jackett/"
|
||||
mkdir -p /var/lib/jackett/
|
||||
test -d ${cfg.dataDir} || {
|
||||
echo "Creating jackett data directory in ${cfg.dataDir}"
|
||||
mkdir -p ${cfg.dataDir}
|
||||
}
|
||||
chown -R jackett:jackett /var/lib/jackett/
|
||||
chmod 0700 /var/lib/jackett/
|
||||
chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
|
||||
chmod 0700 ${cfg.dataDir}
|
||||
'';
|
||||
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = "jackett";
|
||||
Group = "jackett";
|
||||
User = cfg.user;
|
||||
Group = cfg.group;
|
||||
PermissionsStartOnly = "true";
|
||||
ExecStart = "${pkgs.jackett}/bin/Jackett";
|
||||
ExecStart = "${pkgs.jackett}/bin/Jackett --NoUpdates --DataFolder '${cfg.dataDir}'";
|
||||
Restart = "on-failure";
|
||||
};
|
||||
};
|
||||
|
||||
users.users.jackett = {
|
||||
uid = config.ids.uids.jackett;
|
||||
home = "/var/lib/jackett";
|
||||
group = "jackett";
|
||||
networking.firewall = mkIf cfg.openFirewall {
|
||||
allowedTCPPorts = [ 9117 ];
|
||||
};
|
||||
users.groups.jackett.gid = config.ids.gids.jackett;
|
||||
|
||||
users.users = mkIf (cfg.user == "jackett") {
|
||||
jackett = {
|
||||
group = cfg.group;
|
||||
home = cfg.dataDir;
|
||||
uid = config.ids.uids.jackett;
|
||||
};
|
||||
};
|
||||
|
||||
users.groups = mkIf (cfg.group == "jackett") {
|
||||
jackett.gid = config.ids.gids.jackett;
|
||||
};
|
||||
};
|
||||
}
|
||||
|
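A rough usage sketch for the reworked Jackett module above (openFirewall opens TCP 9117, per the diff):

  {
    services.jackett = {
      enable = true;
      openFirewall = true;
      dataDir = "/var/lib/jackett/.config/Jackett";
    };
  }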
@ -4,11 +4,36 @@ with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.radarr;
|
||||
|
||||
in
|
||||
{
|
||||
options = {
|
||||
services.radarr = {
|
||||
enable = mkEnableOption "Radarr";
|
||||
|
||||
dataDir = mkOption {
|
||||
type = types.str;
|
||||
default = "/var/lib/radarr/.config/Radarr";
|
||||
description = "The directory where Radarr stores its data files.";
|
||||
};
|
||||
|
||||
openFirewall = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Open ports in the firewall for the Radarr web interface.";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "radarr";
|
||||
description = "User account under which Radarr runs.";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
default = "radarr";
|
||||
description = "Group under which Radarr runs.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@ -18,30 +43,38 @@ in
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
preStart = ''
|
||||
test -d /var/lib/radarr/ || {
|
||||
echo "Creating radarr data directory in /var/lib/radarr/"
|
||||
mkdir -p /var/lib/radarr/
|
||||
test -d ${cfg.dataDir} || {
|
||||
echo "Creating radarr data directory in ${cfg.dataDir}"
|
||||
mkdir -p ${cfg.dataDir}
|
||||
}
|
||||
chown -R radarr:radarr /var/lib/radarr/
|
||||
chmod 0700 /var/lib/radarr/
|
||||
chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
|
||||
chmod 0700 ${cfg.dataDir}
|
||||
'';
|
||||
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = "radarr";
|
||||
Group = "radarr";
|
||||
User = cfg.user;
|
||||
Group = cfg.group;
|
||||
PermissionsStartOnly = "true";
|
||||
ExecStart = "${pkgs.radarr}/bin/Radarr";
|
||||
ExecStart = "${pkgs.radarr}/bin/Radarr -nobrowser -data='${cfg.dataDir}'";
|
||||
Restart = "on-failure";
|
||||
};
|
||||
};
|
||||
|
||||
users.users.radarr = {
|
||||
uid = config.ids.uids.radarr;
|
||||
home = "/var/lib/radarr";
|
||||
group = "radarr";
|
||||
networking.firewall = mkIf cfg.openFirewall {
|
||||
allowedTCPPorts = [ 7878 ];
|
||||
};
|
||||
users.groups.radarr.gid = config.ids.gids.radarr;
|
||||
|
||||
users.users = mkIf (cfg.user == "radarr") {
|
||||
radarr = {
|
||||
group = cfg.group;
|
||||
home = cfg.dataDir;
|
||||
uid = config.ids.uids.radarr;
|
||||
};
|
||||
};
|
||||
|
||||
users.groups = mkIf (cfg.group == "radarr") {
|
||||
radarr.gid = config.ids.gids.radarr;
|
||||
};
|
||||
};
|
||||
}
|
||||
|
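A rough usage sketch for the reworked Radarr module above; the shared media user and group names are illustrative assumptions:

  {
    services.radarr = {
      enable = true;
      openFirewall = true;  # opens TCP 7878, per the diff
      user = "media";       # illustrative
      group = "media";      # illustrative
    };
  }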
@ -205,15 +205,13 @@ in {
|
||||
|
||||
mysql = lib.mkIf cfg.database.createLocally {
|
||||
ensureDatabases = [ cfg.database.name ];
|
||||
ensureUsers = {
|
||||
ensureUsers = [{
|
||||
name = cfg.database.username;
|
||||
ensurePermissions = [
|
||||
{ "${cfg.database.name}.*" = "ALL PRIVILEGES"; }
|
||||
];
|
||||
ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
|
||||
initialDatabases = [
|
||||
{ inherit (cfg.database) name; schema = "${pkg}/share/zoneminder/db/zm_create.sql"; }
|
||||
];
|
||||
};
|
||||
}];
|
||||
};
|
||||
|
||||
nginx = lib.mkIf useNginx {
|
||||
|
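The hunk above switches ensureUsers from an attribute set to the list form expected by the MySQL module; in isolation that form looks roughly like this (names are placeholders):

  services.mysql.ensureUsers = [{
    name = "someuser";
    ensurePermissions = { "somedb.*" = "ALL PRIVILEGES"; };
  }];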
@ -153,7 +153,6 @@ in
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.diod}/sbin/diod -f -c ${diodConfig}";
|
||||
CapabilityBoundingSet = "cap_net_bind_service+=ep";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@ -73,11 +73,35 @@ in {
|
||||
};
|
||||
};
|
||||
|
||||
kubeconfig = mkOption {
|
||||
description = ''
|
||||
Path to kubeconfig to use for storing flannel config using the
|
||||
Kubernetes API
|
||||
'';
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
};
|
||||
|
||||
network = mkOption {
|
||||
description = " IPv4 network in CIDR format to use for the entire flannel network.";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
nodeName = mkOption {
|
||||
description = ''
|
||||
Needed when running with Kubernetes as the storage backend, as this cannot be auto-detected.
|
||||
'';
|
||||
type = types.nullOr types.str;
|
||||
default = with config.networking; (hostName + optionalString (!isNull domain) ".${domain}");
|
||||
example = "node1.example.com";
|
||||
};
|
||||
|
||||
storageBackend = mkOption {
|
||||
description = "Determines where flannel stores its configuration at runtime";
|
||||
type = types.enum ["etcd" "kubernetes"];
|
||||
default = "etcd";
|
||||
};
|
||||
|
||||
subnetLen = mkOption {
|
||||
description = ''
|
||||
The size of the subnet allocated to each host. Defaults to 24 (i.e. /24)
|
||||
@ -122,17 +146,25 @@ in {
|
||||
after = [ "network.target" ];
|
||||
environment = {
|
||||
FLANNELD_PUBLIC_IP = cfg.publicIp;
|
||||
FLANNELD_IFACE = cfg.iface;
|
||||
} // optionalAttrs (cfg.storageBackend == "etcd") {
|
||||
FLANNELD_ETCD_ENDPOINTS = concatStringsSep "," cfg.etcd.endpoints;
|
||||
FLANNELD_ETCD_KEYFILE = cfg.etcd.keyFile;
|
||||
FLANNELD_ETCD_CERTFILE = cfg.etcd.certFile;
|
||||
FLANNELD_ETCD_CAFILE = cfg.etcd.caFile;
|
||||
FLANNELD_IFACE = cfg.iface;
|
||||
ETCDCTL_CERT_FILE = cfg.etcd.certFile;
|
||||
ETCDCTL_KEY_FILE = cfg.etcd.keyFile;
|
||||
ETCDCTL_CA_FILE = cfg.etcd.caFile;
|
||||
ETCDCTL_PEERS = concatStringsSep "," cfg.etcd.endpoints;
|
||||
} // optionalAttrs (cfg.storageBackend == "kubernetes") {
|
||||
FLANNELD_KUBE_SUBNET_MGR = "true";
|
||||
FLANNELD_KUBECONFIG_FILE = cfg.kubeconfig;
|
||||
NODE_NAME = cfg.nodeName;
|
||||
};
|
||||
preStart = ''
|
||||
mkdir -p /run/flannel
|
||||
touch /run/flannel/docker
|
||||
'' + optionalString (cfg.storageBackend == "etcd") ''
|
||||
echo "setting network configuration"
|
||||
until ${pkgs.etcdctl.bin}/bin/etcdctl set /coreos.com/network/config '${builtins.toJSON networkConfig}'
|
||||
do
|
||||
@ -140,15 +172,19 @@ in {
|
||||
sleep 1
|
||||
done
|
||||
'';
|
||||
postStart = ''
|
||||
while [ ! -f /run/flannel/subnet.env ]
|
||||
do
|
||||
sleep 1
|
||||
done
|
||||
'';
|
||||
serviceConfig.ExecStart = "${cfg.package}/bin/flannel";
|
||||
serviceConfig = {
|
||||
ExecStart = "${cfg.package}/bin/flannel";
|
||||
Restart = "always";
|
||||
RestartSec = "10s";
|
||||
};
|
||||
};
|
||||
|
||||
services.etcd.enable = mkDefault (cfg.etcd.endpoints == ["http://127.0.0.1:2379"]);
|
||||
services.etcd.enable = mkDefault (cfg.storageBackend == "etcd" && cfg.etcd.endpoints == ["http://127.0.0.1:2379"]);
|
||||
|
||||
# for some reason, flannel doesn't let you configure this path
|
||||
# see: https://github.com/coreos/flannel/blob/master/Documentation/configuration.md#configuration
|
||||
environment.etc."kube-flannel/net-conf.json" = mkIf (cfg.storageBackend == "kubernetes") {
|
||||
source = pkgs.writeText "net-conf.json" (builtins.toJSON networkConfig);
|
||||
};
|
||||
};
|
||||
}
|
||||
|
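A rough usage sketch of the new storageBackend option above; the kubeconfig path and network are illustrative values, and enable is assumed to be declared elsewhere in the module:

  {
    services.flannel = {
      enable = true;
      storageBackend = "kubernetes";
      kubeconfig = "/etc/kubernetes/flannel.kubeconfig";
      nodeName = "node1.example.com";
      network = "10.1.0.0/16";
    };
  }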
@ -23,6 +23,22 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
certificateFile = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
Path to the certificate used for SSL connections with clients.
|
||||
'';
|
||||
};
|
||||
|
||||
requireSSL = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Require SSL for connections from clients.
|
||||
'';
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.quasselDaemon;
|
||||
@ -71,6 +87,10 @@ in
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
assertions = [
|
||||
{ assertion = cfg.requireSSL -> cfg.certificateFile != null;
|
||||
message = "Quassel needs a certificate file in order to require SSL";
|
||||
}];
|
||||
|
||||
users.users = mkIf (cfg.user == null) [
|
||||
{ name = "quassel";
|
||||
@ -98,7 +118,13 @@ in
|
||||
|
||||
serviceConfig =
|
||||
{
|
||||
ExecStart = "${quassel}/bin/quasselcore --listen=${concatStringsSep '','' cfg.interfaces} --port=${toString cfg.portNumber} --configdir=${cfg.dataDir}";
|
||||
ExecStart = concatStringsSep " " ([
|
||||
"${quassel}/bin/quasselcore"
|
||||
"--listen=${concatStringsSep "," cfg.interfaces}"
|
||||
"--port=${toString cfg.portNumber}"
|
||||
"--configdir=${cfg.dataDir}"
|
||||
] ++ optional cfg.requireSSL "--require-ssl"
|
||||
++ optional (cfg.certificateFile != null) "--ssl-cert=${cfg.certificateFile}");
|
||||
User = user;
|
||||
PermissionsStartOnly = true;
|
||||
};
|
||||
|
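A rough usage sketch of the new Quassel SSL options above (requireSSL needs certificateFile, per the assertion); the certificate path is illustrative and the enable option is assumed to be declared elsewhere in the module:

  {
    services.quassel = {
      enable = true;
      requireSSL = true;
      certificateFile = "/var/lib/quassel/quasselCert.pem";
    };
  }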
@ -86,7 +86,12 @@ in {
|
||||
'';
|
||||
description = ''
|
||||
Use this option to configure advanced authentication methods like EAP.
|
||||
See wpa_supplicant.conf(5) for example configurations.
|
||||
See
|
||||
<citerefentry>
|
||||
<refentrytitle>wpa_supplicant.conf</refentrytitle>
|
||||
<manvolnum>5</manvolnum>
|
||||
</citerefentry>
|
||||
for example configurations.
|
||||
|
||||
Mutually exclusive with <varname>psk</varname> and <varname>pskRaw</varname>.
|
||||
'';
|
||||
@ -122,7 +127,12 @@ in {
|
||||
'';
|
||||
description = ''
|
||||
Extra configuration lines appended to the network block.
|
||||
See wpa_supplicant.conf(5) for available options.
|
||||
See
|
||||
<citerefentry>
|
||||
<refentrytitle>wpa_supplicant.conf</refentrytitle>
|
||||
<manvolnum>5</manvolnum>
|
||||
</citerefentry>
|
||||
for available options.
|
||||
'';
|
||||
};
|
||||
|
||||
@ -174,7 +184,12 @@ in {
|
||||
'';
|
||||
description = ''
|
||||
Extra lines appended to the configuration file.
|
||||
See wpa_supplicant.conf(5) for available options.
|
||||
See
|
||||
<citerefentry>
|
||||
<refentrytitle>wpa_supplicant.conf</refentrytitle>
|
||||
<manvolnum>5</manvolnum>
|
||||
</citerefentry>
|
||||
for available options.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
@ -316,6 +316,10 @@ in
|
||||
mkdir -m 0755 -p ${cfg.tempDir}
|
||||
|
||||
mkdir -m 0755 -p /var/lib/cups
|
||||
# While cups will automatically create self-signed certificates if accessed via TLS,
|
||||
# this directory to store the certificates needs to be created manually.
|
||||
mkdir -m 0700 -p /var/lib/cups/ssl
|
||||
|
||||
# Backwards compatibility
|
||||
if [ ! -L /etc/cups ]; then
|
||||
mv /etc/cups/* /var/lib/cups
|
||||
|
626
nixos/modules/services/web-apps/icingaweb2/icingaweb2.nix
Normal file
@ -0,0 +1,626 @@
|
||||
{ config, lib, pkgs, ... }: with lib; let
|
||||
cfg = config.services.icingaweb2;
|
||||
poolName = "icingaweb2";
|
||||
phpfpmSocketName = "/var/run/phpfpm/${poolName}.sock";
|
||||
|
||||
formatBool = b: if b then "1" else "0";
|
||||
|
||||
configIni = let
|
||||
config = cfg.generalConfig;
|
||||
in ''
|
||||
[global]
|
||||
show_stacktraces = "${formatBool config.showStacktraces}"
|
||||
show_application_state_messages = "${formatBool config.showApplicationStateMessages}"
|
||||
module_path = "${pkgs.icingaweb2}/modules${optionalString (builtins.length config.modulePath > 0) ":${concatStringsSep ":" config.modulePath}"}"
|
||||
config_backend = "${config.configBackend}"
|
||||
${optionalString (config.configBackend == "db") ''config_resource = "${config.configResource}"''}
|
||||
|
||||
[logging]
|
||||
log = "${config.log}"
|
||||
${optionalString (config.log != "none") ''level = "${config.logLevel}"''}
|
||||
${optionalString (config.log == "php" || config.log == "syslog") ''application = "${config.logApplication}"''}
|
||||
${optionalString (config.log == "syslog") ''facility = "${config.logFacility}"''}
|
||||
${optionalString (config.log == "file") ''file = "${config.logFile}"''}
|
||||
|
||||
[themes]
|
||||
default = "${config.themeDefault}"
|
||||
disabled = "${formatBool config.themeDisabled}"
|
||||
|
||||
[authentication]
|
||||
${optionalString (config.authDefaultDomain != null) ''default_domain = "${config.authDefaultDomain}"''}
|
||||
'';
|
||||
|
||||
resourcesIni = concatStringsSep "\n" (mapAttrsToList (name: config: ''
|
||||
[${name}]
|
||||
type = "${config.type}"
|
||||
${optionalString (config.type == "db") ''
|
||||
db = "${config.db}"
|
||||
host = "${config.host}"
|
||||
${optionalString (config.port != null) ''port = "${toString config.port}"''}
|
||||
username = "${config.username}"
|
||||
password = "${config.password}"
|
||||
dbname = "${config.dbname}"
|
||||
${optionalString (config.charset != null) ''charset = "${config.charset}"''}
|
||||
use_ssl = "${formatBool config.useSSL}"
|
||||
${optionalString (config.sslCert != null) ''ssl_cert = "${config.sslCert}"''}
|
||||
${optionalString (config.sslKey != null) ''ssl_key = "${config.sslKey}"''}
|
||||
${optionalString (config.sslCA != null) ''ssl_ca = "${config.sslCA}"''}
|
||||
${optionalString (config.sslCApath != null) ''ssl_capath = "${config.sslCApath}"''}
|
||||
${optionalString (config.sslCipher != null) ''ssl_cipher = "${config.sslCipher}"''}
|
||||
''}
|
||||
${optionalString (config.type == "ldap") ''
|
||||
hostname = "${config.host}"
|
||||
${optionalString (config.port != null) ''port = "${toString config.port}"''}
|
||||
root_dn = "${config.rootDN}"
|
||||
bind_dn = "${config.username}"
|
||||
bind_pw = "${config.password}"
|
||||
encryption = "${config.ldapEncryption}"
|
||||
timeout = "${toString config.ldapTimeout}"
|
||||
''}
|
||||
${optionalString (config.type == "ssh") ''
|
||||
user = "${config.username}"
|
||||
private_key = "${config.sshPrivateKey}"
|
||||
''}
|
||||
|
||||
'') cfg.resources);
|
||||
|
||||
authenticationIni = concatStringsSep "\n" (mapAttrsToList (name: config: ''
|
||||
[${name}]
|
||||
backend = "${config.backend}"
|
||||
${optionalString (config.domain != null) ''domain = "${config.domain}"''}
|
||||
${optionalString (config.backend == "external" && config.externalStripRegex != null) ''strip_username_regexp = "${config.externalStripRegex}"''}
|
||||
${optionalString (config.backend != "external") ''resource = "${config.resource}"''}
|
||||
${optionalString (config.backend == "ldap" || config.backend == "msldap") ''
|
||||
${optionalString (config.ldapUserClass != null) ''user_class = "${config.ldapUserClass}"''}
|
||||
${optionalString (config.ldapUserNameAttr != null) ''user_name_attribute = "${config.ldapUserNameAttr}"''}
|
||||
${optionalString (config.ldapFilter != null) ''filter = "${config.ldapFilter}"''}
|
||||
''}
|
||||
'') cfg.authentications);
|
||||
|
||||
groupsIni = concatStringsSep "\n" (mapAttrsToList (name: config: ''
|
||||
[${name}]
|
||||
backend = "${config.backend}"
|
||||
resource = "${config.resource}"
|
||||
${optionalString (config.backend != "db") ''
|
||||
${optionalString (config.ldapUserClass != null) ''user_class = "${config.ldapUserClass}"''}
|
||||
${optionalString (config.ldapUserNameAttr != null) ''user_name_attribute = "${config.ldapUserNameAttr}"''}
|
||||
${optionalString (config.ldapGroupClass != null) ''group_class = "${config.ldapGroupClass}"''}
|
||||
${optionalString (config.ldapGroupNameAttr != null) ''group_name_attribute = "${config.ldapGroupNameAttr}"''}
|
||||
${optionalString (config.ldapGroupFilter != null) ''group_filter = "${config.ldapGroupFilter}"''}
|
||||
''}
|
||||
${optionalString (config.backend == "msldap" && config.ldapNestedSearch) ''nested_group_search = "1"''}
|
||||
'') cfg.groupBackends);
|
||||
|
||||
rolesIni = let
|
||||
optionalList = var: attribute: optionalString (builtins.length var > 0) ''${attribute} = "${concatStringsSep "," var}"'';
|
||||
in concatStringsSep "\n" (mapAttrsToList (name: config: ''
|
||||
[${name}]
|
||||
${optionalList config.users "users"}
|
||||
${optionalList config.groups "groups"}
|
||||
${optionalList config.permissions "permissions"}
|
||||
${optionalList config.permissions "permissions"}
|
||||
${concatStringsSep "\n" (mapAttrsToList (key: value: optionalList value key) config.extraAssignments)}
|
||||
'') cfg.roles);
|
||||
|
||||
in {
|
||||
options.services.icingaweb2 = with types; {
|
||||
enable = mkEnableOption "the icingaweb2 web interface";
|
||||
|
||||
pool = mkOption {
|
||||
type = str;
|
||||
default = "${poolName}";
|
||||
description = ''
|
||||
Name of an existing PHP-FPM pool that is used to run Icingaweb2.
|
||||
If not specified, a pool will automatically be created with default values.
|
||||
'';
|
||||
};
|
||||
|
||||
virtualHost = mkOption {
|
||||
type = nullOr str;
|
||||
default = "icingaweb2";
|
||||
description = ''
|
||||
Name of the nginx virtualhost to use and set up. If null, no virtualhost is set up.
|
||||
'';
|
||||
};
|
||||
|
||||
timezone = mkOption {
|
||||
type = str;
|
||||
default = "UTC";
|
||||
example = "Europe/Berlin";
|
||||
description = "PHP-compliant timezone specification";
|
||||
};
|
||||
|
||||
modules = {
|
||||
doc.enable = mkEnableOption "the icingaweb2 doc module";
|
||||
migrate.enable = mkEnableOption "the icingaweb2 migrate module";
|
||||
setup.enable = mkEnableOption "the icingaweb2 setup module";
|
||||
test.enable = mkEnableOption "the icingaweb2 test module";
|
||||
translation.enable = mkEnableOption "the icingaweb2 translation module";
|
||||
};
|
||||
|
||||
modulePackages = mkOption {
|
||||
type = attrsOf package;
|
||||
default = {};
|
||||
example = literalExample ''
|
||||
{
|
||||
"snow" = pkgs.icingaweb2Modules.theme-snow;
|
||||
}
|
||||
'';
|
||||
description = ''
|
||||
Name-package attrset of Icingaweb 2 module packages to enable.
|
||||
|
||||
If you enable modules manually (e.g. via the web ui), they will not be touched.
|
||||
'';
|
||||
};
|
||||
|
||||
generalConfig = {
|
||||
mutable = mkOption {
|
||||
type = bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Make config.ini mutable (e.g. via the web interface).
|
||||
Note that you need to update module_path manually.
|
||||
'';
|
||||
};
|
||||
|
||||
showStacktraces = mkOption {
|
||||
type = bool;
|
||||
default = true;
|
||||
description = "Enable stack traces in the Web UI";
|
||||
};
|
||||
|
||||
showApplicationStateMessages = mkOption {
|
||||
type = bool;
|
||||
default = true;
|
||||
description = "Enable application state messages in the Web UI";
|
||||
};
|
||||
|
||||
modulePath = mkOption {
|
||||
type = listOf str;
|
||||
default = [];
|
||||
description = "List of additional module search paths";
|
||||
};
|
||||
|
||||
configBackend = mkOption {
|
||||
type = enum [ "ini" "db" "none" ];
|
||||
default = "db";
|
||||
description = "Where to store user preferences";
|
||||
};
|
||||
|
||||
configResource = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "Database resource where user preferences are stored (if they are stored in a database)";
|
||||
};
|
||||
|
||||
log = mkOption {
|
||||
type = enum [ "syslog" "php" "file" "none" ];
|
||||
default = "syslog";
|
||||
description = "Logging target";
|
||||
};
|
||||
|
||||
logLevel = mkOption {
|
||||
type = enum [ "ERROR" "WARNING" "INFO" "DEBUG" ];
|
||||
default = "ERROR";
|
||||
description = "Maximum logging level to emit";
|
||||
};
|
||||
|
||||
logApplication = mkOption {
|
||||
type = str;
|
||||
default = "icingaweb2";
|
||||
description = "Application name to log under (syslog and php log)";
|
||||
};
|
||||
|
||||
logFacility = mkOption {
|
||||
type = enum [ "user" "local0" "local1" "local2" "local3" "local4" "local5" "local6" "local7" ];
|
||||
default = "user";
|
||||
description = "Syslog facility to log to";
|
||||
};
|
||||
|
||||
logFile = mkOption {
|
||||
type = str;
|
||||
default = "/var/log/icingaweb2/icingaweb2.log";
|
||||
description = "File to log to";
|
||||
};
|
||||
|
||||
themeDefault = mkOption {
|
||||
type = str;
|
||||
default = "Icinga";
|
||||
description = "Name of the default theme";
|
||||
};
|
||||
|
||||
themeDisabled = mkOption {
|
||||
type = bool;
|
||||
default = false;
|
||||
description = "Disallow users to change the theme";
|
||||
};
|
||||
|
||||
authDefaultDomain = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "Domain for users logging in without a qualified domain";
|
||||
};
|
||||
};
|
||||
|
||||
mutableResources = mkOption {
|
||||
type = bool;
|
||||
default = false;
|
||||
description = "Make resources.ini mutable (e.g. via the web interface)";
|
||||
};
|
||||
|
||||
resources = mkOption {
|
||||
default = {};
|
||||
description = "Icingaweb 2 resources to define";
|
||||
type = attrsOf (submodule ({ name, ... }: {
|
||||
options = {
|
||||
name = mkOption {
|
||||
visible = false;
|
||||
default = name;
|
||||
type = str;
|
||||
description = "Name of this resource";
|
||||
};
|
||||
|
||||
type = mkOption {
|
||||
type = enum [ "db" "ldap" "ssh" ];
|
||||
default = "db";
|
||||
description = "Type of this resouce";
|
||||
};
|
||||
|
||||
db = mkOption {
|
||||
type = enum [ "mysql" "pgsql" ];
|
||||
default = "mysql";
|
||||
description = "Type of this database resource";
|
||||
};
|
||||
|
||||
host = mkOption {
|
||||
type = str;
|
||||
description = "Host to connect to";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = nullOr port;
|
||||
default = null;
|
||||
description = "Port to connect on";
|
||||
};
|
||||
|
||||
username = mkOption {
|
||||
type = str;
|
||||
description = "Database or SSH user or LDAP bind DN to connect with";
|
||||
};
|
||||
|
||||
password = mkOption {
|
||||
type = str;
|
||||
description = "Password for the database user or LDAP bind DN";
|
||||
};
|
||||
|
||||
dbname = mkOption {
|
||||
type = str;
|
||||
description = "Name of the database to connect to";
|
||||
};
|
||||
|
||||
charset = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
example = "utf8";
|
||||
description = "Database character set to connect with";
|
||||
};
|
||||
|
||||
useSSL = mkOption {
|
||||
type = nullOr bool;
|
||||
default = false;
|
||||
description = "Whether to connect to the database using SSL";
|
||||
};
|
||||
|
||||
sslCert = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "The file path to the SSL certificate. Only available for the mysql database.";
|
||||
};
|
||||
|
||||
sslKey = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "The file path to the SSL key. Only available for the mysql database.";
|
||||
};
|
||||
|
||||
sslCA = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "The file path to the SSL certificate authority. Only available for the mysql database.";
|
||||
};
|
||||
|
||||
sslCApath = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "The file path to the directory that contains the trusted SSL CA certificates in PEM format. Only available for the mysql database.";
|
||||
};
|
||||
|
||||
sslCipher = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "A list of one or more permissible ciphers to use for SSL encryption, in a format understood by OpenSSL. Only available for the mysql database.";
|
||||
};
|
||||
|
||||
rootDN = mkOption {
|
||||
type = str;
|
||||
description = "Root object of the LDAP tree";
|
||||
};
|
||||
|
||||
ldapEncryption = mkOption {
|
||||
type = enum [ "none" "starttls" "ldaps" ];
|
||||
default = "none";
|
||||
description = "LDAP encryption to use";
|
||||
};
|
||||
|
||||
ldapTimeout = mkOption {
|
||||
type = ints.positive;
|
||||
default = 5;
|
||||
description = "Connection timeout for every LDAP connection";
|
||||
};
|
||||
|
||||
sshPrivateKey = mkOption {
|
||||
type = str;
|
||||
description = "The path to the private key of the user";
|
||||
};
|
||||
};
|
||||
}));
|
||||
};
|
||||
|
||||
mutableAuthConfig = mkOption {
|
||||
type = bool;
|
||||
default = true;
|
||||
description = "Make authentication.ini mutable (e.g. via the web interface)";
|
||||
};
|
||||
|
||||
authentications = mkOption {
|
||||
default = {};
|
||||
description = "Icingaweb 2 authentications to define";
|
||||
type = attrsOf (submodule ({ name, ... }: {
|
||||
options = {
|
||||
name = mkOption {
|
||||
visible = false;
|
||||
default = name;
|
||||
type = str;
|
||||
description = "Name of this authentication";
|
||||
};
|
||||
|
||||
backend = mkOption {
|
||||
type = enum [ "external" "ldap" "msldap" "db" ];
|
||||
default = "db";
|
||||
description = "The type of this authentication backend";
|
||||
};
|
||||
|
||||
domain = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "Domain for domain-aware authentication";
|
||||
};
|
||||
|
||||
externalStripRegex = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "Regular expression to strip off specific user name parts";
|
||||
};
|
||||
|
||||
resource = mkOption {
|
||||
type = str;
|
||||
description = "Name of the database/LDAP resource";
|
||||
};
|
||||
|
||||
ldapUserClass = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "LDAP user class";
|
||||
};
|
||||
|
||||
ldapUserNameAttr = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "LDAP attribute which contains the username";
|
||||
};
|
||||
|
||||
ldapFilter = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "LDAP search filter";
|
||||
};
|
||||
};
|
||||
}));
|
||||
};
|
||||
|
||||
mutableGroupsConfig = mkOption {
|
||||
type = bool;
|
||||
default = true;
|
||||
description = "Make groups.ini mutable (e.g. via the web interface)";
|
||||
};
|
||||
|
||||
groupBackends = mkOption {
|
||||
default = {};
|
||||
description = "Icingaweb 2 group backends to define";
|
||||
type = attrsOf (submodule ({ name, ... }: {
|
||||
options = {
|
||||
name = mkOption {
|
||||
visible = false;
|
||||
default = name;
|
||||
type = str;
|
||||
description = "Name of this group backend";
|
||||
};
|
||||
|
||||
backend = mkOption {
|
||||
type = enum [ "ldap" "msldap" "db" ];
|
||||
default = "db";
|
||||
description = "The type of this group backend";
|
||||
};
|
||||
|
||||
resource = mkOption {
|
||||
type = str;
|
||||
description = "Name of the database/LDAP resource";
|
||||
};
|
||||
|
||||
ldapUserClass = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "LDAP user class";
|
||||
};
|
||||
|
||||
ldapUserNameAttr = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "LDAP attribute which contains the username";
|
||||
};
|
||||
|
||||
ldapGroupClass = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "LDAP group class";
|
||||
};
|
||||
|
||||
ldapGroupNameAttr = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "LDAP attribute which contains the groupname";
|
||||
};
|
||||
|
||||
ldapGroupFilter = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "LDAP group search filter";
|
||||
};
|
||||
|
||||
ldapNestedSearch = mkOption {
|
||||
type = bool;
|
||||
default = false;
|
||||
description = "Enable nested group search in Active Directory based on the user";
|
||||
};
|
||||
};
|
||||
}));
|
||||
};
|
||||
|
||||
mutableRolesConfig = mkOption {
|
||||
type = bool;
|
||||
default = true;
|
||||
description = "Make roles.ini mutable (e.g. via the web interface)";
|
||||
};
|
||||
|
||||
roles = mkOption {
|
||||
default = {};
|
||||
description = "Icingaweb 2 roles to define";
|
||||
type = attrsOf (submodule ({ name, ... }: {
|
||||
options = {
|
||||
name = mkOption {
|
||||
visible = false;
|
||||
default = name;
|
||||
type = str;
|
||||
description = "Name of this role";
|
||||
};
|
||||
|
||||
users = mkOption {
|
||||
type = listOf str;
|
||||
default = [];
|
||||
description = "List of users that are assigned to the role";
|
||||
};
|
||||
|
||||
groups = mkOption {
|
||||
type = listOf str;
|
||||
default = [];
|
||||
description = "List of groups that are assigned to the role";
|
||||
};
|
||||
|
||||
permissions = mkOption {
|
||||
type = listOf str;
|
||||
default = [];
|
||||
example = [ "application/share/navigation" "config/*" ];
|
||||
description = "The permissions to grant";
|
||||
};
|
||||
|
||||
extraAssignments = mkOption {
|
||||
type = attrsOf (listOf str);
|
||||
default = {};
|
||||
example = { "monitoring/blacklist/properties" = [ "sla" "customer"]; };
|
||||
description = "Additional assignments of this role";
|
||||
};
|
||||
};
|
||||
}));
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
services.phpfpm.poolConfigs = mkIf (cfg.pool == "${poolName}") {
|
||||
"${poolName}" = ''
|
||||
listen = "${phpfpmSocketName}"
|
||||
listen.owner = nginx
|
||||
listen.group = nginx
|
||||
listen.mode = 0600
|
||||
user = icingaweb2
|
||||
pm = dynamic
|
||||
pm.max_children = 75
|
||||
pm.start_servers = 2
|
||||
pm.min_spare_servers = 2
|
||||
pm.max_spare_servers = 10
|
||||
'';
|
||||
};
|
||||
|
||||
services.phpfpm.phpOptions = mkIf (cfg.pool == "${poolName}")
|
||||
''
|
||||
extension = ${pkgs.phpPackages.imagick}/lib/php/extensions/imagick.so
|
||||
date.timezone = "${cfg.timezone}"
|
||||
'';
|
||||
|
||||
systemd.services."phpfpm-${poolName}".serviceConfig.ReadWritePaths = [ "/etc/icingaweb2" ];
|
||||
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
virtualHosts = mkIf (cfg.virtualHost != null) {
|
||||
"${cfg.virtualHost}" = {
|
||||
root = "${pkgs.icingaweb2}/public";
|
||||
|
||||
extraConfig = ''
|
||||
index index.php;
|
||||
try_files $1 $uri $uri/ /index.php$is_args$args;
|
||||
'';
|
||||
|
||||
locations."~ ..*/.*.php$".extraConfig = ''
|
||||
return 403;
|
||||
'';
|
||||
|
||||
locations."~ ^/index.php(.*)$".extraConfig = ''
|
||||
fastcgi_intercept_errors on;
|
||||
fastcgi_index index.php;
|
||||
include ${config.services.nginx.package}/conf/fastcgi.conf;
|
||||
try_files $uri =404;
|
||||
fastcgi_split_path_info ^(.+\.php)(/.+)$;
|
||||
fastcgi_pass unix:${phpfpmSocketName};
|
||||
fastcgi_param SCRIPT_FILENAME ${pkgs.icingaweb2}/public/index.php;
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# /etc/icingaweb2
|
||||
environment.etc = let
|
||||
doModule = name: optionalAttrs (cfg.modules."${name}".enable) (nameValuePair "icingaweb2/enabledModules/${name}" { source = "${pkgs.icingaweb2}/modules/${name}"; });
|
||||
in {}
|
||||
# Module packages
|
||||
// (mapAttrs' (k: v: nameValuePair "icingaweb2/enabledModules/${k}" { source = v; }) cfg.modulePackages)
|
||||
# Built-in modules
|
||||
// doModule "doc"
|
||||
// doModule "migrate"
|
||||
// doModule "setup"
|
||||
// doModule "test"
|
||||
// doModule "translation"
|
||||
# Configs
|
||||
// optionalAttrs (!cfg.generalConfig.mutable) { "icingaweb2/config.ini".text = configIni; }
|
||||
// optionalAttrs (!cfg.mutableResources) { "icingaweb2/resources.ini".text = resourcesIni; }
|
||||
// optionalAttrs (!cfg.mutableAuthConfig) { "icingaweb2/authentication.ini".text = authenticationIni; }
|
||||
// optionalAttrs (!cfg.mutableGroupsConfig) { "icingaweb2/groups.ini".text = groupsIni; }
|
||||
// optionalAttrs (!cfg.mutableRolesConfig) { "icingaweb2/roles.ini".text = rolesIni; };
|
||||
|
||||
# User and group
|
||||
users.groups.icingaweb2 = {};
|
||||
users.users.icingaweb2 = {
|
||||
description = "Icingaweb2 service user";
|
||||
group = "icingaweb2";
|
||||
isSystemUser = true;
|
||||
};
|
||||
};
|
||||
}
|
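As a rough usage sketch (not part of the module itself), the options defined above could be wired together like this; the resource, authentication, and role names as well as the credentials are placeholders:

services.icingaweb2 = {
  enable = true;
  # database resource holding the Icingaweb2 users (placeholder credentials)
  resources.icingaweb_db = {
    type = "db";
    db = "mysql";
    host = "localhost";
    username = "icingaweb2";
    password = "changeme";  # placeholder, use a real secret
    dbname = "icingaweb2";
  };
  # authenticate against that database resource
  authentications.icingaweb = {
    backend = "db";
    resource = "icingaweb_db";
  };
  # grant full permissions to the admin user (placeholder role)
  roles.admins = {
    users = [ "admin" ];
    permissions = [ "*" ];
  };
};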
157
nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix
Normal file
157
nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix
Normal file
@ -0,0 +1,157 @@
|
||||
{ config, lib, pkgs, ... }: with lib; let
|
||||
cfg = config.services.icingaweb2.modules.monitoring;
|
||||
|
||||
configIni = ''
|
||||
[security]
|
||||
protected_customvars = "${concatStringsSep "," cfg.generalConfig.protectedVars}"
|
||||
'';
|
||||
|
||||
backendsIni = let
|
||||
formatBool = b: if b then "1" else "0";
|
||||
in concatStringsSep "\n" (mapAttrsToList (name: config: ''
|
||||
[${name}]
|
||||
type = "ido"
|
||||
resource = "${config.resource}"
|
||||
disabled = "${formatBool config.disabled}"
|
||||
'') cfg.backends);
|
||||
|
||||
transportsIni = concatStringsSep "\n" (mapAttrsToList (name: config: ''
|
||||
[${name}]
|
||||
type = "${config.type}"
|
||||
${optionalString (config.instance != null) ''instance = "${config.instance}"''}
|
||||
${optionalString (config.type == "local" || config.type == "remote") ''path = "${config.path}"''}
|
||||
${optionalString (config.type != "local") ''
|
||||
host = "${config.host}"
|
||||
${optionalString (config.port != null) ''port = "${toString config.port}"''}
|
||||
user${optionalString (config.type == "api") "name"} = "${config.username}"
|
||||
''}
|
||||
${optionalString (config.type == "api") ''password = "${config.password}"''}
|
||||
${optionalString (config.type == "remote") ''resource = "${config.resource}"''}
|
||||
'') cfg.transports);
|
||||
|
||||
in {
|
||||
options.services.icingaweb2.modules.monitoring = with types; {
|
||||
enable = mkOption {
|
||||
type = bool;
|
||||
default = true;
|
||||
description = "Whether to enable the icingaweb2 monitoring module.";
|
||||
};
|
||||
|
||||
generalConfig = {
|
||||
mutable = mkOption {
|
||||
type = bool;
|
||||
default = false;
|
||||
description = "Make config.ini of the monitoring module mutable (e.g. via the web interface).";
|
||||
};
|
||||
|
||||
protectedVars = mkOption {
|
||||
type = listOf str;
|
||||
default = [ "*pw*" "*pass*" "community" ];
|
||||
description = "List of string patterns for custom variables which should be excluded from user’s view.";
|
||||
};
|
||||
};
|
||||
|
||||
mutableBackends = mkOption {
|
||||
type = bool;
|
||||
default = false;
|
||||
description = "Make backends.ini of the monitoring module mutable (e.g. via the web interface).";
|
||||
};
|
||||
|
||||
backends = mkOption {
|
||||
default = { "icinga" = { resource = "icinga_ido"; }; };
|
||||
description = "Monitoring backends to define";
|
||||
type = attrsOf (submodule ({ name, ... }: {
|
||||
options = {
|
||||
name = mkOption {
|
||||
visible = false;
|
||||
default = name;
|
||||
type = str;
|
||||
description = "Name of this backend";
|
||||
};
|
||||
|
||||
resource = mkOption {
|
||||
type = str;
|
||||
description = "Name of the IDO resource";
|
||||
};
|
||||
|
||||
disabled = mkOption {
|
||||
type = bool;
|
||||
default = false;
|
||||
description = "Disable this backend";
|
||||
};
|
||||
};
|
||||
}));
|
||||
};
|
||||
|
||||
mutableTransports = mkOption {
|
||||
type = bool;
|
||||
default = true;
|
||||
description = "Make commandtransports.ini of the monitoring module mutable (e.g. via the web interface).";
|
||||
};
|
||||
|
||||
transports = mkOption {
|
||||
default = {};
|
||||
description = "Command transports to define";
|
||||
type = attrsOf (submodule ({ name, ... }: {
|
||||
options = {
|
||||
name = mkOption {
|
||||
visible = false;
|
||||
default = name;
|
||||
type = str;
|
||||
description = "Name of this transport";
|
||||
};
|
||||
|
||||
type = mkOption {
|
||||
type = enum [ "api" "local" "remote" ];
|
||||
default = "api";
|
||||
description = "Type of this transport";
|
||||
};
|
||||
|
||||
instance = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "Assign a icinga instance to this transport";
|
||||
};
|
||||
|
||||
path = mkOption {
|
||||
type = str;
|
||||
description = "Path to the socket for local or remote transports";
|
||||
};
|
||||
|
||||
host = mkOption {
|
||||
type = str;
|
||||
description = "Host for the api or remote transport";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "Port to connect to for the api or remote transport";
|
||||
};
|
||||
|
||||
username = mkOption {
|
||||
type = str;
|
||||
description = "Username for the api or remote transport";
|
||||
};
|
||||
|
||||
password = mkOption {
|
||||
type = str;
|
||||
description = "Password for the api transport";
|
||||
};
|
||||
|
||||
resource = mkOption {
|
||||
type = str;
|
||||
description = "SSH identity resource for the remote transport";
|
||||
};
|
||||
};
|
||||
}));
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf (config.services.icingaweb2.enable && cfg.enable) {
|
||||
environment.etc = { "icingaweb2/enabledModules/monitoring" = { source = "${pkgs.icingaweb2}/modules/monitoring"; }; }
|
||||
// optionalAttrs (!cfg.generalConfig.mutable) { "icingaweb2/modules/monitoring/config.ini".text = configIni; }
|
||||
// optionalAttrs (!cfg.mutableBackends) { "icingaweb2/modules/monitoring/backends.ini".text = backendsIni; }
|
||||
// optionalAttrs (!cfg.mutableTransports) { "icingaweb2/modules/monitoring/commandtransports.ini".text = transportsIni; };
|
||||
};
|
||||
}
|
@ -12,9 +12,9 @@
|
||||
An automatic setup is not supported by Matomo, so you need to configure Matomo
|
||||
itself in the browser-based Matomo setup.
|
||||
</para>
|
||||
|
||||
<section xml:id="module-services-matomo-database-setup">
|
||||
<title>Database Setup</title>
|
||||
|
||||
<para>
|
||||
You also need to configure a MariaDB or MySQL database and a database user for Matomo
|
||||
yourself, and enter those credentials in your browser. You can use
|
||||
@ -46,9 +46,30 @@
|
||||
database is not on the same host.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section xml:id="module-services-matomo-archive-processing">
|
||||
<title>Archive Processing</title>
|
||||
<para>
|
||||
This module comes with the systemd service <literal>matomo-archive-processing.service</literal>
|
||||
and a timer that automatically triggers archive processing every hour.
|
||||
This means that you can safely
|
||||
<link xlink:href="https://matomo.org/docs/setup-auto-archiving/#disable-browser-triggers-for-matomo-archiving-and-limit-matomo-reports-to-updating-every-hour">
|
||||
disable browser triggers for Matomo archiving
|
||||
</link> at <literal>Administration > System > General Settings</literal>.
|
||||
</para>
|
||||
<para>
|
||||
With automatic archive processing, you can now also choose to
|
||||
<link xlink:href="https://matomo.org/docs/privacy/#step-2-delete-old-visitors-logs">
|
||||
delete old visitor logs
|
||||
</link> at <literal>Administration > System > Privacy</literal>,
|
||||
but make sure that you run <literal>systemctl start matomo-archive-processing.service</literal>
|
||||
at least once without errors if you have already collected data before,
|
||||
so that the reports get archived before the source data gets deleted.
|
||||
</para>
|
||||
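<para>
  As a rough sketch (assuming the options of this module, in particular
  <option>periodicArchiveProcessing</option> and <option>nginx</option>, and
  with a placeholder host name), a configuration that relies on the hourly
  archive processing timer could look like this:
</para>
<programlisting>
services.matomo = {
  enable = true;
  # enabled by default; shown here only for clarity
  periodicArchiveProcessing = true;
  nginx = {
    # placeholder host name, replace with your own
    serverName = "stats.example.org";
  };
};
</programlisting>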
</section>
|
||||
|
||||
<section xml:id="module-services-matomo-backups">
|
||||
<title>Backup</title>
|
||||
|
||||
<para>
|
||||
You only need to take backups of your MySQL database and the
|
||||
<filename>/var/lib/matomo/config/config.ini.php</filename> file. Use a user
|
||||
@ -57,9 +78,9 @@
|
||||
<link xlink:href="https://matomo.org/faq/how-to-install/faq_138/" />.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section xml:id="module-services-matomo-issues">
|
||||
<title>Issues</title>
|
||||
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
@ -76,6 +97,7 @@
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
|
||||
<section xml:id="module-services-matomo-other-web-servers">
|
||||
<title>Using other Web Servers than nginx</title>
|
||||
|
||||
|
@ -23,20 +23,24 @@ in {
|
||||
options = {
|
||||
services.matomo = {
|
||||
# NixOS PR for database setup: https://github.com/NixOS/nixpkgs/pull/6963
|
||||
# matomo issue for automatic matomo setup: https://github.com/matomo-org/matomo/issues/10257
|
||||
# TODO: find a nice way to do this when more NixOS MySQL and / or matomo automatic setup stuff is implemented.
|
||||
# Matomo issue for automatic Matomo setup: https://github.com/matomo-org/matomo/issues/10257
|
||||
# TODO: find a nice way to do this when more NixOS MySQL and / or Matomo automatic setup stuff is implemented.
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Enable matomo web analytics with php-fpm backend.
|
||||
Enable Matomo web analytics with php-fpm backend.
|
||||
Either the nginx option or the webServerUser option is mandatory.
|
||||
'';
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
description = "Matomo package to use";
|
||||
description = ''
|
||||
Matomo package for the service to use.
|
||||
This can be used to point to newer releases from nixos-unstable,
|
||||
as they don't get backported if they are not security-relevant.
|
||||
'';
|
||||
default = pkgs.matomo;
|
||||
defaultText = "pkgs.matomo";
|
||||
};
|
||||
@ -45,12 +49,25 @@ in {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "lighttpd";
|
||||
# TODO: piwik.php might get renamed to matomo.php in future releases
|
||||
description = ''
|
||||
Name of the web server user that forwards requests to the ${phpSocket} fastcgi socket for matomo if the nginx
|
||||
Name of the web server user that forwards requests to the ${phpSocket} fastcgi socket for Matomo if the nginx
|
||||
option is not used. Either this option or the nginx option is mandatory.
|
||||
If you want to use another webserver than nginx, you need to set this to that server's user
|
||||
and pass fastcgi requests to `index.php` and `piwik.php` to this socket.
|
||||
and pass fastcgi requests to `index.php`, `matomo.php` and `piwik.php` (legacy name) to this socket.
|
||||
'';
|
||||
};
|
||||
|
||||
periodicArchiveProcessing = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Enable periodic archive processing, which generates aggregated reports from the visits.
|
||||
|
||||
This means that you can safely disable browser triggers for Matomo archiving,
|
||||
and safely enable deletion of old visitor logs.
|
||||
Before deleting visitor logs,
|
||||
make sure though that you run <literal>systemctl start matomo-archive-processing.service</literal>
|
||||
at least once without errors if you have already collected data before.
|
||||
'';
|
||||
};
|
||||
|
||||
@ -69,7 +86,7 @@ in {
|
||||
catch_workers_output = yes
|
||||
'';
|
||||
description = ''
|
||||
Settings for phpfpm's process manager. You might need to change this depending on the load for matomo.
|
||||
Settings for phpfpm's process manager. You might need to change this depending on the load for Matomo.
|
||||
'';
|
||||
};
|
||||
|
||||
@ -79,7 +96,7 @@ in {
|
||||
(import ../web-servers/nginx/vhost-options.nix { inherit config lib; })
|
||||
{
|
||||
# enable encryption by default,
|
||||
# as sensitive login and matomo data should not be transmitted in clear text.
|
||||
# as sensitive login and Matomo data should not be transmitted in clear text.
|
||||
options.forceSSL.default = true;
|
||||
options.enableACME.default = true;
|
||||
}
|
||||
@ -94,7 +111,7 @@ in {
|
||||
enableACME = false;
|
||||
};
|
||||
description = ''
|
||||
With this option, you can customize an nginx virtualHost which already has sensible defaults for matomo.
|
||||
With this option, you can customize an nginx virtualHost which already has sensible defaults for Matomo.
|
||||
Either this option or the webServerUser option is mandatory.
|
||||
Set this to {} to just enable the virtualHost if you don't need any customization.
|
||||
If enabled, then by default, the <option>serverName</option> is
|
||||
@ -124,29 +141,30 @@ in {
|
||||
};
|
||||
users.groups.${user} = {};
|
||||
|
||||
systemd.services.matomo_setup_update = {
|
||||
# everything needs to set up and up to date before matomo php files are executed
|
||||
systemd.services.matomo-setup-update = {
|
||||
# everything needs to set up and up to date before Matomo php files are executed
|
||||
requiredBy = [ "${phpExecutionUnit}.service" ];
|
||||
before = [ "${phpExecutionUnit}.service" ];
|
||||
# the update part of the script can only work if the database is already up and running
|
||||
requires = [ databaseService ];
|
||||
after = [ databaseService ];
|
||||
path = [ cfg.package ];
|
||||
environment.PIWIK_USER_PATH = dataDir;
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = user;
|
||||
# hide especially config.ini.php from other
|
||||
UMask = "0007";
|
||||
# TODO: might get renamed to MATOMO_USER_PATH in future versions
|
||||
Environment = "PIWIK_USER_PATH=${dataDir}";
|
||||
# chown + chmod in preStart needs root
|
||||
PermissionsStartOnly = true;
|
||||
};
|
||||
|
||||
# correct ownership and permissions in case they're not correct anymore,
|
||||
# e.g. after restoring from backup or moving from another system.
|
||||
# Note that ${dataDir}/config/config.ini.php might contain the MySQL password.
|
||||
preStart = ''
|
||||
# migrate data from piwik to matomo folder
|
||||
# migrate data from piwik to Matomo folder
|
||||
if [ -d ${deprecatedDataDir} ]; then
|
||||
echo "Migrating from ${deprecatedDataDir} to ${dataDir}"
|
||||
mv -T ${deprecatedDataDir} ${dataDir}
|
||||
@ -155,7 +173,7 @@ in {
|
||||
chmod -R ug+rwX,o-rwx ${dataDir}
|
||||
'';
|
||||
script = ''
|
||||
# Use User-Private Group scheme to protect matomo data, but allow administration / backup via matomo group
|
||||
# Use User-Private Group scheme to protect Matomo data, but allow administration / backup via 'matomo' group
|
||||
# Copy config folder
|
||||
chmod g+s "${dataDir}"
|
||||
cp -r "${cfg.package}/config" "${dataDir}/"
|
||||
@ -169,8 +187,39 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
# If this is run regularly via the timer,
|
||||
# 'Browser trigger archiving' can be disabled in Matomo UI > Settings > General Settings.
|
||||
systemd.services.matomo-archive-processing = {
|
||||
description = "Archive Matomo reports";
|
||||
# the archiving can only work if the database is already up and running
|
||||
requires = [ databaseService ];
|
||||
after = [ databaseService ];
|
||||
|
||||
# TODO: might get renamed to MATOMO_USER_PATH in future versions
|
||||
environment.PIWIK_USER_PATH = dataDir;
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = user;
|
||||
UMask = "0007";
|
||||
CPUSchedulingPolicy = "idle";
|
||||
IOSchedulingClass = "idle";
|
||||
ExecStart = "${cfg.package}/bin/matomo-console core:archive --url=https://${user}.${fqdn}";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.timers.matomo-archive-processing = mkIf cfg.periodicArchiveProcessing {
|
||||
description = "Automatically archive Matomo reports every hour";
|
||||
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnCalendar = "hourly";
|
||||
Persistent = "yes";
|
||||
AccuracySec = "10m";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.${phpExecutionUnit} = {
|
||||
# stop phpfpm on package upgrade, do database upgrade via matomo_setup_update, and then restart
|
||||
# stop phpfpm on package upgrade, do database upgrade via matomo-setup-update, and then restart
|
||||
restartTriggers = [ cfg.package ];
|
||||
# stop config.ini.php from getting written with read permission for others
|
||||
serviceConfig.UMask = "0007";
|
||||
@ -200,13 +249,13 @@ in {
|
||||
# https://fralef.me/piwik-hardening-with-nginx-and-php-fpm.html
|
||||
# https://github.com/perusio/piwik-nginx
|
||||
"${user}.${fqdn}" = mkMerge [ cfg.nginx {
|
||||
# don't allow to override the root easily, as it will almost certainly break matomo.
|
||||
# don't allow to override the root easily, as it will almost certainly break Matomo.
|
||||
# disadvantage: not shown as default in docs.
|
||||
root = mkForce "${cfg.package}/share";
|
||||
|
||||
# define locations here instead of as the submodule option's default
|
||||
# so that they can easily be extended with additional locations if required
|
||||
# without needing to redefine the matomo ones.
|
||||
# without needing to redefine the Matomo ones.
|
||||
# disadvantage: not shown as default in docs.
|
||||
locations."/" = {
|
||||
index = "index.php";
|
||||
@ -215,8 +264,11 @@ in {
|
||||
locations."= /index.php".extraConfig = ''
|
||||
fastcgi_pass unix:${phpSocket};
|
||||
'';
|
||||
# TODO: might get renamed to matomo.php in future versions
|
||||
# allow piwik.php for tracking
|
||||
# allow matomo.php for tracking
|
||||
locations."= /matomo.php".extraConfig = ''
|
||||
fastcgi_pass unix:${phpSocket};
|
||||
'';
|
||||
# allow piwik.php for tracking (deprecated name)
|
||||
locations."= /piwik.php".extraConfig = ''
|
||||
fastcgi_pass unix:${phpSocket};
|
||||
'';
|
||||
@ -237,8 +289,11 @@ in {
|
||||
locations."= /robots.txt".extraConfig = ''
|
||||
return 200 "User-agent: *\nDisallow: /\n";
|
||||
'';
|
||||
# TODO: might get renamed to matomo.js in future versions
|
||||
# let browsers cache piwik.js
|
||||
# let browsers cache matomo.js
|
||||
locations."= /matomo.js".extraConfig = ''
|
||||
expires 1M;
|
||||
'';
|
||||
# let browsers cache piwik.js (deprecated name)
|
||||
locations."= /piwik.js".extraConfig = ''
|
||||
expires 1M;
|
||||
'';
|
||||
|
@ -40,7 +40,7 @@ let
|
||||
else if (cfg.database.passwordFile != null) then
|
||||
"file_get_contents('${cfg.database.passwordFile}')"
|
||||
else
|
||||
""
|
||||
"''"
|
||||
});
|
||||
define('DB_PORT', '${toString dbPort}');
|
||||
|
||||
@ -53,7 +53,17 @@ let
|
||||
define('SINGLE_USER_MODE', ${boolToString cfg.singleUserMode});
|
||||
|
||||
define('SIMPLE_UPDATE_MODE', ${boolToString cfg.simpleUpdateMode});
|
||||
define('CHECK_FOR_UPDATES', ${boolToString cfg.checkForUpdates});
|
||||
|
||||
// Never check for updates - the running version of the code should be
|
||||
// controlled entirely by the version of TT-RSS active in the current Nix
|
||||
// profile. If TT-RSS updates itself to a version requiring a database
|
||||
// schema upgrade, and then the SystemD tt-rss.service is restarted, the
|
||||
// old code copied from the Nix store will overwrite the updated version,
|
||||
// causing the code to detect the need for a schema "upgrade" (since the
|
||||
// schema version in the database is different than in the code), but the
|
||||
// update schema operation in TT-RSS will do nothing because the schema
|
||||
// version in the database is newer than that in the code.
|
||||
define('CHECK_FOR_UPDATES', false);
|
||||
|
||||
define('FORCE_ARTICLE_PURGE', ${toString cfg.forceArticlePurge});
|
||||
define('SESSION_COOKIE_LIFETIME', ${toString cfg.sessionCookieLifetime});
|
||||
@ -414,14 +424,6 @@ let
|
||||
'';
|
||||
};
|
||||
|
||||
checkForUpdates = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Check for updates automatically if running Git version
|
||||
'';
|
||||
};
|
||||
|
||||
enableGZipOutput = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
@ -489,6 +491,14 @@ let
|
||||
};
|
||||
};
|
||||
|
||||
imports = [
|
||||
(mkRemovedOptionModule ["services" "tt-rss" "checkForUpdates"] ''
|
||||
This option was removed because setting this to true will cause TT-RSS
|
||||
to be unable to start if an automatic update of the code in
|
||||
services.tt-rss.root leads to a database schema upgrade that is not
|
||||
supported by the code active in the Nix store.
|
||||
'')
|
||||
];
|
||||
|
||||
###### implementation
|
||||
|
||||
@ -552,7 +562,7 @@ let
|
||||
callSql = e:
|
||||
if cfg.database.type == "pgsql" then ''
|
||||
${optionalString (cfg.database.password != null) "PGPASSWORD=${cfg.database.password}"} \
|
||||
${optionalString (cfg.database.passwordFile != null) "PGPASSWORD=$(cat ${cfg.database.passwordFile}"}) \
|
||||
${optionalString (cfg.database.passwordFile != null) "PGPASSWORD=$(cat ${cfg.database.passwordFile})"} \
|
||||
${pkgs.sudo}/bin/sudo -u ${cfg.user} ${config.services.postgresql.package}/bin/psql \
|
||||
-U ${cfg.database.user} \
|
||||
${optionalString (cfg.database.host != null) "-h ${cfg.database.host} --port ${toString dbPort}"} \
|
||||
|
@ -376,6 +376,8 @@ let
|
||||
Include ${httpd}/conf/extra/httpd-multilang-errordoc.conf
|
||||
Include ${httpd}/conf/extra/httpd-languages.conf
|
||||
|
||||
TraceEnable off
|
||||
|
||||
${if enableSSL then sslConf else ""}
|
||||
|
||||
# Fascist default - deny access to everything.
|
||||
@ -495,8 +497,8 @@ in
|
||||
default = false;
|
||||
description = ''
|
||||
If enabled, each virtual host gets its own
|
||||
<filename>access_log</filename> and
|
||||
<filename>error_log</filename>, namely suffixed by the
|
||||
<filename>access.log</filename> and
|
||||
<filename>error.log</filename>, namely suffixed by the
|
||||
<option>hostName</option> of the virtual host.
|
||||
'';
|
||||
};
|
||||
@ -639,8 +641,8 @@ in
|
||||
|
||||
sslProtocols = mkOption {
|
||||
type = types.str;
|
||||
default = "All -SSLv2 -SSLv3";
|
||||
example = "All -SSLv2 -SSLv3 -TLSv1";
|
||||
default = "All -SSLv2 -SSLv3 -TLSv1";
|
||||
example = "All -SSLv2 -SSLv3";
|
||||
description = "Allowed SSL/TLS protocol versions.";
|
||||
};
|
||||
}
|
||||
@ -684,6 +686,9 @@ in
|
||||
''
|
||||
; Needed for PHP's mail() function.
|
||||
sendmail_path = sendmail -t -i
|
||||
|
||||
; Don't advertise PHP
|
||||
expose_php = off
|
||||
'' + optionalString (!isNull config.time.timeZone) ''
|
||||
|
||||
; Apparently PHP doesn't use $TZ.
|
||||
|
@ -14,11 +14,13 @@ let
|
||||
|
||||
mapPoolConfig = n: p: {
|
||||
phpPackage = cfg.phpPackage;
|
||||
phpOptions = cfg.phpOptions;
|
||||
config = p;
|
||||
};
|
||||
|
||||
mapPool = n: p: {
|
||||
phpPackage = p.phpPackage;
|
||||
phpOptions = p.phpOptions;
|
||||
config = ''
|
||||
listen = ${p.listen}
|
||||
${p.extraConfig}
|
||||
@ -35,8 +37,8 @@ let
|
||||
${conf}
|
||||
'';
|
||||
|
||||
phpIni = pkgs.runCommand "php.ini" {
|
||||
inherit (cfg) phpPackage phpOptions;
|
||||
phpIni = pool: pkgs.runCommand "php.ini" {
|
||||
inherit (pool) phpPackage phpOptions;
|
||||
nixDefaults = ''
|
||||
sendmail_path = "/run/wrappers/bin/sendmail -t -i"
|
||||
'';
|
||||
@ -156,6 +158,7 @@ in {
|
||||
'';
|
||||
serviceConfig = let
|
||||
cfgFile = fpmCfgFile pool poolConfig.config;
|
||||
iniFile = phpIni poolConfig;
|
||||
in {
|
||||
Slice = "phpfpm.slice";
|
||||
PrivateDevices = true;
|
||||
@ -164,7 +167,7 @@ in {
|
||||
# XXX: We need AF_NETLINK to make the sendmail SUID binary from postfix work
|
||||
RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
|
||||
Type = "notify";
|
||||
ExecStart = "${poolConfig.phpPackage}/bin/php-fpm -y ${cfgFile} -c ${phpIni}";
|
||||
ExecStart = "${poolConfig.phpPackage}/bin/php-fpm -y ${cfgFile} -c ${iniFile}";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -USR2 $MAINPID";
|
||||
};
|
||||
}
|
||||
|
@ -25,6 +25,15 @@ with lib; {
|
||||
'';
|
||||
};
|
||||
|
||||
phpOptions = mkOption {
|
||||
type = types.lines;
|
||||
default = fpmCfg.phpOptions;
|
||||
defaultText = "config.services.phpfpm.phpOptions";
|
||||
description = ''
|
||||
"Options appended to the PHP configuration file <filename>php.ini</filename> used for this PHP-FPM pool."
|
||||
'';
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
type = types.lines;
|
||||
example = ''
|
||||
|
@ -151,6 +151,7 @@ in {
|
||||
services.colord.enable = mkDefault true;
|
||||
services.packagekit.enable = mkDefault true;
|
||||
hardware.bluetooth.enable = mkDefault true;
|
||||
services.hardware.bolt.enable = mkDefault true;
|
||||
services.xserver.libinput.enable = mkDefault true; # for controlling touchpad settings via gnome control center
|
||||
services.udev.packages = [ pkgs.gnome3.gnome-settings-daemon ];
|
||||
systemd.packages = [ pkgs.gnome3.vino ];
|
||||
|
@ -650,6 +650,18 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
services.logind.lidSwitchExternalPower = mkOption {
|
||||
default = config.services.logind.lidSwitch;
|
||||
example = "ignore";
|
||||
type = logindHandlerType;
|
||||
|
||||
description = ''
|
||||
Specifies what to do when the laptop lid is closed and the system is
|
||||
on external power. By default use the same action as specified in
|
||||
services.logind.lidSwitch.
|
||||
'';
|
||||
};
|
||||
|
||||
systemd.user.extraConfig = mkOption {
|
||||
default = "";
|
||||
type = types.lines;
|
||||
@ -797,6 +809,7 @@ in
|
||||
KillUserProcesses=${if config.services.logind.killUserProcesses then "yes" else "no"}
|
||||
HandleLidSwitch=${config.services.logind.lidSwitch}
|
||||
HandleLidSwitchDocked=${config.services.logind.lidSwitchDocked}
|
||||
HandleLidSwitchExternalPower=${config.services.logind.lidSwitchExternalPower}
|
||||
${config.services.logind.extraConfig}
|
||||
'';
|
||||
|
||||
|
@ -3,13 +3,12 @@
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.vmwareGuest;
|
||||
cfg = config.virtualisation.vmware.guest;
|
||||
open-vm-tools = if cfg.headless then pkgs.open-vm-tools-headless else pkgs.open-vm-tools;
|
||||
xf86inputvmmouse = pkgs.xorg.xf86inputvmmouse;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
services.vmwareGuest = {
|
||||
options.virtualisation.vmware.guest = {
|
||||
enable = mkEnableOption "VMWare Guest Support";
|
||||
headless = mkOption {
|
||||
type = types.bool;
|
||||
@ -17,7 +16,6 @@ in
|
||||
description = "Whether to disable X11-related features.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
assertions = [ {
|
||||
@ -25,6 +23,8 @@ in
|
||||
message = "VMWare guest is not currently supported on ${pkgs.stdenv.hostPlatform.system}";
|
||||
} ];
|
||||
|
||||
boot.initrd.kernelModules = [ "vmw_pvscsi" ];
|
||||
|
||||
environment.systemPackages = [ open-vm-tools ];
|
||||
|
||||
systemd.services.vmware =
|
||||
|
@ -73,6 +73,7 @@ in
|
||||
ferm = handleTest ./ferm.nix {};
|
||||
firefox = handleTest ./firefox.nix {};
|
||||
firewall = handleTest ./firewall.nix {};
|
||||
flannel = handleTestOn ["x86_64-linux"] ./flannel.nix {};
|
||||
flatpak = handleTest ./flatpak.nix {};
|
||||
fsck = handleTest ./fsck.nix {};
|
||||
fwupd = handleTestOn ["x86_64-linux"] ./fwupd.nix {}; # libsmbios is unsupported on aarch64
|
||||
|
@ -21,8 +21,9 @@ import ./make-test.nix ({ pkgs, ...} : rec {
|
||||
services = {
|
||||
etcd = {
|
||||
enable = true;
|
||||
listenClientUrls = ["http://etcd:2379"];
|
||||
listenPeerUrls = ["http://etcd:2380"];
|
||||
listenClientUrls = ["http://0.0.0.0:2379"]; # requires ip-address for binding
|
||||
listenPeerUrls = ["http://0.0.0.0:2380"]; # requires ip-address for binding
|
||||
advertiseClientUrls = ["http://etcd:2379"];
|
||||
initialAdvertisePeerUrls = ["http://etcd:2379"];
|
||||
initialCluster = ["etcd=http://etcd:2379"];
|
||||
};
|
||||
|
@ -10,7 +10,6 @@ let
|
||||
mkKubernetesBaseTest =
|
||||
{ name, domain ? "my.zyx", test, machines
|
||||
, pkgs ? import <nixpkgs> { inherit system; }
|
||||
, certs ? import ./certs.nix { inherit pkgs; externalDomain = domain; kubelets = attrNames machines; }
|
||||
, extraConfiguration ? null }:
|
||||
let
|
||||
masterName = head (filter (machineName: any (role: role == "master") machines.${machineName}.roles) (attrNames machines));
|
||||
@ -20,6 +19,10 @@ let
|
||||
${master.ip} api.${domain}
|
||||
${concatMapStringsSep "\n" (machineName: "${machines.${machineName}.ip} ${machineName}.${domain}") (attrNames machines)}
|
||||
'';
|
||||
kubectl = with pkgs; runCommand "wrap-kubectl" { buildInputs = [ makeWrapper ]; } ''
|
||||
mkdir -p $out/bin
|
||||
makeWrapper ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl --set KUBECONFIG "/etc/kubernetes/cluster-admin.kubeconfig"
|
||||
'';
|
||||
in makeTest {
|
||||
inherit name;
|
||||
|
||||
@ -27,6 +30,7 @@ let
|
||||
{ config, pkgs, lib, nodes, ... }:
|
||||
mkMerge [
|
||||
{
|
||||
boot.postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
|
||||
virtualisation.memorySize = mkDefault 1536;
|
||||
virtualisation.diskSize = mkDefault 4096;
|
||||
networking = {
|
||||
@ -45,34 +49,25 @@ let
|
||||
};
|
||||
};
|
||||
programs.bash.enableCompletion = true;
|
||||
environment.variables = {
|
||||
ETCDCTL_CERT_FILE = "${certs.worker}/etcd-client.pem";
|
||||
ETCDCTL_KEY_FILE = "${certs.worker}/etcd-client-key.pem";
|
||||
ETCDCTL_CA_FILE = "${certs.worker}/ca.pem";
|
||||
ETCDCTL_PEERS = "https://etcd.${domain}:2379";
|
||||
};
|
||||
environment.systemPackages = [ kubectl ];
|
||||
services.flannel.iface = "eth1";
|
||||
services.kubernetes.apiserver.advertiseAddress = master.ip;
|
||||
services.kubernetes = {
|
||||
addons.dashboard.enable = true;
|
||||
|
||||
easyCerts = true;
|
||||
inherit (machine) roles;
|
||||
apiserver = {
|
||||
securePort = 443;
|
||||
advertiseAddress = master.ip;
|
||||
};
|
||||
masterAddress = "${masterName}.${config.networking.domain}";
|
||||
};
|
||||
}
|
||||
(optionalAttrs (any (role: role == "master") machine.roles) {
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
2379 2380 # etcd
|
||||
443 # kubernetes apiserver
|
||||
];
|
||||
services.etcd = {
|
||||
enable = true;
|
||||
certFile = "${certs.master}/etcd.pem";
|
||||
keyFile = "${certs.master}/etcd-key.pem";
|
||||
trustedCaFile = "${certs.master}/ca.pem";
|
||||
peerClientCertAuth = true;
|
||||
listenClientUrls = ["https://0.0.0.0:2379"];
|
||||
listenPeerUrls = ["https://0.0.0.0:2380"];
|
||||
advertiseClientUrls = ["https://etcd.${config.networking.domain}:2379"];
|
||||
initialCluster = ["${masterName}=https://etcd.${config.networking.domain}:2380"];
|
||||
initialAdvertisePeerUrls = ["https://etcd.${config.networking.domain}:2380"];
|
||||
};
|
||||
})
|
||||
(import ./kubernetes-common.nix { inherit (machine) roles; inherit pkgs config certs; })
|
||||
(optionalAttrs (machine ? "extraConfiguration") (machine.extraConfiguration { inherit config pkgs lib nodes; }))
|
||||
(optionalAttrs (extraConfiguration != null) (extraConfiguration { inherit config pkgs lib nodes; }))
|
||||
]
|
||||
|
@ -1,219 +0,0 @@
|
||||
{
|
||||
pkgs ? import <nixpkgs> {},
|
||||
externalDomain ? "myawesomecluster.cluster.yourdomain.net",
|
||||
serviceClusterIp ? "10.0.0.1",
|
||||
kubelets,
|
||||
...
|
||||
}:
|
||||
let
|
||||
runWithCFSSL = name: cmd:
|
||||
let secrets = pkgs.runCommand "${name}-cfss.json" {
|
||||
buildInputs = [ pkgs.cfssl pkgs.jq ];
|
||||
outputs = [ "out" "cert" "key" "csr" ];
|
||||
}
|
||||
''
|
||||
(
|
||||
echo "${cmd}"
|
||||
cfssl ${cmd} > tmp
|
||||
cat tmp | jq -r .key > $key
|
||||
cat tmp | jq -r .cert > $cert
|
||||
cat tmp | jq -r .csr > $csr
|
||||
|
||||
touch $out
|
||||
) 2>&1 | fold -w 80 -s
|
||||
'';
|
||||
in {
|
||||
key = secrets.key;
|
||||
cert = secrets.cert;
|
||||
csr = secrets.csr;
|
||||
};
|
||||
|
||||
writeCFSSL = content:
|
||||
pkgs.runCommand content.name {
|
||||
buildInputs = [ pkgs.cfssl pkgs.jq ];
|
||||
} ''
|
||||
mkdir -p $out
|
||||
cd $out
|
||||
|
||||
json=${pkgs.lib.escapeShellArg (builtins.toJSON content)}
|
||||
|
||||
# for a given $field in the $json, treat the associated value as a
|
||||
# file path and substitute the contents thereof into the $json
|
||||
# object.
|
||||
expandFileField() {
|
||||
local field=$1
|
||||
if jq -e --arg field "$field" 'has($field)'; then
|
||||
local path="$(echo "$json" | jq -r ".$field")"
|
||||
json="$(echo "$json" | jq --arg val "$(cat "$path")" ".$field = \$val")"
|
||||
fi
|
||||
}
|
||||
|
||||
expandFileField key
|
||||
expandFileField ca
|
||||
expandFileField cert
|
||||
|
||||
echo "$json" | cfssljson -bare ${content.name}
|
||||
'';
|
||||
|
||||
noCSR = content: pkgs.lib.filterAttrs (n: v: n != "csr") content;
|
||||
noKey = content: pkgs.lib.filterAttrs (n: v: n != "key") content;
|
||||
|
||||
writeFile = content:
|
||||
if pkgs.lib.isDerivation content
|
||||
then content
|
||||
else pkgs.writeText "content" (builtins.toJSON content);
|
||||
|
||||
createServingCertKey = { ca, cn, hosts? [], size ? 2048, name ? cn }:
|
||||
noCSR (
|
||||
(runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=server -config=${writeFile ca.config} ${writeFile {
|
||||
CN = cn;
|
||||
hosts = hosts;
|
||||
key = { algo = "rsa"; inherit size; };
|
||||
}}") // { inherit name; }
|
||||
);
|
||||
|
||||
createClientCertKey = { ca, cn, groups ? [], size ? 2048, name ? cn }:
|
||||
noCSR (
|
||||
(runWithCFSSL name "gencert -ca=${writeFile ca.cert} -ca-key=${writeFile ca.key} -profile=client -config=${writeFile ca.config} ${writeFile {
|
||||
CN = cn;
|
||||
names = map (group: {O = group;}) groups;
|
||||
hosts = [""];
|
||||
key = { algo = "rsa"; inherit size; };
|
||||
}}") // { inherit name; }
|
||||
);
|
||||
|
||||
createSigningCertKey = { C ? "xx", ST ? "x", L ? "x", O ? "x", OU ? "x", CN ? "ca", emailAddress ? "x", expiry ? "43800h", size ? 2048, name ? CN }:
|
||||
(noCSR (runWithCFSSL CN "genkey -initca ${writeFile {
|
||||
key = { algo = "rsa"; inherit size; };
|
||||
names = [{ inherit C ST L O OU CN emailAddress; }];
|
||||
}}")) // {
|
||||
inherit name;
|
||||
config.signing = {
|
||||
default.expiry = expiry;
|
||||
profiles = {
|
||||
server = {
|
||||
inherit expiry;
|
||||
usages = [
|
||||
"signing"
|
||||
"key encipherment"
|
||||
"server auth"
|
||||
];
|
||||
};
|
||||
client = {
|
||||
inherit expiry;
|
||||
usages = [
|
||||
"signing"
|
||||
"key encipherment"
|
||||
"client auth"
|
||||
];
|
||||
};
|
||||
peer = {
|
||||
inherit expiry;
|
||||
usages = [
|
||||
"signing"
|
||||
"key encipherment"
|
||||
"server auth"
|
||||
"client auth"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
ca = createSigningCertKey {};
|
||||
|
||||
kube-apiserver = createServingCertKey {
|
||||
inherit ca;
|
||||
cn = "kube-apiserver";
|
||||
hosts = ["kubernetes.default" "kubernetes.default.svc" "localhost" "api.${externalDomain}" serviceClusterIp];
|
||||
};
|
||||
|
||||
kubelet = createServingCertKey {
|
||||
inherit ca;
|
||||
cn = "kubelet";
|
||||
hosts = ["*.${externalDomain}"];
|
||||
};
|
||||
|
||||
service-accounts = createServingCertKey {
|
||||
inherit ca;
|
||||
cn = "kube-service-accounts";
|
||||
};
|
||||
|
||||
etcd = createServingCertKey {
|
||||
inherit ca;
|
||||
cn = "etcd";
|
||||
hosts = ["etcd.${externalDomain}"];
|
||||
};
|
||||
|
||||
etcd-client = createClientCertKey {
|
||||
inherit ca;
|
||||
cn = "etcd-client";
|
||||
};
|
||||
|
||||
kubelet-client = createClientCertKey {
|
||||
inherit ca;
|
||||
cn = "kubelet-client";
|
||||
groups = ["system:masters"];
|
||||
};
|
||||
|
||||
apiserver-client = {
|
||||
kubelet = hostname: createClientCertKey {
|
||||
inherit ca;
|
||||
name = "apiserver-client-kubelet-${hostname}";
|
||||
cn = "system:node:${hostname}.${externalDomain}";
|
||||
groups = ["system:nodes"];
|
||||
};
|
||||
|
||||
kube-proxy = createClientCertKey {
|
||||
inherit ca;
|
||||
name = "apiserver-client-kube-proxy";
|
||||
cn = "system:kube-proxy";
|
||||
groups = ["system:kube-proxy" "system:nodes"];
|
||||
};
|
||||
|
||||
kube-controller-manager = createClientCertKey {
|
||||
inherit ca;
|
||||
name = "apiserver-client-kube-controller-manager";
|
||||
cn = "system:kube-controller-manager";
|
||||
groups = ["system:masters"];
|
||||
};
|
||||
|
||||
kube-scheduler = createClientCertKey {
|
||||
inherit ca;
|
||||
name = "apiserver-client-kube-scheduler";
|
||||
cn = "system:kube-scheduler";
|
||||
groups = ["system:kube-scheduler"];
|
||||
};
|
||||
|
||||
admin = createClientCertKey {
|
||||
inherit ca;
|
||||
cn = "admin";
|
||||
groups = ["system:masters"];
|
||||
};
|
||||
};
|
||||
in {
|
||||
master = pkgs.buildEnv {
|
||||
name = "master-keys";
|
||||
paths = [
|
||||
(writeCFSSL (noKey ca))
|
||||
(writeCFSSL kube-apiserver)
|
||||
(writeCFSSL kubelet-client)
|
||||
(writeCFSSL apiserver-client.kube-controller-manager)
|
||||
(writeCFSSL apiserver-client.kube-scheduler)
|
||||
(writeCFSSL service-accounts)
|
||||
(writeCFSSL etcd)
|
||||
];
|
||||
};
|
||||
|
||||
worker = pkgs.buildEnv {
|
||||
name = "worker-keys";
|
||||
paths = [
|
||||
(writeCFSSL (noKey ca))
|
||||
(writeCFSSL kubelet)
|
||||
(writeCFSSL apiserver-client.kube-proxy)
|
||||
(writeCFSSL etcd-client)
|
||||
] ++ map (hostname: writeCFSSL (apiserver-client.kubelet hostname)) kubelets;
|
||||
};
|
||||
|
||||
admin = writeCFSSL apiserver-client.admin;
|
||||
}
|
@ -71,17 +71,17 @@ let
|
||||
|
||||
base = {
|
||||
name = "dns";
|
||||
inherit domain certs extraConfiguration;
|
||||
inherit domain extraConfiguration;
|
||||
};
|
||||
|
||||
singleNodeTest = {
|
||||
test = ''
|
||||
# prepare machine1 for test
|
||||
$machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
|
||||
$machine1->execute("docker load < ${redisImage}");
|
||||
$machine1->waitUntilSucceeds("docker load < ${redisImage}");
|
||||
$machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
|
||||
$machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
|
||||
$machine1->execute("docker load < ${probeImage}");
|
||||
$machine1->waitUntilSucceeds("docker load < ${probeImage}");
|
||||
$machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
|
||||
|
||||
# check if pods are running
|
||||
@ -99,13 +99,16 @@ let
|
||||
|
||||
multiNodeTest = {
|
||||
test = ''
|
||||
# Node token exchange
|
||||
$machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
|
||||
$machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
|
||||
|
||||
# prepare machines for test
|
||||
$machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
|
||||
$machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");
|
||||
$machine2->execute("docker load < ${redisImage}");
|
||||
$machine2->waitUntilSucceeds("docker load < ${redisImage}");
|
||||
$machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
|
||||
$machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
|
||||
$machine2->execute("docker load < ${probeImage}");
|
||||
$machine2->waitUntilSucceeds("docker load < ${probeImage}");
|
||||
$machine1->waitUntilSucceeds("kubectl create -f ${probePod}");
|
||||
|
||||
# check if pods are running
|
||||
|
@ -1,57 +0,0 @@
|
||||
{ roles, config, pkgs, certs }:
|
||||
with pkgs.lib;
|
||||
let
|
||||
base = {
|
||||
inherit roles;
|
||||
flannel.enable = true;
|
||||
addons.dashboard.enable = true;
|
||||
|
||||
caFile = "${certs.master}/ca.pem";
|
||||
apiserver = {
|
||||
tlsCertFile = "${certs.master}/kube-apiserver.pem";
|
||||
tlsKeyFile = "${certs.master}/kube-apiserver-key.pem";
|
||||
kubeletClientCertFile = "${certs.master}/kubelet-client.pem";
|
||||
kubeletClientKeyFile = "${certs.master}/kubelet-client-key.pem";
|
||||
serviceAccountKeyFile = "${certs.master}/kube-service-accounts.pem";
|
||||
};
|
||||
etcd = {
|
||||
servers = ["https://etcd.${config.networking.domain}:2379"];
|
||||
certFile = "${certs.worker}/etcd-client.pem";
|
||||
keyFile = "${certs.worker}/etcd-client-key.pem";
|
||||
};
|
||||
kubeconfig = {
|
||||
server = "https://api.${config.networking.domain}";
|
||||
};
|
||||
kubelet = {
|
||||
tlsCertFile = "${certs.worker}/kubelet.pem";
|
||||
tlsKeyFile = "${certs.worker}/kubelet-key.pem";
|
||||
hostname = "${config.networking.hostName}.${config.networking.domain}";
|
||||
kubeconfig = {
|
||||
certFile = "${certs.worker}/apiserver-client-kubelet-${config.networking.hostName}.pem";
|
||||
keyFile = "${certs.worker}/apiserver-client-kubelet-${config.networking.hostName}-key.pem";
|
||||
};
|
||||
};
|
||||
controllerManager = {
|
||||
serviceAccountKeyFile = "${certs.master}/kube-service-accounts-key.pem";
|
||||
kubeconfig = {
|
||||
certFile = "${certs.master}/apiserver-client-kube-controller-manager.pem";
|
||||
keyFile = "${certs.master}/apiserver-client-kube-controller-manager-key.pem";
|
||||
};
|
||||
};
|
||||
scheduler = {
|
||||
kubeconfig = {
|
||||
certFile = "${certs.master}/apiserver-client-kube-scheduler.pem";
|
||||
keyFile = "${certs.master}/apiserver-client-kube-scheduler-key.pem";
|
||||
};
|
||||
};
|
||||
proxy = {
|
||||
kubeconfig = {
|
||||
certFile = "${certs.worker}/apiserver-client-kube-proxy.pem";
|
||||
keyFile = "${certs.worker}//apiserver-client-kube-proxy-key.pem";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
in {
|
||||
services.kubernetes = base;
|
||||
}
|
@ -96,7 +96,7 @@ let
|
||||
test = ''
|
||||
$machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
|
||||
|
||||
$machine1->execute("docker load < ${kubectlImage}");
|
||||
$machine1->waitUntilSucceeds("docker load < ${kubectlImage}");
|
||||
|
||||
$machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
|
||||
$machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
|
||||
@ -105,7 +105,7 @@ let
|
||||
|
||||
$machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
|
||||
|
||||
$machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
|
||||
$machine1->waitUntilSucceeds("kubectl exec -ti kubectl -- kubectl get pods");
|
||||
$machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
|
||||
$machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
|
||||
'';
|
||||
@ -113,10 +113,13 @@ let
|
||||
|
||||
multinode = base // {
|
||||
test = ''
|
||||
$machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
|
||||
# Node token exchange
|
||||
$machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
|
||||
$machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
|
||||
|
||||
$machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");
|
||||
|
||||
$machine2->execute("docker load < ${kubectlImage}");
|
||||
$machine2->waitUntilSucceeds("docker load < ${kubectlImage}");
|
||||
|
||||
$machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
|
||||
$machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
|
||||
@ -125,7 +128,7 @@ let
|
||||
|
||||
$machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
|
||||
|
||||
$machine1->succeed("kubectl exec -ti kubectl -- kubectl get pods");
|
||||
$machine1->waitUntilSucceeds("kubectl exec -ti kubectl -- kubectl get pods");
|
||||
$machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
|
||||
$machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
|
||||
'';
|
||||
|
@ -39,6 +39,8 @@ import ./make-test.nix ({pkgs, ... }: {
|
||||
$client->waitForUnit("cups.service");
|
||||
$client->sleep(10); # wait until cups is fully initialized
|
||||
$client->succeed("lpstat -r") =~ /scheduler is running/ or die;
|
||||
# check local encrypted connections work without error
|
||||
$client->succeed("lpstat -E -r") =~ /scheduler is running/ or die;
|
||||
# Test that UNIX socket is used for connections.
|
||||
$client->succeed("lpstat -H") =~ "/var/run/cups/cups.sock" or die;
|
||||
# Test that HTTP server is available too.
|
||||
|
@ -18,8 +18,17 @@ import ./make-test.nix ({ pkgs, ...} : {
testScript = {nodes, ...}: let
originalSystem = nodes.machine.config.system.build.toplevel;
otherSystem = nodes.other.config.system.build.toplevel;

# Ensures failures pass through using pipefail, otherwise failing to
# switch-to-configuration is hidden by the success of `tee`.
stderrRunner = pkgs.writeScript "stderr-runner" ''
#! ${pkgs.stdenv.shell}
set -e
set -o pipefail
exec env -i "$@" | tee /dev/stderr
'';
in ''
$machine->succeed("env -i ${originalSystem}/bin/switch-to-configuration test | tee /dev/stderr");
$machine->succeed("env -i ${otherSystem}/bin/switch-to-configuration test | tee /dev/stderr");
$machine->succeed("${stderrRunner} ${originalSystem}/bin/switch-to-configuration test");
$machine->succeed("${stderrRunner} ${otherSystem}/bin/switch-to-configuration test");
'';
})

@ -2,7 +2,7 @@

buildGoPackage rec {
name = "go-ethereum-${version}";
version = "1.8.21";
version = "1.8.22";
goPackagePath = "github.com/ethereum/go-ethereum";

# Fix for usb-related segmentation faults on darwin
@ -16,13 +16,13 @@ buildGoPackage rec {
owner = "ethereum";
repo = "go-ethereum";
rev = "v${version}";
sha256 = "1p4qfxa90l26s9q4hddyb93gdf7vb0sb46z9n26ijiqlxdq3z7v2";
sha256 = "0ag9qxrf7n0qkccaf6v4jaysivpxvsy5zfzar3mcm65223pqy375";
};

meta = with stdenv.lib; {
homepage = https://ethereum.github.io/go-ethereum/;
description = "Official golang implementation of the Ethereum protocol";
license = with licenses; [ lgpl3 gpl3 ];
maintainers = [ maintainers.adisbladis maintainers.lionello ];
maintainers = with maintainers; [ adisbladis asymmetric lionello ];
};
}

@ -1,6 +1,6 @@
{ stdenv, fetchFromGitHub, cmake, pkgconfig, git
, boost, miniupnpc, openssl, unbound, cppzmq
, zeromq, pcsclite, readline, libsodium
, boost, miniupnpc_2, openssl, unbound, cppzmq
, zeromq, pcsclite, readline, libsodium, rapidjson
, CoreData, IOKit, PCSC
}:

@ -11,19 +11,18 @@ with stdenv.lib;
stdenv.mkDerivation rec {
name = "wownero-${version}";

version = "0.4.0.0";
version = "0.5.0.0";
src = fetchFromGitHub {
owner = "wownero";
repo = "wownero";
fetchSubmodules = true;
rev = "v${version}";
sha256 = "1z5fpl4gwys4v8ffrymlzwrbnrbg73x553a9lxwny7ba8yg2k14p";
sha256 = "1dy9ycabva2z0896al1k2avl9xppkxvm1p2jwmg509ahjl98k3sy";
};

nativeBuildInputs = [ cmake pkgconfig git ];

buildInputs = [
boost miniupnpc openssl unbound
boost miniupnpc_2 openssl unbound rapidjson
cppzmq zeromq pcsclite readline libsodium
] ++ optionals stdenv.isDarwin [ IOKit CoreData PCSC ];

@ -3,13 +3,13 @@
, perlPackages
, makeWrapper }:

let version = "2.9.2";
let version = "2.9.3";
in
stdenv.mkDerivation {
name = "abcde-${version}";
src = fetchurl {
url = "https://abcde.einval.com/download/abcde-${version}.tar.gz";
sha256 = "13c5yvp87ckqgha160ym5rdr1a4divgvyqbjh0yb6ffclip6qd9l";
sha256 = "091ip2iwb6b67bhjsj05l0sxyq2whqjycbzqpkfbpm4dlyxx0v04";
};

# FIXME: This package does not support `distmp3', `eject', etc.

@ -1,4 +1,4 @@
{ stdenv, fetchFromGitHub, runCommand, ncurses, pkgconfig
{ config, stdenv, fetchFromGitHub, runCommand, ncurses, pkgconfig
, libiconv, CoreAudio

, alsaSupport ? stdenv.isLinux, alsaLib ? null
@ -7,7 +7,7 @@
, jackSupport ? false, libjack ? null
, samplerateSupport ? jackSupport, libsamplerate ? null
, ossSupport ? false, alsaOss ? null
, pulseaudioSupport ? false, libpulseaudio ? null
, pulseaudioSupport ? config.pulseaudio or false, libpulseaudio ? null

# TODO: add these
#, artsSupport

@ -1,4 +1,4 @@
{ stdenv, fetchurl, intltool, pkgconfig, jansson
{ config, stdenv, fetchurl, intltool, pkgconfig, jansson
# deadbeef can use either gtk2 or gtk3
, gtk2Support ? false, gtk2 ? null
, gtk3Support ? true, gtk3 ? null, gsettings-desktop-schemas ? null, wrapGAppsHook ? null
@ -20,7 +20,7 @@
, osdSupport ? true, dbus ? null
# output plugins
, alsaSupport ? true, alsaLib ? null
, pulseSupport ? true, libpulseaudio ? null
, pulseSupport ? config.pulseaudio or stdenv.isLinux, libpulseaudio ? null
# effect plugins
, resamplerSupport ? true, libsamplerate ? null
, overloadSupport ? true, zlib ? null

@ -1,6 +1,6 @@
{ stdenv, autoreconfHook, fetchFromGitHub, pkgconfig
{ config, stdenv, autoreconfHook, fetchFromGitHub, pkgconfig
, alsaLib, libtool, icu
, pulseaudioSupport ? true, libpulseaudio }:
, pulseaudioSupport ? config.pulseaudio or false, libpulseaudio }:

stdenv.mkDerivation rec {
name = "mimic-${version}";
@ -32,4 +32,3 @@ stdenv.mkDerivation rec {
maintainers = [ stdenv.lib.maintainers.noneucat ];
};
}

@ -2,11 +2,11 @@

pythonPackages.buildPythonApplication rec {
pname = "Mopidy-Iris";
version = "3.32.4";
version = "3.32.5";

src = pythonPackages.fetchPypi {
inherit pname version;
sha256 = "16b3dkxland4mjzjs2rz5gbqjapzzmap4d1mfhbrj2ch3plmdy7g";
sha256 = "0vs8x26zcakk6c31sc774h2lcdw3syp236vyymmx1jnfsh1jaqpn";
};

propagatedBuildInputs = [

@ -1,8 +1,8 @@
{ stdenv, fetchurl, boost, mpd_clientlib, ncurses, pkgconfig, readline
, libiconv, icu, curl
, outputsSupport ? false # outputs screen
, outputsSupport ? true # outputs screen
, visualizerSupport ? false, fftw ? null # visualizer screen
, clockSupport ? false # clock screen
, clockSupport ? true # clock screen
, taglibSupport ? true, taglib ? null # tag editor
}:

@ -1,5 +1,5 @@
{ stdenv, fetchurl, zlib, pkgconfig, mpg123, libogg, libvorbis, portaudio, libsndfile, flac
, usePulseAudio ? false, libpulseaudio }:
{ config, stdenv, fetchurl, zlib, pkgconfig, mpg123, libogg, libvorbis, portaudio, libsndfile, flac
, usePulseAudio ? config.pulseaudio or false, libpulseaudio }:

let
version = "0.4.1";

@ -1,19 +1,19 @@
{ stdenv, fetchFromGitHub, autoreconfHook, mpd_clientlib, ncurses, pcre, pkgconfig
, taglib }:
, taglib, curl }:

stdenv.mkDerivation rec {
version = "0.09.1";
version = "0.09.2";
name = "vimpc-${version}";

src = fetchFromGitHub {
owner = "boysetsfrog";
repo = "vimpc";
rev = "v${version}";
sha256 = "1495a702df4nja8mlxq98mkbic2zv88sjiinimf9qddrfb38jxk6";
sha256 = "0lswzkap2nm7v5h7ppb6a64cb35rajysd09nb204rxgrkij4m6nx";
};

nativeBuildInputs = [ autoreconfHook pkgconfig ];
buildInputs = [ mpd_clientlib ncurses pcre taglib ];
buildInputs = [ mpd_clientlib ncurses pcre taglib curl ];

postInstall = ''
mkdir -p $out/etc

@ -13,14 +13,14 @@ let
sha256Hash = "0fghqkc8pkb7waxclm0qq4nlnsvmv9d3fcj5nnvgbfkjyw032q42";
};
betaVersion = {
version = "3.4.0.12"; # "Android Studio 3.4 Beta 3"
build = "183.5256591";
sha256Hash = "1yab2sgabgk3wa3wrzv9z1dc2k7x0079v0mlwrp32jwx8r9byvcw";
version = "3.4.0.13"; # "Android Studio 3.4 Beta 4"
build = "183.5304277";
sha256Hash = "01x7xba0f5js213wgw0h1vw297vwz5q7dprnilcdydfjxwqsbr8f";
};
latestVersion = { # canary & dev
version = "3.5.0.2"; # "Android Studio 3.5 Canary 3"
build = "183.5256920";
sha256Hash = "09bd80ld21hq743xjacsq0nkxwl5xzr253p86n71n580yn4rgmlb";
version = "3.5.0.3"; # "Android Studio 3.5 Canary 4"
build = "183.5290690";
sha256Hash = "0d1cl78b25pksaj0scv3hxb14bjxk3591zbc0v7dykk1gf4pvxd1";
};
in rec {
# Old alias (TODO @primeos: Remove after 19.03 is branched off):

@ -3,14 +3,14 @@
let
versions = {
atom = {
version = "1.33.0";
sha256 = "0f6m6zwgz94m3q11ipyiliap3s5a3zlrg3ldjwkqnxjl6gwlxc2r";
version = "1.34.0";
sha256 = "16hrjymrc43izg7frcrk7cwjwwrclcxzcwb5iw2llzjc6iadzlkb";
};

atom-beta = {
version = "1.34.0";
version = "1.35.0";
beta = 0;
sha256 = "1xnrr4z55sj46hqr0il26sfs6s3knv60m340cw3rzzic271b3ifw";
sha256 = "0gm5k573dq1hhnyw3719f5k1c6rsz872mhzg8q53n89y0g2r5xmw";
};
};

@ -1,22 +1,20 @@
{ fetchurl, stdenv, coreutils, ncurses, lua }:
{ lib, fetchurl, stdenv, libiconv, ncurses, lua }:

stdenv.mkDerivation rec {
name = "dit-${version}";
version = "0.4";
version = "0.5";

src = fetchurl {
url = "https://hisham.hm/dit/releases/${version}/${name}.tar.gz";
sha256 = "0bwczbv7annbbpg7bgbsqd5kwypn81sza4v7v99fin94wwmcn784";
sha256 = "05vhr1gl3bb5fg49v84xhmjaqdjw6djampvylw10ydvbpnpvjvjc";
};

buildInputs = [ coreutils ncurses lua ];
buildInputs = [ ncurses lua ]
++ lib.optional stdenv.isDarwin libiconv;

# fix paths
prePatch = ''
patchShebangs tools/GenHeaders
'';

# needs GNU tail for tail -r
postPatch = ''
substituteInPlace Prototypes.h --replace 'tail' "$(type -P tail)"
'';

@ -55,6 +55,7 @@ stdenv.mkDerivation rec {
meta = {
homepage = http://www.eclipse.org/;
inherit description;
platforms = [ "x86_64-linux" ];
};

}

@ -11,7 +11,14 @@ assert stdenv ? glibc;
# http://download.eclipse.org/eclipse/downloads/ is the main place to
# find the downloads needed for new versions

rec {
let
platform_major = "4";
platform_minor = "10";
year = "2018";
month = "12";
timestamp = "201812060815";

in rec {

buildEclipse = import ./build-eclipse.nix {
inherit stdenv makeDesktopItem freetype fontconfig libX11 libXrender zlib
@ -21,133 +28,43 @@ rec {

### Eclipse CPP

eclipse-cpp = eclipse-cpp-47; # always point to latest

eclipse-cpp-47 = buildEclipse {
name = "eclipse-cpp-4.7.0";
eclipse-cpp = buildEclipse {
name = "eclipse-cpp-${platform_major}.${platform_minor}";
description = "Eclipse IDE for C/C++ Developers, Oxygen release";
src =
if stdenv.hostPlatform.system == "x86_64-linux" then
fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/technology/epp/downloads/release/oxygen/R/eclipse-cpp-oxygen-R-linux-gtk-x86_64.tar.gz;
sha512 = "813c791e739d7d0e2ab242a5bacadca135bbeee20ef97aa830353cd90f63fa6e9c89cfcc6aadf635c742befe035bd6e3f15103013f63c419f6144e86ebde3ed1";
}
else if stdenv.hostPlatform.system == "i686-linux" then
fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/technology/epp/downloads/release/oxygen/R/eclipse-cpp-oxygen-R-linux-gtk.tar.gz;
sha512 = "2b50f4a00306a89cda1aaaa606e62285cacbf93464a9dd3f3319dca3e2c578b802e685de6f78e5e617d269e21271188effe73d41f491a6de946e28795d82db8a";
}
else throw "Unsupported system: ${stdenv.hostPlatform.system}";
};

eclipse-cpp-37 = buildEclipse {
name = "eclipse-cpp-3.7";
description = "Eclipse IDE for C/C++ Developers";
src =
if stdenv.hostPlatform.system == "x86_64-linux" then
fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/technology/epp/downloads/release/indigo/R/eclipse-cpp-indigo-incubation-linux-gtk-x86_64.tar.gz;
sha256 = "14ppc9g9igzvj1pq7jl01vwhzb66nmzbl9wsdl1sf3xnwa9wnqk3";
}
else
fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/technology/epp/downloads/release/indigo/R/eclipse-cpp-indigo-incubation-linux-gtk.tar.gz;
sha256 = "1cvg1vgyazrkinwzlvlf0dpl197p4784752srqybqylyj5psdi3b";
url = "https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/technology/epp/downloads/release/${year}-${month}/R/eclipse-cpp-${year}-${month}-R-linux-gtk-x86_64.tar.gz";
sha512 = "1f5yr7cydz4iw8c14yn713d44f1g1wkiqiwmb4ikdfx4l70rc5xxsdxv9b4mhm89b02cqnxdh9p5hivkssmnzg0km3ab5bx9mvzgzx7";
};
};
eclipse_cpp_37 = eclipse-cpp-37; # backward compatibility, added 2016-01-30

### Eclipse Modeling

eclipse-modeling = eclipse-modeling-47; # always point to latest

eclipse-modeling-47 = buildEclipse {
name = "eclipse-modeling-4.7";
eclipse-modeling = buildEclipse {
name = "eclipse-modeling-${platform_major}.${platform_minor}";
description = "Eclipse Modeling Tools";
src =
if stdenv.hostPlatform.system == "x86_64-linux" then
fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/technology/epp/downloads/release/oxygen/R/eclipse-modeling-oxygen-R-linux-gtk-x86_64.tar.gz;
sha512 = "3b9a7ad4b5d6b77fbdd64e8d323e0adb6c2904763ad042b374b4d87cef8607408cb407e395870fc755d58c0c800e20818adcf456ebe193d76cede16c5fe12271";
}
else
fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/technology/epp/downloads/release/oxygen/R/eclipse-modeling-oxygen-R-linux-gtk.tar.gz;
sha512 = "b8597c1dec117e69c72a5e1a53e09b1f81a7c9de86ed7e71a9d007664603202df301745f186ded02b2e76410345863e80a2ba40867d6848e5375601289999206";
url = "https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/technology/epp/downloads/release/${year}-${month}/R/eclipse-modeling-${year}-${month}-R-linux-gtk-x86_64.tar.gz";
sha512 = "18psh1lgqg21dmndyc0yr6rz7piqyk861j9mlhgv9xaq8nz11fb6lil594sk64yyv0qbgi98vp03f1p06zvhgs37k9rjkfjmzl7n97k";
};
};

eclipse-modeling-36 = buildEclipse {
name = "eclipse-modeling-3.6.2";
description = "Eclipse Modeling Tools (includes Incubating components)";
src =
if stdenv.hostPlatform.system == "x86_64-linux" then
fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/technology/epp/downloads/release/helios/SR2/eclipse-modeling-helios-SR2-incubation-linux-gtk-x86_64.tar.gz;
sha1 = "e96f5f006298f68476f4a15a2be8589158d5cc61";
}
else
fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/technology/epp/downloads/release/helios/SR2/eclipse-modeling-helios-SR2-incubation-linux-gtk.tar.gz;
sha1 = "696377895bb26445de39d82a916b7e69edb1d939";
};
};
eclipse_modeling_36 = eclipse-modeling-36; # backward compatibility, added 2016-01-30

### Eclipse Platform

eclipse-platform = eclipse-platform-49; # always point to latest

eclipse-platform-47 = buildEclipse {
name = "eclipse-platform-4.7.3a";
description = "Eclipse Platform Oxygen";
sources = {
"x86_64-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.7.3a-201803300640/eclipse-platform-4.7.3a-linux-gtk-x86_64.tar.gz;
sha512 = "caf86cd6efaf66258c75434f1adf552587a7395d57dba4cfd20f86196308cf942866d931f4b352f9d39a6fbf14444fcd2167e6bfd146a28c96c229bb9988156a";
};
"i686-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.7.3a-201803300640/eclipse-platform-4.7.3a-linux-gtk.tar.gz;
sha512 = "c633da467774e4ab40f8d51d07b8e7d8403f26f23365c3c3ceeaeec1039b8c23c7508cee1f786bf52db64c7b84e0f91cb31a2848a74ac8271f8504934407bd5c";
};
};
};

eclipse-platform-48 = buildEclipse {
name = "eclipse-platform-4.8";
description = "Eclipse Platform Photon";
sources = {
"x86_64-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.8-201806110500/eclipse-platform-4.8-linux-gtk-x86_64.tar.gz;
sha512 = "ccce2b954938479e42ef3f9b78f74b24ae4cae7499546fa4f9a55ec1849e1acfd06315d4529b11474a8b3d1142c9409c581edfa571baaf1342ab062f02467af2";
};
"i686-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.8-201806110500/eclipse-platform-4.8-linux-gtk.tar.gz;
sha512 = "f5f407727e22b848931cf38f71b1a0c30a9778aa227c3df137dcceec2fba2ecc309cbfa8b4a660b814d2edb60f65110381497b4325781cab4d6402784139e32b";
};
};
};

eclipse-platform-49 = buildEclipse {
name = "eclipse-platform-4.9";
description = "Eclipse Platform 2018-09";
sources = {
"x86_64-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.9-201809060745/eclipse-platform-4.9-linux-gtk-x86_64.tar.gz;
sha512 = "875714bb411145c917fccedf2f7c4fd2757640b2debf4a18f775604233abd6f0da893b350cc03da44413d7ec6fae3f773ef08634e632058e4b705e6cda2893eb";
};
"i686-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.9-201809060745/eclipse-platform-4.9-linux-gtk.tar.gz;
sha512 = "758bc0de30fa5c4b76b343ea0325611d87b6928ef5002244f2f1ba2a9fa937de89b2a94ce2c8d33d79344fd574d6e8a72c5d127fe416d785f48600e9e85fce86";
};
eclipse-platform = buildEclipse {
name = "eclipse-platform-${platform_major}.${platform_minor}";
description = "Eclipse Platform ${year}-${month}";
src =
fetchurl {
url = "https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops${platform_major}/R-${platform_major}.${platform_minor}-${timestamp}/eclipse-platform-${platform_major}.${platform_minor}-linux-gtk-x86_64.tar.gz";
sha512 = "2zdvbjk05a00lbcad9v30rcr93j03d2pycdhpwrvrakr8z4yrxs6svamq9s294ry1w3lw04pgsnqklw6zjx6iil1kp51f374lkfpxn7";
};
};

### Eclipse Scala SDK

eclipse-scala-sdk = eclipse-scala-sdk-441; # always point to latest

eclipse-scala-sdk-441 = buildEclipse {
eclipse-scala-sdk = buildEclipse {
name = "eclipse-scala-sdk-4.4.1";
description = "Eclipse IDE for Scala Developers";
src =
@ -165,88 +82,26 @@ rec {

### Eclipse SDK

eclipse-sdk = eclipse-sdk-49; # always point to latest

eclipse-sdk-47 = buildEclipse {
name = "eclipse-sdk-4.7.3a";
description = "Eclipse Oxygen Classic";
sources = {
"x86_64-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.7.3a-201803300640/eclipse-SDK-4.7.3a-linux-gtk-x86_64.tar.gz;
sha512 = "d77e42aca16d26526cef32e363d038258bb8a4616d9dbe6e76dd3656dc2217369436390a82555bde4566bbbdb631813bbaca08602f7bb885cb30e8a26a14873f";
};
"i686-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.7.3a-201803300640/eclipse-SDK-4.7.3a-linux-gtk.tar.gz;
sha512 = "b0b936fd4142ae86ec5c30535cd5e4caf6fe313d814ae5b16f300635e4163a79b748b1eee11792a135114f2265678a74821ec80c2bfd69872769b6d0ccbcde3a";
eclipse-sdk = buildEclipse {
name = "eclipse-sdk-${platform_major}.${platform_minor}";
description = "Eclipse ${year}-${month} Classic";
src =
fetchurl {
url = "https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops${platform_major}/R-${platform_major}.${platform_minor}-${timestamp}/eclipse-SDK-${platform_major}.${platform_minor}-linux-gtk-x86_64.tar.gz";
sha512 = "1kq14vhzcngfhl8kjs722rshny81gxv6wcgln46x7lnpg2274sb9dprhns62fpq97l0355cmg8mnny6fsd1nqibrw09xq932v86cfm8";
};
};
};

eclipse-sdk-48 = buildEclipse {
name = "eclipse-sdk-4.8";
description = "Eclipse Photon Classic";
sources = {
"x86_64-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.8-201806110500/eclipse-SDK-4.8-linux-gtk-x86_64.tar.gz;
sha512 = "357ea9e7f426c68ced693f1c7b76eae23f9e3c7893de1f12d17994ec17b447896b5daa7292d5fbf6d9c4e5b7fd637ca5b2a6ba8ce40a2a7c2fe06f2124d31b75";
};
"i686-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.8-201806110500/eclipse-SDK-4.8-linux-gtk.tar.gz;
sha512 = "c7cae7baa3978d48477090bb9941e85b4c7484021ece9c5c77a7e859e57e5c1f13556262f92b561cfb11f828b934bad7a6018be7b8fd9454e3991e8d5cae9917";
};
};
};

eclipse-sdk-49 = buildEclipse {
name = "eclipse-sdk-4.9";
description = "Eclipse 2018-09 Classic";
sources = {
"x86_64-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.9-201809060745/eclipse-SDK-4.9-linux-gtk-x86_64.tar.gz;
sha512 = "5e74a0411f56b3973b7c6d8c3727392297d55ad458a814b4cc3f2f6a57dbeebc64852d1a6a958db5c3b08c620093bfb5bcc0d2c6a400f5594b82c2ef5d5fa9fb";
};
"i686-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.9-201809060745/eclipse-SDK-4.9-linux-gtk.tar.gz;
sha512 = "b1861bd99c8e43f1d04247226584246aa7844af5e2da820fe98a51018dbe8ff4c25dbb9fa655f56e103f95c0696f40a65dcce13430c63aa080f786738e70eb8b";
};
};
};

eclipse-sdk-37 = buildEclipse {
name = "eclipse-sdk-3.7";
description = "Eclipse Classic";
sources = {
"x86_64-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops/R-3.7.2-201202080800/eclipse-SDK-3.7.2-linux-gtk-x86_64.tar.gz;
sha256 = "0nf4nv7awhp1k8b1hjb7chpjyjrqnyszsjbc4dlk9phpjv3j4wg5";
};
"i686-linux" = fetchurl {
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops/R-3.7.2-201202080800/eclipse-SDK-3.7.2-linux-gtk.tar.gz;
sha256 = "1isn7i45l9kyn2yx6vm88jl1gnxph8ynank0aaa218cg8kdygk7j";
};
};
};
eclipse_sdk_37 = eclipse-sdk-37; # backward compatibility, added 2016-01-30

### Eclipse Java

eclipse-java = eclipse-java-49;

eclipse-java-49 = buildEclipse {
name = "eclipse-java-4.9.0";
eclipse-java = buildEclipse {
name = "eclipse-java-${platform_major}.${platform_minor}";
description = "Eclipse IDE for Java Developers";
src =
if stdenv.system == "x86_64-linux" then
fetchurl {
url = http://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/technology/epp/downloads/release/2018-09/R/eclipse-java-2018-09-linux-gtk-x86_64.tar.gz;
sha512 = "9dac5d040cdabf779de3996de87290e352130c7e860c1d0a98772f41da828ad45f90748b68e0a8a4f8d1ebbbbe5fdfe6401b7d871b93af34103d4a81a041c6a5";
}
else if stdenv.system == "i686-linux" then
fetchurl {
url = http://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/technology/epp/downloads/release/2018-09/R/eclipse-java-2018-09-linux-gtk.tar.gz;
sha512 = "24208e95b972e848d6b65ed8108d9e81584cf051397f2f43fb6269f5a625b8d7552ad77c7980a1a5653c87f06776e2926fd85607aae44e44657b4f6cc9b3e2e3";
}
else throw "Unsupported system: ${stdenv.system}";
url = "https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/technology/epp/downloads/release/${year}-${month}/R/eclipse-java-${year}-${month}-R-linux-gtk-x86_64.tar.gz";
sha512 = "2xd5q7kg3aly7jnz2fijn06ljmnnd7ggwwzmndfhqwfzxpyjg1lnlln76pcd6chx7gnwdrl7khg0fs566ddabfjv17c46dj5fpw9y6j";
};
};

### Environments

@ -254,12 +254,12 @@ rec {

cdt = buildEclipseUpdateSite rec {
name = "cdt-${version}";
version = "9.0.1";
version = "9.6.0";

src = fetchzip {
stripRoot = false;
url = "https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/tools/cdt/releases/9.0/${name}.zip";
sha256 = "0vdx0j9ci533wnk7y17qjvjyqx38hlrdw67z6pi05vfv3r6ys39x";
url = "https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/tools/cdt/releases/9.6/${name}/${name}.zip";
sha256 = "08rk3b1va57jcy4s161fx0xmb8dn47akhhxd2f28hspq6i2jqicm";
};

meta = with stdenv.lib; {
@ -346,6 +346,33 @@ rec {
};
};

drools = buildEclipseUpdateSite rec {
name = "drools-${version}";
version = "7.17.0.Final";

src = fetchzip {
url = "https://download.jboss.org/drools/release/${version}/droolsjbpm-tools-distribution-${version}.zip";
sha512 = "2qzc1iszqfrfnw8xip78n3kp6hlwrvrr708vlmdk7nv525xhs0ssjaxriqdhcr0s6jripmmazxivv3763rnk2bfkh31hmbnckpx4r3m";
extraPostFetch = ''
# work around https://github.com/NixOS/nixpkgs/issues/38649
chmod go-w $out;

# update site is a couple levels deep, alongside some other irrelevant stuff
cd $out;
find . -type f -not -path ./binaries/org.drools.updatesite/\* -exec rm {} \;
rmdir sources;
mv binaries/org.drools.updatesite/* .;
rmdir binaries/org.drools.updatesite binaries;
'';
};

meta = with stdenv.lib; {
homepage = https://www.drools.org/;
description = "Drools is a Business Rules Management System (BRMS) solution";
license = licenses.asl20;
};
};

eclemma = buildEclipseUpdateSite rec {
name = "eclemma-${version}";
version = "2.3.2.201409141915";
@ -470,12 +497,12 @@ rec {

jdt = buildEclipseUpdateSite rec {
name = "jdt-${version}";
version = "4.9";
version = "4.10";

src = fetchzip {
stripRoot = false;
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.9-201809060745/org.eclipse.jdt-4.9.zip;
sha256 = "144rqrw0crxd2v862dqxm2p5y60n4pbzdryv709xnhcw54rycm7n";
url = https://www.eclipse.org/downloads/download.php?r=1&nf=1&file=/eclipse/downloads/drops4/R-4.10-201812060815/org.eclipse.jdt-4.10.zip;
sha256 = "1h11w3zd6xy5w4sk6xnyb2a27wxwhp83qfx67ji7bzdrwbvljqkz";
};

meta = with stdenv.lib; {

@ -1,4 +1,4 @@
{ stdenv, lib, fetchurl, ncurses, xlibsWrapper, libXaw, libXpm, Xaw3d
{ stdenv, lib, fetchurl, ncurses, xlibsWrapper, libXaw, libXpm, Xaw3d, libXcursor
, pkgconfig, gettext, libXft, dbus, libpng, libjpeg, libungif
, libtiff, librsvg, gconf, libxml2, imagemagick, gnutls, libselinux
, alsaLib, cairo, acl, gpm, cf-private, AppKit, GSS, ImageIO, m17n_lib, libotf
@ -118,6 +118,17 @@ stdenv.mkDerivation rec {
mv nextstep/Emacs.app $out/Applications
'';

postFixup =
let libPath = lib.makeLibraryPath [
libXcursor
];
in lib.optionalString (withX && toolkit == "lucid") ''
patchelf --set-rpath \
"$(patchelf --print-rpath "$out/bin/emacs"):${libPath}" \
"$out/bin/emacs"
patchelf --add-needed "libXcursor.so.1" "$out/bin/emacs"
'';

meta = with stdenv.lib; {
description = "The extensible, customizable GNU text editor";
homepage = https://www.gnu.org/software/emacs/;

@ -53,6 +53,10 @@ stdenv.mkDerivation rec {

# use newer emacs icon
cp nextstep/Cocoa/Emacs.base/Contents/Resources/Emacs.icns mac/Emacs.app/Contents/Resources/Emacs.icns

# Fix sandbox impurities.
substituteInPlace Makefile.in --replace '/bin/pwd' 'pwd'
substituteInPlace lib-src/Makefile.in --replace '/bin/pwd' 'pwd'
'';

configureFlags = [

@ -3,7 +3,7 @@
with stdenv.lib;

stdenv.mkDerivation rec {
name = "kakoune-unstable-${version}";
name = "kakoune-${version}";
version = "2019.01.20";
src = fetchFromGitHub {
repo = "kakoune";

29
pkgs/applications/editors/kdevelop5/kdev-php.nix
Normal file
@ -0,0 +1,29 @@
{ stdenv, lib, fetchurl, cmake, extra-cmake-modules, threadweaver, ktexteditor, kdevelop-unwrapped, kdevelop-pg-qt }:

let
pname = "kdev-php";
version = "5.3.1";
in
stdenv.mkDerivation rec {
name = "${pname}-${version}";

src = fetchurl {
url = "https://github.com/KDE/${pname}/archive/v${version}.tar.gz";
sha256 = "1xiz4v6w30dsa7l4nk3jw3hxpkx71b0yaaj2k8s7xzgjif824bgl";
};

cmakeFlags = [
"-DBUILD_TESTING=OFF"
];

nativeBuildInputs = [ cmake extra-cmake-modules ];
buildInputs = [ kdevelop-pg-qt threadweaver ktexteditor kdevelop-unwrapped ];

meta = with lib; {
maintainers = [ maintainers.aanderse ];
platforms = platforms.linux;
description = "PHP support for KDevelop";
homepage = https://www.kdevelop.org;
license = [ licenses.gpl2 ];
};
}
30
pkgs/applications/editors/kdevelop5/kdev-python.nix
Normal file
@ -0,0 +1,30 @@
{ stdenv, lib, fetchurl, cmake, extra-cmake-modules, threadweaver, ktexteditor, kdevelop-unwrapped, python }:

let
pname = "kdev-python";
version = "5.3.1";
in
stdenv.mkDerivation rec {
name = "${pname}-${version}";

src = fetchurl {
url = "https://github.com/KDE/${pname}/archive/v${version}.tar.gz";
sha256 = "11hf8n6vrlaz31c0p3xbnf0df2q5j6ykgc9ip0l5g33kadwn5b9j";
};

cmakeFlags = [
"-DBUILD_TESTING=OFF"
"-DPYTHON_EXECUTABLE=${python}/bin/python"
];

nativeBuildInputs = [ cmake extra-cmake-modules ];
buildInputs = [ threadweaver ktexteditor kdevelop-unwrapped ];

meta = with lib; {
maintainers = [ maintainers.aanderse ];
platforms = platforms.linux;
description = "Python support for KDevelop";
homepage = https://www.kdevelop.org;
license = [ licenses.gpl2 ];
};
}
@ -43,6 +43,13 @@ mkDerivation rec {
"-DCLANG_BUILTIN_DIR=${llvmPackages.clang-unwrapped}/lib/clang/${(builtins.parseDrvName llvmPackages.clang.name).version}/include"
];

postPatch = ''
# FIXME: temporary until https://invent.kde.org/kde/kdevelop/merge_requests/8 is merged
substituteInPlace kdevplatform/language/backgroundparser/parsejob.cpp --replace \
'if (internalFilePath.startsWith(dataPath.canonicalPath() + QStringLiteral("/kdev"))) {' \
'if (internalFilePath.startsWith(dataPath.canonicalPath() + QStringLiteral("/kdev")) || localFile.startsWith(path + QStringLiteral("/kdev"))) {'
'';

postInstall = ''
# The kdevelop! script (shell environment) needs qdbus and kioclient5 in PATH.
wrapProgram "$out/bin/kdevelop!" \

7
pkgs/applications/editors/kdevelop5/wrapper.nix
Normal file
@ -0,0 +1,7 @@
{ symlinkJoin, kdevelop-unwrapped, plugins ? null }:

symlinkJoin {
name = "kdevelop-with-plugins";

paths = [ kdevelop-unwrapped ] ++ (if plugins != null then plugins else []);
}
@ -18,16 +18,16 @@ let
}.${system};

sha256 = {
"i686-linux" = "09mgvff27iljj9z7h0xxmr6152hcxh7qqxl3i7wdc55ra1rsjq1n";
"x86_64-linux" = "1gvlvg3cjsscx6khy5gxd4wnb069kska00qdfwcq4kn7x1z04xnz";
"x86_64-darwin" = "1mf9nyjnxgmzai7rfd1rkwk0wvil0ripg3mh8icg4mld2jjz8rsy";
"i686-linux" = "04kbx1cx40lsy9irxy1arp1rixzk49ldhg34w3llmfbx63a4hchf";
"x86_64-linux" = "1plvx0mjcbizl6iffib95p5224r9frf0mn6c5xp14p3qnrp32jhm";
"x86_64-darwin" = "14h9gs6jpxydgd1h16ybq3ifw5jc7k83yg22pw3sk6vhy7hx7pxr";
}.${system};

archive_fmt = if system == "x86_64-darwin" then "zip" else "tar.gz";
in
stdenv.mkDerivation rec {
name = "vscode-${version}";
version = "1.31.0";
version = "1.31.1";

src = fetchurl {
name = "VSCode_${version}_${plat}.${archive_fmt}";
@ -126,7 +126,7 @@ in
and code refactoring. It is also customizable, so users can change the
editor's theme, keyboard shortcuts, and preferences
'';
homepage = http://code.visualstudio.com/;
homepage = https://code.visualstudio.com/;
downloadPage = https://code.visualstudio.com/Updates;
license = licenses.unfree;
maintainers = with maintainers; [ eadwu ];

@ -1,7 +1,7 @@
{ stdenv, fetchFromGitHub, pkgconfig, libconfig,
gtkmm2, glibmm, libxml2, libsecret, curl, libzip,
librsvg, gst_all_1, autoreconfHook, makeWrapper,
useUnrar ? false, unrar
{ config, stdenv, fetchFromGitHub, pkgconfig, libconfig
, gtkmm2, glibmm, libxml2, libsecret, curl, libzip
, librsvg, gst_all_1, autoreconfHook, makeWrapper
, useUnrar ? config.ahoviewer.useUnrar or false, unrar
}:

assert useUnrar -> unrar != null;

@ -50,13 +50,13 @@

mkDerivation rec {
name = "digikam-${version}";
version = "5.9.0";
version = "6.0.0";

src = fetchFromGitHub {
owner = "KDE";
repo = "digikam";
rev = "v${version}";
sha256 = "09diw273h9i7rss89ba82yrfy6jb2njv3k0dknrrg7bb998vrw2d";
sha256 = "1ifvrn0bm7fp07d059rl4dy146qzdxafl36ipxg1fg00dkv95hh4";
};

nativeBuildInputs = [ cmake doxygen extra-cmake-modules kdoctools wrapGAppsHook ];

@ -6,11 +6,11 @@ with stdenv.lib;

stdenv.mkDerivation rec {
name = "feh-${version}";
version = "3.1.1";
version = "3.1.2";

src = fetchurl {
url = "https://feh.finalrewind.org/${name}.tar.bz2";
sha256 = "1sy8z6rv5sy1bhk3846hgfdy96wdi874yr2fnxfprks46qp29l31";
sha256 = "0qjhlrgr606gc9h96w9piyd13mx63jqfbxxnan41nrh76m8d0dka";
};

outputs = [ "out" "man" "doc" ];
@ -20,25 +20,22 @@ stdenv.mkDerivation rec {
buildInputs = [ xorg.libX11 xorg.libXinerama imlib2 libjpeg libpng curl libexif ];

makeFlags = [
"PREFIX=$(out)" "exif=1"
"PREFIX=${placeholder "out"}" "exif=1"
] ++ optional stdenv.isDarwin "verscmp=0";

postBuild = ''
pushd man
make
popd
'';

installTargets = [ "install" ];
postInstall = ''
wrapProgram "$out/bin/feh" --prefix PATH : "${libjpeg.bin}/bin" \
--add-flags '--theme=feh'
install -D -m 644 man/*.1 $out/share/man/man1
'';

checkInputs = [ perlPackages.perl perlPackages.TestCommand ];
preCheck = ''
export PERL5LIB="${perlPackages.TestCommand}/${perlPackages.perl.libPrefix}"
'';
postCheck = ''
unset PERL5LIB
'';

doCheck = true;

103
pkgs/applications/graphics/gscan2pdf/default.nix
Normal file
@ -0,0 +1,103 @@
{ stdenv, fetchurl, perlPackages, makeWrapper, wrapGAppsHook,
librsvg, sane-backends, sane-frontends,
imagemagick, libtiff, djvulibre, poppler_utils, ghostscript, unpaper,
xvfb_run, hicolor-icon-theme, liberation_ttf, file, pdftk }:

with stdenv.lib;

perlPackages.buildPerlPackage rec {
name = "gscan2pdf-${version}";
version = "2.3.0";

src = fetchurl {
url = "mirror://sourceforge/gscan2pdf/${version}/${name}.tar.xz";
sha256 = "0mcsmly0j9pmyzh6py8r6sfa30hc6gv300hqq3dxj4hv653vhkk9";
};

nativeBuildInputs = [ wrapGAppsHook ];

buildInputs =
[ librsvg sane-backends sane-frontends ] ++
(with perlPackages; [
Gtk3
Gtk3SimpleList
Cairo
CairoGObject
Glib
GlibObjectIntrospection
GooCanvas2
LocaleGettext
PDFAPI2
ImageSane
SetIntSpan
PerlMagick
ConfigGeneral
ListMoreUtils
HTMLParser
ProcProcessTable
Log4Perl
TryTiny
DataUUID
DateCalc
IOString
FilesysDf
SubOverride
]);

postPatch = let
fontSubstitute = "${liberation_ttf}/share/fonts/truetype/LiberationSans-Regular.ttf";
in ''
# Required for the program to properly load its SVG assets
substituteInPlace bin/gscan2pdf \
--replace "/usr/share" "$out/share"

# Substitute the non-free Helvetica font in the tests
sed -i 's|-pointsize|-font ${fontSubstitute} -pointsize|g' t/*.t
'';

postInstall = ''
# Remove impurity
find $out -type f -name "*.pod" -delete

# Add runtime dependencies
wrapProgram "$out/bin/gscan2pdf" \
--prefix PATH : "${imagemagick}/bin" \
--prefix PATH : "${libtiff}/bin" \
--prefix PATH : "${djvulibre}/bin" \
--prefix PATH : "${poppler_utils}/bin" \
--prefix PATH : "${ghostscript}/bin" \
--prefix PATH : "${unpaper}/bin"
'';

enableParallelBuilding = true;

installTargets = [ "install" ];

outputs = [ "out" "man" ];

checkInputs = [
xvfb_run
hicolor-icon-theme
imagemagick
libtiff
djvulibre
poppler_utils
ghostscript
file
pdftk
unpaper
];

checkPhase = ''
xvfb-run -s '-screen 0 800x600x24' \
make test
'';

meta = {
description = "A GUI to produce PDFs or DjVus from scanned documents";
homepage = http://gscan2pdf.sourceforge.net/;
license = licenses.gpl3;
maintainers = [ maintainers.pacien ];
};
}

@ -28,6 +28,9 @@ stdenv.mkDerivation rec {

qmakeFlags = [ "VERSION=${version}" ];

# src/lexer.l:36:10: fatal error: parser.hxx: No such file or directory
enableParallelBuilding = false; # true by default due to qmake

doCheck = false;

meta = {

@ -6,7 +6,7 @@ stdenv, fetchFromGitHub, cmake, makeWrapper

stdenv.mkDerivation rec {
name = "paraview-${version}";
version = "5.5.2";
version = "5.6.0";

# fetching from GitHub instead of taking an "official" source
# tarball because of missing submodules there
@ -14,7 +14,7 @@ stdenv.mkDerivation rec {
owner = "Kitware";
repo = "ParaView";
rev = "v${version}";
sha256 = "1jivph7lppnflmjsiirhgv0mnh8mxx41i1vzkk78ynn00rzacx3j";
sha256 = "1j13yfdgcv4yzfr449i4c8r4rs1c9zr6qd3igr4vv3ani8zixkzi";
fetchSubmodules = true;
};

@ -7,13 +7,13 @@

let
pname = "shotwell";
version = "0.30.1";
version = "0.30.2";
in stdenv.mkDerivation rec {
name = "${pname}-${version}";

src = fetchurl {
url = "mirror://gnome/sources/${pname}/${stdenv.lib.versions.majorMinor version}/${name}.tar.xz";
sha256 = "01hsmig06hjv34yf9y60hv2gml593xfkza4ilq4b22gr8l4v2qip";
sha256 = "0pam0si110vkc65kh59lrmgkv91f9zxmf1gpfm99ixjgw25rfi8r";
};

nativeBuildInputs = [

@ -3,7 +3,7 @@
exiv2, kactivities, karchive, kbookmarks, kconfig, kconfigwidgets,
kcoreaddons, kdbusaddons, kguiaddons, kdnssd, kiconthemes, ki18n, kio, khtml,
kdelibs4support, kpty, libmtp, libssh, openexr, ilmbase, openslp, phonon,
qtsvg, samba, solid
qtsvg, samba, solid, gperf
}:

mkDerivation {
@ -16,7 +16,7 @@ mkDerivation {
buildInputs = [
exiv2 kactivities karchive kbookmarks kconfig kconfigwidgets kcoreaddons
kdbusaddons kguiaddons kdnssd kiconthemes ki18n kio khtml kdelibs4support
kpty libmtp libssh openexr openslp phonon qtsvg samba solid
kpty libmtp libssh openexr openslp phonon qtsvg samba solid gperf
];
CXXFLAGS = [ "-I${ilmbase.dev}/include/OpenEXR" ];
}

@ -1,7 +1,7 @@
{ lib, stdenv, fetchurl
{ config, lib, stdenv, fetchurl
, enableAlsa ? true, alsaLib ? null
, enableLibao ? true, libao ? null
, enableLame ? false, lame ? null
, enableLame ? config.sox.enableLame or false, lame ? null
, enableLibmad ? true, libmad ? null
, enableLibogg ? true, libogg ? null, libvorbis ? null
, enableFLAC ? true, flac ? null

@ -1,10 +1,10 @@
{ stdenv, lib, fetchurl, boost, cmake, ffmpeg, gettext, glew
{ config, stdenv, lib, fetchurl, boost, cmake, ffmpeg, gettext, glew
, ilmbase, libXi, libX11, libXext, libXrender
, libjpeg, libpng, libsamplerate, libsndfile
, libtiff, libGLU_combined, openal, opencolorio, openexr, openimageio, openjpeg_1, pythonPackages
, zlib, fftw, opensubdiv, freetype, jemalloc, ocl-icd
, jackaudioSupport ? false, libjack2
, cudaSupport ? false, cudatoolkit
, cudaSupport ? config.cudaSupport or false, cudatoolkit
, colladaSupport ? true, opencollada
, enableNumpy ? false, makeWrapper
}:

@ -69,7 +69,7 @@ with python3.pkgs; buildPythonApplication rec {
homepage = https://github.com/jarun/Buku;
license = licenses.gpl3;
platforms = platforms.linux;
maintainers = with maintainers; [ infinisil ];
maintainers = with maintainers; [ matthiasbeyer infinisil ];
};
}

@ -31,7 +31,7 @@ stdenv.mkDerivation rec {
homepage = http://cgg.bzatek.net/;
description = "a simple static web photo gallery, designed to be clean and easily usable";
license = stdenv.lib.licenses.gpl2;
maintainers = with stdenv.lib.maintainers; [ ];
maintainers = [ stdenv.lib.maintainers.matthiasbeyer ];
platforms = with stdenv.lib.platforms; linux ++ darwin;
};
}

@ -30,7 +30,7 @@ stdenv.mkDerivation rec {
homepage = https://github.com/dpayne/cli-visualizer;
description = "CLI based audio visualizer";
license = stdenv.lib.licenses.mit;
maintainers = with stdenv.lib.maintainers; [ ];
maintainers = [ stdenv.lib.maintainers.matthiasbeyer ];
platforms = with stdenv.lib.platforms; linux;
};
}

@ -19,7 +19,7 @@ stdenv.mkDerivation rec {
homepage = http://ctodo.apakoh.dk/;
description = "A simple ncurses-based task list manager";
license = stdenv.lib.licenses.mit;
maintainers = with stdenv.lib.maintainers; [ ];
maintainers = [ stdenv.lib.maintainers.matthiasbeyer ];
platforms = stdenv.lib.platforms.linux;
};
}

@ -14,13 +14,13 @@ in

python3Packages.buildPythonApplication rec {
pname = "electrum";
version = "3.3.2";
version = "3.3.4";

src = fetchFromGitHub {
owner = "spesmilo";
repo = "electrum";
rev = version;
sha256 = "1jsn02azdydpq4plr2552s7ijyqgw6zqm2zx8skwsalgbwmhx12i";
sha256 = "0yxdpc602jnd14xz3px85ka0b6db98zwbgfi9a3vj8p1k3mmiwaj";
};

propagatedBuildInputs = with python3Packages; [
@ -53,9 +53,6 @@ python3Packages.buildPythonApplication rec {

preBuild = ''
sed -i 's,usr_share = .*,usr_share = "'$out'/share",g' setup.py
pyrcc5 icons.qrc -o electrum/gui/qt/icons_rc.py
# Recording the creation timestamps introduces indeterminism to the build
sed -i '/Created: .*/d' electrum/gui/qt/icons_rc.py
sed -i "s|name = 'libzbar.*'|name='${zbar}/lib/libzbar.so'|" electrum/qrscanner.py
substituteInPlace ./electrum/ecc_fast.py --replace libsecp256k1.so.0 ${secp256k1}/lib/libsecp256k1.so.0
'';

@ -4,16 +4,22 @@
with python3.pkgs;

buildPythonApplication rec {
version = "4.0.0a4";
name = "gcalcli-${version}";
pname = "gcalcli";
version = "4.0.3";

src = fetchFromGitHub {
owner = "insanum";
repo = "gcalcli";
repo = pname;
rev = "v${version}";
sha256 = "00giq5cdigidzv5bz4wgzi1yp6xlf2rdcy6ynmsc6bcf0cl5x64d";
sha256 = "15hpm7b09p5qnha0hpp0mgdl2pgsyq2sjcqihk3fsv7arngdbr5q";
};

postPatch = lib.optionalString stdenv.isLinux ''
substituteInPlace gcalcli/argparsers.py --replace \
"command = 'notify-send -u critical" \
"command = '${libnotify}/bin/notify-send -u critical"
'';

propagatedBuildInputs = [
dateutil gflags httplib2 parsedatetime six vobject
google_api_python_client oauth2client uritemplate

@ -38,7 +38,7 @@ buildPythonApplication rec {
homepage = https://github.com/donnemartin/haxor-news;
description = "Browse Hacker News like a haxor";
license = licenses.asl20;
maintainers = with maintainers; [ ];
maintainers = with maintainers; [ matthiasbeyer ];
};

}

@ -22,7 +22,7 @@ stdenv.mkDerivation rec {
homepage = https://github.com/LuRsT/hr;
description = "A horizontal bar for your terminal";
license = licenses.mit;
maintainers = [ ];
maintainers = [ maintainers.matthiasbeyer ];
platforms = platforms.unix;
};
}

@ -15,7 +15,7 @@ stdenv.mkDerivation rec {
homepage = https://github.com/dvorka/hstr;
description = "Shell history suggest box - easily view, navigate, search and use your command history";
license = stdenv.lib.licenses.asl20;
maintainers = with stdenv.lib.maintainers; [ ];
maintainers = [ stdenv.lib.maintainers.matthiasbeyer ];
platforms = with stdenv.lib.platforms; linux; # Cannot test others
};

Some files were not shown because too many files have changed in this diff.