# nixos/tests/kubernetes/rbac.nix
{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
with import ./base.nix { inherit system; };
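
# Tests Kubernetes RBAC: a "read-only" ServiceAccount is bound to a Role that
# only allows get/list/watch on pods; a kubectl pod running under that account
# must be able to list pods but must fail to create or delete them.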
let
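
  # ServiceAccount that the kubectl test pod runs under.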
  roServiceAccount = pkgs.writeText "ro-service-account.json" (builtins.toJSON {
    kind = "ServiceAccount";
    apiVersion = "v1";
    metadata = {
      name = "read-only";
      namespace = "default";
    };
  });
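
  # Binds the "pod-reader" Role below to the "read-only" ServiceAccount.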
  roRoleBinding = pkgs.writeText "ro-role-binding.json" (builtins.toJSON {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "RoleBinding";
    metadata = {
      name = "read-pods";
      namespace = "default";
    };
    roleRef = {
      apiGroup = "rbac.authorization.k8s.io";
      kind = "Role";
      name = "pod-reader";
    };
    subjects = [{
      kind = "ServiceAccount";
      name = "read-only";
      namespace = "default";
    }];
  });
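
  # Role granting only get/list/watch on pods in the "default" namespace.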
  roRole = pkgs.writeText "ro-role.json" (builtins.toJSON {
    apiVersion = "rbac.authorization.k8s.io/v1";
    kind = "Role";
    metadata = {
      name = "pod-reader";
      namespace = "default";
    };
    rules = [{
      apiGroups = [""];
      resources = ["pods"];
      verbs = ["get" "list" "watch"];
    }];
  });
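
  # Pod running the kubectl image under the read-only ServiceAccount;
  # "tail -f" keeps it alive so the test can "kubectl exec" into it.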
  kubectlPod = pkgs.writeText "kubectl-pod.json" (builtins.toJSON {
    kind = "Pod";
    apiVersion = "v1";
    metadata.name = "kubectl";
    metadata.namespace = "default";
    metadata.labels.name = "kubectl";
    spec.serviceAccountName = "read-only";
    spec.containers = [{
      name = "kubectl";
      image = "kubectl:latest";
      command = ["/bin/tail" "-f"];
      imagePullPolicy = "Never";
      tty = true;
    }];
  });
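
  # Second pod manifest; writeTextDir wraps it in a directory so that, via the
  # image "contents" below, it lands at /kubectl-pod-2.json inside the image.
  # The test tries to create it from within the pod and expects a denial.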
  kubectlPod2 = pkgs.writeTextDir "kubectl-pod-2.json" (builtins.toJSON {
    kind = "Pod";
    apiVersion = "v1";
    metadata.name = "kubectl-2";
    metadata.namespace = "default";
    metadata.labels.name = "kubectl-2";
    spec.serviceAccountName = "read-only";
    spec.containers = [{
      name = "kubectl-2";
      image = "kubectl:latest";
      command = ["/bin/tail" "-f"];
      imagePullPolicy = "Never";
      tty = true;
    }];
  });
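
  # Derivation containing just the kubectl binary from pkgs.kubernetes.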
  kubectl = pkgs.runCommand "copy-kubectl" { buildInputs = [ pkgs.kubernetes ]; } ''
    mkdir -p $out/bin
    cp ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl
  '';
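
  # Docker image bundling kubectl, busybox, and the extra pod manifest.
  # It is preloaded on the nodes with "docker load", which is why the pods
  # above use imagePullPolicy = "Never".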
  kubectlImage = pkgs.dockerTools.buildImage {
    name = "kubectl";
    tag = "latest";
    contents = [ kubectl pkgs.busybox kubectlPod2 ];
    config.Entrypoint = "/bin/sh";
  };
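
  # Attributes shared by both test scenarios.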
  base = {
    name = "rbac";
  };
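
  # Single-node scenario: deploy the RBAC objects and the kubectl pod, then
  # verify that the read-only account can list pods but not create or delete
  # them.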
  singlenode = base // {
    test = ''
      $machine1->waitForUnit("kubernetes.target");
      $machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");

      $machine1->waitUntilSucceeds("docker load < ${kubectlImage}");

      $machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
      $machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
      $machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
      $machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");

      $machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
      $machine1->waitUntilSucceeds("kubectl exec -ti kubectl -- kubectl get pods");
      $machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
      $machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
    '';
  };
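
  # Two-node scenario: machine2 joins the cluster using the bootstrap token
  # shared by machine1, and the same RBAC checks run with the pod scheduled
  # in the joined cluster.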
  multinode = base // {
    test = ''
      # Node token exchange
      $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
      $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
      $machine1->waitForUnit("kubernetes.target");
      $machine2->waitForUnit("kubernetes.target");

      $machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");

      $machine2->waitUntilSucceeds("docker load < ${kubectlImage}");

      $machine1->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
      $machine1->waitUntilSucceeds("kubectl apply -f ${roRole}");
      $machine1->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
      $machine1->waitUntilSucceeds("kubectl create -f ${kubectlPod}");

      $machine1->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
      $machine1->waitUntilSucceeds("kubectl exec -ti kubectl -- kubectl get pods");
      $machine1->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
      $machine1->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
    '';
  };
in {
  singlenode = mkKubernetesSingleNodeTest singlenode;
  multinode = mkKubernetesMultiNodeTest multinode;
}
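
# A sketch of how one scenario might be run directly from a nixpkgs checkout
# (the exact invocation is an assumption; attribute names follow the set
# returned above):
#   nix-build nixos/tests/kubernetes/rbac.nix -A singlenode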