import ./make-test.nix ({pkgs, lib, ...}:

let
  # Static description of the cluster: one monitor (which also hosts the
  # mgr daemon) and two OSDs, all on the 192.168.1.0/24 test VLAN.
  cfg = {
    clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
    monA = {
      name = "a";
      ip = "192.168.1.1";
    };
    osd0 = {
      name = "0";
      ip = "192.168.1.2";
      key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
      uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
    };
    osd1 = {
      name = "1";
      ip = "192.168.1.3";
      key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
      uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
    };
  };

  # Build a services.ceph attrset: the cluster-wide global section merged
  # with the per-node daemon configuration.
  generateCephConfig = { daemonConfig }: {
    enable = true;
    global = {
      fsid = cfg.clusterId;
      monHost = cfg.monA.ip;
      monInitialMembers = cfg.monA.name;
    };
  } // daemonConfig;

  # Build the NixOS machine configuration for one cluster member. Every
  # node gets a 20 GiB scratch disk (appears as /dev/vdb) and sits on
  # VLAN 1 so the nodes can reach each other.
  generateHost = { pkgs, cephConfig, networkConfig, ... }: {
    virtualisation = {
      memorySize = 512;
      emptyDiskImages = [ 20480 ];
      vlans = [ 1 ];
    };

    networking = networkConfig;

    environment.systemPackages = with pkgs; [
      bash
      sudo
      ceph
      xfsprogs
      netcat-openbsd
    ];

    # xfs backs the (filestore) OSD data directories.
    boot.kernelModules = [ "xfs" ];

    services.ceph = cephConfig;
  };

  networkMonA = {
    dhcpcd.enable = false;
    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
      { address = cfg.monA.ip; prefixLength = 24; }
    ];
    firewall = {
      # 6789 is the legacy (msgr v1) mon port, 3300 the msgr2 port.
      allowedTCPPorts = [ 6789 3300 ];
      allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
    };
  };
  cephConfigMonA = generateCephConfig { daemonConfig = {
    mon = {
      enable = true;
      daemons = [ cfg.monA.name ];
    };
    mgr = {
      enable = true;
      daemons = [ cfg.monA.name ];
    };
  }; };

  # Network and ceph configuration for an OSD node; both OSDs are
  # identical apart from the per-OSD data in cfg, so parameterize over it.
  networkOsd = osd: {
    dhcpcd.enable = false;
    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
      { address = osd.ip; prefixLength = 24; }
    ];
    firewall = {
      allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
    };
  };
  cephConfigOsd = osd: generateCephConfig { daemonConfig = {
    osd = {
      enable = true;
      daemons = [ osd.name ];
    };
  }; };

  # Following deployment is based on the manual deployment described here:
  # https://docs.ceph.com/docs/master/install/manual-deployment/
  # For other ways to deploy a ceph cluster, look at the documentation at
  # https://docs.ceph.com/docs/master/
  testscript = { ... }: ''
    startAll;

    $monA->waitForUnit("network.target");
    $osd0->waitForUnit("network.target");
    $osd1->waitForUnit("network.target");

    # Bootstrap ceph-mon daemon
    $monA->mustSucceed(
      "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
      "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
      "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
      "monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
      "sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
      "sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
      "sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
      "systemctl start ceph-mon-${cfg.monA.name}"
    );
    $monA->waitForUnit("ceph-mon-${cfg.monA.name}");
    $monA->mustSucceed("ceph mon enable-msgr2");

    # Can't check ceph status until a mon is up
    $monA->succeed("ceph -s | grep 'mon: 1 daemons'");

    # Start the ceph-mgr daemon, it has no deps and hardly any setup
    $monA->mustSucceed(
      "ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
      "systemctl start ceph-mgr-${cfg.monA.name}"
    );
    $monA->waitForUnit("ceph-mgr-${cfg.monA.name}");
    $monA->waitUntilSucceeds("ceph -s | grep 'quorum ${cfg.monA.name}'");
    $monA->waitUntilSucceeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'");

    # Send the admin keyring to the OSD machines
    $monA->mustSucceed("cp /etc/ceph/ceph.client.admin.keyring /tmp/shared");
    $osd0->mustSucceed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph");
    $osd1->mustSucceed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph");

    # Bootstrap both OSDs
    $osd0->mustSucceed(
      "mkfs.xfs /dev/vdb",
      "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
      "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
      "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
      "echo '{\"cephx_secret\": \"${cfg.osd0.key}\"}' | ceph osd new ${cfg.osd0.uuid} -i -",
    );
    $osd1->mustSucceed(
      "mkfs.xfs /dev/vdb",
      "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
      "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
      "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
      "echo '{\"cephx_secret\": \"${cfg.osd1.key}\"}' | ceph osd new ${cfg.osd1.uuid} -i -"
    );

    # Initialize the OSDs with regular filestore
    $osd0->mustSucceed(
      "ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
      "chown -R ceph:ceph /var/lib/ceph/osd",
      "systemctl start ceph-osd-${cfg.osd0.name}"
    );
    $osd1->mustSucceed(
      "ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
      "chown -R ceph:ceph /var/lib/ceph/osd",
      "systemctl start ceph-osd-${cfg.osd1.name}"
    );
    $monA->waitUntilSucceeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'");
    $monA->waitUntilSucceeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'");
    $monA->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");

    # Exercise basic pool operations against the running cluster
    $monA->mustSucceed(
      "ceph osd pool create multi-node-test 100 100",
      "ceph osd pool ls | grep 'multi-node-test'",
      "ceph osd pool rename multi-node-test multi-node-other-test",
      "ceph osd pool ls | grep 'multi-node-other-test'"
    );
    $monA->waitUntilSucceeds("ceph -s | grep '1 pools, 100 pgs'");
    $monA->mustSucceed("ceph osd pool set multi-node-other-test size 2");
    $monA->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");
    $monA->waitUntilSucceeds("ceph -s | grep '100 active+clean'");
    $monA->mustFail(
      "ceph osd pool ls | grep 'multi-node-test'",
      "ceph osd pool delete multi-node-other-test multi-node-other-test --yes-i-really-really-mean-it"
    );

    # Shut down ceph on all machines in a very unpolite way
    $monA->crash;
    $osd0->crash;
    $osd1->crash;

    # Start it up
    $osd0->start;
    $osd1->start;
    $monA->start;

    # Ensure the cluster comes back up again
    $monA->succeed("ceph -s | grep 'mon: 1 daemons'");
    $monA->waitUntilSucceeds("ceph -s | grep 'quorum ${cfg.monA.name}'");
    $monA->waitUntilSucceeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'");
    $monA->waitUntilSucceeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'");
    $monA->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");
  '';
in {
  name = "basic-multi-node-ceph-cluster";
  meta = with pkgs.stdenv.lib.maintainers; {
    maintainers = [ lejonet ];
  };

  nodes = {
    monA = generateHost { pkgs = pkgs; cephConfig = cephConfigMonA; networkConfig = networkMonA; };
    osd0 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd0; networkConfig = networkOsd cfg.osd0; };
    osd1 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd1; networkConfig = networkOsd cfg.osd1; };
  };

  testScript = testscript;
})