nixos/hadoop: add better test
The existing tests for HDFS and YARN only check that the services come up and expose their web interfaces. The new combined hadoop test also checks that the services and roles work together as intended: it spins up an HDFS+YARN cluster and submits a demo YARN application that uses the HDFS cluster for storage and the YARN cluster for compute.
commit 6688c52254
parent 91bb2b7016
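For reference, the combined test can be run on its own via the attribute added to all-tests.nix below. A minimal sketch, assuming a nixpkgs checkout and the usual mapping of all-tests.nix entries onto the nixosTests attribute set (the test is restricted to x86_64-linux by handleTestOn):

    # from the root of a nixpkgs checkout
    nix-build -A nixosTests.hadoop.all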
nixos/tests/all-tests.nix
@@ -165,6 +165,7 @@ in
   grocy = handleTest ./grocy.nix {};
   grub = handleTest ./grub.nix {};
   gvisor = handleTest ./gvisor.nix {};
+  hadoop.all = handleTestOn [ "x86_64-linux" ] ./hadoop/hadoop.nix {};
   hadoop.hdfs = handleTestOn [ "x86_64-linux" ] ./hadoop/hdfs.nix {};
   hadoop.yarn = handleTestOn [ "x86_64-linux" ] ./hadoop/yarn.nix {};
   handbrake = handleTestOn ["x86_64-linux"] ./handbrake.nix {};
@@ -416,6 +417,7 @@ in
   solr = handleTest ./solr.nix {};
   sonarr = handleTest ./sonarr.nix {};
   spacecookie = handleTest ./spacecookie.nix {};
+  spark = handleTestOn ["x86_64-linux"] ./spark {};
   spike = handleTest ./spike.nix {};
   sslh = handleTest ./sslh.nix {};
   sssd = handleTestOn ["x86_64-linux"] ./sssd.nix {};
nixos/tests/hadoop/hadoop.nix (new file, 70 lines)
@@ -0,0 +1,70 @@
+import ../make-test-python.nix ({pkgs, ...}: {
+
+  nodes = let
+    package = pkgs.hadoop;
+    coreSite = {
+      "fs.defaultFS" = "hdfs://master";
+    };
+  in {
+    master = {pkgs, options, ...}: {
+      services.hadoop = {
+        inherit package coreSite;
+        hdfs.namenode.enabled = true;
+        yarn.resourcemanager.enabled = true;
+      };
+      virtualisation.memorySize = 1024;
+    };
+
+    worker = {pkgs, options, ...}: {
+      services.hadoop = {
+        inherit package coreSite;
+        hdfs.datanode.enabled = true;
+        yarn.nodemanager.enabled = true;
+        yarnSite = options.services.hadoop.yarnSite.default // {
+          "yarn.resourcemanager.hostname" = "master";
+        };
+      };
+      virtualisation.memorySize = 2048;
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    master.wait_for_unit("network.target")
+    master.wait_for_unit("hdfs-namenode")
+
+    master.wait_for_open_port(8020)
+    master.wait_for_open_port(9870)
+
+    worker.wait_for_unit("network.target")
+    worker.wait_for_unit("hdfs-datanode")
+    worker.wait_for_open_port(9864)
+    worker.wait_for_open_port(9866)
+    worker.wait_for_open_port(9867)
+
+    master.succeed("curl -f http://worker:9864")
+    worker.succeed("curl -f http://master:9870")
+
+    worker.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
+
+    master.wait_for_unit("yarn-resourcemanager")
+
+    master.wait_for_open_port(8030)
+    master.wait_for_open_port(8031)
+    master.wait_for_open_port(8032)
+    master.wait_for_open_port(8088)
+    worker.succeed("curl -f http://master:8088")
+
+    worker.wait_for_unit("yarn-nodemanager")
+    worker.wait_for_open_port(8042)
+    worker.wait_for_open_port(8040)
+    master.succeed("curl -f http://worker:8042")
+
+    assert "Total Nodes:1" in worker.succeed("yarn node -list")
+
+    assert "Estimated value of Pi is" in worker.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
+    assert "SUCCEEDED" in worker.succeed("yarn application -list -appStates FINISHED")
+    worker.succeed("sudo -u hdfs hdfs dfs -ls / | systemd-cat")
+  '';
+})
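The pi job above locates the bundled MapReduce examples jar by resolving the yarn wrapper with readlink and rewriting its path with sed. Spelled out, the invocation is roughly equivalent to the following sketch (the store path is illustrative, not a real hash):

    # readlink $(which yarn) resolves to something like
    #   /nix/store/<hash>-hadoop-<version>/bin/yarn
    # and the sed expression rewrites bin/yarn into a glob for the examples jar:
    HADOOP_USER_NAME=hdfs yarn jar \
      /nix/store/<hash>-hadoop-<version>/lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
      pi 2 10   # estimate pi with 2 map tasks, 10 samples per map

Running the job as HADOOP_USER_NAME=hdfs lets it write its staging files to the freshly formatted HDFS without creating additional users.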
nixos/tests/hadoop/hdfs.nix
@@ -2,7 +2,7 @@ import ../make-test-python.nix ({...}: {
   nodes = {
     namenode = {pkgs, ...}: {
       services.hadoop = {
-        package = pkgs.hadoop_3_1;
+        package = pkgs.hadoop;
         hdfs.namenode.enabled = true;
         coreSite = {
           "fs.defaultFS" = "hdfs://namenode:8020";
@@ -20,7 +20,7 @@ import ../make-test-python.nix ({...}: {
     };
     datanode = {pkgs, ...}: {
       services.hadoop = {
-        package = pkgs.hadoop_3_1;
+        package = pkgs.hadoop;
         hdfs.datanode.enabled = true;
         coreSite = {
           "fs.defaultFS" = "hdfs://namenode:8020";
nixos/tests/hadoop/yarn.nix
@@ -1,7 +1,7 @@
 import ../make-test-python.nix ({...}: {
   nodes = {
     resourcemanager = {pkgs, ...}: {
-      services.hadoop.package = pkgs.hadoop_3_1;
+      services.hadoop.package = pkgs.hadoop;
       services.hadoop.yarn.resourcemanager.enabled = true;
       services.hadoop.yarnSite = {
         "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
@@ -12,7 +12,7 @@ import ../make-test-python.nix ({...}: {
       ];
     };
     nodemanager = {pkgs, ...}: {
-      services.hadoop.package = pkgs.hadoop_3_1;
+      services.hadoop.package = pkgs.hadoop;
       services.hadoop.yarn.nodemanager.enabled = true;
       services.hadoop.yarnSite = {
         "yarn.resourcemanager.hostname" = "resourcemanager";