import ./make-test-python.nix ({pkgs, lib, ...}:

let
  # Settings for both servers and agents
  webUi = true;
  retry_interval = "1s";
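  # A raft_multiplier of 1 selects Consul's fastest Raft timing profile,
  # keeping leader elections quick during the test.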
  raft_multiplier = 1;

  defaultExtraConfig = {
    inherit retry_interval;
    performance = {
      inherit raft_multiplier;
    };
  };

  allConsensusServerHosts = [
    "192.168.1.1"
    "192.168.1.2"
    "192.168.1.3"
  ];

  allConsensusClientHosts = [
    "192.168.2.1"
    "192.168.2.2"
  ];

  firewallSettings = {
    # See https://www.consul.io/docs/install/ports.html
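    # Port breakdown: 8300 server RPC, 8301 Serf LAN, 8302 Serf WAN,
    # 8500 HTTP API, 8600 DNS.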
    allowedTCPPorts = [ 8301 8302 8600 8500 8300 ];
    allowedUDPPorts = [ 8301 8302 8600 ];
  };

  client = index: { pkgs, ... }:
    let
      ip = builtins.elemAt allConsensusClientHosts index;
    in
      {
        environment.systemPackages = [ pkgs.consul ];
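
        # mkOverride 0 gives these addresses the highest priority, so they
        # replace (rather than merge with) the test driver's default eth1 address.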
        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
          { address = ip; prefixLength = 16; }
        ];
        networking.firewall = firewallSettings;

        services.consul = {
          enable = true;
          inherit webUi;
          extraConfig = defaultExtraConfig // {
            server = false;
            retry_join = allConsensusServerHosts;
            bind_addr = ip;
          };
        };
      };

  server = index: { pkgs, ... }:
    let
      numConsensusServers = builtins.length allConsensusServerHosts;
      thisConsensusServerHost = builtins.elemAt allConsensusServerHosts index;
      ip = thisConsensusServerHost; # since we already use IPs to identify servers
    in
      {
        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
          { address = ip; prefixLength = 16; }
        ];
        networking.firewall = firewallSettings;

        services.consul =
          assert builtins.elem thisConsensusServerHost allConsensusServerHosts;
          {
            enable = true;
            inherit webUi;
            extraConfig = defaultExtraConfig // {
              server = true;
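              # Only attempt to bootstrap the cluster once this many servers
              # have joined (here: all of them).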
              bootstrap_expect = numConsensusServers;
              # Tell Consul that we never intend to drop below this many servers.
              # This ensures that consensus is not permanently lost after a
              # temporary loss of quorum.
              # See https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040
              autopilot.min_quorum = numConsensusServers;
              retry_join =
                # If there's only 1 node in the network, we allow self-join;
                # otherwise, the node must not try to join itself, and must
                # join only the other servers.
                # See https://github.com/hashicorp/consul/issues/2868
                if numConsensusServers == 1
                  then allConsensusServerHosts
                  else builtins.filter (h: h != thisConsensusServerHost) allConsensusServerHosts;
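              # For example, server1 (index 0, 192.168.1.1) gets
              # retry_join = [ "192.168.1.2" "192.168.1.3" ].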
              bind_addr = ip;
            };
          };
      };

in {
  name = "consul";

  nodes = {
    server1 = server 0;
    server2 = server 1;
    server3 = server 2;

    client1 = client 0;
    client2 = client 1;
  };

  testScript = ''
    servers = [server1, server2, server3]
    machines = [server1, server2, server3, client1, client2]

    for m in machines:
        m.wait_for_unit("consul.service")


    def wait_for_healthy_servers():
        # See https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040
        # for why the `Voter` column of `list-peers` has that info.
        # TODO: The `grep true` relies on the fact that `Voter` is currently
        #       the only boolean column in output like:
        #           # consul operator raft list-peers
        #           Node     ID   Address           State     Voter  RaftProtocol
        #           server3  ...  192.168.1.3:8300  leader    true   3
        #           server2  ...  192.168.1.2:8300  follower  true   3
        #           server1  ...  192.168.1.1:8300  follower  false  3
        #       Replace this with the more reliable check to be defined by
        #       https://github.com/hashicorp/consul/issues/8118
        #       once that ticket is closed.
        for m in machines:
            m.wait_until_succeeds(
                "[ $(consul operator raft list-peers | grep true | wc -l) == 3 ]"
            )


    wait_for_healthy_servers()
    # Also wait for the clients to be alive: 3 servers + 2 clients = 5 members.
    for m in machines:
        m.wait_until_succeeds("[ $(consul members | grep -o alive | wc -l) == 5 ]")
    client1.succeed("consul kv put testkey 42")
    client2.succeed("[ $(consul kv get testkey) == 42 ]")


    def rolling_reboot_test():
        """
        Tests that the cluster can tolerate failures of any single server,
        following the recommended rolling upgrade procedure from
        https://www.consul.io/docs/upgrading#standard-upgrades
        """

        for server in servers:
            server.crash()

            # For each client, wait until it has a connection again
            # (via `kv get -recurse`) before issuing commands.
            client1.wait_until_succeeds("consul kv get -recurse")
            client2.wait_until_succeeds("consul kv get -recurse")

            # Do some consul actions while one server is down.
            client1.succeed("consul kv put testkey 43")
            client2.succeed("[ $(consul kv get testkey) == 43 ]")
            client2.succeed("consul kv delete testkey")

            # Restart the crashed machine.
            server.start()

            # Wait for recovery.
            wait_for_healthy_servers()

            # Wait for client connections.
            client1.wait_until_succeeds("consul kv get -recurse")
            client2.wait_until_succeeds("consul kv get -recurse")

            # Do some consul actions with the server back up.
            client1.succeed("consul kv put testkey 44")
            client2.succeed("[ $(consul kv get testkey) == 44 ]")
            client2.succeed("consul kv delete testkey")


    # Run the tests.
    rolling_reboot_test()
  '';
})