Merge pull request #78555 from tfc/limesurvey-utf8
nixosTests.limesurvey: port to python and drop badly utf encoded characters
Commit 839be38cca
@@ -395,7 +395,7 @@ class Machine:
         status_code_pattern = re.compile(r"(.*)\|\!EOF\s+(\d+)")
 
         while True:
-            chunk = self.shell.recv(4096).decode()
+            chunk = self.shell.recv(4096).decode(errors="ignore")
             match = status_code_pattern.match(chunk)
             if match:
                 output += match[1]
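The driver hunk above (from the `class Machine:` context, i.e. the Python test driver) is what the "drop badly utf encoded characters" part of the commit message refers to: shell.recv(4096) hands back a raw byte chunk, and a strict bytes.decode() raises UnicodeDecodeError whenever the chunk boundary cuts a multi-byte UTF-8 sequence or the guest prints invalid UTF-8, while errors="ignore" silently drops the offending bytes. A minimal standalone sketch of that failure mode (illustrative only, not code from the commit):

# "ä" is two bytes in UTF-8; slicing the stream mid-character, as a
# fixed-size recv() can, leaves an incomplete sequence behind.
data = "console output ending in ä".encode("utf-8")
chunk = data[:-1]          # simulate recv() splitting the final character

try:
    chunk.decode()         # strict decoding, as before this change
except UnicodeDecodeError as exc:
    print("strict decode fails:", exc)

print(repr(chunk.decode(errors="ignore")))   # lenient decoding drops the bad byte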
@@ -1,21 +1,26 @@
-import ./make-test.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ pkgs, ... }: {
   name = "limesurvey";
   meta.maintainers = [ pkgs.stdenv.lib.maintainers.aanderse ];
 
-  machine =
-    { ... }:
-    { services.limesurvey.enable = true;
-      services.limesurvey.virtualHost.hostName = "example.local";
-      services.limesurvey.virtualHost.adminAddr = "root@example.local";
-
-      # limesurvey won't work without a dot in the hostname
-      networking.hosts."127.0.0.1" = [ "example.local" ];
+  machine = { ... }: {
+    services.limesurvey = {
+      enable = true;
+      virtualHost = {
+        hostName = "example.local";
+        adminAddr = "root@example.local";
+      };
     };
 
-  testScript = ''
-    startAll;
+    # limesurvey won't work without a dot in the hostname
+    networking.hosts."127.0.0.1" = [ "example.local" ];
+  };
 
-    $machine->waitForUnit('phpfpm-limesurvey.service');
-    $machine->succeed('curl http://example.local/') =~ /The following surveys are available/ or die;
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("phpfpm-limesurvey.service")
+    assert "The following surveys are available" in machine.succeed(
+        "curl http://example.local/"
+    )
   '';
 })
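The limesurvey test itself switches from the Perl driver (make-test.nix) to the Python driver (make-test-python.nix): testScript is now plain Python executed with the node objects in scope, and since succeed() returns the command's stdout as a string, an ordinary assert replaces the Perl `=~ /.../ or die` idiom. A rough sketch of that pattern outside the driver (succeed() here is a stand-in, not the real driver plumbing):

# Stand-in for machine.succeed("curl http://example.local/"); the real driver
# runs the command inside the VM and returns its stdout.
def succeed(cmd: str) -> str:
    return "The following surveys are available right now:"

page = succeed("curl http://example.local/")
assert "The following surveys are available" in page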
@@ -1,97 +1,90 @@
-import ./make-test.nix ({ pkgs, ...} :
+import ./make-test-python.nix ({ pkgs, ...} :
 
 let
 
-  backend =
-    { pkgs, ... }:
-
-    { services.httpd.enable = true;
-      services.httpd.adminAddr = "foo@example.org";
-      services.httpd.virtualHosts.localhost.documentRoot = "${pkgs.valgrind.doc}/share/doc/valgrind/html";
-      networking.firewall.allowedTCPPorts = [ 80 ];
-    };
-
-in
-
-{
+  backend = { pkgs, ... }: {
+    services.httpd = {
+      enable = true;
+      adminAddr = "foo@example.org";
+      virtualHosts.localhost.documentRoot = "${pkgs.valgrind.doc}/share/doc/valgrind/html";
+    };
+    networking.firewall.allowedTCPPorts = [ 80 ];
+  };
+in {
   name = "proxy";
   meta = with pkgs.stdenv.lib.maintainers; {
     maintainers = [ eelco ];
   };
-
-  nodes =
-    { proxy =
-        { nodes, ... }:
-
-        { services.httpd.enable = true;
-          services.httpd.adminAddr = "bar@example.org";
-          services.httpd.extraModules = [ "proxy_balancer" "lbmethod_byrequests" ];
-          services.httpd.extraConfig = ''
-            ExtendedStatus on
-          '';
-          services.httpd.virtualHosts.localhost = {
-            extraConfig = ''
-              <Location /server-status>
-                Require all granted
-                SetHandler server-status
-              </Location>
-
-              <Proxy balancer://cluster>
-                Require all granted
-                BalancerMember http://${nodes.backend1.config.networking.hostName} retry=0
-                BalancerMember http://${nodes.backend2.config.networking.hostName} retry=0
-              </Proxy>
-
-              ProxyStatus full
-              ProxyPass /server-status !
-              ProxyPass / balancer://cluster/
-              ProxyPassReverse / balancer://cluster/
-
-              # For testing; don't want to wait forever for dead backend servers.
-              ProxyTimeout 5
-            '';
-          };
-
-          networking.firewall.allowedTCPPorts = [ 80 ];
-        };
-
-      backend1 = backend;
-      backend2 = backend;
-
-      client = { ... }: { };
-    };
-
-  testScript =
-    ''
-      startAll;
-
-      $proxy->waitForUnit("httpd");
-      $backend1->waitForUnit("httpd");
-      $backend2->waitForUnit("httpd");
-      $client->waitForUnit("network.target");
-
-      # With the back-ends up, the proxy should work.
-      $client->succeed("curl --fail http://proxy/");
-
-      $client->succeed("curl --fail http://proxy/server-status");
-
-      # Block the first back-end.
-      $backend1->block;
-
-      # The proxy should still work.
-      $client->succeed("curl --fail http://proxy/");
-
-      $client->succeed("curl --fail http://proxy/");
-
-      # Block the second back-end.
-      $backend2->block;
-
-      # Now the proxy should fail as well.
-      $client->fail("curl --fail http://proxy/");
-
-      # But if the second back-end comes back, the proxy should start
-      # working again.
-      $backend2->unblock;
-      $client->succeed("curl --fail http://proxy/");
-    '';
+  nodes = {
+    proxy = { nodes, ... }: {
+      services.httpd = {
+        enable = true;
+        adminAddr = "bar@example.org";
+        extraModules = [ "proxy_balancer" "lbmethod_byrequests" ];
+        extraConfig = ''
+          ExtendedStatus on
+        '';
+        virtualHosts.localhost = {
+          extraConfig = ''
+            <Location /server-status>
+              Require all granted
+              SetHandler server-status
+            </Location>
+
+            <Proxy balancer://cluster>
+              Require all granted
+              BalancerMember http://${nodes.backend1.config.networking.hostName} retry=0
+              BalancerMember http://${nodes.backend2.config.networking.hostName} retry=0
+            </Proxy>
+
+            ProxyStatus full
+            ProxyPass /server-status !
+            ProxyPass / balancer://cluster/
+            ProxyPassReverse / balancer://cluster/
+
+            # For testing; don't want to wait forever for dead backend servers.
+            ProxyTimeout 5
+          '';
+        };
+      };
+      networking.firewall.allowedTCPPorts = [ 80 ];
+    };
+
+    backend1 = backend;
+    backend2 = backend;
+
+    client = { ... }: { };
+  };
+
+  testScript = ''
+    start_all()
+
+    proxy.wait_for_unit("httpd")
+    backend1.wait_for_unit("httpd")
+    backend2.wait_for_unit("httpd")
+    client.wait_for_unit("network.target")
+
+    # With the back-ends up, the proxy should work.
+    client.succeed("curl --fail http://proxy/")
+
+    client.succeed("curl --fail http://proxy/server-status")
+
+    # Block the first back-end.
+    backend1.block()
+
+    # The proxy should still work.
+    client.succeed("curl --fail http://proxy/")
+    client.succeed("curl --fail http://proxy/")
+
+    # Block the second back-end.
+    backend2.block()
+
+    # Now the proxy should fail as well.
+    client.fail("curl --fail http://proxy/")
+
+    # But if the second back-end comes back, the proxy should start
+    # working again.
+    backend2.unblock()
+    client.succeed("curl --fail http://proxy/")
+  '';
 })
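The proxy test follows the same pattern: the Nix attribute sets are reformatted into nested form, and the Perl driver calls are replaced one-for-one by snake_case methods on the node objects. The mapping, as visible in the diff above (listed here only as an overview, not generated code):

# Perl driver call -> Python driver call
PERL_TO_PYTHON = {
    "startAll;": "start_all()",
    "$node->waitForUnit(unit);": "node.wait_for_unit(unit)",
    "$node->succeed(cmd);": "node.succeed(cmd)",
    "$node->fail(cmd);": "node.fail(cmd)",
    "$node->block; / $node->unblock;": "node.block() / node.unblock()",
}

In both drivers, block() and unblock() simulate unplugging and replugging the node's virtual network cable, which is what the blocked-back-end assertions in the test script rely on.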