Merge pull request 'direct-fixes' (#6) from direct-fixes into develop

Reviewed-on: #6
JakeHillion 2020-12-11 16:02:36 +00:00
commit ccff05c8d2
2 changed files with 349 additions and 21 deletions

View File

@@ -91,15 +91,14 @@
" (directionInbound[test.name()], directionOutbound[test.name()]) = env.test(test)\n",
"\n",
"def attempt_n_times(foo, n=3):\n",
" i = 0\n",
" while True:\n",
" for i in range(n):\n",
" try:\n",
" return foo()\n",
" except KeyboardInterrupt as e:\n",
" raise e\n",
" except Exception as e:\n",
" if i == n:\n",
" if i == n - 1:\n",
" raise e\n",
" finally:\n",
" i += 1\n",
"\n",
"\n",
"fast_tests = True"
@@ -114,7 +113,7 @@
}
},
"source": [
"### Direct Server to Server Testing"
"### Direct Server to Server"
]
},
{
@@ -132,7 +131,9 @@
" run_and_save_test(env, DirectTest(1, variation_target=0.4 if fast_tests else 0.2))\n",
" run_and_save_test(env, DirectTest(2, variation_target=0.4 if fast_tests else 0.2))\n",
" run_and_save_test(env, DirectTest(3, variation_target=0.4 if fast_tests else 0.2))\n",
" run_and_save_test(env, DirectTest(4, variation_target=0.4 if fast_tests else 0.2))"
" run_and_save_test(env, DirectTest(4, variation_target=0.4 if fast_tests else 0.2))\n",
"\n",
"attempt_n_times(direct_tests)"
]
},
{
@@ -228,7 +229,11 @@
},
{
"cell_type": "markdown",
"metadata": {},
"metadata": {
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"## Graphs\n",
"This section produces graphs from the collected data."
@@ -391,6 +396,316 @@
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"### More than 2 connections evaluation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"plot_iperf_results(\n",
" {\n",
" '4x1MB Connections (proxied)': StandardTest([1,1,1,1]),\n",
" '3x1MB Connections (proxied)': StandardTest([1,1,1]),\n",
" '2x1MB Connections (proxied)': StandardTest([1,1]),\n",
" },\n",
" filename='png',\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"plot_iperf_results(\n",
" {\n",
" '4x2MB Connections (proxied)': StandardTest([2,2,2,2]),\n",
" '3x2MB Connections (proxied)': StandardTest([2,2,2]),\n",
" '2x2MB Connections (proxied)': StandardTest([2,2]),\n",
" },\n",
" filename='png',\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"### Mixed Performance Evaluation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"plot_iperf_results(\n",
" {\n",
" '2x2MB Connections (proxied)': StandardTest([2,2]),\n",
" '1x1MB + 1x2MB Connections (proxied)': StandardTest([1,2]),\n",
" '2x1MB Connections (proxied)': StandardTest([1,1]),\n",
" },\n",
" filename='png',\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"### Eventful Evaluation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"plot_iperf_results(\n",
" {\n",
" 'Varied Connection': StandardTest([2,2], events={10: (0,1), 15: (0,2)}, duration=30),\n",
" },\n",
" filename='png',\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"### Comparisons to a Direct Connection"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"plot_iperf_results(\n",
" {\n",
" '1x2MB Connection (not proxied)': DirectTest(2),\n",
" '2x1MB Connections (proxied)': StandardTest([1,1]),\n",
" '1x1MB Connection (not proxied)': DirectTest(1),\n",
"\n",
" },\n",
" filename='png',\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"plot_iperf_results(\n",
" {\n",
" '1x4MB Connection (not proxied)': DirectTest(4),\n",
" '2x2MB Connections (proxied)': StandardTest([2,2]),\n",
" '1x2MB Connection (not proxied)': DirectTest(2),\n",
"\n",
" },\n",
" filename='png',\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"plot_iperf_results(\n",
" {\n",
" '4x1MB Connections (proxied)': StandardTest([1,1,1,1]),\n",
" '3x1MB Connections (proxied)': StandardTest([1,1,1]),\n",
" '2x1MB Connections (proxied)': StandardTest([1,1]),\n",
" },\n",
" filename='png',\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"plot_iperf_results(\n",
" {\n",
" '4x2MB Connections (proxied)': StandardTest([2,2,2,2]),\n",
" '3x2MB Connections (proxied)': StandardTest([2,2,2]),\n",
" '2x2MB Connections (proxied)': StandardTest([2,2]),\n",
" },\n",
" filename='png',\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"### Mixed Performance Evaluation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"plot_iperf_results(\n",
" {\n",
" '2x2MB Connections (proxied)': StandardTest([2,2]),\n",
" '1x1MB + 1x2MB Connections (proxied)': StandardTest([1,2]),\n",
" '2x1MB Connections (proxied)': StandardTest([1,1]),\n",
" },\n",
" filename='png',\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"### Eventful Evaluation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"plot_iperf_results(\n",
" {\n",
" 'Varied Connection': StandardTest([2,2], events={10: (0,1), 15: (0,2)}, duration=30),\n",
" },\n",
" filename='png',\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"### Comparisons to a Direct Connection"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"plot_iperf_results(\n",
" {\n",
" '1x2MB Connection (not proxied)': DirectTest(2),\n",
" '2x1MB Connections (proxied)': StandardTest([1,1]),\n",
" '1x1MB Connection (not proxied)': DirectTest(1),\n",
"\n",
" },\n",
" filename='png',\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"plot_iperf_results(\n",
" {\n",
" '1x4MB Connection (not proxied)': DirectTest(4),\n",
" '2x2MB Connections (proxied)': StandardTest([2,2]),\n",
" '1x2MB Connection (not proxied)': DirectTest(2),\n",
"\n",
" },\n",
" filename='png',\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
@@ -399,9 +714,7 @@
"name": "#%% md\n"
}
},
"source": [
"### More than 2 connections evaluation"
]
"source": []
},
{
"cell_type": "code",

View File

@@ -167,11 +167,15 @@ class SpeedTestServer(Node):
def server(self):
self.ssh('iperf3 -s -1 -D', error_stdout=True, error_stderr=True)
def client(self, target, time=30):
def client(self, target, time=30, interval_size=2.0):
if isinstance(target, SpeedTestServer):
target = target.get_interfaces()[0].get_address()
command = 'iperf3 -c {target} -t {time} -O 5 -J'.format(target=target, time=time)
command = 'iperf3 -c {target} -t {time} -O 6 -i {interval_size} -J'.format(
target=target,
time=time,
interval_size=interval_size,
)
return self.ssh(command, error_stdout=True, error_stderr=True, return_stdout=True)
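With the defaults above, the formatted command comes out along these lines (illustrative target address and duration; -O omits the warm-up seconds from the results, -i sets the reporting interval, -J requests JSON output):

iperf3 -c 10.0.0.1 -t 30 -O 6 -i 2.0 -J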
@@ -231,6 +235,7 @@ class RemotePortal(Node):
sudo ip route add table 10 to {local_host} via 172.19.152.3 dev nc0
sudo ip rule add to {local_host} table 10 priority 10
ping -W 0.1 -c 1 172.19.152.3
ps $(cat mpbl3p.pid) || cat mpbl3p.log
''').format(
local_host=self.get_interfaces()[0].get_address(),
@@ -329,6 +334,7 @@ class LocalPortal(Node):
sudo ip route add to {remote_host} dev {local_interface} table 19
sudo ip rule add to {remote_host} table 19 priority 19
ping -W 0.1 -c 1 172.19.152.2
ps $(cat mpbl3p.pid) || cat mpbl3p.log
''').format(
**self.setup_params,
@@ -373,7 +379,7 @@ class DirectTest(StandardTest):
class StandardIperfResult:
def __init__(self, test: StandardTest, iperf: str, interval_size=1.0):
def __init__(self, test: StandardTest, iperf: str, interval_size=2.0):
self.test = test
self.interval_size = interval_size
@@ -381,6 +387,7 @@ class StandardIperfResult:
# list containing an exact time and a value
self.data: List[Tuple[float, float]] = []
self.num_tests = 0
self.add_results(iperf)
def add_results(self, iperf: str):
@@ -397,6 +404,8 @@ class StandardIperfResult:
):
self.data.append((time, result))
self.num_tests += 1
def bins(self) -> List[List[Tuple[float, float]]]:
bins: List[List[Tuple[float, float]]] = [[] for _ in np.arange(0, self.test.duration, self.interval_size)]
@@ -432,12 +441,13 @@
def repeat_until_satisfied(reducer, satisfied, initial=None, max_attempts=100, max_failures=3):
val = initial()
i = 0
val = initial
for i in range(max_attempts):
for j in range(max_failures):
try:
val = reducer(val)
break
except Exception as e:
print('failed with {}'.format(e))
if j == max_failures - 1:
@@ -485,8 +495,8 @@ class BaseEnvironment:
for t, (iface, rate) in test.events.items():
threading.Timer(
5 + t,
(lambda s: lambda: s.lp.get_interfaces()[iface].set_rate(rate))(self),
6 + t,
(lambda n: lambda: n.get_interfaces()[iface].set_rate(rate))(rated_node),
)
iperf = client.client(server, time=test.duration)
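Two things change in this hunk: the timers now fire at 6 + t seconds, presumably lining up with the longer iperf -O 6 warm-up omission, and the callback captures a configurable rated_node instead of self.lp. The double lambda binds that node by value when the Timer is created; a minimal standalone sketch of the idiom, with a hypothetical helper name not taken from the repository:

import threading

def schedule_rate_change(node, iface, rate, delay):
    # The outer lambda is invoked immediately with the current node, so the
    # inner callback keeps pointing at it even if the surrounding loop later
    # rebinds the variables it was built from.
    threading.Timer(
        delay,
        (lambda n: lambda: n.get_interfaces()[iface].set_rate(rate))(node),
    ).start()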
@@ -496,9 +506,14 @@
old.add_results(iperf)
return old
def test_satisfier(val: StandardIperfResult) -> bool:
if val.num_tests < 3:
return False
return False not in [x < test.variation_target for x in val.coefficient_variance().values()]
result = repeat_until_satisfied(
test_reducer,
lambda x: max(x.coefficient_variance().values()) < test.variation_target,
test_satisfier,
max_failures=test.max_failures,
max_attempts=test.max_attempts,
)
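A hypothetical toy usage of the updated repeat_until_satisfied, illustrating the contract the new test_satisfier relies on: the reducer folds one more sample into the accumulated value (which starts as the default initial=None), and the satisfier refuses to pass until at least three samples show acceptably low variation. Names and thresholds below are illustrative only, and assume the satisfier is checked after each successful reduction:

import random

def toy_reducer(samples):
    # The first call receives None (the default initial value); later calls
    # extend the accumulated list with one more simulated measurement.
    samples = [] if samples is None else samples
    return samples + [random.gauss(10.0, 1.0)]

def toy_satisfier(samples):
    # Mirror the rule above: insist on at least three runs, then check the
    # coefficient of variation against a target.
    if samples is None or len(samples) < 3:
        return False
    mean = sum(samples) / len(samples)
    stddev = (sum((s - mean) ** 2 for s in samples) / len(samples)) ** 0.5
    return stddev / mean < 0.2

stable_samples = repeat_until_satisfied(toy_reducer, toy_satisfier)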