# dissertation-3-evaluation/structure/tests.py
import json
from typing import List, Dict, Tuple
import numpy as np
def repeat_until_satisfied(reducer, satisfied, initial=None, max_attempts=100, max_failures=3):
    """Repeatedly apply ``reducer`` to a value until ``satisfied`` accepts it.

    Each of the ``max_attempts`` attempts calls ``reducer(val)``; if the call
    raises, it is retried up to ``max_failures`` times before the last
    exception is propagated to the caller.

    :param reducer: callable taking the current value and returning the next one
    :param satisfied: predicate deciding whether the current value is acceptable
    :param initial: starting value passed to the first ``reducer`` call
    :param max_attempts: successful reductions allowed before giving up
    :param max_failures: consecutive exceptions tolerated per attempt
    :return: the first value for which ``satisfied(val)`` is truthy
    :raises RuntimeError: if no value satisfies the predicate in time
    """
    val = initial
    for _ in range(max_attempts):
        for j in range(max_failures):
            try:
                val = reducer(val)
                break
            except Exception as e:
                print('failed with {}'.format(e))
                if j == max_failures - 1:
                    # retries exhausted: bare raise preserves the original traceback
                    raise
        if satisfied(val):
            return val
    raise RuntimeError('too many attempts')
class StandardTest:
    """Description of a bandwidth test: per-flow rates, timed rate-change
    events, a duration, variation targets and retry limits.

    ``events`` maps an event time (seconds) to a ``(flow_index, new_rate)``
    pair. Variation targets default to infinity, i.e. "accept anything".
    """

    def __init__(
        self,
        rates: List[int],
        events: Dict[float, Tuple[int, int]] = None,
        duration: int = 10,
        interval_variation_target: float = np.inf,
        bandwidth_variation_target: float = np.inf,
        max_failures: int = 3,
        max_attempts: int = 60,
    ):
        self.rates = rates
        # avoid the shared-mutable-default pitfall; keep the caller's dict if given
        self.events = events if events is not None else {}
        self.duration = duration
        self.interval_variation_target = interval_variation_target
        self.bandwidth_variation_target = bandwidth_variation_target
        self.max_failures = max_failures
        self.max_attempts = max_attempts

    def name(self) -> str:
        """Build a compact identifier encoding rates, events and duration."""
        parts = ['R{}-{}'.format(*pair) for pair in enumerate(self.rates)]
        for when, event in self.events.items():
            parts.append('E{}R{}-{}'.format(when, *event))
        parts.append('T{}'.format(self.duration))
        return ''.join(parts)
class DirectTest(StandardTest):
    """A StandardTest with exactly one flow, distinguished by a 'D' prefix
    in its name."""

    def __init__(self, rate: int, **kwargs):
        # wrap the single rate so the base class sees its usual list form
        super().__init__([rate], **kwargs)

    def name(self) -> str:
        """Prefix the base identifier with 'D' to mark a direct test."""
        base = super().name()
        return 'D{}'.format(base)
class IperfResult:
    """Accumulates one or more iperf3 JSON reports for a StandardTest and
    exposes per-interval and whole-run bandwidth statistics.

    Interval samples are bucketed into fixed-width time bins of
    ``interval_size`` seconds spanning the test duration.
    """

    def __init__(self, test: StandardTest, iperf: str, interval_size=2.0):
        self.test = test
        self.interval_size = interval_size
        # (midpoint time, bits/s) sample for every accepted iperf interval
        self.interval_data: List[Tuple[float, float]] = []
        # overall sender bandwidth (bits/s) of each complete run
        self.bandwidth_data: List[float] = []
        self.num_tests = 0
        self.add_results(iperf)

    def add_results(self, iperf: str):
        """Parse one iperf3 JSON report and fold its measurements in."""
        report = json.loads(iperf)
        # overall sender-side bandwidth for this run
        self.bandwidth_data.append(report['end']['sum_sent']['bits_per_second'])
        half = self.interval_size / 2
        for entry in report['intervals']:
            summary = entry['sum']
            # skip omitted intervals and runts shorter than half a bin
            if summary['omitted'] or not (summary['end'] - summary['start'] > half):
                continue
            midpoint = (summary['start'] + summary['end']) / 2
            self.interval_data.append((midpoint, summary['bits_per_second']))
        self.num_tests += 1

    def _bin_midpoints(self) -> List[float]:
        # centre time of each fixed-width bin across the test duration
        half = self.interval_size / 2
        return [start + half for start in np.arange(0, self.test.duration, self.interval_size)]

    def bins(self) -> List[List[Tuple[float, float]]]:
        """Group interval samples into duration/interval_size time bins."""
        grouped: List[List[Tuple[float, float]]] = [
            [] for _ in np.arange(0, self.test.duration, self.interval_size)
        ]
        for midpoint, value in self.interval_data:
            # nearest bin index for this sample's midpoint
            slot = int(np.round((midpoint - self.interval_size / 2) / self.interval_size))
            grouped[slot].append((midpoint, value))
        return grouped

    def interval_means(self) -> Dict[float, float]:
        """Map each bin's midpoint time to its mean bandwidth."""
        averages = [np.mean(samples, axis=0)[1] for samples in self.bins()]
        return dict(zip(self._bin_midpoints(), averages))

    def interval_standard_deviations(self) -> Dict[float, float]:
        """Map each bin's midpoint time to its bandwidth standard deviation."""
        spreads = [np.std(samples, axis=0)[1] for samples in self.bins()]
        return dict(zip(self._bin_midpoints(), spreads))

    def interval_coefficient_variances(self) -> Dict[float, float]:
        """Map each bin's midpoint time to its coefficient of variation."""
        means = self.interval_means()
        return {t: sd / means[t] for t, sd in self.interval_standard_deviations().items()}

    def interval_time_ranges(self) -> Dict[float, Tuple[float, float]]:
        """Map each bin midpoint to (spread before, spread after) the centre."""
        midpoints = self._bin_midpoints()
        extents = [
            (centre - np.min(samples, axis=0)[0], np.max(samples, axis=0)[0] - centre)
            for samples, centre in zip(self.bins(), midpoints)
        ]
        return dict(zip(midpoints, extents))

    def bandwidth_mean(self):
        """Mean of the per-run overall bandwidths."""
        return np.mean(self.bandwidth_data)

    def bandwidth_standard_deviation(self):
        """Standard deviation of the per-run overall bandwidths."""
        return np.std(self.bandwidth_data)

    def bandwidth_coefficient_variance(self):
        """Coefficient of variation of the per-run overall bandwidths."""
        return self.bandwidth_standard_deviation() / self.bandwidth_mean()