Compare commits

...

126 Commits

Author SHA1 Message Date
9f79e23419 Update on Overleaf. 2021-05-14 10:59:21 +00:00
md403
f5e45e6feb Update on Overleaf. 2021-05-14 10:19:15 +00:00
md403
a166eb474c Update on Overleaf. 2021-05-14 10:19:00 +00:00
md403
6e391f8e42 Update on Overleaf. 2021-05-14 10:17:43 +00:00
25dd9d27c8 Merge branch 'master' of https://git.overleaf.com/5fd37aafb26bc430615b7f18 2021-05-14 11:14:36 +01:00
md403
da9458adc0 Update on Overleaf. 2021-05-14 10:14:34 +00:00
101a2dac9d Added graphs 2021-05-14 11:14:22 +01:00
8548ac344d Update on Overleaf. 2021-05-14 10:04:41 +00:00
51001f7df4 Update on Overleaf. 2021-05-14 08:43:29 +00:00
5be6ad93ce Merge branch 'master' of https://git.overleaf.com/5fd37aafb26bc430615b7f18 2021-05-14 09:22:49 +01:00
md403
2bde53b3da Update on Overleaf. 2021-05-14 08:22:48 +00:00
fa607bb0fd Updated graphs 2021-05-14 09:22:39 +01:00
2fe3b73cdc Update on Overleaf. 2021-05-14 07:00:30 +00:00
8fd0a3aa20 Update on Overleaf. 2021-05-14 06:39:11 +00:00
54be5311cb Merge branch 'master' of https://git.overleaf.com/5fd37aafb26bc430615b7f18 2021-05-14 07:08:14 +01:00
ce74e845ed Update on Overleaf. 2021-05-14 06:08:13 +00:00
50677c24b1 Updated graphs 2021-05-14 07:08:04 +01:00
0248ba39b6 Update on Overleaf. 2021-05-14 06:05:02 +00:00
5abb8fc0c9 Update on Overleaf. 2021-05-14 03:26:35 +00:00
7c545edaf1 Update on Overleaf. 2021-05-13 20:24:54 +00:00
0f37a09214 Update on Overleaf. 2021-05-13 18:36:07 +00:00
4d29ba45ae Update on Overleaf. 2021-05-13 18:30:46 +00:00
29869b47ef Update on Overleaf. 2021-05-13 16:44:29 +00:00
ecb5db8cbb updated references 2021-05-13 16:45:48 +01:00
b2a602c555 Update on Overleaf. 2021-05-13 15:45:36 +00:00
0d3e71c33a Update on Overleaf. 2021-05-13 15:04:57 +00:00
c17f6a6410 Update on Overleaf. 2021-05-13 14:57:20 +00:00
67a8a5aeb6 Folder renaming 2021-05-13 15:48:24 +01:00
2ac9955c72 updated appendix names 2021-05-13 15:36:04 +01:00
87d93c2ebb Reset to compilable state 2021-05-13 15:33:36 +01:00
0a95b2a693 Merge branch 'master' of gitea.hillion.co.uk:JakeHillion/dissertation-4-dissertation 2021-05-13 15:33:18 +01:00
4e23615e39 Reset to compilable state 2021-05-13 15:32:30 +01:00
3775d4523d Update on Overleaf. 2021-05-13 14:19:59 +00:00
47f3ea58e3 Update on Overleaf. 2021-05-13 14:17:12 +00:00
852b624324 Renamed appendix directories 2021-05-13 13:31:10 +01:00
331f98ae4e Update on Overleaf. 2021-05-13 12:29:04 +00:00
56e7b22e61 Update on Overleaf. 2021-05-13 12:27:57 +00:00
5fd95b0638 Update on Overleaf. 2021-05-13 12:17:10 +00:00
bfae620816 Update on Overleaf. 2021-05-13 12:04:27 +00:00
aef7709b4f Update on Overleaf. 2021-05-13 10:16:48 +00:00
3831d58a4a Update on Overleaf. 2021-05-13 08:55:00 +00:00
27772fe86b Update on Overleaf. 2021-05-13 08:20:32 +00:00
15e8ab0f7b Merge branch 'master' of https://git.overleaf.com/5fd37aafb26bc430615b7f18 2021-05-12 23:35:42 +01:00
8b0aca10dc Update on Overleaf. 2021-05-12 22:35:41 +00:00
c9afb062d1 updated references 2021-05-12 23:35:37 +01:00
b7e49b9c66 Merge branch 'master' of https://git.overleaf.com/5fd37aafb26bc430615b7f18 2021-05-12 23:32:46 +01:00
efc2cdb270 Update on Overleaf. 2021-05-12 22:32:45 +00:00
bd0e04bbf4 updated author 2021-05-12 23:32:38 +01:00
a2c9877548 Update on Overleaf. 2021-05-12 22:30:22 +00:00
808e26e968 wireguard references 2021-05-12 22:18:46 +01:00
c789477274 Update on Overleaf. 2021-05-12 21:18:31 +00:00
31018f96ef wireguard references 2021-05-12 22:13:50 +01:00
44fd9b8b98 Update on Overleaf. 2021-05-12 21:13:04 +00:00
bbc5abd8c4 updated bib 2021-05-12 21:37:33 +01:00
1664fcca31 Update on Overleaf. 2021-05-12 20:34:39 +00:00
029185fc82 Update on Overleaf. 2021-05-10 09:01:05 +00:00
0efe8729f4 Update on Overleaf. 2021-05-10 00:10:05 +00:00
e056cffc23 Update on Overleaf. 2021-05-09 17:05:20 +00:00
fce56fe3d6 added references 2021-05-09 14:24:49 +01:00
472c701222 Update on Overleaf. 2021-05-08 12:49:54 +00:00
2e5ecedcc9 Moved proforma 2021-05-08 13:26:00 +01:00
da0568ea35 Update on Overleaf. 2021-05-08 12:25:28 +00:00
03f54f3a81 box widening 2021-05-04 16:32:57 +01:00
5879be9d73 Update on Overleaf. 2021-05-04 07:55:48 +00:00
99d92fa9cb Update on Overleaf. 2021-05-04 07:38:03 +00:00
96191a5156 Update on Overleaf. 2021-05-04 07:14:21 +00:00
3d81a24271 Update on Overleaf. 2021-05-04 00:05:44 +00:00
095d977e83 Merge branch 'master' of https://git.overleaf.com/5fd37aafb26bc430615b7f18 2021-05-04 00:52:44 +01:00
1e06f0ade0 Update on Overleaf. 2021-05-03 23:52:38 +00:00
4b7337cde2 Security zone figures 2021-05-04 00:52:24 +01:00
29b33f3fea updated port 2021-05-03 23:22:40 +01:00
85f5916ff1 updated remote portal 2021-05-03 20:45:49 +01:00
f9e2f11a07 consistent stroke widths 2021-05-03 20:30:15 +01:00
5d40556556 added dots 2021-05-03 20:23:16 +01:00
b5fcde6b00 better line thicknesses 2021-05-03 20:22:16 +01:00
e7ce622cdf Update on Overleaf. 2021-05-03 19:04:34 +00:00
63f44e2187 fixed excessive beziers 2021-05-03 18:17:00 +01:00
7be0daa3ef added services 2021-05-03 18:13:57 +01:00
cc68c87d5d added client 2021-05-03 17:37:54 +01:00
71c768bf27 increased image dpi to 192 2021-05-03 17:27:55 +01:00
af2f55a66a improved image export 2021-05-03 17:26:31 +01:00
1dcf9d2568 updated local portal in diagram (png) 2021-05-03 17:18:28 +01:00
8390b13eb0 updated local portal in diagram 2021-05-03 17:10:09 +01:00
d390cc3ca3 straightened line 2021-05-03 16:45:55 +01:00
ff3d6ab1b9 removed pdf 2021-05-03 16:44:31 +01:00
c6c05d680f fixed missing book 2021-04-30 16:17:43 +01:00
2d121c0cd0 updated pdf 2021-04-30 16:06:03 +01:00
60d6977fad updated pdf 2021-04-30 16:04:49 +01:00
4f195f5380 updated pdf 2021-04-30 16:04:26 +01:00
b04bada753 updated pdf 2021-04-30 16:03:25 +01:00
0e4001adc7 Update on Overleaf. 2021-04-30 14:47:35 +00:00
d79d549988 missing dates 2021-04-30 15:33:27 +01:00
66b99dd7c7 updated bib 2021-04-30 15:30:33 +01:00
6df35aa5d6 Update on Overleaf. 2021-04-30 14:30:19 +00:00
3c22d9c04c Update on Overleaf. 2021-04-29 15:57:11 +00:00
8fb4eca4e8 Moved shell scripts 2021-04-29 16:54:08 +01:00
1c1615d3c9 Update on Overleaf. 2021-04-29 15:52:10 +00:00
e1257f879b Update on Overleaf. 2021-04-22 23:24:34 +00:00
1ace7fa02c Update on Overleaf. 2021-04-22 18:53:58 +00:00
b15ffc4f22 Update on Overleaf. 2021-04-10 00:25:44 +00:00
f3f1488831 Merge branch 'master' of gitea.hillion.co.uk:JakeHillion/dissertation-4-dissertation 2021-04-09 11:37:58 +01:00
4e86705ee5 minor graphical fixes 2021-04-09 11:36:19 +01:00
d770267ed2 Update on Overleaf. 2021-04-08 22:50:15 +00:00
e1ce6889ea Reverted scale and added raster 2021-04-08 21:04:46 +01:00
cf6dda75c8 Merge branch 'master' of https://git.overleaf.com/5fd37aafb26bc430615b7f18 2021-04-08 21:02:29 +01:00
591f9d18cd Update on Overleaf. 2021-04-08 20:02:17 +00:00
13047df9c4 downscaled overview 2021-04-08 21:02:12 +01:00
cec03dbb10 Added overview figure 2021-04-08 20:55:25 +01:00
f87d739902 Update on Overleaf. 2021-03-31 23:15:18 +00:00
d8bca325b2 updated import scripts 2021-03-31 13:56:20 +01:00
ec529482a7 Update on Overleaf. 2021-03-31 12:54:45 +00:00
306573edaa Update on Overleaf. 2021-03-26 22:15:36 +00:00
3e966e059f Update on Overleaf. 2021-03-22 01:06:37 +00:00
44b523a79e Updated graphs 2021-03-21 21:32:05 +00:00
9c7d7b5f6f Update on Overleaf. 2021-03-21 21:29:00 +00:00
549e714736 Update on Overleaf. 2021-03-02 00:29:06 +00:00
77fc2eb404 Update on Overleaf. 2021-02-19 11:19:03 +00:00
3249beaaa8 bib update 2021-02-08 22:32:32 +00:00
8a5514bc0f Update on Overleaf. 2021-02-08 22:28:49 +00:00
e450e1a91f Update on Overleaf. 2021-02-08 16:17:37 +00:00
e2b8e1f8aa Update on Overleaf. 2021-02-08 16:14:51 +00:00
5e5035fc68 Update on Overleaf. 2021-02-08 01:12:42 +00:00
88874239ba Merge branch 'master' of https://git.overleaf.com/5fd37aafb26bc430615b7f18 2021-02-08 00:17:49 +00:00
05ec41157f Update on Overleaf. 2021-02-08 00:17:49 +00:00
a6d97e77d5 bib update 2021-02-08 00:17:40 +00:00
0126158c16 Update on Overleaf. 2021-02-08 00:12:20 +00:00
90 changed files with 10309 additions and 1215 deletions

.gitignore vendored (2 lines changed)

@@ -1,3 +1,5 @@
thesis.pdf
_minted-thesis/
*.tdo*
.DS_Store*

0_Proforma/proforma.tex (new file, 31 lines)

@@ -0,0 +1,31 @@
% ************************** Thesis Proforma **************************
\begin{proforma}
\begin{tabular}{ll}
Candidate Number: & 2373A \\
Project Title: & A Multi-Path Bidirectional Layer 3 Proxy \\
Examination: & Computer Science Tripos - Part II, 2021 \\
Word Count: & 11894 \\
Line Count: & 3564\footnotemark \\
Project Originator: & The dissertation author \\
Supervisor: & Michael Dodson
\end{tabular}
\footnotetext[1]{Gathered using \texttt{cat **/*.go | wc -l}}
\vspace{6mm}
\section*{Original Aims of the Project}
This project aimed to produce a multipath proxy, which combines multiple, heterogeneous connections and utilises congestion control to dynamically monitor and adapt to changing link conditions. Using congestion control to dynamically monitor link capacity allows a wider variety of links to be combined, such as those on shared mediums or impacted by environmental factors. The core project aimed to implement such a proxy with links over TCP, with extensions relating to improved performance. The stretch goals of the project were to implement the proxy using other transport mechanisms.
\section*{Work Completed}
The project fulfilled its core success criteria and most of its extended criteria. The proxy supports transport over UDP, which provides increased performance and flexibility over TCP. The performance gains of the proxy over standard connections are tangible, demonstrating that the proxy is effective at providing increased bandwidth in many cases. The proxy solution also improves the resilience of the Internet connection, such that if any one connection remains up, all carried flows are maintained.
\section*{Special Difficulties}
None.
\end{proforma}

(image file changed: 130 KiB before, 130 KiB after)

@@ -0,0 +1,86 @@
%!TEX root = ../thesis.tex
%*******************************************************************************
%*********************************** First Chapter *****************************
%*******************************************************************************
\chapter{Introduction} %Title of the First Chapter
\ifpdf
\graphicspath{{1_Introduction/Figs/Raster/}{1_Introduction/Figs/PDF/}{1_Introduction/Figs/}}
\else
\graphicspath{{1_Introduction/Figs/Vector/}{1_Introduction/Figs/}}
\fi
The advertised broadband download speed of most UK residences lies between 30Mbps and 100Mbps \citep{ofcom_performance_2020}, which is often the highest available speed. However, in most cases, more of these connections can be installed at a price linear in the number of connections. More generally, a wider variety of Internet connections for fixed locations are becoming available with time. These include: DSL, Fibre To The Premises, 4G, 5G, Wireless ISPs such as LARIAT\footnote{\url{http://lariat.net}} and Low Earth Orbit ISPs such as Starlink.\footnote{\url{https://starlink.com}}
Though multiple low bandwidth, low cost connections may be accessible, there is no widely available mechanism to combine them into a single high speed, highly available connection for the user. This work focuses on providing such a mechanism, taking multiple, distinct connections and providing a single, aggregate connection via a proxy.
Using a proxy to combine connections provides three significant benefits: immediate failover of a single flow, exceeding the bandwidth of each individual connection with a single flow, and balancing of inbound connections. For failover, this means that a flow between a user of this proxy and an external user, such as a SIP call, is maintained when one Internet connection is lost. Exceeding the bandwidth of a single connection means that an application which utilises a single flow can take advantage of higher bandwidth than is available over a single connection. This is useful in cases such as a CCTV system, where viewing a live stream from a camera remotely is possible in a higher resolution with the increased bandwidth available. Finally, although methods such as load balancing routers can balance outgoing flows effectively in many cases, inbound flows cannot be handled so simply. Balancing inbound flows involves complex solutions, which rely on client support. The proxy presented here returns control to the network architect, and hides the complexity from the client and server on either side of the proxy, providing a practical mechanism for obtaining all three benefits.
\section{Existing Work}
Three pieces of existing work that will be examined for their usefulness are MultiPath TCP (MPTCP), Wireguard, and Cloudflare. MPTCP is an effort to expand TCP (Transmission Control Protocol) connections to multiple paths, and is implemented at the kernel layer such that applications which already use TCP can immediately take advantage of the multipath benefits. Wireguard is a state of the art Virtual Private Network (VPN), providing an excellent example for transmitting packets securely over the Internet. Finally, Cloudflare shows examples of how a high bandwidth network can be used to supplement multiple smaller networks, but in a different context to this project. This section focuses on how these examples do not satisfy the aims of this project, and how they provide useful initial steps and considerations for this project.
\subsection{MultiPath TCP (MPTCP)}
MPTCP \citep{handley_tcp_2020} is an extension to the regular Transmission Control Protocol, allowing for the creation of subflows. MPTCP was designed with two purposes: increasing resiliency and throughput for multi-homed mobile devices, and providing multi-homed servers with better control over balancing flows between their interfaces. Initially, MPTCP seems like a solution to the aims of this project. However, it falls short for three reasons: the rise of User Datagram Protocol (UDP)-based protocols, device knowledge of interfaces, and legacy devices.
Although many UDP-based protocols have been around for a long time, using UDP-based protocols in applications to replace TCP-based protocols is a newer effort. An example of an older UDP-based protocol is SIP \citep{schooler_sip_2002}, still widely used for VoIP, which would benefit particularly from increased resilience to single Internet connection outages. For a more recent UDP-based protocol intended to replace a TCP-based protocol, HTTP/3 \citep{bishop_hypertext_2021}, also known as HTTP-over-QUIC, is one of the largest. HTTP/3 is enabled by default in Google Chrome \citep{govindan_enabling_2020} and its derivatives, soon to be enabled by default in Mozilla Firefox \citep{damjanovic_quic_2021}, and available behind an experimental flag in Apple's Safari \citep{kinnear_boost_2020}. Previously, HTTP requests have been sent over TCP connections, but HTTP/3 switches this to a UDP-based protocol, reducing the benefit of MPTCP.
Secondly, devices using MPTCP must have knowledge of their network infrastructure. Consider the example of a phone with a WiFi and 4G interface reaching out to a voice assistant. The phone in this case can utilise MPTCP, as it has knowledge of both Internet connections. However, consider instead a tablet with only a WiFi interface, but behind a router with two Wide Area Network (WAN) interfaces using Network Address Translation (NAT). In this case, the tablet only sees one connection to the Internet, but could take advantage of two. This problem is difficult to solve at the client level, suggesting that solving the problem of combining multiple Internet connections is better suited to network infrastructure.
Finally, it is important to remember legacy devices. Often, these legacy devices will benefit the most from resilience improvements, and they are the least likely to receive updates to new networking technologies such as MPTCP. Although MPTCP can still provide a significant balancing benefit to the servers to which legacy devices connect, the legacy devices see little benefit from the availability of multiple connections. In contrast, providing an infrastructure-level solution, such as the proxy presented here, benefits all devices behind it equally, regardless of their legacy status.
\subsection{Wireguard}
Wireguard \citep{donenfeld_wireguard_2017} is a state of the art VPN solution. Though Wireguard does not serve to combine multiple network connections, it is widely considered an excellent method of transmitting packets securely via the Internet, demonstrated by its inclusion in the Linux kernel \citep{torvalds_linux_2020}, use by commercial providers of overlay networks \citep{pennarun_how_2020}, a security audit \citep{donenfeld_wireguard_2020}, and ongoing efforts for formal verification \citep{donenfeld_formal_nodate,preneel_cryptographic_2018}.
For each Layer 3 packet that Wireguard transports, it generates and sends a single UDP datagram. This is a pattern that will be followed in the UDP implementation of my software. These UDP packets present many of the same challenges as will occur in my software, such as vulnerability to replay attacks, so the ways in which the Wireguard implementation overcomes these challenges will be considered throughout. Finally, Wireguard provides an implementation in Go, which will be a useful reference for the Layer 3 networking in Go used in my project.
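As a concrete illustration of this one-datagram-per-packet pattern, the following sketch (in Go, the implementation language of this project) reads whole Layer 3 packets from a TUN-style device and forwards each one as a single UDP datagram. It is a minimal sketch only, not Wireguard's code nor the proxy built in this dissertation: the TUN device is assumed to be exposed as an \texttt{io.Reader}, and encryption, authentication and replay protection are omitted.
\begin{minted}{go}
// Sketch only: forward each Layer 3 packet read from a TUN-style device as
// exactly one UDP datagram, mirroring the Wireguard pattern described above.
// The tun argument is assumed to yield one whole IP packet per Read call.
package sketch

import (
	"io"
	"log"
	"net"
)

func forwardPackets(tun io.Reader, remoteAddr string) error {
	conn, err := net.Dial("udp", remoteAddr)
	if err != nil {
		return err
	}
	defer conn.Close()

	buf := make([]byte, 65535) // maximum size of an IP packet
	for {
		n, err := tun.Read(buf)
		if err != nil {
			return err
		}
		// One UDP datagram per Layer 3 packet; no batching or fragmentation.
		if _, err := conn.Write(buf[:n]); err != nil {
			log.Printf("send failed: %v", err)
		}
	}
}
\end{minted}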
\subsection{Cloudflare}
Cloudflare uses a global network of servers to provide a variety of infrastructure products, mostly pertaining to websites and security \citep{cloudflare_cloudflare_nodate}. Two of the products offered by Cloudflare are of particular interest to this project: load balancing and Magic WAN.
Cloudflare provides the option to proxy HTTPS traffic via their global network of servers to your origin server. This layer 7 (application layer) proxy operates on the level of HTTP requests themselves, and takes advantage of its knowledge of connections to load balance between origin servers. Cloudflare can use knowledge of origin server responsiveness to alter the load balancing. This is a similar use case to my proxy, where items (HTTP requests / IP packets) hit one high-bandwidth server (one of Cloudflare's edge servers / the remote proxy), and this server decides the path through which to proxy the item (a chosen origin server / a connection to the local proxy).
Unlike Cloudflare load balancing, the proxy presented in this work operates on layer 3. Cloudflare receives a stream of HTTPS requests and forwards each to a chosen origin server, while my remote proxy receives a stream of IP packets and forwards them via a chosen path to my local proxy. Though these achieve different goals, Cloudflare load balancing provides an example of using a high-bandwidth edge server to manage balancing between multiple low-bandwidth endpoints.
Cloudflare Magic WAN provides a fully software-defined WAN over their global network. That is, their anycast infrastructure will accept traffic to your network at any edge server in their global infrastructure before forwarding it to you. This supports DDoS mitigation and firewalling at a far higher capacity than on your origin servers. When a DDoS attack or a violation of firewall policy occurs, offending connections are cut off at the Cloudflare edge, without reaching the limited bandwidth of your local system.
Magic WAN demonstrates that there can be security benefits to moving your network edge to the cloud. By configuring the edge to block bad traffic, the limited bandwidth connections at your origin are protected. It further demonstrates that WAN-as-a-Service, the same class of product as my proxy, is possible at a large scale.
Though neither of these Cloudflare products address the aims of my proxy, specifically the multipath problem, they show how cloud infrastructure can be leveraged to support the Internet connections of services in different capacities.
\section{Aims}
This project aims to produce proxy software that uses congestion control to manage transporting packets across multiple paths of data flow, including across discrete Internet connections. When combining Internet connections, there are three main measures that one can prioritise: throughput, resilience, and latency. This project aims to improve throughput and resilience at the cost of latency. By using a layer 3 proxy for entire IP packets, connections are combined in a way that is transparent to devices on both sides of the proxy, overcoming the throughput and availability limitations of each individual connection. The basic structure of this proxy system is shown in Figure \ref{fig:proxy-components}.
\begin{figure}
\centering
\begin{tikzpicture}
\draw (0.5, 3.5) node {Local Proxy};
\draw (0,0.14) rectangle (1,3.14);
\draw (5.5, 3.5) node {Remote Proxy};
\draw (5,0.14) rectangle (6,3.14);
\draw (3, 2.8) node {ISP A};
\draw [<->] (1, 2.5) -- (5, 2.5);
\draw (3, 1.8) node {ISP B};
\draw [<->] (1, 1.5) -- (5, 1.5);
\draw (3, 0.8) node {ISP C};
\draw [<->] (1, 0.5) -- (5, 0.5);
\draw (-1.7, 1.5) node {Client};
\draw [<->] (-1, 1.5) -- (0, 1.5);
\draw (7.9, 1.5) node {Internet};
\draw [<->] (6, 1.5) -- (7, 1.5);
\end{tikzpicture}
\caption{The basic components of this proxy.}
\label{fig:proxy-components}
\end{figure}
The approach presented in this work achieves throughput superior to a single connection by using congestion control to split packets appropriately between each available connection. Further, resilience increases, as a connection loss results in decreased throughput but does not lose any connection state. Latency, however, increases, as packets must travel via a proxy server. Fortunately, the wide availability of well-peered cloud servers allows this latency increase to be kept minimal, affecting only the most latency sensitive applications.
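To make this packet-splitting mechanism concrete, the sketch below shows one simple policy a sender could use to choose a path for each packet from congestion-control state: the path with the most spare congestion window is selected, and the packet waits if every window is full, providing back-pressure. The type and field names here are assumptions made purely for illustration, not the scheduler used by the proxy itself.
\begin{minted}{go}
// Sketch only: pick the sending path with the most spare congestion window.
package sketch

type path struct {
	cwnd     int // congestion window granted by the congestion controller (bytes)
	inFlight int // bytes sent on this path but not yet acknowledged
}

// pickPath returns the path with the most spare window that can fit the
// packet, or nil if every window is full and the packet must wait.
func pickPath(paths []*path, packetLen int) *path {
	var best *path
	bestSpare := 0
	for _, p := range paths {
		spare := p.cwnd - p.inFlight
		if spare >= packetLen && spare > bestSpare {
			best, bestSpare = p, spare
		}
	}
	return best
}
\end{minted}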

(binary image file changed: 215 KiB before, 108 KiB after)

(image file changed: 42 KiB before, 42 KiB after)

(image file changed: 58 KiB before, 58 KiB after)

(new binary image file: 22 KiB)

@@ -0,0 +1,444 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="210mm"
height="297mm"
viewBox="0 0 210 297"
version="1.1"
id="svg8"
inkscape:version="1.0.2 (e86c870879, 2021-01-15)"
sodipodi:docname="security-zones-attackers.svg"
inkscape:export-filename="/home/jake/repos/dissertation/dissertation-4-dissertation/2_Preparation/Figs/security-zones-attackers.png"
inkscape:export-xdpi="192"
inkscape:export-ydpi="192">
<defs
id="defs2">
<marker
style="overflow:visible;"
id="marker3955"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mend"
inkscape:isstock="true">
<path
transform="scale(0.4) rotate(180) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path3953" />
</marker>
<marker
style="overflow:visible"
id="marker3837"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mstart"
inkscape:isstock="true">
<path
transform="scale(0.4) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path3835" />
</marker>
<marker
style="overflow:visible"
id="marker3737"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mstart"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path3735" />
</marker>
<marker
style="overflow:visible"
id="marker3643"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mstart"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path3641" />
</marker>
<marker
style="overflow:visible;"
id="marker3567"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mend"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) rotate(180) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path3565" />
</marker>
<marker
style="overflow:visible;"
id="marker1583"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mend"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) rotate(180) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path1581" />
</marker>
<marker
style="overflow:visible"
id="marker1513"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mstart"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path1511" />
</marker>
<marker
style="overflow:visible"
id="marker1437"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mstart"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path1435" />
</marker>
<marker
style="overflow:visible;"
id="marker1385"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mend"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) rotate(180) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path1383" />
</marker>
<marker
style="overflow:visible;"
id="Arrow1Mend"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mend"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) rotate(180) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path978" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Mstart"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mstart"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path975" />
</marker>
<marker
style="overflow:visible;"
id="Arrow1Lend"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="scale(0.8) rotate(180) translate(12.5,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path972" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lstart"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Lstart"
inkscape:isstock="true">
<path
transform="scale(0.8) translate(12.5,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path969" />
</marker>
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="2.8284271"
inkscape:cx="199.57677"
inkscape:cy="44.550937"
inkscape:document-units="mm"
inkscape:current-layer="layer1"
inkscape:document-rotation="0"
showgrid="true"
inkscape:window-width="2560"
inkscape:window-height="1384"
inkscape:window-x="2560"
inkscape:window-y="27"
inkscape:window-maximized="1">
<inkscape:grid
type="xygrid"
id="grid833" />
</sodipodi:namedview>
<metadata
id="metadata5">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1">
<rect
style="fill:none;fill-opacity:0.241541;stroke:#000000;stroke-width:0.172532;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect835"
width="96.572922"
height="42.333332"
x="1.3229166"
y="1.3229166" />
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837"
width="15.875"
height="10.583334"
x="3.96875"
y="17.197916" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="7.851006"
y="22.707998"
id="text841"><tspan
sodipodi:role="line"
id="tspan839"
x="7.851006"
y="22.707998"
style="stroke-width:0.264583">Client</tspan></text>
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837-3"
width="15.875"
height="10.583334"
x="26.458334"
y="17.197916" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="30.662308"
y="21.247402"
id="text841-6"><tspan
sodipodi:role="line"
id="tspan839-7"
x="30.662308"
y="21.247402"
style="stroke-width:0.264583">Local</tspan><tspan
sodipodi:role="line"
x="30.662308"
y="24.77519"
style="stroke-width:0.264583"
id="tspan871">Portal</tspan></text>
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837-3-2"
width="15.875"
height="10.583334"
x="79.375"
y="17.197916" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="83.049805"
y="22.8349"
id="text841-6-9"><tspan
sodipodi:role="line"
x="83.049805"
y="22.8349"
style="stroke-width:0.264583"
id="tspan871-2">Server</tspan></text>
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837-3-5"
width="15.875"
height="10.583334"
x="56.885418"
y="17.197916" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="59.615852"
y="21.2474"
id="text841-6-3"><tspan
sodipodi:role="line"
id="tspan839-7-5"
x="59.615852"
y="21.2474"
style="stroke-width:0.264583">Remote</tspan><tspan
sodipodi:role="line"
x="59.615852"
y="24.775188"
style="stroke-width:0.264583"
id="tspan871-6"> Portal</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:0.265;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow1Mstart);marker-end:url(#Arrow1Mend)"
d="m 19.84375,22.489584 h 6.614583"
id="path963" />
<path
style="fill:none;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker1437);marker-end:url(#marker1385)"
d="m 42.333333,22.489584 14.552084,1e-6"
id="path965"
sodipodi:nodetypes="cc" />
<path
style="fill:none;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker1513);marker-end:url(#marker1583)"
d="m 72.760416,22.489583 h 6.614583"
id="path967" />
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837-2"
width="15.875"
height="10.583334"
x="31.75"
y="5.2916665" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="36.17728"
y="11.547029"
id="text841-0"><tspan
sodipodi:role="line"
id="tspan839-2"
x="36.17728"
y="11.547029"
style="stroke-width:0.264583">Alice</tspan></text>
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837-37"
width="15.875"
height="10.583334"
x="51.59375"
y="5.2916665" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="57.304562"
y="11.228348"
id="text841-5"><tspan
sodipodi:role="line"
id="tspan839-9"
x="57.304562"
y="11.228348"
style="stroke-width:0.264583">Bob</tspan></text>
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837-2-2"
width="15.875"
height="10.583334"
x="31.75"
y="29.104168" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="35.118946"
y="35.359531"
id="text841-0-2"><tspan
sodipodi:role="line"
id="tspan839-2-8"
x="35.118946"
y="35.359531"
style="stroke-width:0.264583">Charlie</tspan></text>
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837-37-9"
width="15.875"
height="10.583334"
x="51.59375"
y="29.104168" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="56.246227"
y="35.570019"
id="text841-5-7"><tspan
sodipodi:role="line"
id="tspan839-9-3"
x="56.246227"
y="35.570019"
style="stroke-width:0.264583">Dave</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker3955);marker-end:url(#marker3955)"
d="m 46.302083,15.875 v 3.96875 h -3.96875"
id="path3551" />
<path
style="fill:none;stroke:#000000;stroke-width:0.265;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none;marker-start:url(#marker3643);marker-end:url(#marker3643)"
d="m 46.302083,29.104166 v -3.96875 h -3.96875"
id="path3553" />
<path
style="fill:none;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker3567);marker-end:url(#marker3567)"
d="m 52.916666,15.875 v 3.96875 h 3.96875"
id="path3555" />
<path
style="fill:none;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker3737);marker-end:url(#marker3737)"
d="m 52.916666,29.104166 v -3.96875 h 3.96875"
id="path3557" />
</g>
</svg>


(new binary image file: 20 KiB)

@@ -0,0 +1,363 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="210mm"
height="297mm"
viewBox="0 0 210 297"
version="1.1"
id="svg8"
inkscape:version="1.0.2 (e86c870879, 2021-01-15)"
sodipodi:docname="security-zones-vpn.svg"
inkscape:export-filename="/home/jake/repos/dissertation/dissertation-4-dissertation/2_Preparation/Figs/security-zones-vpn.png"
inkscape:export-xdpi="192"
inkscape:export-ydpi="192">
<defs
id="defs2">
<marker
style="overflow:visible;"
id="marker1583"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mend"
inkscape:isstock="true">
<path
transform="scale(0.4) rotate(180) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path1581" />
</marker>
<marker
style="overflow:visible"
id="marker1513"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mstart"
inkscape:isstock="true">
<path
transform="scale(0.4) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path1511" />
</marker>
<marker
style="overflow:visible"
id="marker1437"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mstart"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path1435" />
</marker>
<marker
style="overflow:visible;"
id="marker1385"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mend"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) rotate(180) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path1383" />
</marker>
<marker
style="overflow:visible;"
id="Arrow1Mend"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mend"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) rotate(180) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path978" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Mstart"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mstart"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path975" />
</marker>
<marker
style="overflow:visible;"
id="Arrow1Lend"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="scale(0.8) rotate(180) translate(12.5,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path972" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lstart"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Lstart"
inkscape:isstock="true">
<path
transform="scale(0.8) translate(12.5,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path969" />
</marker>
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="4"
inkscape:cx="173.94742"
inkscape:cy="64.556605"
inkscape:document-units="mm"
inkscape:current-layer="layer1"
inkscape:document-rotation="0"
showgrid="true"
inkscape:window-width="2560"
inkscape:window-height="1384"
inkscape:window-x="2560"
inkscape:window-y="27"
inkscape:window-maximized="1">
<inkscape:grid
type="xygrid"
id="grid833" />
</sodipodi:namedview>
<metadata
id="metadata5">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1">
<path
style="fill:none;stroke:#000000;stroke-width:0.265;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.53, 0.53;stroke-dashoffset:0;stroke-opacity:1"
d="M 56.921715,1.3229167 56.657146,27.78125"
id="path925-6"
sodipodi:nodetypes="cc" />
<rect
style="fill:#000000;fill-opacity:0.145211;stroke:none;stroke-width:0.204451;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
id="rect1691"
width="33.072914"
height="26.458332"
x="56.885418"
y="1.3229166" />
<path
style="fill:none;stroke:#000000;stroke-width:0.265;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.53, 0.53;stroke-dashoffset:0;stroke-opacity:1"
d="M 34.380862,1.3152943 34.116293,27.773628"
id="path925"
sodipodi:nodetypes="cc" />
<rect
style="fill:none;fill-opacity:0.241541;stroke:#000000;stroke-width:0.130673;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect835"
width="88.635422"
height="26.458334"
x="1.3229166"
y="1.3229166" />
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837"
width="15.875"
height="10.583334"
x="3.96875"
y="10.583333" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="7.851006"
y="16.093414"
id="text841"><tspan
sodipodi:role="line"
id="tspan839"
x="7.851006"
y="16.093414"
style="stroke-width:0.264583">Client</tspan></text>
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837-3"
width="15.875"
height="10.583334"
x="26.458334"
y="10.583334" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="30.662308"
y="14.632818"
id="text841-6"><tspan
sodipodi:role="line"
id="tspan839-7"
x="30.662308"
y="14.632818"
style="stroke-width:0.264583">Local</tspan><tspan
sodipodi:role="line"
x="30.662308"
y="18.160606"
style="stroke-width:0.264583"
id="tspan871">Portal</tspan></text>
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837-3-2"
width="15.875"
height="10.583334"
x="71.4375"
y="10.583334" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="75.112305"
y="16.220318"
id="text841-6-9"><tspan
sodipodi:role="line"
x="75.112305"
y="16.220318"
style="stroke-width:0.264583"
id="tspan871-2">Server</tspan></text>
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837-3-5"
width="15.875"
height="10.583334"
x="48.947918"
y="10.583334" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="51.678352"
y="14.632818"
id="text841-6-3"><tspan
sodipodi:role="line"
id="tspan839-7-5"
x="51.678352"
y="14.632818"
style="stroke-width:0.264583">Remote</tspan><tspan
sodipodi:role="line"
x="51.678352"
y="18.160606"
style="stroke-width:0.264583"
id="tspan871-6"> Portal</tspan></text>
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="14.022915"
y="6.614583"
id="text937"><tspan
sodipodi:role="line"
id="tspan935"
x="14.022915"
y="6.614583"
style="stroke-width:0.264583">Local</tspan></text>
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="67.033913"
y="6.5149798"
id="text937-7"><tspan
sodipodi:role="line"
id="tspan935-0"
x="67.033913"
y="6.5149798"
style="stroke-width:0.264583">Internet</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:0.265;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none;marker-start:url(#Arrow1Mstart);marker-end:url(#Arrow1Mend)"
d="m 19.84375,15.875 h 6.614583"
id="path963" />
<path
style="fill:none;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker1437);marker-end:url(#marker1385)"
d="m 42.333333,15.875 h 6.614583"
id="path965" />
<path
style="fill:none;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker1513);marker-end:url(#marker1583)"
d="m 64.822916,15.875 h 6.614583"
id="path967" />
<text
xml:space="preserve"
style="font-size:2.11667px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="37.980984"
y="25.146349"
id="text1653"><tspan
sodipodi:role="line"
id="tspan1651"
x="37.980984"
y="25.146349"
style="stroke-width:0.264583">Portal to Portal</tspan></text>
<text
xml:space="preserve"
style="font-size:2.11667px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="14.490785"
y="25.079409"
id="text1653-9"><tspan
sodipodi:role="line"
id="tspan1651-3"
x="14.490785"
y="25.079409"
style="stroke-width:0.264583">Client to Portal</tspan></text>
<text
xml:space="preserve"
style="font-size:2.11667px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="61.344189"
y="25.172338"
id="text1653-6"><tspan
sodipodi:role="line"
id="tspan1651-0"
x="61.344189"
y="25.172338"
style="stroke-width:0.264583">Portal to Server</tspan></text>
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="42.692226"
y="6.5149798"
id="text937-7-2"><tspan
sodipodi:role="line"
id="tspan935-0-6"
x="42.692226"
y="6.5149798"
style="stroke-width:0.264583">VPN</tspan></text>
</g>
</svg>


(new binary image file: 18 KiB)

@@ -0,0 +1,347 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="210mm"
height="297mm"
viewBox="0 0 210 297"
version="1.1"
id="svg8"
inkscape:version="1.0.2 (e86c870879, 2021-01-15)"
sodipodi:docname="security-zones.svg"
inkscape:export-filename="/home/jake/repos/dissertation/dissertation-4-dissertation/2_Preparation/Figs/security-zones.png"
inkscape:export-xdpi="192"
inkscape:export-ydpi="192">
<defs
id="defs2">
<marker
style="overflow:visible;"
id="marker1583"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mend"
inkscape:isstock="true">
<path
transform="scale(0.4) rotate(180) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path1581" />
</marker>
<marker
style="overflow:visible"
id="marker1513"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mstart"
inkscape:isstock="true">
<path
transform="scale(0.4) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path1511" />
</marker>
<marker
style="overflow:visible"
id="marker1437"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mstart"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path1435" />
</marker>
<marker
style="overflow:visible;"
id="marker1385"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mend"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) rotate(180) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path1383" />
</marker>
<marker
style="overflow:visible;"
id="Arrow1Mend"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mend"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) rotate(180) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path978" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Mstart"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Mstart"
inkscape:isstock="true"
inkscape:collect="always">
<path
transform="scale(0.4) translate(10,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path975" />
</marker>
<marker
style="overflow:visible;"
id="Arrow1Lend"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="scale(0.8) rotate(180) translate(12.5,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path972" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lstart"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Lstart"
inkscape:isstock="true">
<path
transform="scale(0.8) translate(12.5,0)"
style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path969" />
</marker>
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="2"
inkscape:cx="412.18702"
inkscape:cy="186.23616"
inkscape:document-units="mm"
inkscape:current-layer="layer1"
inkscape:document-rotation="0"
showgrid="true"
inkscape:window-width="2560"
inkscape:window-height="1384"
inkscape:window-x="2560"
inkscape:window-y="27"
inkscape:window-maximized="1">
<inkscape:grid
type="xygrid"
id="grid833" />
</sodipodi:namedview>
<metadata
id="metadata5">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1">
<rect
style="fill:#000000;fill-opacity:0.14521094;stroke:none;stroke-width:0.264999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
id="rect1691"
width="55.5625"
height="26.458332"
x="34.395832"
y="1.3229166" />
<path
style="fill:none;stroke:#000000;stroke-width:0.265;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.53, 0.53;stroke-dashoffset:0;stroke-opacity:1"
d="M 34.380862,1.3152943 34.116293,27.773628"
id="path925"
sodipodi:nodetypes="cc" />
<rect
style="fill:none;fill-opacity:0.241541;stroke:#000000;stroke-width:0.130673;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect835"
width="88.635422"
height="26.458334"
x="1.3229166"
y="1.3229166" />
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837"
width="15.875"
height="10.583334"
x="3.96875"
y="10.583333" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="7.851006"
y="16.093414"
id="text841"><tspan
sodipodi:role="line"
id="tspan839"
x="7.851006"
y="16.093414"
style="stroke-width:0.264583">Client</tspan></text>
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837-3"
width="15.875"
height="10.583334"
x="26.458334"
y="10.583334" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="30.662308"
y="14.632818"
id="text841-6"><tspan
sodipodi:role="line"
id="tspan839-7"
x="30.662308"
y="14.632818"
style="stroke-width:0.264583">Local</tspan><tspan
sodipodi:role="line"
x="30.662308"
y="18.160606"
style="stroke-width:0.264583"
id="tspan871">Portal</tspan></text>
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837-3-2"
width="15.875"
height="10.583334"
x="71.4375"
y="10.583334" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="75.112305"
y="16.220318"
id="text841-6-9"><tspan
sodipodi:role="line"
x="75.112305"
y="16.220318"
style="stroke-width:0.264583"
id="tspan871-2">Server</tspan></text>
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.213809;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect837-3-5"
width="15.875"
height="10.583334"
x="48.947918"
y="10.583334" />
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="51.678352"
y="14.632818"
id="text841-6-3"><tspan
sodipodi:role="line"
id="tspan839-7-5"
x="51.678352"
y="14.632818"
style="stroke-width:0.264583">Remote</tspan><tspan
sodipodi:role="line"
x="51.678352"
y="18.160606"
style="stroke-width:0.264583"
id="tspan871-6"> Portal</tspan></text>
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="14.022915"
y="6.614583"
id="text937"><tspan
sodipodi:role="line"
id="tspan935"
x="14.022915"
y="6.614583"
style="stroke-width:0.264583">Local</tspan></text>
<text
xml:space="preserve"
style="font-size:2.82223px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="58.567238"
y="6.5149798"
id="text937-7"><tspan
sodipodi:role="line"
id="tspan935-0"
x="58.567238"
y="6.5149798"
style="stroke-width:0.264583">Internet</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:0.265;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none;marker-start:url(#Arrow1Mstart);marker-end:url(#Arrow1Mend)"
d="m 19.84375,15.875 h 6.614583"
id="path963" />
<path
style="fill:none;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker1437);marker-end:url(#marker1385)"
d="m 42.333333,15.875 h 6.614583"
id="path965" />
<path
style="fill:none;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker1513);marker-end:url(#marker1583)"
d="m 64.822916,15.875 h 6.614583"
id="path967" />
<text
xml:space="preserve"
style="font-size:2.11667px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="37.980984"
y="25.146349"
id="text1653"><tspan
sodipodi:role="line"
id="tspan1651"
x="37.980984"
y="25.146349"
style="stroke-width:0.264583">Portal to Portal</tspan></text>
<text
xml:space="preserve"
style="font-size:2.11667px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="14.490785"
y="25.079409"
id="text1653-9"><tspan
sodipodi:role="line"
id="tspan1651-3"
x="14.490785"
y="25.079409"
style="stroke-width:0.264583">Client to Portal</tspan></text>
<text
xml:space="preserve"
style="font-size:2.11667px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="61.344189"
y="25.172338"
id="text1653-6"><tspan
sodipodi:role="line"
id="tspan1651-0"
x="61.344189"
y="25.172338"
style="stroke-width:0.264583">Portal to Server</tspan></text>
</g>
</svg>


@@ -0,0 +1,204 @@
%*******************************************************************************
%****************************** Second Chapter *********************************
%*******************************************************************************
\chapter{Preparation}
\ifpdf
\graphicspath{{2_Preparation/Figs/Raster/}{2_Preparation/Figs/PDF/}{2_Preparation/Figs/}}
\else
\graphicspath{{2_Preparation/Figs/Vector/}{2_Preparation/Figs/}}
\fi
Proxying packets is the process of taking packets that arrive at one location and transporting them to leave at another. This chapter focuses on the preparatory work to achieve this practically and securely, given the design outlined in the previous chapter, in which the proxy consolidates multiple connections to appear as one to both the wider Internet and devices on the local network. In Sections \ref{section:risk-analysis} and \ref{section:preparation-security}, I discuss the security risks and plans to confront them. In Section \ref{section:language-selection}, I present three languages: Go, Rust and C++, and provide context for choosing Go as the implementation language. Finally, in Sections \ref{section:requirements-analysis} and \ref{section:engineering-approach}, I present a requirements analysis and a description of the engineering approach for the project.
% ---------------------------- Risk Analysis ------------------------------- %
\section{Security Analysis}
\label{section:risk-analysis}
\begin{figure}
\centering
\includegraphics{2_Preparation/Figs/security-zones.png}
\caption{A summary of the three different transportation zones in this proxy, with grey shading indicating an adversarial network.}
\label{fig:security-zones}
\end{figure}
Any connection between two computers presents a set of security risks. A proxy adds further risks, as additional attack vectors are created by the proxy itself. This section first focuses on layered security: if we consider the Local Proxy and Remote Proxy, with everything in between, as a single Internet connection, layered security asks how the risks of this combined connection compare to those of a standard Internet connection, and what guarantees must be made for a proxied connection to carry no more risk than a standard connection.
The transportation of packets occurs in three segments, as shown in Figure \ref{fig:security-zones}. The first segment is Client-to-Proxy, which occurs physically in the local zone. The second is Proxy-to-Proxy, which occurs across the Internet. The third is Proxy-to-Server, which also occurs across the Internet. With the goal of providing security equivalent to a standard connection, the Client-to-Proxy communication can be considered safe - it is equivalent to connecting a client directly to a modem. Therefore, this section focuses on Proxy-to-Proxy and Proxy-to-Server communication. The threat model for this analysis is described next.
\subsection{Threat Model}
\label{section:threat-model}
The threat model considered here is that packets can be injected, read, and black-holed at any point in the Internet. This is the model employed by \cite{dolev_security_1983}, in which the attacker has full control of the message while it is in transit over the Internet. Private networks will be considered safe, covering both the connection from the client to the local proxy, and any connections within a VPN (Virtual Private Network).
\subsection{Proxy-to-Proxy Communication}
\begin{figure}
\centering
\includegraphics{2_Preparation/Figs/security-zones-attackers.png}
\caption{Four potential directions of attack for Proxy-to-Proxy communication.}
\label{fig:security-zones-attackers}
\end{figure}
There are four locations to insert or remove packets in the transport between the two proxies, as shown in Figure \ref{fig:security-zones-attackers}. In this case, Alice can insert packets to the local proxy to be sent to the client, Bob can insert packets to the remote proxy to be sent to the world, Charlie can steal packets from the local proxy destined for the remote proxy, and Dave can steal packets from the remote proxy destined for the local proxy. Each of these will be examined for the impact that it would cause.
Alice inserting packets of their choosing into the local proxy has no impact beyond that of a standard connection. Considering a client connected directly to a modem, any device on the Internet is able to send packets to that modem. Rather than inserting packets at the local proxy, Alice could simply send the same packets to the remote proxy, achieving the same effect. As such, inserting packets destined for the client presents no additional risk.
The impact of Bob inserting packets of their choosing to the remote proxy creates a legal risk to the user, and further cost. For example, Bob may be inserting packets destined for Frank, of an illegal nature. As the machine running the remote proxy is your responsibility, these packets would appear to have come from you. Similarly, if using a metered service such as many cloud providers, all traffic inserted by Bob will be billed to you. Thus it is highly important to prevent attackers such as Bob from inserting packets that will be forwarded as if from you.
Charlie and Dave black-holing packets presents the same risk in either direction: denial of service. Even if only a small percentage of packets can be stolen, the increase in packet loss has a significant effect on any loss-based congestion control mechanisms. This applies both to the tunnelled flows and to the congestion controlled flows between the proxies themselves. Mitigations for this will focus on opportunities for stealing packets unique to this proxy setup, such as convincing one proxy to send you a portion of its outgoing packets. As stated in Section \ref{section:threat-model}, attackers are able to black-hole packets going to a server on the Internet regardless of the presence of this proxy, so this will not be mitigated.
\subsection{Proxy-to-Server Communication}
Packets between the proxy and server are transmitted openly across the Internet. As this proxy transports entire IP packets at layer 3, no security guarantees need be maintained once the IP packet has left the remote proxy, as it is the responsibility of the application to provide its own security guarantees. Maintaining the same level of security as a standard connection can therefore be achieved by ensuring that the packets which leave one side of a proxy are a subset of the packets that entered the other side.
% ------------------------------- Security --------------------------------- %
\section{Security Solutions}
\label{section:preparation-security}
This section provides means of alleviating the risks given in Section \ref{section:risk-analysis}. To achieve this goal, the authenticity of packets will be verified. Authenticity in this context means two properties of the object hold: integrity and freshness \citep[pp. 14]{anderson_security_2008}. Integrity guarantees that any modification between the sending and receiving proxies can be detected, while freshness guarantees that reuse of a previously transmitted packet can be detected.
\subsection{Message Authentication}
To provide integrity and freshness for each message, I evaluate two choices: Message Authentication Codes (MACs) and Digital Signatures. A MAC is a hash digest generated from a concatenation of data and a secret key. The hash digest is appended to the data before transmission. Anyone sharing the secret key can perform an identical operation to verify the hash and, therefore, the integrity of the data \citep[pp. 352]{menezes_handbook_1997}. A digital signature instead uses the private key of a public/private keypair to sign the message, proving that the message was signed by the owner of the private key; the signature can be verified by anyone with the corresponding public key \citep[pp. 147-149]{anderson_security_2008}. In each case, a code is appended to the message, such that the integrity and authenticity of the message can be verified.
As both proxy servers are controlled by the same party, non-repudiation - the knowledge of which side of the proxy provided an authenticity guarantee for the message - is not necessary. This leaves MACs as the message authentication of choice for this project: producing a MAC is less computationally expensive than producing a digital signature, and the non-repudiation that MACs lack is not required.
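To make the append-and-verify pattern concrete, the sketch below generates and checks a MAC in Go using HMAC-SHA256 from the standard library. This is purely illustrative: the function names are not those of the implementation, and the implementation described later uses BLAKE2s rather than HMAC-SHA256.
\begin{minted}{go}
package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "fmt"
)

// appendMAC returns the message with its authentication code appended.
func appendMAC(key, message []byte) []byte {
    mac := hmac.New(sha256.New, key)
    mac.Write(message)
    return mac.Sum(message) // Sum appends the digest to its argument
}

// verifyMAC splits the trailing code off and checks it in constant time.
func verifyMAC(key, data []byte) ([]byte, bool) {
    if len(data) < sha256.Size {
        return nil, false
    }
    message, code := data[:len(data)-sha256.Size], data[len(data)-sha256.Size:]
    mac := hmac.New(sha256.New, key)
    mac.Write(message)
    return message, hmac.Equal(code, mac.Sum(nil))
}

func main() {
    key := []byte("shared secret")
    sealed := appendMAC(key, []byte("tunnelled packet"))
    if message, ok := verifyMAC(key, sealed); ok {
        fmt.Printf("verified %d byte message\n", len(message))
    }
}
\end{minted}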
\subsection{Connection Authentication}
Beyond authenticating messages themselves, the connection built between the two proxies must be authenticated. Consider a person-in-the-middle attack, where an attacker forwards the packets between the two proxies. Then, the attacker stops forwarding packets, and instead black holes them. This creates the denial of service mentioned in the previous section.
To prevent such forwarding attacks, the connection itself must be authenticated. I present two methods to solve this, the first being address allow-lists, and the second authenticating the IP address and port of each sent packet. The first solution is static, and simply states that the listening proxy may only respond to new communications when the IP address of the communication is in an approved set. This verifies that the connection is from an approved party, as they must control that IP to create a two-way communication from it.
The second is a more dynamic solution. The IP authentication header \citep{kent_ip_2005} achieves this by protecting all immutable parts of the IP header with an authentication code. In the case of this software, authenticating the source IP address, source port, destination IP address, and destination port ensures connection authenticity. By authenticating these addresses, which can be checked easily at both ends, it can be confirmed that both devices knew with whom they were talking, and from where the connection was initiated. That is, an owner of the shared key authorised this communication path.
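As an illustration of the second approach, the sketch below folds the connection four-tuple into the data protected by the MAC, so that a packet arriving from, or destined to, a different address or port fails verification. The function name and byte layout are assumptions for illustration, not the wire format used by the software.
\begin{minted}{go}
package sketch

import (
    "encoding/binary"
    "net"
)

// authenticatedData prepends the source and destination addresses and
// ports to the payload, forming the bytes over which the MAC is computed.
func authenticatedData(src, dst *net.UDPAddr, payload []byte) []byte {
    var port [2]byte
    buf := make([]byte, 0, 36+len(payload))
    buf = append(buf, src.IP.To16()...)
    binary.BigEndian.PutUint16(port[:], uint16(src.Port))
    buf = append(buf, port[:]...)
    buf = append(buf, dst.IP.To16()...)
    binary.BigEndian.PutUint16(port[:], uint16(dst.Port))
    buf = append(buf, port[:]...)
    return append(buf, payload...)
}
\end{minted}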
However, both of these solutions have shortfalls when Network Address Translation (NAT) is involved. The second solution, authenticating addresses, fails with any form of NAT: the IPs and ports of the packets sent by the sending proxy differ from those seen by the receiving proxy, and therefore cannot be authenticated. The first solution, providing a set of addresses, fails with Carrier Grade NAT (CG-NAT), as many users share the same IP address, and hence anyone behind the same IP could perform an attack. In most cases one of these solutions will work; otherwise, one can fall back on the security layering presented in Section \ref{section:layered-security}.
\subsection{Freshness}
To ensure freshness of received packets, an anti-replay algorithm is employed. Replay protection in IP authentication headers is achieved by using a sequence number on each packet. This sequence number is monotonically and strictly increasing. The algorithm that I have chosen to implement for this is \emph{IPsec Anti-Replay Algorithm without Bit Shifting} \citep{tsou_ipsec_2012}, also employed in Wireguard \citep{donenfeld_wireguard_2017}.
When applying message authentication, it was sufficient to authenticate messages individually to their flow. However, replay protection must be applied across all flows connected to the proxy; otherwise, a packet could be replayed on a different flow and remain undetected. This is similar to the design pattern of MPTCP's congestion control, where there is a separation between the sequence number of individual subflows and the sequence number of the data transport as a whole \citep[pp. 11]{wischik_design_2011}.
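As an illustration of windowed replay detection, the following Go sketch tracks the highest sequence number seen alongside a 64-packet bitmap beneath it. This single-word version shifts the bitmap as the window advances; the algorithm of \cite{tsou_ipsec_2012} instead keeps a larger ring of words precisely to avoid such shifting, but the acceptance rules are analogous. The names are illustrative rather than taken from the implementation.
\begin{minted}{go}
package sketch

// replayWindow records which recent sequence numbers have been seen.
type replayWindow struct {
    highest uint64
    bitmap  uint64 // bit i set means sequence number (highest - i) was seen
}

// check returns true if seq is fresh and records it; it returns false if
// the sequence number is a replay or too old to track.
func (w *replayWindow) check(seq uint64) bool {
    if seq > w.highest {
        shift := seq - w.highest
        if shift >= 64 {
            w.bitmap = 0 // the whole window has been skipped past
        } else {
            w.bitmap <<= shift
        }
        w.bitmap |= 1
        w.highest = seq
        return true
    }
    offset := w.highest - seq
    if offset >= 64 {
        return false // older than the window: reject
    }
    if w.bitmap&(1<<offset) != 0 {
        return false // already seen: replay
    }
    w.bitmap |= 1 << offset
    return true
}
\end{minted}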
\subsection{Layered Security}
\label{section:layered-security}
It was previously mentioned that my solution is transparent to the higher layer security encapsulated by proxied packets. Further to this, my solution provides transparent security in the other direction, where proxied packets are encapsulated by another security solution. Consider the case of a satellite office that employs both a whole network corporate VPN and my solution. The network can be configured in each of two cases: the multipath proxy runs behind the VPN, or the VPN runs behind the multipath proxy.
Packet structures for proxied packets in each of these cases are given in Appendix \ref{appendix:layered-security}, as Figure \ref{fig:whole-network-vpn-behind} and Figure \ref{fig:whole-network-vpn-infront} for the VPN Wireguard \citep{donenfeld_wireguard_2017}. In Figure \ref{fig:whole-network-vpn-infront}, the proxies are only accessible via the VPN protected network. It can be seen that the packet in Figure \ref{fig:whole-network-vpn-infront} is shorter, given the removal of the message authentication code and the data sequence number. The data sequence number is unnecessary, given that Wireguard uses the same anti-replay algorithm, and thus replayed packets would have been caught entering the secure network. Further, the message authentication code is unnecessary, as the authenticity of packets is now guaranteed by Wireguard.
Supporting and encouraging this layering of protocols provides a second benefit: if the security in my solution degrades with time, there are two options to repair it. One can either fix the open source application, or compose it with a security solution that is not broken, but perhaps provides redundant security guarantees, translating to additional overhead. To this end, the security features mentioned are all configurable. This allows for flexibility in implementation.
\begin{figure}
\centering
\includegraphics{2_Preparation/Figs/security-zones-vpn.png}
\caption{A summary of the three different transportation zones in this proxy behind a VPN, with grey shading indicating an adversarial network.}
\label{fig:security-zones-vpn}
\end{figure}
The benefits of using a VPN tunnel between the two proxies are shown in Figure \ref{fig:security-zones-vpn}. Whereas in Figure \ref{fig:security-zones} the proxy-to-proxy communication is across the unprotected Internet, in Figure \ref{fig:security-zones-vpn} this communication occurs across a secure overlay network. This allows the packet transport to be trusted, and avoids the need for additional verification. Further, it allows the application to remain secure in any situation where a VPN will work. Home users, in most cases, would use this solution with the inbuilt authentication mechanisms. Business users, who already have a need for a corporate VPN, would benefit from running my solution across VPN tunnels, avoiding the need to complete authentication work multiple times.
% -------------------------- Language Selection ---------------------------- %
\section{Language Selection}
\label{section:language-selection}
In this section, I evaluate three potential languages (C++, Rust and Go) for the implementation of this software. To support this evaluation, I have provided a sample program in each language. The sample program is a minimal example of reading packets from a TUN interface, placing them in a queue from a single thread, and consuming the packets from the queue with multiple threads. These examples are given in Figures \ref{fig:cpp-tun-sample} through \ref{fig:go-tun-sample}, in Appendix \ref{appendix:language-samples}. For each language, I considered the performance, code clarity, and the language ecosystem. This culminated in choosing Go for the implementation language.
I similarly evaluated two languages for the test suite: Python and Java. Though Python was initially chosen for rapid development and better ecosystem support, the final test suite is a combination of both Python and Java - Python for data processing, and Java for systems interaction.
\subsection{Implementation Languages}
\subsubsection{C++}
There are two primary advantages to completing this project in C++: speed of execution, and C++ being low level enough to achieve this project's goals (which turned out to be true for all considered languages).
The negatives of using C++ are demonstrated in the sample script, given in Figure \ref{fig:cpp-tun-sample}: achieving even the base functionality of this project requires multiple times more code than Rust or Go (93 lines compared to 34 for Rust or 48 for Go). This arises from the need to manually implement the required thread safe queue, which is available as a library for Rust, and included in the Go runtime. This manual implementation gives rise to additional risk of incorrect implementation, specifically with regards to thread safety, that could cause undefined behaviour, security vulnerabilities, and great difficulty debugging. Further, although open source queues are available, they are not handled by a package manager, and thus security updates would have to be manual, risking the introduction of bugs. Finally, C++ does not provide any memory safety guarantees.
\subsubsection{Rust}
Rust is memory safe and thread safe, solving the latter issues with C++. Rust also has a minimal runtime, allowing for an execution speed comparable to C or C++. The Rust sample is given in Figure \ref{fig:rust-tun-sample}, and is pleasantly concise.
For the purposes of this project, Rust's youthfulness is a negative. This is two-faceted: Integrated Development Environment (IDE) support and crate stability (crates are the Rust mechanism for package management). Firstly, the IDE support for Rust in my IDEs of choice is provided via a plugin to IntelliJ, and is not as well supported as the other languages. Secondly, the crate available for TUN support (tun-tap\footnote{\url{https://docs.rs/tun-tap/}}) does not yet provide a stable Application Programming Interface (API), which was noticed during test program development. Between writing the program initially and re-testing when documenting it, the crate API had changed to the point where my script no longer type checked. Further, the old version had disappeared, and thus I was left with a program that did not compile or function. Although I could write the API for TUN interaction myself, the safety benefits of Rust would be less pronounced, as the direct systems interaction requires \texttt{unsafe} code, which bypasses parts of the type-checker and borrow-checker, leading to an increased potential for bugs.
\subsubsection{Go}
The final language to evaluate is Go, often written as GoLang. The primary difference between Go and the other two evaluated languages is the presence of a runtime. The code sample is provided in Figure \ref{fig:go-tun-sample}. Go is significantly higher level than the other two languages mentioned, and provides a memory management model that is both simpler than C++ and more standard than Rust.
For the greedy structure of this project, Go's focus on concurrency is extremely beneficial. Go has channels in the standard runtime, which support any number of both producers and consumers. In this project, both SPMC (Single Producer Multi Consumer) and MPSC (Multi Producer Single Consumer) queues are required, so having these provided as a first class feature of the language is beneficial.
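As a brief illustration of this convenience, the sketch below drains a single channel with several consumer goroutines, forming an SPMC queue with no explicit locking. The structure is illustrative and does not mirror the repository's proxy code.
\begin{minted}{go}
package main

import (
    "fmt"
    "sync"
)

func main() {
    packets := make(chan []byte, 16)

    var wg sync.WaitGroup
    for worker := 0; worker < 3; worker++ { // multiple consumers
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            for pkt := range packets { // each packet goes to exactly one consumer
                fmt.Printf("worker %d consumed %d bytes\n", id, len(pkt))
            }
        }(worker)
    }

    for i := 0; i < 10; i++ { // a single producer
        packets <- make([]byte, 1500)
    }
    close(packets)
    wg.Wait()
}
\end{minted}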
Garbage collection and first order concurrency come together to make the code produced for this project highly readable, but rely on a more complex runtime than the other two languages evaluated. The downside is that the speed of execution is negatively affected by this runtime. However, for the purposes of this first implementation, that compromise is acceptable. By producing code that makes the functionality of the application clear, future implementations could more easily be built to mirror it. Given the performance shown in Section \ref{section:performance-evaluation}, the compromise of using a well-suited, high-level language clearly pays off.
\subsection{Evaluation Languages}
\label{section:preparation-language-choices-evaluation}
\subsubsection{Python}
Python is a dynamically typed language, and it was chosen as the initial implementation language for the test suite. The first reason for this is \verb'matplotlib',\footnote{\url{https://matplotlib.org/}} a widely used graphing library that can produce the graphs needed for this evaluation. The second reason is \verb'proxmoxer'\footnote{\url{https://github.com/proxmoxer/proxmoxer}}, a fluent API for interacting with a Proxmox server.
Having the required modules available allowed for a swift initial development sprint. This showed that the method of evaluation was viable and effective. However, the requirements of evaluation changed with the growth of the software, and an important part of an agile process is adapting to changing requirements. The lack of static typing complicates refactoring in Python, and this becomes increasingly challenging as the project grows. Therefore, after the initial proof of concept, it became necessary to explore another language for the Proxmox interaction.
\subsubsection{Java}
Java is statically typed and became the implementation language for all external interaction within the test suite. The initial reason for not choosing Java was the lack of availability of an equivalent library to \verb'proxmoxer'. However, as the implementation size grew in Python, the lack of static typing meant that making changes to the system without adding bugs became particularly difficult. Further, productivity was reduced by the lack of code suggestions, as \verb'proxmoxer' provides no type hints, meaning much API documentation had to be read for each implemented piece of code.
To this end, I developed a library in Java with an almost identical interface, but providing a high degree of type-safety. This allowed for much safer changes to the program, while also encouraging the use of IDE hints for quickly generating code. Although the data gathering was much improved by switching to Java, the code for generating graphs was perfectly manageable in Python. As such, a hybrid solution with Java for data gathering and Python for data processing was employed.
% ------------------------- Requirements Analysis -------------------------- %
\section{Requirements Analysis}
\label{section:requirements-analysis}
The requirements of the project are detailed in the Success Criteria of the Project Proposal (Appendix \ref{appendix:project-proposal}), and are the primary method of evaluation for project success. They are split into three categories: success criteria, extended goals and stretch goals.
The three categories of success criteria can be summarised as follows. The success criteria, or must have elements, are to provide a multi-path proxy that is functional, secure and improves speed and resilience in specific cases. The extended goals, or should have elements, are focused on increasing the performance and flexibility of the solution. The stretch goals, or could have elements, are aimed at increasing performance by reducing overheads, and supporting IPv6 alongside IPv4.
Beyond the success criteria, I wanted to demonstrate the practicality of my software on prototypic networking equipment; therefore, continuous integration testing and evaluation will run on Linux and FreeBSD.
% ------------------------- Engineering Approach --------------------------- %
\section{Engineering Approach}
\label{section:engineering-approach}
\subsubsection{Software Development Model}
The development of this software followed the agile methodology. Work was organised into weekly sprints, aiming for increased functionality in the software each time. By focusing on sufficient but not excessive planning, a minimum viable product was quickly established. From there, the remaining features could be implemented in correctly sized segments. Examples of these sprints are: initial build including configuration, TUN adaptors and main program; TCP transport, enabling an end-to-end connection between the two parts; repeatable testing, providing the data to evaluate each iteration of the project against its success criteria; UDP transport for performance and control.
The agile methodology welcomes changing requirements \citep{beck_manifesto_2001}, and as the project grew, it became clear where shortcomings existed, and these could be fixed in very quick pull requests. An example is given in Figure \ref{fig:changing-requirements}, in which the type of a variable was changed from \mintinline{go}{string} to \mintinline{go}{func() string}. This allowed for lazy evaluation, when it became clear that configuring fixed IP addresses or DNS names could be impractical. Static typing enables refactors like this to be completed with ease, particularly with the development tools mentioned in the next section, reducing the incidental complexity of the agile methodology.
\begin{figure}
\centering
\begin{subfigure}[t]{0.45\textwidth}
\centering
\inputminted{go}{2_Preparation/Samples/string.go}
\caption{The structure with a fixed local address.}
\end{subfigure}
\begin{subfigure}[t]{0.45\textwidth}
\centering
\inputminted{go}{2_Preparation/Samples/funcstring.go}
\caption{The structure with a dynamic local address.}
\end{subfigure}
\caption{An example of refactoring for changing requirements.}
\label{fig:changing-requirements}
\end{figure}
\subsubsection{Development Tools}
A large part of the language choice focused on development tools, particularly IDE support. I used GoLand (Go), IntelliJ (Java), and PyCharm (Python). Using intelligent IDEs, particularly with the statically-typed Go and Java, significantly increases programming productivity. They provide code suggestions and automated code generation for repetitive sections to reduce keystrokes, syntax highlighting for ease of reading, near-instant type checking without interaction, and many other features. Each reduces incidental complexity.
I used Git version control, with a self-hosted Gitea\footnote{\url{https://gitea.com/}} server as the remote. The repository contains over 180 commits, committed at regular intervals while programming. I maintained several on- and off-site backups (Multiple Computers + Git Remote + NAS + 2xCloud + 2xUSB). The Git remote was updated with every commit, the NAS and Cloud providers daily, with one USB updated every time significant work was added and the other a few days after. Having some automated and some manual backups, along with a variety of backup locations, minimises any potential data loss in the event of any failure. The backups are regularly checked for consistency, to ensure no data loss goes unnoticed.
Alongside my Gitea server, I have a self-hosted Drone\footnote{\url{http://drone.io/}} server for continuous integration: running Go tests, verifying formatting, and building artefacts. On a push, after verification, each artefact is built, uploaded to a central repository, and saved under the branch name. This dovetailed with my automated testing, which downloaded the relevant artefact automatically for the branch under test. I also built artefacts for multiple architectures to support real world testing on \texttt{AMD64} and \texttt{ARM64} architectures.
Continuous integration and Git are used in tandem to ensure that each pull request meets certain standards before merging, reducing the possibility of accidentally causing performance regressions. Pull requests also provide an opportunity to review submitted code, even with the same set of eyes, in an attempt to detect any glaring errors. Twenty-four pull requests were submitted to the repository for this project.
\subsubsection{Licensing}
I chose to license this software under the MIT license, which is simple and permissive.
% ---------------------------- Starting Point ------------------------------ %
\section{Starting Point}
I had significant experience with the language Go before the start of this project, though not formally taught. My knowledge of networking is limited to that of a user, and the content of the Part IB Tripos courses \emph{Computer Networking} and \emph{Principles of Communication} (the latter given after the start of this project). The security analysis drew from the Part IA course \emph{Software and Security Engineering} and the Part IB course \emph{Security}. As the software is highly concurrent, the Part IB course \emph{Concurrent and Distributed Systems} and the Part II Unit of Assessment \emph{Multicore Semantics and Programming} were applied.
% -------------------------------- Summary --------------------------------- %
\section{Summary}
In this chapter, I described my preparation for developing, testing and securing my proxy application. I chose to implement MACs, authenticated headers, and IP allow-lists for security, while maintaining composability with other solutions such as VPNs. I will be using Go as the implementation language for its high-level features that are well suited to this project, and Python and Java for the evaluation, chosen for programming speed and type-safety respectively. I have prepared a set of development tools, including IDEs, version control and continuous integration, to encourage productivity as both a developer and a project manager.

Binary file not shown.

After

Width:  |  Height:  |  Size: 140 KiB

File diff suppressed because it is too large

After

Width:  |  Height:  |  Size: 78 KiB

View File

@@ -0,0 +1,2 @@
ifconfig "$CLIENT_INTERFACE" 192.168.1.1 netmask 255.255.255.255
route add "$REMOTE_PORTAL_ADDR" -interface "$CLIENT_INTERFACE"

View File

@@ -0,0 +1,8 @@
# Route packets due to the other node via the WAN interface
pass out quick on $ext_if to $rp_ip port { 4725 }
# Else route these packets to the client
pass out quick on $cl_if to $rp_ip
# Route packets due to this node locally
pass in quick on $ext_if from $rp_ip port { 4725 }
# Else route these packets via the tunnel
pass out quick on $nc_if from $rp_ip

View File

@@ -0,0 +1,6 @@
# Forward SSH traffic to the host
pass in quick on $ext_if inet proto tcp to ($ext_if) port { 22 }
# Forward proxy traffic to the host
pass in quick on $ext_if inet proto udp to ($ext_if) port { 4725 }
# Forward everything via the netcombiner interface
pass out quick on $nc_if inet to ($ext_if)

View File

@@ -0,0 +1,2 @@
ip addr add 192.168.1.1 dev "$CLIENT_INTERFACE"
ip route add "$REMOTE_PORTAL_ADDR" dev "$CLIENT_INTERFACE"

View File

@@ -0,0 +1,10 @@
# The local table has priority, so packets for the proxy will be routed correctly
# Add a default route via the other node via the tunnel
ip route add table 20 default via 172.19.152.2 dev nc0
# Use this default route for outbound client packets
ip rule add from "$REMOTE_PORTAL_ADDRESS" iif "$CLIENT_INTERFACE" table 20 priority 20
# Add a route to the client
ip route add table 21 to "$REMOTE_PORTAL_ADDRESS" dev "$CLIENT_INTERFACE"
# Use this route for packets to the remote portal from the tunnel
# Note: there must be a higher priority table for proxy packets
ip rule add to "$REMOTE_PORTAL_ADDRESS" table 21 priority 21

View File

@@ -0,0 +1,12 @@
# Add a new rule to the local table at a lower priority
ip rule add from all table local priority 20
# Delete the existing lowest priority rule (always to the local table)
ip rule del priority 0
# Forward SSH traffic to the host
ip rule add to "$REMOTE_PORTAL_ADDRESS" dport 22 table local priority 1
# Forward proxy traffic to the host
ip rule add to "$REMOTE_PORTAL_ADDRESS" dport 4725 table local priority 1
# Create a new routing table and route for crossing the TUN
ip route add table 19 to "$REMOTE_PORTAL_ADDRESS" via 172.19.152.3 dev nc0
# Route all packets not already caught via the TUN
ip rule add to "$REMOTE_PORTAL_ADDRESS" table 19 priority 19

View File

@@ -0,0 +1,346 @@
%*******************************************************************************
%****************************** Third Chapter **********************************
%*******************************************************************************
\chapter{Implementation}
% **************************** Define Graphics Path **************************
\ifpdf
\graphicspath{{3_Implementation/Figs/Raster/}{3_Implementation/Figs/PDF/}{3_Implementation/Figs/}}
\else
\graphicspath{{3_Implementation/Figs/Vector/}{3_Implementation/Figs/}}
\fi
% --------------------------- Introduction --------------------------------- %
Implementation of the proxy is in two parts: software that provides a multipath layer 3 tunnel between two hosts, and the system configuration necessary to utilise this tunnel as a proxy. An overview of the software and system is presented in Figure \ref{fig:dataflow-overview}.
This chapter details this implementation in three sections. The software will be described in Sections \ref{section:implementation-packet-transport} and \ref{section:implementation-software-structure}. Section \ref{section:implementation-packet-transport} details the implementation of both TCP and UDP methods of transporting the tunnelled packets between the hosts. Section \ref{section:implementation-software-structure} explains the software's structure and dataflow. The system configuration will be described in Section \ref{section:implementation-system-configuration}. Figure \ref{fig:dataflow-overview} shows the path of packets within the proxy, and it will be referenced throughout these sections.
\begin{sidewaysfigure}
\includegraphics[width=\textheight]{overview.png}
\caption{Diagram of packet path from a client behind the proxy to a server on the Internet.}
\label{fig:dataflow-overview}
\end{sidewaysfigure}
% -------------------------------------------------------------------------- %
% -------------------------- Packet Transport ------------------------------ %
% -------------------------------------------------------------------------- %
\section{Packet Transport}
\label{section:implementation-packet-transport}
As shown in Figure \ref{fig:dataflow-overview}, the interfaces through which transport for packets is provided between the two hosts are producers and consumers. A transport pair is between a consumer on one proxy and a producer on the other, where packets enter the consumer and exit the corresponding producer. Two methods for producers and consumers are implemented: TCP and UDP. As the greedy load balancing of this proxy relies on congestion control, TCP provided an initial proof-of-concept, while UDP expands on this proof-of-concept to remove unnecessary overhead and improve performance in the case of TCP-over-TCP tunnelling. Section \ref{section:implementation-tcp} discusses the method of transporting discrete packets across the continuous byte stream of a TCP flow, before describing why this solution is not ideal. Then, Section \ref{section:implementation-udp} goes on to discuss adding congestion control to UDP datagrams, while avoiding retransmitting a proxied packet.
\subsection{TCP}
\label{section:implementation-tcp}
The requirements for greedy load balancing to function are simple: flow control and congestion control. TCP provides both of these, so was an obvious initial solution. However, TCP also introduces unnecessary overhead, which is discussed further below.
A TCP flow cannot be connected directly to a TUN adaptor, as the TUN adaptor accepts and outputs discrete and formatted IP packets while the TCP connection sends a stream of bytes. To resolve this, each packet sent across a TCP flow is prefixed with the length of the packet. When a TCP consumer is given a packet to send, it first sends the 32-bit length of the packet across the TCP flow, before sending the packet itself. The corresponding TCP producer then reads these 4 bytes from the TCP flow, before reading the number of bytes specified by the received number. This enables punctuation of the stream-oriented TCP flow into a packet-carrying connection.
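A minimal Go sketch of this framing is given below. The 32-bit length prefix is written before each packet and read back with \texttt{io.ReadFull}; the big-endian byte order and the function names are assumptions for illustration rather than details of the repository.
\begin{minted}{go}
package sketch

import (
    "encoding/binary"
    "io"
)

// writePacket frames a discrete packet onto the TCP byte stream by
// prefixing it with its 32-bit length.
func writePacket(w io.Writer, packet []byte) error {
    var length [4]byte
    binary.BigEndian.PutUint32(length[:], uint32(len(packet)))
    if _, err := w.Write(length[:]); err != nil {
        return err
    }
    _, err := w.Write(packet)
    return err
}

// readPacket reverses the framing: read the four length bytes, then
// exactly that many bytes of packet.
func readPacket(r io.Reader) ([]byte, error) {
    var length [4]byte
    if _, err := io.ReadFull(r, length[:]); err != nil {
        return nil, err
    }
    packet := make([]byte, binary.BigEndian.Uint32(length[:]))
    if _, err := io.ReadFull(r, packet); err != nil {
        return nil, err
    }
    return packet, nil
}
\end{minted}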
However, using TCP to tunnel TCP packets (TCP-over-TCP) can cause a degradation in performance \citep{honda_understanding_2005}. Further, using TCP to tunnel IP packets provides a superset of the required guarantees, in that reliable delivery and ordering are guaranteed. Reliable delivery can cause a decrease in performance for tunnelled flows which may not require reliable delivery, such as a live video stream. Ordering can limit performance when tunnelling multiple streams, as a packet for a phone call could already be received, but instead has to wait in a buffer for a packet for an unrelated download to arrive.
Although the TCP implementation provides an excellent proof-of-concept, work moved to a second UDP implementation, aiming to solve some of these problems. However, the TCP implementation is functionally correct; in cases where a connection that suffers particularly high packet loss is combined with one which is more stable, TCP could be employed on the high loss connection to limit overall packet loss. The effectiveness of such a solution would be implementation specific, so is left for the architect to decide.
% --------------------------------- UDP ------------------------------------ %
\subsection{UDP}
\label{section:implementation-udp}
After initial success with the TCP proof-of-concept, work moved to developing a UDP protocol for transporting the proxied packets. UDP differs from TCP in providing a more basic mechanism for sending discrete messages, while TCP provides a stream of bytes. Implementing a UDP datagram proxy solution returns control from the kernel to the application itself, allowing much more fine-grained management of congestion control. Further, UDP provides increased performance over TCP by removing ordering guarantees, and improving the quality of TCP tunnelling compared to TCP-over-TCP. This allows maximum flexibility, as application developers should not have to avoid using TCP to maintain compatibility with my proxy.
This section first describes the special purpose congestion control mechanism designed, which uses negative acknowledgements to avoid retransmissions. This mechanism informs the design of the UDP packet structure. Finally, this section discusses the initial implementation of congestion control, which is based on the characteristic curve of TCP New Reno \citep{henderson_newreno_2012}.
\subsection{Congestion Control}
Congestion control is most commonly applied in the context of reliable delivery. This provides a significant benefit to TCP congestion control protocols: cumulative acknowledgements. As all of the bytes should always arrive eventually, unless the connection has faulted, the acknowledgement number (ACK) can simply be set to the highest received byte. Therefore, some adaptations are necessary for such a congestion control algorithm to apply in a context where reliable delivery is not expected. Firstly, for a packet based connection, ACKing specific bytes makes little sense - a packet is atomic, and is lost as a whole unit. To account for this, sequence numbers and their respective acknowledgements will be for entire packets, as opposed to per byte.
Secondly, for a protocol that does not guarantee reliable delivery, cumulative acknowledgements are not as simple. As tunnelled packets may legitimately never arrive even when the flow is functioning correctly, a situation where a packet is never received would cause deadlock with an ACK that is simply set to the highest received sequence number, as demonstrated in Figure \ref{fig:sequence-ack-discontinuous}. Neither side can progress once the window is full, as the sender will not receive an ACK to free up space within the window, and the receiver will not receive the missing packet to increase the ACK. In TCP, one would expect the missing packet (one above the received ACK) to be retransmitted, which allows the ACK to catch up in only one RTT. However, as retransmissions are to be avoided, the UDP solution presented here would become deadlocked - the sending side knows that the far side has not received the packet, but must not retransmit.
\begin{figure}
\hfill
\begin{subfigure}[t]{0.3\textwidth}
\centering
\begin{tabular}{|c|c|}
SEQ & ACK \\
1 & 0 \\
2 & 0 \\
3 & 2 \\
4 & 2 \\
5 & 2 \\
6 & 5 \\
6 & 6
\end{tabular}
\caption{ACKs only responding to in order sequence numbers}
\label{fig:sequence-ack-continuous}
\end{subfigure}\hfill
\begin{subfigure}[t]{0.3\textwidth}
\centering
\begin{tabular}{|c|c|}
SEQ & ACK \\
1 & 0 \\
2 & 0 \\
3 & 2 \\
5 & 3 \\
6 & 3 \\
7 & 3 \\
7 & 3
\end{tabular}
\caption{ACKs only responding to a missing sequence number}
\label{fig:sequence-ack-discontinuous}
\end{subfigure}\hfill
\begin{subfigure}[t]{0.35\textwidth}
\centering
\begin{tabular}{|c|c|c|}
SEQ & ACK & NACK \\
1 & 0 & 0 \\
2 & 0 & 0 \\
3 & 2 & 0 \\
5 & 2 & 0 \\
6 & 2 & 0 \\
7 & 6 & 4 \\
7 & 7 & 4
\end{tabular}
\caption{ACKs and NACKs responding to a missing sequence number}
\label{fig:sequence-ack-nack-discontinuous}
\end{subfigure}
\caption{Congestion control responding to correct and missing sequence numbers of packets.}
\label{fig:sequence-ack-nack-comparison}
\hfill
\end{figure}
I present a solution based on Negative Acknowledgements (NACKs). When the receiver believes that it will never receive a packet, it increases the NACK to the highest missing sequence number, and sets the ACK to one above the NACK. This occurs after a timeout that is presently set at $3*RTT$ (Round Trip Time). The ACK algorithm is then performed to grow the ACK as high as possible. This simplifies to any change in the NACK representing at least one lost packet, which can be used by the specific congestion control algorithms to react. Though this usage of the NACK appears to provide a close approximation to ACKs on reliable delivery, the choice of how to use the ACK and NACK fields is delegated to the congestion controller implementation, allowing for different implementations if they better suit the method of congestion control. Using NACKs, the deadlock in Figure \ref{fig:sequence-ack-discontinuous} can be avoided, with the case in Figure \ref{fig:sequence-ack-nack-discontinuous} occurring instead. The NACK is used to inform the far side that a packet was lost, and therefore allow it to continue sending fresh packets. In contrast, TCP would retransmit the missing packet, which can be avoided with this NACK-based solution.
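A simplified, single-threaded Go sketch of this receiver-side rule is given below: the ACK grows over contiguously received packets, and once the gap above the ACK has been outstanding for longer than the timeout, the NACK moves to the highest missing sequence number and the ACK to one above it. The names and data structures are illustrative; the concurrent implementation in the repository is structured differently.
\begin{minted}{go}
package sketch

import "time"

// ackState tracks the ACK and NACK advertised by a receiver.
type ackState struct {
    ack, nack uint32          // highest contiguous ACK and last declared-lost NACK
    pending   map[uint32]bool // packets received above the current ACK
    gapSeen   time.Time       // when the current gap was first noticed
    timeout   time.Duration   // e.g. 3 * RTT
}

func newAckState(timeout time.Duration) *ackState {
    return &ackState{pending: make(map[uint32]bool), timeout: timeout}
}

func (s *ackState) onReceive(seq uint32, now time.Time) {
    if seq > s.ack {
        s.pending[seq] = true
    }
    s.advance(now)
}

func (s *ackState) advance(now time.Time) {
    // Grow the ACK over contiguously received packets.
    for s.pending[s.ack+1] {
        delete(s.pending, s.ack+1)
        s.ack++
        s.gapSeen = time.Time{}
    }
    if len(s.pending) == 0 {
        s.gapSeen = time.Time{} // nothing received above the ACK: no gap
        return
    }
    // A gap exists: start the clock, or check whether it has expired.
    if s.gapSeen.IsZero() {
        s.gapSeen = now
        return
    }
    if now.Sub(s.gapSeen) < s.timeout {
        return
    }
    // The gap has timed out: set the NACK to the highest missing sequence
    // number and the ACK to one above it, then keep growing the ACK.
    next := s.ack + 1
    for !s.pending[next] {
        next++
    }
    s.nack = next - 1
    s.ack = next - 1
    s.gapSeen = time.Time{}
    s.advance(now)
}
\end{minted}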
Given the decision to use ACKs and NACKs, the packet structure for UDP datagrams can now be designed. The chosen structure is given in Figure \ref{fig:udp-packet-structure}. The congestion control header consists of the sequence number and the ACK and NACK, each a 32-bit unsigned integer.
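To illustrate, the payload handed to a UDP consumer could be assembled as in the sketch below, prepending the 12-byte congestion control header to the proxied packet (the security footer is omitted, and the big-endian byte order is an assumption for illustration).
\begin{minted}{go}
package sketch

import "encoding/binary"

// ccHeader holds the three 32-bit fields of the congestion control header.
type ccHeader struct {
    Ack, Nack, Seq uint32
}

// marshal prepends the congestion control header to a proxied packet,
// in the order shown in the packet structure figure.
func (h ccHeader) marshal(packet []byte) []byte {
    buf := make([]byte, 12, 12+len(packet))
    binary.BigEndian.PutUint32(buf[0:4], h.Ack)
    binary.BigEndian.PutUint32(buf[4:8], h.Nack)
    binary.BigEndian.PutUint32(buf[8:12], h.Seq)
    return append(buf, packet...)
}
\end{minted}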
\begin{figure}
\centering
\begin{bytefield}[bitwidth=0.6em]{32}
\bitheader{0-31} \\
\begin{rightwordgroup}{UDP\\Header}
\bitbox{16}{Source port} & \bitbox{16}{Destination port} \\
\bitbox{16}{Length} & \bitbox{16}{Checksum}
\end{rightwordgroup} \\
\begin{rightwordgroup}{CC\\Header}
\bitbox{32}{Acknowledgement number} \\
\bitbox{32}{Negative acknowledgement number} \\
\bitbox{32}{Sequence number}
\end{rightwordgroup} \\
\wordbox[tlr]{1}{Proxied IP packet} \\
\skippedwords \\
\wordbox[blr]{1}{} \\
\begin{rightwordgroup}{Security\\Footer}
\wordbox[tlr]{1}{Security footer} \\
\wordbox[blr]{1}{$\cdots$}
\end{rightwordgroup}
\end{bytefield}
\caption{UDP packet structure}
\label{fig:udp-packet-structure}
\end{figure}
\subsubsection{New Reno}
TCP New Reno \citep{henderson_newreno_2012} is widely known for its sawtooth pattern of throughput. New Reno is an RTT-based congestion control mechanism, which, in the steady state, increases the window size (number of packets in flight at a time) by 1 for each successful window. In the case of a retransmission, this quantity halves. The window size is the quantity of packets that can be in flight at one time, which depends on the round trip time, as a longer round trip time requires a larger window size to transmit the same amount of packets. For a freshly started New Reno connection, slow start occurs, which increases the window size by 1 for each packet transmitted successfully, as opposed to each full window of packets. This creates an exponential curve, which stops on the first transmission failure.
An algorithm that performs similarly but takes advantage of NACKs works identically to New Reno over a flawless connection: if no packets are lost, the behaviour is identical. This includes increasing the window size by one for each successfully transmitted packet initially, and dropping to increasing by one for each full window later in the process. The difference from TCP's mechanisms arises when packets are lost, and more specifically, how that is detected. This is the NACK mechanism, which sets the NACK to the missing packet if a packet has been waiting for more than $0.5*RTT$ to be acknowledged. For example, if packet 4 arrives before packet 3, and packet 3 has still not arrived after an additional half of the round trip time (the entire time expected for the packet to arrive), the NACK field on the next packet would be set to 3, with the ACK field set to 4. When the sender receives this NACK response, it affects the window size as TCP would (halving the size, and stopping slow start).
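A simplified sketch of this window response is given below: each acknowledged packet grows the window by one during slow start and by one per full window afterwards, while a NACK ends slow start and halves the window. The structure is illustrative only; the controller in the repository additionally handles concurrent access, as described next.
\begin{minted}{go}
package sketch

// newReno mirrors the window response described above; concurrency and
// the tracking of in-flight packets are omitted.
type newReno struct {
    window    float64 // packets permitted in flight
    slowStart bool
}

func newNewReno() *newReno {
    return &newReno{window: 1, slowStart: true}
}

// onAck grows the window: one per packet in slow start, one per full
// window of acknowledgements afterwards.
func (c *newReno) onAck() {
    if c.slowStart {
        c.window++
    } else {
        c.window += 1 / c.window
    }
}

// onNack reacts to a reported loss as New Reno reacts to a loss event:
// slow start ends and the window halves.
func (c *newReno) onNack() {
    c.slowStart = false
    c.window /= 2
    if c.window < 1 {
        c.window = 1
    }
}
\end{minted}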
The congestion control algorithm has multiple threads accessing it at any one time, so uses a mixture of atomic operations and fine-grained locking to remain consistent. The \texttt{ack}, \texttt{nack} and \texttt{windowSize} fields all use atomic operations, such that they can be read immediately and allow a packet to almost be sent without gaining a lock. However, the \texttt{inFlight} and \texttt{awaitingAck} fields are each protected by a mutex, ensuring that they remain consistent. This is a compromise between performance and correctness, limiting code complexity while allowing more performance than coarse-grained locks. Further, high-level data structures (specifically, growable lists) are used, which reduce programming complexity at the cost of some performance. This allows for good readability, and increases the likelihood of writing correct code.
Congestion control is one of the main points of testing in the repository. The New Reno controller was developed mostly with test-driven development, due to the complicated interactions between threads. Though the testing of multithreaded code can be extremely challenging due to the risk of deadlock when the code is incorrect, large timeouts and a CI environment made this quite manageable.
% -------------------------------------------------------------------------- %
% ------------------------- Software Structure ----------------------------- %
% -------------------------------------------------------------------------- %
\section{Software Structure}
\label{section:implementation-software-structure}
This section details the design decisions behind the application structure, and how it fits into the systems where it will be used. Much of the focus is on the flexibility of the interfaces to future additions, while also describing the concrete implementations available with the software as of this work.
% ---------------------- Running the Application --------------------------- %
\subsection{Running the Application}
Initially, the application suffered from a significant race condition when starting. The application followed a standard flow, where it created a TUN adaptor to receive IP packets and then began proxying the packets from/to it. However, when running the application, no notification was received when this TUN adaptor became available. As such, any configuration completed on the TUN adaptor was racing with the TUN adaptor's creation, resulting in many start failures.
The software now runs in much the same way as other daemons, leading to a familiar launch experience. The primary inspiration for the functionality of the application is Wireguard \citep{donenfeld_wireguard_2017}, specifically \verb'wireguard-go'\footnote{\url{https://github.com/WireGuard/wireguard-go}}. To launch the application, the following shell command is used:
\begin{minted}{shell-session}
netcombiner nc0
\end{minted}
When the program is executed as such, the following control flow occurs:
\begin{minted}{c}
if not child process:
c = validate_configuration()
t = new_tun(nc0)
child_process = new_process(this, c, t)
return
proxy = new_proxy(c, t)
proxy.run()
\end{minted}
Firstly, the application validates the configuration, allowing an early exit if misconfigured. Then the TUN adaptor is created. This TUN adaptor and the configuration are handed to a duplicate of the process, which sees them and begins running the given proxy. This allows the parent process to exit, while the background process continues running as a daemon.
By exiting cleanly and running the proxy in the background, the race condition is avoided. The exit is a notice to the launcher that the TUN adaptor is up and ready, allowing for further configuration steps to occur. Otherwise, an implementation specific signal would be necessary to allow the launcher of the application to move on, which conflicts with the requirement of easy future platform compatibility.
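A simplified Go sketch of this start-up flow is shown below. The environment variable name and the \texttt{openTun} and \texttt{runProxy} functions are hypothetical placeholders standing in for the real configuration, TUN and proxy code; the essential points are that the parent creates the adaptor, hands it to a re-executed copy of itself as an inherited file descriptor, and then exits cleanly.
\begin{minted}{go}
package main

import (
    "fmt"
    "os"
    "os/exec"
)

func main() {
    if os.Getenv("NC_IS_CHILD") == "" {
        // Parent: create the TUN adaptor, hand it to a copy of this
        // process, and exit so the launcher knows the adaptor is ready.
        tun, err := openTun(os.Args[1])
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        child := exec.Command(os.Args[0], os.Args[1:]...)
        child.Env = append(os.Environ(), "NC_IS_CHILD=1")
        child.ExtraFiles = []*os.File{tun} // inherited by the child as fd 3
        if err := child.Start(); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        return
    }
    // Child: pick up the adaptor created by the parent and run the proxy.
    runProxy(os.NewFile(3, "tun"))
}

// Hypothetical placeholders: a real implementation would open the named
// TUN device and run the proxy loop over it.
func openTun(name string) (*os.File, error) { return os.Open(os.DevNull) }
func runProxy(tun *os.File)                 { select {} }
\end{minted}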
% ------------------------------ Security ---------------------------------- %
\subsection{Security}
The integrated security solution of this software is in two parts: message authentication and repeat protection. The interface for these is shared, as they perform the same action from the perspective of the producer or consumer.
\subsubsection{Message Authenticity Verification}
Message authentication is provided by a pair of interfaces, \texttt{MacGenerator} and \texttt{MacVerifier}, which add bytes at consumers and remove bytes at producers respectively. \texttt{MacGenerator} provides a method which takes input data and produces a list of bytes as output, to be appended to the message. \texttt{MacVerifier} takes the appended bytes to the message, and confirms whether they are valid for that message.
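An illustrative shape for these interfaces is sketched below. The method names, and the \texttt{CodeLength} method used by the later chaining sketch to know how many bytes to strip, are assumptions for illustration rather than the exact API of the repository.
\begin{minted}{go}
package sketch

// MacGenerator produces the bytes appended to a message by a consumer.
type MacGenerator interface {
    Generate(message []byte) []byte
    CodeLength() int // how many bytes Generate appends
}

// MacVerifier checks and strips the bytes appended by the matching generator.
type MacVerifier interface {
    Verify(message, code []byte) error // nil if code is valid for message
    CodeLength() int
}
\end{minted}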
The provided implementation for message authenticity uses the BLAKE2s \citep{hutchison_blake2_2013} algorithm. By using library functions, the implementation is achieved simply by matching the interface provided by the library and the interface mentioned here. This ensures clarity, and reduces the likelihood of introducing a bug.
Key exchange is presently implemented by using a secure and external channel. For example, one might configure their proxies using the Secure Shell Protocol (SSH), and would transmit the shared key over this secure channel. In future, this could be extended with external software that manages the tunnel for you, by using its own secure channel to configure the proxies with a shared key.
\subsubsection{Repeat Protection}
Repeat protection takes advantage of the same two interfaces already mentioned. To allow this to be implemented, each consumer or producer takes an ordered list of \verb'MacGenerator's or \verb'MacVerifier's. When a packet is consumed, each of the generators is run in order, operating on the data of the last. When called by a producer, this operation is completed in reverse, with each \verb'MacVerifier' stripping off the bytes added by the corresponding generator. An example of this is shown in Figure \ref{fig:udp-packet-dataflow}. Firstly, the data sequence number is generated, before the MAC. When receiving the packet, the MAC is first stripped, before the data sequence number. This means that the data sequence number is protected by the MAC.
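The chaining can be pictured as in the sketch below, which uses the illustrative interface shapes from above: a consumer appends each generator's code in turn, and a producer verifies and strips the codes in the reverse order.
\begin{minted}{go}
package sketch

import "errors"

// seal runs each generator over the packet so far, appending its code.
func seal(data []byte, generators []MacGenerator) []byte {
    for _, g := range generators {
        data = append(data, g.Generate(data)...)
    }
    return data
}

// open strips and verifies the codes in reverse order, returning the
// original packet if every layer verifies.
func open(data []byte, verifiers []MacVerifier) ([]byte, error) {
    for i := len(verifiers) - 1; i >= 0; i-- {
        v := verifiers[i]
        split := len(data) - v.CodeLength()
        if split < 0 {
            return nil, errors.New("packet too short")
        }
        message, code := data[:split], data[split:]
        if err := v.Verify(message, code); err != nil {
            return nil, err
        }
        data = message
    }
    return data, nil
}
\end{minted}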
One difference between repeat protection and MAC generation is that repeat protection is shared between all producers and consumers. This is in contrast to message authentication, which is, as implemented, specific to each producer or consumer. The currently implemented repeat protection is that of \cite{tsou_ipsec_2012}. The code sample is provided with a BSD license, so is compatible with this project, and hence was simply adapted from C to Go. The repeat protection is created at a host level when building the proxy, and the same instance is shared amongst all producers and consumers, so it has to be thread safe. Producing the sequence numbers is achieved with a single atomic operation, avoiding the need to lock at all. Verifying the sequences requires altering multiple elements of an array of bytes, so uses locking to ensure consistency. Ensuring that locks are only taken when necessary makes the calls as efficient as possible.
\begin{figure}
\centering
\begin{tikzpicture}[
onenode/.style={rectangle, draw=black!60, fill=red!5, very thick, minimum size=5mm, align=center},
twonode/.style={rectangle, draw=black!60, fill=red!5, very thick, minimum size=5mm, align=center, rectangle split, rectangle split parts=2},
threenode/.style={rectangle, draw=black!60, fill=red!5, very thick, minimum size=5mm, align=center, rectangle split, rectangle split parts=3},
fournode/.style={rectangle, draw=black!60, fill=red!5, very thick, minimum size=5mm, align=center, rectangle split, rectangle split parts=4},
fivenode/.style={rectangle, draw=black!60, fill=red!5, very thick, minimum size=5mm, align=center, rectangle split, rectangle split parts=5},
bluenode/.style={rectangle, draw=black!60, fill=blue!5, very thick, minimum size=5mm, align=center},
]
% Nodes
\node[fivenode] at (0,0) (udp) {\nodepart{one} UDP Header \nodepart{two} Congestion\\Control\\Header \nodepart{three} Packet\\Data \nodepart{four} Data\\Sequence\\Number \nodepart{five} MAC};
\node[fournode] at (3,0) (mac) {\nodepart{one} Congestion\\Control\\Header \nodepart{two} Packet\\Data \nodepart{three} Data\\Sequence\\Number \nodepart{four} MAC};
\node[threenode] at (6,0) (cc) {\nodepart{one} Congestion\\Control\\Header \nodepart{two} Packet\\Data \nodepart{three} Data\\Sequence\\Number};
\node[twonode] at (9,0) (sequence) {\nodepart{one} Congestion\\Control\\Header \nodepart{two} Packet\\Data};
\node[onenode] at (12,0) (data) {Packet\\Data};
% Edges
\draw[<->] (udp.east) -- (mac.west);
\draw[<->] (mac.east) -- (cc.west);
\draw[<->] (cc.east) -- (sequence.west);
\draw[<->] (sequence.east) -- (data.west);
\end{tikzpicture}
\caption{Expansion of a UDP packet through a consumer/producer.}
\label{fig:udp-packet-dataflow}
\end{figure}
% ------------------------ Repository Overview ----------------------------- %
\subsection{Repository Overview}
A directory tree of the repository is provided in Figure \ref{fig:repository-structure}. The top level is split between \verb'code' and \verb'evaluation', where \verb'code' is compiled to produce the application binary, and \verb'evaluation' is used to verify the performance characteristics and generate graphs. The Go code is built with the Go modules system, the Java code built with Gradle, and the Python code runs in an iPython notebook. Go tests are interspersed with the code, for example in a file named \texttt{flow\_test.go}, providing tests for \texttt{flow.go} in the same directory.
\begin{figure}
\dirtree{%
.1 /.
.2 code\DTcomment{Go code for the project}.
.3 config\DTcomment{Configuration management}.
.3 crypto\DTcomment{Cryptographic methods}.
.4 sharedkey\DTcomment{Shared key MACs}.
.3 flags\DTcomment{Command line flag processing}.
.3 mocks\DTcomment{Mocks to enable testing}.
.3 proxy\DTcomment{The central proxy controller}.
.3 replay\DTcomment{Replay protection}.
.3 shared\DTcomment{Shared errors}.
.3 tcp\DTcomment{TCP flow transport}.
.3 tun\DTcomment{TUN adaptor}.
.3 udp\DTcomment{UDP datagram transport}.
.4 congestion\DTcomment{Congestion control methods}.
.3 .drone.yml\DTcomment{CI specification}.
.2 evaluation\DTcomment{Result gathering and graph generation}.
.3 java\DTcomment{Java automated result gathering}.
.3 python\DTcomment{Python graph generation}.
}
\caption{Repository folder structure.}
\label{fig:repository-structure}
\end{figure}
% -------------------------------------------------------------------------- %
% ------------------------ System Configuration ---------------------------- %
% -------------------------------------------------------------------------- %
\section{System Configuration}
\label{section:implementation-system-configuration}
The software portion of this proxy is entirely symmetric, as can be seen in Figure \ref{fig:dataflow-overview}. However, the system configuration diverges, as each side of the proxy serves a different role. Referring to Figure \ref{fig:dataflow-overview}, it can be seen that the kernel routing differs between the two nodes. Throughout, these two sides have been referred to as the local proxy and the remote proxy, with the local in the top left and the remote in the bottom right.
As the software portion of this application is implemented in user-space, it has no control over the routing of packets. Instead, a virtual interface is provided, and the kernel is instructed to route relevant packets to/from this interface. In Sections \ref{section:implementation-remote-proxy-routing} and \ref{section:implementation-local-proxy-routing}, the configuration for routing the packets for the remote proxy and local proxy respectively is explained. Finally, in Section \ref{section:implementation-multi-interface-routing}, some potentially unexpected behaviour of devices with multiple interfaces is discussed, such that the reader can avoid some of these pitfalls. Throughout this section, examples will be given for both Linux and FreeBSD. These examples represent only one of many methods of achieving the same results.
\subsection{Remote Proxy Routing}
\label{section:implementation-remote-proxy-routing}
The common case for remote proxies is a cloud Virtual Private Server (VPS) with one public network interface. As such, some configuration is required to both proxy bidirectionally via that interface, and also use it for communication with the local proxy. Firstly, packet forwarding must be enabled for the device. On Linux this is achieved as follows:
\begin{minted}{shell-session}
sysctl -w net.ipv4.ip_forward=1
\end{minted}
\noindent
Or on FreeBSD via:
\begin{minted}{shell-session}
echo 'GATEWAY_ENABLE="YES"' >> /etc/rc.conf
\end{minted}
These instruct the kernel in each case to forward packets. However, more instructions are necessary to ensure packets are routed correctly once forwarded. For the remote proxy, this involves two things: routing the communication for the proxy to the software side, and routing packets destined for the local system to the relevant application. Both of these are achieved in the same way, involving adjustments to the local routing table on Linux, and using \verb'pf(4)' rules on FreeBSD.
\vspace{3mm} \noindent
Linux:
\inputminted{shell-session}{3_Implementation/Samples/shell/linux_remote_routing.sh}
\noindent
FreeBSD:
\inputminted{shell-session}{3_Implementation/Samples/shell/freebsd_remote_routing.sh}
These settings combined provide the proxying effect via the TUN interface configured in software. It is also likely worth firewalling much more aggressively at the remote proxy side, as dropping packets before they saturate the low bandwidth connections between the local and remote proxy improves resilience to denial of service attacks. This can be done either with routing and firewall rules similar to those above, or externally with many cloud providers.
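As an illustrative sketch only (not taken from the deployed configuration, and assuming the software's TUN interface is named \verb'nc0' and that only SSH should be reachable from outside), such filtering might drop unsolicited forwarded traffic on Linux before it crosses the limited links:
\begin{minted}{shell-session}
# Allow established flows and (for example) SSH towards the client
iptables -A FORWARD -o nc0 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
iptables -A FORWARD -o nc0 -p tcp --dport 22 -j ACCEPT
# Drop everything else before it is carried over the aggregate connection
iptables -A FORWARD -o nc0 -j DROP
\end{minted}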
\subsection{Local Proxy Routing}
\label{section:implementation-local-proxy-routing}
Routing within the local proxy expects $1+N$ interfaces: one connected to the client device expecting the public IP, and $N$ connected to the wider Internet for communication with the other node. Referring to Figure \ref{fig:dataflow-overview}, it can be seen that no complex rules are required to achieve this routing, as each interface serves a different role. As such, there are three goals: ensuring that packets for the remote IP are routed between the TUN interface and the client device in both directions, ensuring that packets destined for the remote proxy are not routed to the client, and ensuring that each connection is routed via the correct WAN connection. The first two are covered in this section, and the third is discussed in the next section.
Routing the packets from/for the local proxy is pleasantly easy. Firstly, enable IP forwarding on Linux or gateway mode on FreeBSD, as seen previously. Secondly, routes must be set up. Fortunately, these routes are far simpler than those for the remote proxy. The routing for the local proxy client interface is as follows on Linux:
\inputminted{shell-session}{3_Implementation/Samples/shell/linux_local_interface.sh}
\noindent
Or on FreeBSD:
\inputminted{shell-session}{3_Implementation/Samples/shell/freebsd_local_interface.sh}
Then, on the client device, simply set the IP address statically to the remote proxy address, and the gateway to \verb'192.168.1.1'. Now the local proxy can send and receive packets to and from the remote proxy, but some further routing rules are needed to ensure that the packets from the proxy reach the remote proxy, and that forwarding works correctly. This again falls to routing tables on Linux and \verb'pf(4)' on FreeBSD, so for Linux:
\inputminted{shell-session}{3_Implementation/Samples/shell/linux_local_routing.sh}
\noindent
FreeBSD:
\inputminted{shell-session}{3_Implementation/Samples/shell/freebsd_local_routing.sh}
These rules achieve both of the listed goals: communicating with the remote proxy while forwarding the necessary packets to the client. The local proxy can be extended with further functionality, such as NAT and DHCP. This allows plug-and-play operation for the client, while also allowing multiple clients to take advantage of the connection without another router present.
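For illustration, the client-side step mentioned above (statically assigning the remote proxy's public address, with \verb'192.168.1.1' as the gateway) could be expressed on a Linux client as follows, where \verb'X.X.X.X' stands in for the remote proxy's address; this is only one of several equivalent methods and is not reproduced from the project's configuration:
\begin{minted}{shell-session}
ip addr add X.X.X.X/32 dev eth0
# The gateway lies outside the /32, so mark it as directly reachable on-link
ip route add 192.168.1.1 dev eth0
ip route add default via 192.168.1.1 dev eth0 onlink
\end{minted}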
\subsection{Multi-Homed Behaviour}
\label{section:implementation-multi-interface-routing}
During testing, I discovered some surprising behaviour relating to multi-homed hosts. This section details that behaviour, along with the workarounds found to enable the software to work well regardless.
The first piece of surprising behaviour comes from a device which has multiple interfaces lying on the same subnet. Consider a device with two Ethernet interfaces, each of which gains a DHCP IPv4 address from the same network. The first interface \verb'eth0' takes the IP \verb'10.10.0.2' and the second \verb'eth1' takes the IP \verb'10.10.0.3', each with a subnet mask of \verb'/24'. If a packet originates from userspace with source address \verb'10.10.0.2' and destination address \verb'10.10.0.1', it may leave via either \verb'eth0' or \verb'eth1'. I initially found this behaviour very surprising, as it seems clear that the packet should leave via \verb'eth0', the interface which has the given IP. However, as routing is decided by the source subnet, each of these interfaces matches.
Although this may seem like a contrived use case, consider this: a dual WAN router lies in front of a server, which uses these two interfaces to take two IPs. Policy routing is used on the dual WAN router to give this device control over the choice of WAN, by using either of its LAN IPs. In this case, the default routing would mean that the userspace software has no control over the WAN, as one will be selected seemingly arbitrarily. The solution to this problem is manipulation of routing tables: by creating a high priority routing table for each interface, and routing packets more specifically than the default routes, the correct packets can be routed outbound via the correct interface, as sketched below.
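A minimal sketch of this workaround, using the example addresses above and assuming a hypothetical upstream gateway of \verb'10.10.0.1' and a spare routing table number, is as follows:
\begin{minted}{shell-session}
# Give eth1 its own routing table, consulted for traffic sourced from its address
ip route add 10.10.0.0/24 dev eth1 src 10.10.0.3 table 101
ip route add default via 10.10.0.1 dev eth1 table 101
ip rule add from 10.10.0.3 table 101 priority 100
\end{minted}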
The second issue follows a similar theme of IP addresses being owned by the host rather than the interface on which they are set: Linux hosts respond to ARP requests for any of their IP addresses on all interfaces by default, a behaviour known as ARP flux. Going back to the prior example of \verb'eth0' and \verb'eth1' on the same subnet, ARP flux means that if another host sends packets to \verb'10.10.0.2', they may arrive at either \verb'eth0' or \verb'eth1', and this changes over time. Once again, this is rather contrived, but it also means that, for example, a private VPN IP will be responded to from the LAN a computer is on. Although this is desirable in some cases, it remains surprising default behaviour. The solution is also simple: a pair of kernel parameters, set as follows, resolves the issue.
\begin{minted}{shell-session}
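# When sending ARP requests, prefer source addresses within the target's subnet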
sysctl -w net.ipv4.conf.all.arp_announce=1
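# Only reply to ARP requests for addresses configured on the receiving interface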
sysctl -w net.ipv4.conf.all.arp_ignore=1
\end{minted}

237
4_Evaluation/evaluation.tex Normal file
View File

@ -0,0 +1,237 @@
%!TEX root = ../thesis.tex
%*******************************************************************************
%****************************** Fourth Chapter *********************************
%*******************************************************************************
\chapter{Evaluation}
% **************************** Define Graphics Path **************************
\ifpdf
\graphicspath{{4_Evaluation/Figs/Raster/}{4_Evaluation/Figs/PDF/}{4_Evaluation/Figs/}}
\else
\graphicspath{{4_Evaluation/Figs/Vector/}{4_Evaluation/Figs/}}
\fi
This chapter will discuss the methods used to evaluate my project and the results obtained. The results will be discussed in the context of the success criteria laid out in the Project Proposal (Appendix \ref{appendix:project-proposal}). This evaluation shows that a network using my method of combining Internet connections can see vastly superior network performance to one without. It will show the benefits to throughput, availability, and adaptability. The tests are performed on a Dell R710 Server with the following specifications:
\vspace{5mm}
\begin{tabular}{ll}
\textbf{CPU(s)} & 16 x Intel(R) Xeon(R) CPU X5667 @ 3.07GHz (2 Sockets) \\
\textbf{Memory} & 6 x 2GB DDR3 ECC RDIMMS \\
\textbf{Kernel} & Linux 5.4 LTS
\end{tabular}
\vspace{5mm}
When presenting data, error bars give the Inter-Quartile Range (IQR) of the data, with the plotted point being the median.
\section{Success Criteria}
\subsection{Flow Maintained}
The results for whether a flow can be maintained during a single connection loss are gathered using an iperf3 UDP test. The UDP test runs at a fixed bitrate and measures the number of datagrams lost in transit. Three tests are performed on a proxy with two connections: both connections remain up, one connection remains up, and both connections are lost. To satisfy this success criterion, the test with a single connection lost may show a small amount of loss, while losing both connections should terminate the test.
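The exact iperf3 flags used are not reproduced here, but the outputs below are consistent with a fixed-rate test of roughly the following form, where the bitrate and duration values are assumptions inferred from the output:
\begin{minted}{shell-session}
iperf3 -s                        # on the receiving host
iperf3 -c X.X.X.X -u -b 1M -t 5  # on the sender: UDP at a fixed rate for 5 seconds
\end{minted}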
\begin{figure}
\begin{Verbatim}[fontsize=\small]
Connecting to host X.X.X.X, port 5201
[ 5] local X.X.X.Y port 43039 connected to X.X.X.X port 5201
[ ID] Interval Transfer Bitrate Total Datagrams
[ 5] 0.00-1.00 sec 129 KBytes 1.05 Mbits/sec 91
[ 5] 1.00-2.00 sec 127 KBytes 1.04 Mbits/sec 90
[ 5] 2.00-3.00 sec 129 KBytes 1.05 Mbits/sec 91
[ 5] 3.00-4.00 sec 127 KBytes 1.04 Mbits/sec 90
[ 5] 4.00-5.00 sec 129 KBytes 1.05 Mbits/sec 91
- - - - - - - - - - - - - - - - - - - - - - - - -
[ ID] Interval Transfer Bitrate Jitter Lost/Total Datagrams
[ 5] 0.00-5.00 sec 641 KBytes 1.05 Mbits/sec 0.000 ms 0/453 (0%) sender
[ 5] 0.00-5.04 sec 641 KBytes 1.04 Mbits/sec 0.092 ms 0/453 (0%) receiver
\end{Verbatim}
\caption{iperf3 UDP results with two stable connections (inbound).}
\label{fig:maintained-both-connections-alive}
\end{figure}
\begin{figure}
\begin{Verbatim}[fontsize=\small]
Connecting to host X.X.X.X, port 5201
[ 5] local X.X.X.Y port 49929 connected to X.X.X.X port 5201
[ ID] Interval Transfer Bitrate Total Datagrams
[ 5] 0.00-1.00 sec 129 KBytes 1.05 Mbits/sec 91
[ 5] 1.00-2.00 sec 127 KBytes 1.04 Mbits/sec 90
[ 5] 2.00-3.00 sec 129 KBytes 1.05 Mbits/sec 91
[ 5] 3.00-4.00 sec 127 KBytes 1.04 Mbits/sec 90
[ 5] 4.00-5.00 sec 129 KBytes 1.05 Mbits/sec 91
- - - - - - - - - - - - - - - - - - - - - - - - -
[ ID] Interval Transfer Bitrate Jitter Lost/Total Datagrams
[ 5] 0.00-5.00 sec 641 KBytes 1.05 Mbits/sec 0.000 ms 0/453 (0%) sender
[ 5] 0.00-5.04 sec 635 KBytes 1.03 Mbits/sec 0.115 ms 4/453 (0.88%) receiver
\end{Verbatim}
\caption{iperf3 UDP results with a single connection loss (inbound).}
\label{fig:maintained-one-connections-down}
\end{figure}
\begin{figure}
\begin{Verbatim}[fontsize=\small]
Connecting to host X.X.X.X, port 5201
[ 5] local X.X.X.Y port 51581 connected to X.X.X.X port 5201
[ ID] Interval Transfer Bitrate Total Datagrams
[ 5] 0.00-1.00 sec 129 KBytes 1.05 Mbits/sec 91
[ 5] 1.00-2.00 sec 127 KBytes 1.04 Mbits/sec 90
[ 5] 2.00-3.00 sec 129 KBytes 1.05 Mbits/sec 91
[ 5] 3.00-4.00 sec 129 KBytes 1.05 Mbits/sec 91
\end{Verbatim}
\caption{iperf3 UDP results with a total connection loss (inbound).}
\label{fig:maintained-both-connections-down}
\end{figure}
These results are given in Figures \ref{fig:maintained-both-connections-alive}, \ref{fig:maintained-one-connections-down} and \ref{fig:maintained-both-connections-down} respectively. The results are as expected: with no connection loss the 1Mbps stream is handled without problems and no packets are lost; with one connection lost there is slight packet loss ($0.88\%$) but the test is able to continue; and with a complete connection loss the test stalls. Given the consistent external IP, this shows that a flow can be maintained through a single connection loss, with only a small loss of packets. This level of packet loss corresponds to approximately 45ms of lost audio on a phone call, after which the call continues gracefully. This satisfies the success criterion.
\subsection{Bidirectional Performance Gains}
To demonstrate that all performance gains are bidirectional, I will provide graphs of performance both inbound and outbound to the client for each performance test of the core success criteria, which is sufficient to show the gains in each case. Inbound tests run the test server on the proxy client and the test client outside, while outbound tests place the test server outside and the test client on the proxy client.
To show this somewhat succinctly, the same test is executed both inbound and outbound, with each direction plotted as a series on the same graph. To show that this requirement is satisfied in all cases, the graph for the opposite direction is provided in Appendix \ref{appendix:outbound-graphs} for each graph of results presented for the basic success criteria.
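An equivalent way to exercise both directions from a single pair of endpoints, given here purely as an illustration rather than the exact commands used, is iperf3's reverse mode:
\begin{minted}{shell-session}
iperf3 -c X.X.X.X        # outbound: the proxy client sends to an external server
iperf3 -c X.X.X.X -R     # inbound: the external server sends back to the proxy client
\end{minted}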
\begin{figure}
\centering
\includegraphics[width=0.75\textwidth]{graphs/bidirectional-comparison}
\caption{Comparing the performance of packets inbound to the client to outbound from the client in three different test conditions.}
\label{fig:bidirectional-comparison}
\end{figure}
Figure \ref{fig:bidirectional-comparison} has two series for the same set of tests: one for the inbound (reaching in to the client, or download) performance and one for the outbound (the client reaching out, or upload) performance. The trend is consistent within each direction; however, there is a slight preference towards outbound flows. This is because outbound flows are spread between interfaces, so they wait less often for the kernel to finish locking a single interface. In each case, both inbound and outbound performance satisfy the success criterion.
\subsection{IP Spoofing}
\label{section:ip-spoofing-evaluation}
That the IP of the client can be set to the IP of the remote proxy is demonstrated implicitly, as every test in this evaluation relies on this fact. When allocating virtual machines for testing, the client is given the IP of the remote proxy. In the given network structure, the speed test server, remote proxy and local proxy are each connected to one virtual switch, which acts as a mock Internet. A separate virtual switch then connects an additional interface of the local proxy to the client. The IP addresses of the interfaces used in these tests are listed in Figure \ref{fig:standard-network-structure-ips}. The IP addresses of the public interfaces are represented by letters, as they are arbitrary public IP addresses, chosen to ensure that no local network firewall rules affect the configuration.
\begin{figure}
\centering
\begin{tabular}{c|c|c}
Machine & Interface & IP Address \\
\hline
Speed Test Server & eth0 & \emph{A} \\
\hline
Remote Proxy & eth0 & \emph{B} \\
\hline
\multirow{5}{*}{Local Proxy} & eth0 & \emph{C0} \\
& eth1 & \emph{C1} \\
& \vdots & \vdots \\
& ethN & \emph{CN} \\
& eth\{N+1\} & 192.168.1.1 \\
\hline
Client & eth0 & \emph{B}
\end{tabular}
\caption{The IP layout of the test network structure.}
\label{fig:standard-network-structure-ips}
\end{figure}
It has been shown that the client in this testing setup shares an IP address with the remote proxy. The details of this configuration are provided in Section \ref{section:implementation-system-configuration}. This satisfies the success criterion.
\subsection{Security}
Success for security involves providing security no worse than a standard connection. This is achieved by using Message Authentication Codes, Replay Protection and extra authenticated information for connection authentication, described in detail in Section \ref{section:preparation-security}. Further, Section \ref{section:layered-security} provides an argument that the proxying of packets can be made secure by operating in a secure overlay network, such as a VPN. This ensures that security can be maintained, regardless of changes in the security landscape, by composing my proxy with additional security software.
\subsection{More Bandwidth over Two Equal Connections}
To demonstrate that more bandwidth is available over two equal connections combined by this proxy than over a single such connection without it, I will compare the iperf3 throughput between the two cases. Further, I will provide a comparison point against a single connection of the combined bandwidth, as this is the maximum theoretical performance of combining the two lower bandwidth connections.
\begin{figure}
\centering
\begin{subfigure}{.7\textwidth}
\includegraphics[width=0.9\linewidth]{graphs/more-bandwidth-equal-a-inbound}
\caption{Throughput of 1+1MB/s connections compared with 1MB/s and 2MB/s (inbound).}
\label{fig:more-bandwidth-equal-lesser}
\end{subfigure}
\begin{subfigure}{.7\textwidth}
\includegraphics[width=0.9\linewidth]{graphs/more-bandwidth-equal-b-inbound}
\caption{Throughput of 2+2MB/s connections compared with 2MB/s and 4MB/s (inbound).}
\label{fig:more-bandwidth-equal-greater}
\end{subfigure}
\caption{Graphs showing that the throughput of two proxied connections lies between that of one connection of the same speed and one connection of double the speed.}
\label{fig:more-bandwidth-equal}
\end{figure}
The results of these tests are given in Figure \ref{fig:more-bandwidth-equal}, for both a pair of 1MB/s connections and a pair of 2MB/s connections. To satisfy this success criterion, the proxied bar on each graph should exceed the throughput of the direct bar of equal bandwidth. It can be seen that this occurs in both cases, and thus the success criterion is met. The throughput far exceeds that of the single direct connection, and is closer to the single double-bandwidth connection than to the single equal-bandwidth connection, demonstrating that a good portion of the maximum performance is achieved ($92.5\%$ for the 1+1MB/s proxy, and $88.5\%$ for the 2+2MB/s proxy).
\section{Extended Goals}
\subsection{More Bandwidth over Unequal Connections}
To show improved throughput over connections which are not equal, three sets of results will be compared: connections of speeds $x+x$, $x+y$, and $y+y$, where $x < y$. To show that unequal connections exceed the performance of a pair of slower connections, the results for speeds $x+y$ should lie between those for $x+x$ and $y+y$. Further, to show that the percentage of available throughput achieved is invariant to the balance of connection speeds, the unequal connections should lie approximately halfway between the two equal-connection results.
\begin{figure}
\centering
\begin{subfigure}{0.7\textwidth}
\includegraphics[width=0.9\linewidth]{graphs/more-bandwidth-unequal-a-inbound}
\caption{Bandwidth of 1+2MB/s connections compared to 1+1MB/s connections and 2+2MB/s connections.}
\label{fig:more-bandwidth-unequal-lesser}
\end{subfigure}
\begin{subfigure}{0.7\textwidth}
\includegraphics[width=0.9\linewidth]{graphs/more-bandwidth-unequal-b-inbound}
\caption{Bandwidth of 2+4MB/s connections compared to 2+2MB/s connections and 4+4MB/s connections.}
\label{fig:more-bandwidth-unequal-greater}
\end{subfigure}
\caption{Graphs to demonstrate that the proxy appropriately balances between imbalanced connections, resulting in near-maximal throughput.}
\label{fig:more-bandwidth-unequal}
\end{figure}
Two sets of results are provided: one for 1MB/s and 2MB/s connections, and another for 2MB/s and 4MB/s connections. In both cases, it can be seen that the proxy with unequal connections lies between the equal-connection proxies. Further, both unequal proxied connections lie approximately halfway between the equal pairs ($74.4\%$ of the maximum for 1+2MB/s, and $75.1\%$ of the maximum for 2+4MB/s). This suggests that the proxy design is successful in being invariant to the static balance of connection throughput.
\subsection{More Bandwidth over Four Equal Connections}
This criterion expands on the scalability of the proxy in terms of the number of connections, specifically comparing the performance of three connections against four. To fulfil this, the results for each of two, three and four connections are included on one graph. This allows the trend of performance with an increasing number of connections to begin to be visualised, which is expanded upon further in Section \ref{section:performance-evaluation}.
\begin{figure}
\centering
\includegraphics[width=0.7\linewidth]{graphs/more-bandwidth-four-b-inbound}
\caption{Scaling of 2-4 equal bandwidth connections when combined.}
\label{fig:more-bandwidth-four}
\end{figure}
Provided in Figure \ref{fig:more-bandwidth-four} are results for each of two, three and four combined 2MB/s connections. Firstly, it is clear that the proxy consisting of 4 connections exceeds the throughput of the proxy consisting of 3 connections. Secondly, it appears that a linear trend is forming. This trend will be further evaluated in Section \ref{section:performance-evaluation}, but suggests that the structure of the proxy suffers little loss in efficiency from adding further connections.
\subsection{Bandwidth Variation}
This criterion judges the adaptability of the congestion control system under changing network conditions. To test this, the bandwidth of one of the local portal's connections is varied during an iperf3 throughput test. Thus far, bar graphs have been sufficient to show the results of each test; in this case, as the performance is now time sensitive, a line graph is presented instead. Due to the time-dependent nature of these tests, producing results consistent enough for error bars was not feasible. The data is also smoothed across the x-axis with a 5-point moving average, to avoid the intense fluctuations caused by the interface rate limiting.
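Concretely, a 5-point moving average of this form replaces each throughput sample $y_i$ with $\bar{y}_i = \frac{1}{5}\sum_{j=i-2}^{i+2} y_j$ (a centred window is shown here; a trailing window behaves similarly).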
This criterion will be met if the following are true: the throughput begins at a constant rate; the throughput stabilises at the new rate after the capacity increase or decrease; and the throughput returns to the original rate once the capacity change is reversed.
\begin{figure}
\centering
\includegraphics[width=0.8\textwidth]{graphs/connection_capacity_changes}
\caption{Connection capacity increasing and decreasing over time. The decrease is from 2+2MB/s connections to 1+2MB/s connections, and the increase from 1+1MB/s connections to 1+2MB/s connections.}
\label{fig:capacity-changes}
\end{figure}
The results are given in Figure \ref{fig:capacity-changes}. The decreasing series drops from 2+2MB/s connections, with a maximum throughput of 32Mbps, to 1+2MB/s connections, with a maximum throughput of 24Mbps. The increasing series rises from 1+1MB/s connections, with a maximum throughput of 16Mbps, to 1+2MB/s connections, with a maximum throughput of 24Mbps (1MB/s of link capacity corresponding to 8Mbps of throughput). The capacity changes occur at approximately the same time in each series. The graph shows each series beginning at its constant rate, before converging at approximately 24Mbps in the centre of the graph. Once the connection change is reversed, each series returns to its original throughput. This satisfies the success criterion for connection capacity changes.
\subsection{Connection Loss}
This criterion judges the ability of the proxy as a whole to handle a complete connection loss while maintaining proportional throughput, and to later regain that capacity when the connection becomes available again. As the proxy has redundant connections, it is feasible for this to cause only a minimal loss of service. Unfortunately, losing a connection causes significant instability in the proxy, so this extended goal has not been met. This is due to the interactions between the proxy and the system kernel, where the proxy has very little control over the underlying TCP connection. With future work on UDP I am hopeful that this will eventually be satisfied, but it is not with the current implementation.
\subsection{Single Interface Remote Portal}
Similarly to Section \ref{section:ip-spoofing-evaluation}, a remote portal with a single interface is employed within the standard testing structure for this section, using the techniques detailed in Section \ref{section:implementation-system-configuration}. By altering the routing tables such that all local traffic for the remote portal is sent to the local portal via the proxy, excluding the traffic for the proxy itself, the packets can be forwarded onwards from the local portal to the client which holds that IP address. As the standard testing structure employs a remote portal with a single interface, every test result shows that this is a supported configuration, and thus this success criterion is met.
\subsection{Connection Metric Values}
The extended goal of connection metric values has not been implemented. Instead, peers which only transfer data in one direction were implemented, which covers some of the use cases for metric values. Though metric values for connections would have been useful in some cases, they do not represent the standard usage of the software, and the added complexity of managing live peers was deemed unnecessary for the core software. Rather than metric values, I would consider providing a better interface to control the software externally, which would allow a separate piece of software to manage live peers; this has not been completed at this time.
\section{Stretch Goals}
\subsection{UDP Proxy Flows}
UDP flows are implemented, and provide a solid base for UDP testing and development. The present implementation of a New Reno-style congestion control mechanism still has some flaws, meaning that UDP is not yet feasible for general use. However, the API for writing congestion control mechanisms is strong, and some of the future work suggested in Section \ref{section:future-work} could be developed on this base, so that much is a success.
\section{Performance Evaluation}
\label{section:performance-evaluation}
The discussion of success criteria above used relatively slow network connections to test scaling in certain situations, while ensuring that hardware limitations have no impact on the tests. This section provides a brief analysis of how this solution would scale to providing a higher bandwidth connection, specifically by adding network connections.
The results of these tests are shown in Figure \ref{fig:n-connections-scaling}. Links of $1MB/s$, $2MB/s$ and $4MB/s$ capacity are each tested with 1 to 8 connections. The throughput demonstrated is largely linear, with a suggestion that eight $4MB/s$ connections are approaching the software's limits. This result is very promising, as it shows that the software can handle a large number of connections. While this limit may be restrictive for some higher-speed download connections, many upload speeds are far slower, and would benefit from this quantity of links.
\begin{figure}
\centering
\includegraphics[width=0.75\textwidth]{graphs/n_connections_scaling}
\caption{Scaling of proxy throughput based on number of connections, for three speeds of connection.}
\label{fig:n-connections-scaling}
\end{figure}

View File

@ -0,0 +1,35 @@
%!TEX root = ../thesis.tex
%*******************************************************************************
%****************************** Fifth Chapter **********************************
%*******************************************************************************
\chapter{Conclusions}
% **************************** Define Graphics Path **************************
\ifpdf
\graphicspath{{5_Conclusions/Figs/Raster/}{5_Conclusions/Figs/PDF/}{5_Conclusions/Figs/}}
\else
\graphicspath{{5_Conclusions/Figs/Vector/}{5_Conclusions/Figs/}}
\fi
The software produced in this project provides a method of combining multiple Internet connections via a proxy, prioritising throughput and resilience in the resultant aggregate connection. The proxy provides a novel approach to combining Internet connections, responding well to very dynamic Internet connections. All of the core success criteria were met, along with many of the extended goals.
The multipath proxy built in this project provides an effective method to combine dynamic Internet connections, and it works in today's Internet. Though future work may make much of this redundant, the performance gains seen today are useful in many situations. As it becomes more common to see a variety of connections in homes, such as 5G, Low Earth Orbit and DSL, a method to combine these that dynamically adapts to the variability of the wireless connections can be a significant, practical benefit, especially in situations where gaining a single faster link is difficult.
\section{Lessons Learnt}
The lessons learnt in this project fall into two classes: personal reflections on running the project, and lessons for anyone re-implementing this proxy.
I learnt throughout this project the importance of producing a minimum viable product. Very early in the project, I produced a working proof of concept. Nearing the end of the project, once the design was mostly settled and with a view of how the program would be deployed, the code was refactored to produce a user friendly piece of software. This approach of fast development that did not commit early to a usage pattern served me very well with this project, as details of the deployment only became clear after some use.
Further, lessons were learnt about the quality of packages: a package being part of a language's standard library does not imply good support or a full feature set, while packages from respected software companies can be superior.
On re-implementation of this work, more consideration should be given to the interface of the software. In particular, monitoring the current connections without a debugger is particularly difficult, and monitoring long term statistics is presently impossible. This compromise was made for code readability and clarity, increasing the likelihood of correct code, but it does raise some issues for the network architects who deploy this software.
Many of the lessons learnt relating to IP routing are detailed in Section \ref{section:implementation-system-configuration}, which would aid future implementations significantly, allowing the developer to focus only on what needs to occur in the application itself. Similarly, Figure \ref{fig:dataflow-overview} provides a comprehensive overview of the particularly complex dataflow within this application. These tools provide an effective summary of the information needed to implement this software again, reducing the complexity of such a new implementation, and allowing the developer to focus on the important features.
\section{Future Work}
\label{section:future-work}
Alternative methods of load balancing could take multipath proxies further. Having control of both proxies allows for a variety of load balancing mechanisms, of which congestion control is only one. An alternative method is to monitor packet loss, and use this to infer the maximum capacity of each link. These capacities can then be used to load balance packets by proportion as opposed to greedily with congestion control. This could provide performance benefits over congestion control by allowing the congestion control mechanisms of underlying flows to be better employed, while also having trade-offs with slower reaction to connection changes.
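For example, with an estimated capacity $\hat{c}_i$ for each link $i$, packets could be dispatched to link $i$ with probability $\hat{c}_i / \sum_j \hat{c}_j$, rather than greedily as congestion windows open.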
To increase performance, a kernel implementation of the proxy could be written. Kernel implementations avoid copying packets between kernel- and user-space, as well as removing the cost of system calls. This can increase maximum performance significantly, as well as reducing latency, making the software useful in more places, though it restricts platform compatibility to systems running the compatible kernel. Therefore, having kernel implementations maintain compatibility with the user-space implementation would allow more systems to take advantage of the proxy.

View File

@ -0,0 +1,93 @@
#include...
struct Packet {
size_t len; uint8_t* data;
Packet(const uint8_t *input, size_t num_bytes) {
len = num_bytes; data = new uint8_t[len];
std::memcpy(data, input, len);
};
~Packet() { delete[] data; }
[[nodiscard]] std::string print() const {
std::stringstream out;
for (size_t i = 0; i < len; i++) {
int temp = data[i];
out << std::hex << temp << " ";
}
return out.str();
}
};
template <class T> class ThreadSafeQueue {
std::queue<T> _queue = std::queue<T>();
std::mutex _mutex; std::condition_variable _cond;
public:
ThreadSafeQueue() = default;
void push(T item) {
_mutex.lock(); _queue.push(item); _mutex.unlock();
_cond.notify_one();
}
T pop() {
std::unique_lock<std::mutex> unique(_mutex);
// wait on a predicate to avoid missed notifications and spurious wakeups
_cond.wait(unique, [this] { return !_queue.empty(); });
T out = _queue.front();
_queue.pop();
return out;
}
};
int tun_alloc(const char *dev, short flags) {
struct ifreq ifr{};
int fd, err;
if( (fd = open("/dev/net/tun" , O_RDWR)) < 0 ) {
perror("Opening /dev/net/tun");
return fd;
}
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = flags;
strncpy(ifr.ifr_name, dev, IFNAMSIZ);
if( (err = ioctl(fd, TUNSETIFF, (void *)&ifr)) < 0 ) {
perror("ioctl(TUNSETIFF)");
close(fd);
return err;
}
return fd;
}
std::mutex print_lock;
std::atomic<bool> stop(false); // termination flag referenced by the consumers below
void consumer(const int index, ThreadSafeQueue<Packet*> *queue) {
std::cout << "thread " << index << " starting" << std::endl;
while (!stop) {
Packet *p = queue->pop();
print_lock.lock();
std::cout << "thread " << index << " received a packet with content `" << p->print() << "`" << std::endl;
print_lock.unlock();
delete p;
}
}
int main() {
int tun = tun_alloc("nc%d", IFF_TUN);
auto queue = new ThreadSafeQueue<Packet*>();
std::thread threads[10];
for (int i = 0; i < 10; i++) {
const int i_safe = i;
threads[i] = std::thread ([i_safe, queue]() {
consumer(i_safe, queue);
});
}
std::thread reader([tun, queue]() {
uint8_t buffer[1500];
while (true) {
int num_bytes = read(tun, &buffer, 1500);
if (num_bytes > 0) {
auto *packet = new Packet(buffer, num_bytes);
queue->push(packet);
}
}
});
reader.join(); // keep main alive; the reader loops forever collecting packets
}

View File

@ -0,0 +1,48 @@
package main
import (
"fmt"
"github.com/pkg/taptun"
"os"
"os/signal"
"syscall"
)
type Packet struct {
Data []byte
}
func main() {
tun, err := taptun.NewTun("nc%d")
if err != nil { panic(err) }
inboundPackets := make(chan Packet, 128)
go func() {
bufferSize := 1500
buffer := make([]byte, bufferSize)
for {
read, err := tun.Read(buffer)
if err != nil { panic(err) }
if read == 0 { panic("0 bytes read!") }
p := Packet{}
p.Data = make([]byte, read)
copy(p.Data, buffer)
inboundPackets <- p
}
}()
for i := 0; i < 10; i++ {
i := i
go func() {
for {
p := <-inboundPackets
fmt.Printf("Reader %d: %v\n", i, p)
}
}()
}
// Block until interrupted, so the reader and worker goroutines keep running
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
<-c
}

View File

@ -0,0 +1,34 @@
use std::thread;
use tun_tap::{Iface, Mode};
#[derive(Debug)]
struct Packet {
data: [u8; 1504],
}
fn main() {
let (mut tx, rx) = spmc::channel();
let iface = Iface::new("nc%d", Mode::Tun).expect("failed to create TUN device");
let mut buffer = vec![0; 1504];
for i in 0..10 {
let rx = rx.clone();
thread::spawn(move || {
// keep receiving until the channel is closed by the sender
while let Ok(packet) = rx.recv() {
println!("Thread {}: {:?}", i, packet);
}
});
}
for _ in 0..500 {
iface.recv(&mut buffer).unwrap();
let mut packet = Packet{ data: [0; 1504] };
for i in 0..1504 {
packet.data[i] = buffer[i];
}
tx.send(packet).unwrap();
}
}

View File

@ -0,0 +1,28 @@
%!TEX root = ../thesis.tex
% ********************** Thesis Appendix A - Language Samples *************************
\chapter{Language Samples}
\label{appendix:language-samples}
\begin{figure}
\inputminted[firstline=1,lastline=48]{cpp}{A1_LanguageSamples/Samples/main.cpp}
\caption{A sample script written in C++ to collect packets from a TUN interface and print them from multiple threads}
\label{fig:cpp-tun-sample}
\end{figure}
\begin{figure}
\ContinuedFloat
\inputminted[firstline=49]{cpp}{A1_LanguageSamples/Samples/main.cpp}
\end{figure}
\begin{figure}
\inputminted{rust}{A1_LanguageSamples/Samples/main.rs}
\caption{A sample script written in Rust to collect packets from a TUN interface and print them from multiple threads}
\label{fig:rust-tun-sample}
\end{figure}
\begin{figure}
\inputminted{go}{A1_LanguageSamples/Samples/main.go}
\caption{A sample script written in Go to collect packets from a TUN interface and print them from multiple threads}
\label{fig:go-tun-sample}
\end{figure}

View File

@ -0,0 +1,88 @@
%!TEX root = ../thesis.tex
% ********************** Thesis Appendix B - Layered Security *************************
\chapter{Layered Security Packet Diagrams}
\label{appendix:layered-security}
\begin{figure}
\begin{leftfullpage}
\centering
\begin{bytefield}[bitwidth=0.6em]{32}
\bitheader{0-31} \\
\wordbox[tlr]{1}{IPv4 Header} \\
\wordbox[blr]{1}{$\cdots$} \\
\begin{rightwordgroup}{UDP\\Header}
\bitbox{16}{Source port} & \bitbox{16}{Destination port} \\
\bitbox{16}{Length} & \bitbox{16}{Checksum}
\end{rightwordgroup} \\
\begin{rightwordgroup}{CC\\Header}
\bitbox{32}{Acknowledgement number} \\
\bitbox{32}{Negative acknowledgement number} \\
\bitbox{32}{Sequence number}
\end{rightwordgroup} \\
\begin{rightwordgroup}{Proxied\\Wireguard\\Packet}
\wordbox[tlr]{1}{IPv4 Header} \\
\wordbox[blr]{1}{$\cdots$} \\
\begin{leftwordgroup}{UDP Header}
\bitbox{16}{Source port} & \bitbox{16}{Destination port} \\
\bitbox{16}{Length} & \bitbox{16}{Checksum}
\end{leftwordgroup} \\
\begin{leftwordgroup}{Wireguard\\Header}
\bitbox{8}{type} & \bitbox{24}{reserved} \\
\wordbox{1}{receiver} \\
\wordbox{2}{counter}
\end{leftwordgroup} \\
\wordbox[tlr]{1}{Proxied IP packet} \\
\skippedwords\\
\wordbox[blr]{1}{}
\end{rightwordgroup} \\
\begin{rightwordgroup}{Security\\Footer}
\bitbox{32}{Data sequence number} \\
\wordbox[tlr]{1}{Message authentication code} \\
\wordbox[blr]{1}{$\cdots$}
\end{rightwordgroup}
\end{bytefield}
\caption{Packet structure for a configuration with a Wireguard client behind my multipath proxy.}
\label{fig:whole-network-vpn-behind}
\end{leftfullpage}
\end{figure}
\begin{figure}
\begin{fullpage}
\centering
\begin{bytefield}[bitwidth=0.6em]{32}
\bitheader{0-31} \\
\wordbox[tlr]{1}{IPv4 Header} \\
\wordbox[blr]{1}{$\cdots$}\\
\begin{rightwordgroup}{UDP\\Header}
\bitbox{16}{Source port} & \bitbox{16}{Destination port} \\
\bitbox{16}{Length} & \bitbox{16}{Checksum}
\end{rightwordgroup} \\
\begin{rightwordgroup}{Wireguard\\Header}
\bitbox{8}{type} & \bitbox{24}{reserved} \\
\wordbox{1}{receiver} \\
\wordbox{2}{counter}
\end{rightwordgroup} \\
\begin{rightwordgroup}{Tunnelled\\Proxy\\Packet}
\wordbox[tlr]{1}{IPv4 Header} \\
\wordbox[blr]{1}{$\cdots$}\\
\begin{leftwordgroup}{UDP Header}
\bitbox{16}{Source port} & \bitbox{16}{Destination port} \\
\bitbox{16}{Length} & \bitbox{16}{Checksum}
\end{leftwordgroup} \\
\begin{leftwordgroup}{CC\\Header}
\bitbox{32}{Acknowledgement number} \\
\bitbox{32}{Negative acknowledgement number} \\
\bitbox{32}{Sequence number}
\end{leftwordgroup} \\
\wordbox[tlr]{1}{Proxied IP packet} \\
\skippedwords\\
\wordbox[blr]{1}{}
\end{rightwordgroup}
\end{bytefield}
\caption{Packet structure for a configuration with a Wireguard client in front of my multipath proxy.}
\label{fig:whole-network-vpn-infront}
\end{fullpage}
\end{figure}


View File

@ -0,0 +1,69 @@
%!TEX root = ../thesis.tex
% ********************** Thesis Appendix B - Outbound Graphs *************************
\chapter{Outbound Graphs}
\label{appendix:outbound-graphs}
\begin{figure}
\begin{Verbatim}[fontsize=\small]
Connecting to host X.X.X.Y, port 5201
[ 5] local X.X.X.X port 53587 connected to X.X.X.Y port 5201
[ ID] Interval Transfer Bitrate Total Datagrams
[ 5] 0.00-1.00 sec 129 KBytes 1.05 Mbits/sec 91
[ 5] 1.00-2.00 sec 127 KBytes 1.04 Mbits/sec 90
[ 5] 2.00-3.00 sec 129 KBytes 1.05 Mbits/sec 91
[ 5] 3.00-4.00 sec 127 KBytes 1.04 Mbits/sec 90
[ 5] 4.00-5.00 sec 129 KBytes 1.05 Mbits/sec 91
- - - - - - - - - - - - - - - - - - - - - - - - -
[ ID] Interval Transfer Bitrate Jitter Lost/Total Datagrams
[ 5] 0.00-5.00 sec 641 KBytes 1.05 Mbits/sec 0.000 ms 0/453 (0%) sender
[ 5] 0.00-5.04 sec 641 KBytes 1.04 Mbits/sec 0.070 ms 0/453 (0%) receiver
\end{Verbatim}
\caption{iperf3 UDP results with two stable connections (outbound).}
\end{figure}
\begin{figure}
\begin{Verbatim}[fontsize=\small]
Connecting to host X.X.X.Y, port 5201
[ 5] local X.X.X.X port 38793 connected to X.X.X.Y port 5201
[ ID] Interval Transfer Bitrate Total Datagrams
[ 5] 0.00-1.00 sec 129 KBytes 1.05 Mbits/sec 91
[ 5] 1.00-2.00 sec 127 KBytes 1.04 Mbits/sec 90
[ 5] 2.00-3.00 sec 129 KBytes 1.05 Mbits/sec 91
[ 5] 3.00-4.00 sec 129 KBytes 1.05 Mbits/sec 91
[ 5] 4.00-5.00 sec 127 KBytes 1.04 Mbits/sec 90
- - - - - - - - - - - - - - - - - - - - - - - - -
[ ID] Interval Transfer Bitrate Jitter Lost/Total Datagrams
[ 5] 0.00-5.00 sec 641 KBytes 1.05 Mbits/sec 0.000 ms 0/453 (0%) sender
[ 5] 0.00-5.04 sec 635 KBytes 1.03 Mbits/sec 0.086 ms 4/453 (0.88%) receiver
\end{Verbatim}
\caption{iperf3 UDP results with a single connection loss (outbound).}
\end{figure}
\begin{figure}
\begin{Verbatim}[fontsize=\small]
Connecting to host X.X.X.Y, port 5201
[ 5] local X.X.X.X port 35549 connected to X.X.X.Y port 5201
[ ID] Interval Transfer Bitrate Total Datagrams
[ 5] 0.00-1.00 sec 129 KBytes 1.05 Mbits/sec 91
[ 5] 1.00-2.00 sec 127 KBytes 1.04 Mbits/sec 90
[ 5] 2.00-3.00 sec 129 KBytes 1.05 Mbits/sec 91
[ 5] 3.00-4.00 sec 127 KBytes 1.04 Mbits/sec 90
\end{Verbatim}
\caption{iperf3 UDP results with a total connection loss (outbound).}
\end{figure}
\setcounter{figure}{5}
\begin{figure}
\centering
\begin{subfigure}{.7\textwidth}
\includegraphics[width=0.9\linewidth]{4_Evaluation/Figs/graphs/more-bandwidth-equal-a-outbound}
\caption{Throughput of 1+1MB/s connections compared with 1MB/s and 2MB/s (outbound).}
\end{subfigure}
\begin{subfigure}{.7\textwidth}
\includegraphics[width=0.9\linewidth]{4_Evaluation/Figs/graphs/more-bandwidth-equal-b-outbound}
\caption{Throughput of 2+2MB/s connections compared with 2MB/s and 4MB/s (outbound).}
\end{subfigure}
\caption{Graphs showing that the throughput of two proxied connections lies between that of one connection of the same speed and one connection of double the speed.}
\end{figure}

View File

@ -0,0 +1,6 @@
% ************************** Proposal **************************
\chapter{Project Proposal}
\label{appendix:project-proposal}
\includepdf[pages=-]{A4_ProjectProposal/project-proposal.pdf}

View File

@ -1,27 +0,0 @@
%!TEX root = ../thesis.tex
%*******************************************************************************
%****************************** Fifth Chapter **********************************
%*******************************************************************************
\chapter{Conclusions}
% **************************** Define Graphics Path **************************
\ifpdf
\graphicspath{{Chapter5/Figs/Raster/}{Chapter5/Figs/PDF/}{Chapter5/Figs/}}
\else
\graphicspath{{Chapter5/Figs/Vector/}{Chapter5/Figs/}}
\fi
\section{Future Work}
The most interesting future work on multi-homed devices would focus on adding additional features to gateways.
Work on the most effective method of allowing a gateway to inform a device behind it that it is worth adding additional MPTCP subflows.
Work on gateways understanding the Layer 4 concepts of MPTCP and adapting their load balancing algorithms to ensure that multiple subflows of the same MPTCP flow are split appropriately between the available links.
Work on gateways that understand MPTCP to take a non-MPTCP flow and transparently convert it into a MPTCP flow at the gateway, and back again as it reaches the device behind.
Work on IPv6 multi-homing to more effectively inform devices behind it of when they have multiple homes.
TODO: Check, for all of these, whether they should actually be in past work. Particularly the IPv6 multi-homing one.


View File

@ -1,278 +0,0 @@
%!TEX root = ../thesis.tex
%*******************************************************************************
%****************************** Fourth Chapter *********************************
%*******************************************************************************
\chapter{Evaluation}
% **************************** Define Graphics Path **************************
\ifpdf
\graphicspath{{Evaluation/Figs/Raster/}{Evaluation/Figs/PDF/}{Evaluation/Figs/}}
\else
\graphicspath{{Evaluation/Figs/Vector/}{Evaluation/Figs/}}
\fi
This chapter will discuss the methods used to evaluate my project and the results gained. The results will be discussed in the context of the success criteria laid out in the Project Proposal.
This evaluation shows that a network using my method of combining Internet connections can see vastly superior network performance to one without. It will show the benefits to throughput, availability, and adaptability.
\section{Evaluation Methodology}
I performed my experiments on a local Proxmox\footnote{\url{https://proxmox.com}} server. To encourage frequent and thorough testing, a harness was built in Python, allowing tests to be added easily and repeated with any code changes.
Proxmox was chosen due to its RESTful API, for integration with Python. It provides the required tools to limit connection speeds and disable connections. The server that ran these tests holds only a single other virtual machine which handles routing. This limits the effect of external factors on the tests.
The tests are performed on a Dell R710 Server with the following specifications:
\vspace{5mm} \noindent \textbf{CPU(s)} 16 x Intel(R) Xeon(R) CPU X5667 @ 3.07GHz (2 Sockets)
\\ \noindent \textbf{Memory} 6 x 2GB DDR3 ECC RDIMMS
\\ \noindent \textbf{Kernel} Linux 5.4 LTS
\section{Graphs}
\subsection{Bar Graphs}
The majority of data presented in this chapter will be in the form of bar graphs.
\subsection{Line Graphs}
Time sensitive data will be presented in the form of line graphs. These are generated in a consistent format, using a script found in appendix (Possibly Appendix).
\begin{figure}
\centering
\begin{subfigure}{.3\textwidth}
\includegraphics[width=0.9\linewidth]{graphs/TIS0-R0-1R1-1R2-1R3-1T10S1-R0-1R1-1R2-1T10S2-R0-1R1-1T10}
\caption{No error bars}
\label{fig:errorbars-none}
\end{subfigure}
\begin{subfigure}{.3\textwidth}
\includegraphics[width=0.9\linewidth]{graphs/TIExS0-R0-1R1-1R2-1R3-1T10S1-R0-1R1-1R2-1T10S2-R0-1R1-1T10}
\caption{X error bars}
\label{fig:errorbars-x}
\end{subfigure}
\begin{subfigure}{.3\textwidth}
\includegraphics[width=0.9\linewidth]{graphs/TIEyS0-R0-1R1-1R2-1R3-1T10S1-R0-1R1-1R2-1T10S2-R0-1R1-1T10}
\caption{Y error bars}
\label{fig:errorbars-y}
\end{subfigure}
\caption{Line graphs produced with varying error bars enabled}
\label{fig:errorbars}
\end{figure}
In figure \ref{fig:errorbars}, examples are shown of the same graph without any error bars, with error bars on the X axis, and with error bars on the Y axis. Error bars for the X axis are plotted as the range of all of the results, while error bars on the Y axis are plotted as $1.5*\sigma$, where $\sigma$ represents the standard deviation of the results.
In figure \ref{fig:errorbars-x}, it is shown that the range of the timestamps provided is incredibly tight. For this reason, I will not be including error bars in the X axis on the graphs shown from this point onwards.
In figure \ref{fig:errorbars-y}, it can be seen that the error bars on the Y axis are far more significant. Thus, error bars will continue to be included in the Y axis.
To generate these results, a fresh set of VMs (Virtual Machines) are created and the software installed on them. Once this is complete, each test is repeated until the coefficient of variance ($\sigma / \mu$, where $\mu$ is the arithmetic mean and $\sigma$ the standard deviation) is below a desired level, or too many attempts have been completed. The number of attempts taken for each series will be shown in the legend of each graph.
\begin{figure}
\centering
\begin{tikzpicture}[
squarednode/.style={rectangle, draw=black!60, fill=red!5, very thick, minimum size=5mm},
]
% Nodes
\node[squarednode] at (0,0) (speedtest) {Speed Test Server};
\node[squarednode] at (4,0) (remoteportal) {Remote Portal};
\node[squarednode] at (8,0) (localportal) {Local Portal};
\node[squarednode] at (11,0) (client) {Client};
% Edges
\draw[->] ([yshift=6mm]speedtest.north) -- (speedtest.north);
\draw[->] ([yshift=6mm]remoteportal.north) -- (remoteportal.north);
\draw[->] ([xshift=-7mm,yshift=6mm]localportal.north) -- ([xshift=-7mm]localportal.north);
\draw[->] ([yshift=6mm]localportal.north) -- (localportal.north);
\draw[->] ([xshift=7mm,yshift=6mm]localportal.north) -- ([xshift=7mm]localportal.north);
\draw[->] ([yshift=6mm]client.north) -- (client.north);
\draw[-] ([yshift=6mm]speedtest.north) -- ([yshift=6mm]localportal.north);
\draw[-] ([xshift=7mm,yshift=6mm]localportal.north) -- ([yshift=6mm]client.north);
% Edge Label
\node at ([xshift=-3.5mm,yshift=9mm]localportal.north) {0 .. N};
\end{tikzpicture}
\caption{The network structure of standard tests}
\label{fig:standard-network-structure}
\end{figure}
The network structure of all standard tests is shown in figure \ref{fig:standard-network-structure}. Any deviations from this structure will be mentioned. The Local Portal has as many interfaces as referenced in any test, plus one to connect to the client. All Virtual Machines also have an additional interface for management, but this has no effect on the tests.
\section{Success Criteria}
\subsection{Flow Maintained}
TODO.
\subsection{Bidirectional Performance Gains}
The performance gains measured are visible in both directions (inbound and outbound to the client). The graphs shown in this evaluation section are inbound unless stated otherwise, with the outbound graphs being available in Appendix \ref{appendix:outbound-graphs}.
\begin{figure}
\centering
\begin{subfigure}{.49\textwidth}
\includegraphics[width=0.9\linewidth]{graphs/IES0-R0-1R1-1T10S1-R0-1R1-2T10S2-R0-2R1-2T10}
\caption{The inbound graph}
\label{fig:example-inbound}
\end{subfigure}
\begin{subfigure}{.49\textwidth}
\includegraphics[width=0.9\linewidth]{graphs/OES0-R0-1R1-1T10S1-R0-1R1-2T10S2-R0-2R1-2T10}
\caption{The outbound graph}
\label{fig:example-outbound}
\end{subfigure}
\caption{The same test performed both inbound and outbound}
\label{fig:example-inbound-outbound}
\end{figure}
Figure \ref{fig:example-inbound-outbound} shows two graphs of the same test - one for the inbound performance and one for the outbound. It can be seen that both graphs show the same shape.
\subsection{IP Spoofing}
\begin{figure}
\begin{minted}{shell-session}
#IPv4 Forwarding
sysctl -w net.ipv4.ip_forward=1
# Route packets from the remote portal address on the client interface via the tunnel
ip route flush 12
ip route add table 12 to 1.1.1.0/24 via 172.19.152.2 dev nc0
ip rule add from 1.1.1.3 iif eth3 table 12 priority 12
# Route packets to the remote portal address out of the client interface
ip route flush 13
ip route add table 13 to 1.1.1.3 dev eth3
ip rule add to 1.1.1.3 table 13 priority 13
\end{minted}
\caption{The script necessary for the Local Portal to accept packets from a client with the spoofed IP.}
\label{fig:policy-based-routing-script-local-portal}
\end{figure}
This goal was to ensure the Client could use its network interface as if it really had that IP. This is achieved through Policy Based Routing. Example scripts are shown in figure \ref{fig:policy-based-routing-script-local-portal}. Linux also requires the kernel parameter \mintinline{shell-session}{net.ipv4.ip_forward} to be set to 1.
\subsection{Security}
TODO.
\subsection{More Bandwidth over Two Equal Connections}
This success criteria is satisfied, as seen in figure \ref{fig:two-equal-connections}. This graph compares the combined performance of two equal connections to one connection of the same speed and one connection of double the speed. It can be seen that the connection comes very close to the double speed connection, showing that this is an effective method of combining, in terms of raw TCP throughput. These tests are conducted using the TCP method of congestion control.
\begin{figure}
\centering
\includegraphics[width=10cm]{graphs/IES0-DR0-1T10S1-R0-1R1-1T10S2-DR0-2T10}
\caption{A graph demonstrating that two connections proxied lie between one connection of the same speed and one connection of double the speed in terms of throughput}
\label{fig:two-equal-connections}
\end{figure}
\section{Extended Goals}
\subsection{More Bandwidth over Unequal Connections}
\begin{figure}
\centering
\includegraphics[width=10cm]{graphs/IES0-R0-2R1-2T10S1-R0-1R1-2T10S2-R0-1R1-1T10}
\caption{A graph demonstrating that two unequal connections proxied lie between two connections of the lower bandwidth and two connections of the higher bandwidth}
\label{fig:unequal-connections}
\end{figure}
This is demonstrated by showing that $1x1MB + 1x2MB$ connections can exceed the performance of $2x1MB$ connections. The results for this can be seen in figure \ref{fig:unequal-connections}, compared against $2x2MB$ and $1x2MB$. It can be seen that the uneven connections fall between the two, which is as expected.
\subsection{More Bandwidth over Four Equal Connections}
\begin{figure}
\centering
\begin{subfigure}{.49\textwidth}
\includegraphics[width=0.9\linewidth]{graphs/IES0-R0-1R1-1R2-1R3-1T10S1-R0-1R1-1R2-1T10S2-R0-1R1-1T10}
\caption{1MB connections}
\label{fig:four-equal-connections-1MB}
\end{subfigure}
\begin{subfigure}{.49\textwidth}
\includegraphics[width=0.9\linewidth]{graphs/IES0-R0-2R1-2R2-2R3-2T10S1-R0-2R1-2R2-2T10S2-R0-2R1-2T10}
\caption{2MB connections}
\label{fig:four-equal-connections-2MB}
\end{subfigure}
\caption{Scaling of equal connections}
\label{fig:four-equal-connections}
\end{figure}
This criteria is about throughput increasing with the number of equal connections added. It is demonstrated by comparing the throughput of $2x1MB$, $3x1MB$ and $4x1MB$ connections. This can be seen in figure \ref{fig:four-equal-connections-1MB}. A further example is provided of $2x2MB$, $3x2MB$ and $4x2MB$ in figure \ref{fig:four-equal-connections-2MB}.
\subsection{Bandwidth Variation}
This criteria is about the adaptability of the congestion control system. This can be seen in figure \ref{fig:bandwidth-variation}, a graph in which the capacity of one of the two links is decreased. For $0 < t \leq 10$, both links are $2MB$. For $10 < t \leq 20$, one link is decreased to $1MB$ and the other remains at $2MB$. For $20 < t \leq 30$, both links are returned to $2MB$.
\begin{figure}
\centering
\includegraphics[width=10cm]{graphs/TIEyS0-R0-2R1-2E10R0-1E20R0-2T30}
\caption{A graph demonstrating the effect of decreasing the bandwidth of one of the two connections}
\label{fig:bandwidth-variation}
\end{figure}
It can be seen in the graph that the bandwidth initially stabilises at approximately $30Mbps$, before decreasing at just after $t = 10$. The connection then quickly recovers at $t=20$, to the initial rate.
\subsection{Connection Loss}
TODO.
\subsection{Single Interface Remote Portal}
\begin{figure}
\begin{minted}{shell-session}
# IPv4 Forwarding
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.ipv4.conf.eth0.proxy_arp=1
# Deliberately break local routing
ip rule add from all table local priority 20
ip rule del 0 || true
# Route packets to the interface but for nc to this host
ip rule add to 1.1.1.3 dport 1234 table local priority 9
# Route packets to the interface but not for nc via the tunnel
ip route flush 10
ip route add table 10 to 1.1.1.3 via 172.19.152.3 dev nc0
ip rule add to 1.1.1.3 table 10 priority 10
\end{minted}
\caption{The scripts necessary to allow the Remote Portal to only use a single interface.}
\label{fig:policy-based-routing-script-remote-portal}
\end{figure}
The single interface Remote Portal is achieved using a similar set of commands to IP Spoofing. The majority of the work is again done by policy based routing, with some kernel parameters needing to be set too. A sample script is shown in figure \ref{fig:policy-based-routing-script-remote-portal}.
\subsection{Connection Metric Values}
Not implemented yet.
\section{Stretch Goals}
\subsection{IPv4/IPv6 Support}
The project is only tested with IPv4.
\subsection{UDP Proxy Flows}
TODO
\subsection{IP Proxy Packets}
The project only supports TCP and UDP flows for carrying the proxied data.
\section{Performance Evaluation}
\label{section:performance-evaluation}
The discussion of success criteria above used slow network connections to test scaling in certain situations. This section will focus on testing how the solution scales, in terms of faster individual connections, and with many more connections. Further, all of the above tests were automated and carried out entirely on virtual hardware. This section will show some 'real-world' data, using a Raspberry Pi 4B and real Internet connections.
\subsection{Faster Connections Scaling}
TODO
\subsection{Number of Connections Scaling}
TODO
\subsection{Real World Testing}
TODO

View File

@ -1,316 +0,0 @@
%*******************************************************************************
%****************************** Third Chapter **********************************
%*******************************************************************************
\chapter{Implementation}
% **************************** Define Graphics Path **************************
\ifpdf
\graphicspath{{Implementation/Figs/Raster/}{Implementation/Figs/PDF/}{Implementation/Figs/}}
\else
\graphicspath{{Implementation/Figs/Vector/}{Implementation/Figs/}}
\fi
% ----------------------- Greedy Load Balancing ---------------------------- %
\section{Greedy Load Balancing}
The implementation of this project is built on the concept of greedy load balancing. By placing the packets to send in a FIFO queue, with each consumer (a flow connected to the other side) grabbing from the queue as soon as their congestion controlled link permits, a fast and adaptive system of network load balancing is achieved.
Greedy load balancing is so important to this solution that it influenced the choice of language, as discussed in section \ref{section:language-selection}. The thread synchronisation, internal flow control and the greedy load balancing itself were all solved using Go channels. A channel is a multi-reader, multi-writer circular queue that cooperates with the scheduler to block and unblock goroutines as data becomes available.
If each link has perfect congestion control, greed is the perfect metric for load balancing between connections: if any connection has available capacity to send a packet, the packet will be consumed. The complication with this technique arises when deciding how far this congestion control should reach. The decision in this solution is to have the congestion control reach a known point, under the assumption that this point will always have more capacity available than the links reaching it. This is a safe assumption in most cases, as the connections for which this solution is most useful are often in the tens of megabits, while datacenter connections (the Cloud) can reach multiple gigabits. By relying on the assumption that the Remote Portal will never be the bandwidth limit, the congestion measured between the Local Portal and Remote Portal is an excellent measure of how much data can be sent down each link. In contrast, relying on congestion control to the far end of each proxied connection would cause this assumption to fail, complicating the greedy approach significantly.
Given the language choice, the implementation is quite simple. The local TUN interface has both an input channel and an output channel. When the TUN interface receives packets to be proxied, they are placed in a channel for consumers to consume, where a consumer is a link through which the packet can be sent. When a packet is received through one of these links, it is placed into the output channel, where it is taken by the TUN interface and released. A further assumption here is that the local networking stack beyond the TUN interface will be faster than the combined links. A further benefit of channels is their blocking behaviour: if the TUN interface cannot keep up with the packets supplied by the producers, the producers block on the channel, providing end-to-end flow control between the TUN interfaces of the Local Portal and the Remote Portal.
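To illustrate this structure, a minimal sketch of the channel layout is given below. This is not the code of the implementation itself: the names \mintinline{go}{Flow}, \mintinline{go}{readFromTun} and \mintinline{go}{writeToTun} are purely illustrative.
\begin{minted}{go}
// A sketch of the greedy producer/consumer layout described above.
// The names Flow, readFromTun and writeToTun are illustrative only.
package proxy

type Flow interface {
    // SendPacket blocks until the flow's congestion controller permits
    // another packet to be dispatched over this link.
    SendPacket(p []byte) error
    // ReceivePacket blocks until a packet arrives over this link.
    ReceivePacket() ([]byte, error)
}

// Proxy wires a TUN device to a set of flows using two shared channels.
func Proxy(readFromTun func() []byte, writeToTun func([]byte), flows []Flow) {
    outbound := make(chan []byte) // packets read from the TUN, awaiting any free flow
    inbound := make(chan []byte)  // packets received from any flow, for the TUN

    // Single producer: the TUN device fills the outbound queue.
    go func() {
        for {
            outbound <- readFromTun()
        }
    }()

    // One consumer per flow: whichever flow is unblocked by its congestion
    // controller first takes the next packet, giving the greedy behaviour.
    for _, f := range flows {
        f := f
        go func() {
            for p := range outbound {
                _ = f.SendPacket(p)
            }
        }()
        go func() {
            for {
                p, err := f.ReceivePacket()
                if err != nil {
                    return
                }
                inbound <- p
            }
        }()
    }

    // Single consumer: packets from any flow are written back to the TUN.
    for p := range inbound {
        writeToTun(p)
    }
}
\end{minted}
The greedy behaviour falls out of the channel semantics: whichever flow goroutine is unblocked by its congestion controller first receives the next packet from the shared channel.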
% --------------------------------- TCP ------------------------------------ %
\section{TCP}
The first implementation is built on TCP. TCP provides congestion control and flow control, which are all that is necessary for this form of greedy load balancing, and therefore solves almost all of the issues given here. To implement such a solution on TCP, the only addition that needs to be made is punctuating the connection. As TCP provides a byte stream and not distinct datagrams, a distinction must be made between the packets. One option is to use a punctuating character, though this would reduce the usable character set of the packets, and therefore require escape sequences within them. The second option is to send the length of each packet, and then read the correct amount of data from the stream.
My implementation uses the second option, punctuating the stream by providing the length of each packet. Although the proxied IP packets do encode their own length, keeping the TCP flow agnostic of its payload keeps it as simple and flexible as possible, so that it does not need to be updated to transmit any other sort of packet. Therefore, the TCP flow is punctuated by sending the length of each packet before the packet itself, after which that number of bytes can be read from the stream.
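A minimal sketch of this length-prefixed framing is shown below. The use of a 32-bit big-endian length prefix is an assumption for illustration rather than the exact wire format of the implementation.
\begin{minted}{go}
// A sketch of length-prefixed framing over a TCP byte stream.
// The 32-bit big-endian prefix is an illustrative assumption.
package framing

import (
    "encoding/binary"
    "io"
)

// WritePacket punctuates the stream by sending the packet length
// before the packet itself.
func WritePacket(w io.Writer, packet []byte) error {
    var length [4]byte
    binary.BigEndian.PutUint32(length[:], uint32(len(packet)))
    if _, err := w.Write(length[:]); err != nil {
        return err
    }
    _, err := w.Write(packet)
    return err
}

// ReadPacket reads the length prefix, then exactly that many bytes.
func ReadPacket(r io.Reader) ([]byte, error) {
    var length [4]byte
    if _, err := io.ReadFull(r, length[:]); err != nil {
        return nil, err
    }
    packet := make([]byte, binary.BigEndian.Uint32(length[:]))
    if _, err := io.ReadFull(r, packet); err != nil {
        return nil, err
    }
    return packet, nil
}
\end{minted}
Using \mintinline{go}{io.ReadFull} ensures the reader waits for the whole packet, even if TCP delivers it across multiple segments.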
\begin{figure}
\centering
\begin{bytefield}[bitwidth=0.9em]{32}
\bitheader{0-31} \\
\begin{rightwordgroup}{TCP\\Header}
\bitbox{16}{Source Port} & \bitbox{16}{Destination Port} \\
\wordbox{1}{Sequence Number} \\
\wordbox{1}{Acknowledgment Number} \\
\bytefieldsetup{bitheight=5em}\bitbox{4}{Data offset} & \bitbox{3}{\begin{turn}{-65}Reserved\end{turn}} & \bitbox{1}{N S} & \bitbox{1}{C R W} & \bitbox{1}{E C E} & \bitbox{1}{U R G} & \bitbox{1}{A C K} & \bitbox{1}{P S H} & \bitbox{1}{R S T} & \bitbox{1}{S Y N} & \bitbox{1}{F I N} & \bitbox{16}{Window Size} \\
\bitbox{16}{Checksum} & \bitbox{16}{Urgent Pointer}
\end{rightwordgroup} \\
\wordbox[tlr]{1}{Proxied IP packet} \\
\skippedwords \\
\wordbox[blr]{1}{} \\
\begin{rightwordgroup}{Security\\Footer}
\bitbox{32}{Unix Timestamp} \\
\wordbox[tlr]{1}{Message Authentication Code} \\
\wordbox[blr]{1}{$\cdots$}
\end{rightwordgroup}
\end{bytefield}
\caption{TCP packet structure}
\label{fig:tcp-packet-structure}
\end{figure}
% --------------------------------- UDP ------------------------------------ %
\section{UDP}
The second implementation is built on UDP. Sending each tunnelled packet in a discrete datagram has the benefit of an increased likeness to unproxied IP packets. An example of this is Path MTU Discovery. Over TCP, any length of packet will reach the Remote Portal, as TCP will fragment the byte stream across multiple TCP segments, regardless of the Do Not Fragment flag on an IPv4 packet or the packet being IPv6. With UDP, packets that are too large to be transmitted across the link (once encapsulated in the UDP datagram) will be dropped. Though this does not immediately react in the same way as an unproxied IP packet (ICMP packets need additional handling), it brings the link much closer to transmitting correctly sized IP packets than TCP, which can increase performance.
Further, sending one datagram per packet has a benefit in terms of head of line blocking. Head of line blocking occurs when a TCP flow must retransmit the packet at the front of the window while the window is full, preventing the flow from transmitting any more data. With the UDP solution, each datagram received represents exactly one proxied packet, and as such can be forwarded immediately, regardless of the order in which it is received. Although this does not entirely solve head of line blocking, it allows an entire window of packets to be transmitted with significantly reduced latency before the blocker at the start of the window is dealt with (the method for which is not retransmission, and is discussed further later).
Beyond the benefits of the similarity between datagrams and packets, using UDP as opposed to TCP provides for more flexibility in terms of transmission guarantees. The method of greedy load balancing given relies on congestion control and flow control. However, TCP also provides reliable delivery and ordering. By reducing these guarantees to the minimum congestion control and flow control, transport efficiency can be increased, while still allowing transparency for the proxied packets to provide these guarantees where necessary. Lesser guarantees can also allow for a shorter header, increasing per-packet efficiency, though this is not the case with the presented structure.
\subsection{Packet Structure}
The packet structure was designed to allow for effective congestion control, with implicit flow control, and nothing more. This is achieved with a simple three-part, 12-byte header (shown in figure \ref{fig:udp-packet-structure}). Similarly to TCP, each packet contains an acknowledgement number (ACK) and a sequence number (SEQ). These serve the same purpose as in TCP: providing a method for a congestion controller to know which packets have been received by its partner. However, they are implemented slightly differently. TCP sequence numbers are based on bytes, and as such the sequence number of a packet is the sequence number of the first byte that it contains. As this UDP-based protocol is designed for transmitting whole packets, losing part of a packet does not make sense. Packets are never split, as the protocol does not support partial transmission, and as such they are atomic. This means that the sequence number can represent an entire packet, as opposed to a byte.
\begin{figure}
\centering
\begin{bytefield}[bitwidth=0.6em]{32}
\bitheader{0-31} \\
\begin{rightwordgroup}{UDP\\Header}
\bitbox{16}{Source port} & \bitbox{16}{Destination port} \\
\bitbox{16}{Length} & \bitbox{16}{Checksum}
\end{rightwordgroup} \\
\begin{rightwordgroup}{CC\\Header}
\bitbox{32}{Acknowledgement number} \\
\bitbox{32}{Negative acknowledgement number} \\
\bitbox{32}{Sequence number}
\end{rightwordgroup} \\
\wordbox[tlr]{1}{Proxied IP packet} \\
\skippedwords \\
\wordbox[blr]{1}{} \\
\begin{rightwordgroup}{Security\\Footer}
\bitbox{32}{Unix timestamp} \\
\wordbox[tlr]{1}{Message authentication code} \\
\wordbox[blr]{1}{$\cdots$}
\end{rightwordgroup}
\end{bytefield}
\caption{UDP packet structure}
\label{fig:udp-packet-structure}
\end{figure}
In addition to these two fields, a further Negative Acknowledgement (NACK) field is required. Due to TCP's promise of reliable transmission, negative acknowledgements can never occur: either the sender resends the packet in question, or the flow is terminated. In my protocol, however, the receiver needs a method of acknowledging a discontinuous stream of packets. If this were attempted without a separate NACK number, each ACK number would have to be sent and received individually. This decreases the efficiency and correctness of ACKs, both in terms of missing packets and in having to send at least one packet for every packet received.
The benefit of a NACK is demonstrated in figure \ref{fig:sequence-ack-nack-comparison}. Figure \ref{fig:sequence-ack-continuous} shows a series of ACKs for an uninterrupted stream of sequence numbers. The ACKs carry little information here, as no packets are lost, but this is a situation that can hold for large portions of a flow, given good congestion control and reliable networking. Figure \ref{fig:sequence-ack-discontinuous} shows the same ACK system for a stream of sequence numbers with one missing. It can be seen that the sender and receiver reach an impasse: the receiver cannot increase its ACK number, as it has not received packet 4, and the sender cannot send more packets, as its window is full. The only way out is for the receiver to increase its ACK number regardless and rely on the sender realising that it took too long to acknowledge the missing packet, though this is equivalent to a lost ACK, and would therefore be difficult to detect reliably.
Figure \ref{fig:sequence-ack-nack-discontinuous} shows how this same situation can be resolved with a NACK field. After the receiver has concluded that the intermediate packet(s) were lost in transit (a function of RTT, discussed further later), it updates the NACK field to the highest lost packet, allowing the ACK field to be increased from the packet after the lost one. This resolves the deadlock of not being able to increase the ACK number without requiring reliable delivery: the receiver increases its NACK at the point where a TCP sender would retransmit.
\begin{figure}
\hfill
\begin{subfigure}[t]{0.3\textwidth}
\centering
\begin{tabular}{|c|c|}
SEQ & ACK \\
1 & 0 \\
2 & 0 \\
3 & 2 \\
4 & 2 \\
5 & 2 \\
6 & 5 \\
6 & 6
\end{tabular}
\caption{ACKs responding to in order sequence numbers}
\label{fig:sequence-ack-continuous}
\end{subfigure}\hfill
\begin{subfigure}[t]{0.3\textwidth}
\centering
\begin{tabular}{|c|c|}
SEQ & ACK \\
1 & 0 \\
2 & 0 \\
3 & 2 \\
5 & 3 \\
6 & 3 \\
7 & 3 \\
7 & 3
\end{tabular}
\caption{ACKs responding to a missing sequence number}
\label{fig:sequence-ack-discontinuous}
\end{subfigure}\hfill
\begin{subfigure}[t]{0.35\textwidth}
\centering
\begin{tabular}{|c|c|c|}
SEQ & ACK & NACK \\
1 & 0 & 0 \\
2 & 0 & 0 \\
3 & 2 & 0 \\
5 & 2 & 0 \\
6 & 2 & 0 \\
7 & 6 & 4 \\
7 & 7 & 4
\end{tabular}
\caption{ACKs and NACKs responding to a missing sequence number}
\label{fig:sequence-ack-nack-discontinuous}
\end{subfigure}
\caption{ACKs and NACKs responding to sequence numbers}
\label{fig:sequence-ack-nack-comparison}
\hfill
\end{figure}
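To make the header format concrete, the following sketch encodes and decodes the 12-byte congestion control header of figure \ref{fig:udp-packet-structure}. The field order follows the figure; big-endian byte order is assumed for illustration and may differ from the implementation.
\begin{minted}{go}
// A sketch of marshalling the 12-byte congestion control header.
// Field order follows the figure; byte order is an assumption.
package ccheader

import (
    "encoding/binary"
    "errors"
)

type Header struct {
    Ack  uint32 // highest packet received in a continuous stream
    Nack uint32 // highest packet declared lost
    Seq  uint32 // sequence number of this packet
}

func (h Header) Marshal() []byte {
    buf := make([]byte, 12)
    binary.BigEndian.PutUint32(buf[0:4], h.Ack)
    binary.BigEndian.PutUint32(buf[4:8], h.Nack)
    binary.BigEndian.PutUint32(buf[8:12], h.Seq)
    return buf
}

func Unmarshal(buf []byte) (Header, error) {
    if len(buf) < 12 {
        return Header{}, errors.New("congestion control header too short")
    }
    return Header{
        Ack:  binary.BigEndian.Uint32(buf[0:4]),
        Nack: binary.BigEndian.Uint32(buf[4:8]),
        Seq:  binary.BigEndian.Uint32(buf[8:12]),
    }, nil
}
\end{minted}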
As this is a new UDP protocol, I wrote a Wireshark\footnote{\url{https://wireshark.org}} dissector, shown in figure \ref{fig:udp-wireshark-dissector}. This is a Lua script that instructs Wireshark to use the given dissector function for UDP traffic on port 1234 (a port chosen for testing). It extracts the three congestion control fields from each UDP datagram, showing them in a far more readable format and allowing more efficient debugging of congestion control protocols. The extracted data can be seen in figure \ref{fig:udp-wireshark-dissector-results}.
\begin{figure}
\inputminted{lua}{Implementation/Samples/wireshark_dissector.lua}
\caption{Wireshark dissector}
\label{fig:udp-wireshark-dissector}
\end{figure}
\begin{figure}
\centering
TODO
\caption{Wireshark dissector screenshots}
\label{fig:udp-wireshark-dissector-results}
\end{figure}
\subsection{Congestion Control}
To allow for flexibility in congestion control, I started by building an interface (shown in figure \ref{fig:congestion-control-interface}) for congestion controllers. The aim of the interface is to provide the controller with every update that could be used for congestion control, while also providing it every opportunity to set an ACK or NACK on a packet.
\begin{figure}
\inputminted{go}{Implementation/Samples/congestion.go}
\caption{Congestion controller interface}
\label{fig:congestion-control-interface}
\end{figure}
A benefit of the chosen language (Go\footnote{\url{https://golang.org}}) is its powerful management of threads of execution, or Goroutines. This is demonstrated in the interface, particularly by the method \mintinline{go}{Sequence() uint32}. This method expects a congestion controller to block until it can provide the packet with a sequence number for dispatch. Given that the design runs each producer and consumer in a separate Goroutine, this is an effective way to synchronise packet sending with the congestion controller, and should work for any potential method of congestion control.
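The sketch below illustrates how a consumer Goroutine might use this blocking behaviour. Only \mintinline{go}{Sequence()} is taken from the interface above; the surrounding names are illustrative.
\begin{minted}{go}
// A sketch of a consumer Goroutine blocking on the congestion controller.
// Only Sequence() is taken from the interface; the rest is illustrative.
package flow

type CongestionController interface {
    // Sequence blocks until the congestion controller permits another
    // packet to be dispatched, returning its sequence number.
    Sequence() uint32
}

// consume takes packets from the shared outbound channel as soon as the
// congestion controller allows, producing the greedy balancing behaviour.
func consume(cc CongestionController, outbound <-chan []byte,
    send func(seq uint32, packet []byte) error) {
    for packet := range outbound {
        seq := cc.Sequence() // blocks until a send is permitted
        if err := send(seq, packet); err != nil {
            return
        }
    }
}
\end{minted}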
\subsubsection{New Reno}
The first congestion control protocol I implemented is based on TCP New Reno, a well understood and powerful congestion control algorithm. Pseudocode for the most interesting functions is shown in figure \ref{fig:udp-congestion-newreno-pseudocode}.
\begin{figure}
\begin{minted}{python}
def findAck(start, acksToSend):
    # Collapse a continuous run of received sequence numbers into one ACK
    ack = start
    while acksToSend.Min() == ack + 1:
        ack = acksToSend.PopMin()
    return ack

def updateAckNack(lastAck, lastNack):
    nack = lastNack
    ack = findAck(lastAck, acksToSend)
    if ack == lastAck:
        # No progress was made; check whether the missing packet has been
        # outstanding for long enough to be declared lost
        if acksToSend.Min().IsDelayedMoreThan(NackTimeout):
            nack = acksToSend.Min() - 1
            ack = findAck(acksToSend.PopMin(), acksToSend)
    return ack, nack

def ReceivedNack(nack):
    if not nack.IsFresh():
        return
    windowSize /= 2

def ReceivedAck(ack):
    if not ack.IsFresh():
        return
    if slowStart:
        windowSize += numberAcked
    else:
        windowCount += numberAcked
        if windowCount >= windowSize:
            windowSize += 1
            windowCount -= windowSize
\end{minted}
\caption{UDP New Reno pseudocode}
\label{fig:udp-congestion-newreno-pseudocode}
\end{figure}
My implementation of New Reno functions differently from the TCP version, given that it responds with NACKs instead of retransmitting. Updating the ACK is similar to TCP: the ACK sent is the highest number that maintains a continuous stream of received packets. The interesting part is how the controller decides to send a NACK. Whenever a hole is seen in the packets waiting to be acknowledged, the delay of the minimum outstanding packet is checked. If it has been waiting for more than a multiple of the round trip time, presently chosen to be $3 \times RTT$, the NACK is updated to one below the next packet that can be acknowledged, indicating that a packet has been missed. The ACK can then be incremented from the next available packet.
A point of interest is the \mintinline{go}{acksToSend} data structure. Three methods are required: \mintinline{go}{Min()}, \mintinline{go}{PopMin()} and \mintinline{go}{Insert()} (used in a section of code not shown in the pseudocode). A data structure that implements these methods particularly efficiently is the binary heap, providing Min in $O(1)$ time, and Insert and PopMin in $O(\log n)$ time. Therefore, I implemented a binary heap to store the ACKs to send.
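A minimal version of such a heap can be built on the standard library's \mintinline{go}{container/heap} package, as sketched below; this illustrates the technique rather than reproducing the exact type used in the implementation.
\begin{minted}{go}
// A minimal binary heap of sequence numbers providing Min, PopMin and
// Insert, built on the standard container/heap package. Illustrative only.
package ackheap

import "container/heap"

type uint32Heap []uint32

func (h uint32Heap) Len() int            { return len(h) }
func (h uint32Heap) Less(i, j int) bool  { return h[i] < h[j] }
func (h uint32Heap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *uint32Heap) Push(x interface{}) { *h = append(*h, x.(uint32)) }
func (h *uint32Heap) Pop() interface{} {
    old := *h
    n := len(old)
    x := old[n-1]
    *h = old[:n-1]
    return x
}

type AckHeap struct{ h uint32Heap }

// Min returns the smallest outstanding sequence number in O(1).
// It must only be called when the heap is non-empty.
func (a *AckHeap) Min() uint32 { return a.h[0] }

// PopMin removes and returns the smallest sequence number in O(log n).
func (a *AckHeap) PopMin() uint32 { return heap.Pop(&a.h).(uint32) }

// Insert adds a newly received sequence number in O(log n).
func (a *AckHeap) Insert(seq uint32) { heap.Push(&a.h, seq) }
\end{minted}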
% ------------------------------- Security --------------------------------- %
\section{Security}
The security in this solution is achieved by providing a set of interfaces for potential cryptographic systems to implement. This can be seen in figure \ref{fig:message-authenticator-interface}. As with all interfaces, the goal here was to create a flexible but minimal interface.
\begin{figure}
\inputminted{go}{Implementation/Samples/mac.go}
\caption{Message authenticator interface}
\label{fig:message-authenticator-interface}
\end{figure}
As far as possible, the security of the application relies on external libraries. Although implementing security algorithms directly from papers would be an interesting exercise, it is far more likely to result in errors and thus security flaws. Therefore, I use trusted and open source libraries for the chosen scheme.
\subsection{Message Authentication Algorithms}
The shared key algorithm I chose to implement is BLAKE2s (Aumasson et al., “BLAKE2.”, \cite{hutchison_blake2_2013}). It is extremely fast (comparable to MD5) while remaining cryptographically secure. Further to this, BLAKE2s is available in the Go \verb'crypto' library\footnote{\url{https://github.com/golang/crypto}}, which is a trusted and open source implementation.
By appending a timestamp to the packet prior to computing the MAC, a guarantee of freshness can be included.
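The sketch below illustrates this combination of timestamp and BLAKE2s MAC using the Go \verb'crypto' library. The exact footer layout (a 32-bit Unix timestamp followed by a 256-bit MAC) is an assumption for illustration and may differ from the implementation.
\begin{minted}{go}
// A sketch of appending a timestamp and BLAKE2s MAC to a packet, and of
// verifying it on receipt. The footer layout here is an assumption.
package security

import (
    "crypto/hmac"
    "encoding/binary"
    "errors"
    "time"

    "golang.org/x/crypto/blake2s"
)

// AppendFooter appends a Unix timestamp and a BLAKE2s MAC computed over
// the packet and timestamp with the shared key.
func AppendFooter(packet, key []byte) ([]byte, error) {
    ts := make([]byte, 4)
    binary.BigEndian.PutUint32(ts, uint32(time.Now().Unix()))
    packet = append(packet, ts...)

    h, err := blake2s.New256(key)
    if err != nil {
        return nil, err
    }
    h.Write(packet)
    return h.Sum(packet), nil
}

// VerifyFooter checks the MAC and that the timestamp is within maxAge,
// returning the original packet without its footer.
func VerifyFooter(data, key []byte, maxAge time.Duration) ([]byte, error) {
    if len(data) < 4+blake2s.Size {
        return nil, errors.New("packet too short")
    }
    body, mac := data[:len(data)-blake2s.Size], data[len(data)-blake2s.Size:]

    h, err := blake2s.New256(key)
    if err != nil {
        return nil, err
    }
    h.Write(body)
    if !hmac.Equal(mac, h.Sum(nil)) {
        return nil, errors.New("bad message authentication code")
    }

    ts := int64(binary.BigEndian.Uint32(body[len(body)-4:]))
    if time.Since(time.Unix(ts, 0)) > maxAge {
        return nil, errors.New("packet too old")
    }
    return body[:len(body)-4], nil
}
\end{minted}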
\subsection{Implementation Details}
\subsubsection{UDP}
By computing the message authentication code for the entire application controlled segment of the message, including the congestion control header, UDP meets the initial security criteria with no further work. Given that the beginning of a flow includes the congestion control header, it ensures that no flow is started without proving the partner's identity.
\subsubsection{TCP}
TCP requires more work than UDP, as the TCP handshake is outside the control of the application. Therefore, an application-level handshake must be completed in addition to the initial TCP handshake for the threat model to be satisfied.
TODO
\subsection{Repeated Packets}
\label{section:implementation-repeated-packets}
As discussed in section \ref{section:preparation-repeated-packets}, care must be taken to avoid a bad actor repeating packets. Although some degree of freshness is provided by including a timestamp in the hashed packet data, it is inadequate in the case of a determined attacker. To resolve this, a data structure is built, given in figure \ref{fig:data-structure-hash-store}. The data structure has a focus on execution speed, as this check could quickly become costly. To achieve this, the data structure combines both a map, implemented efficiently in the Go runtime, and a Binary Heap. This provides the time complexity given in figure \ref{fig:time-complexity-hash-store}. The code given, although almost syntactically correct, is pseudocode: Go's lack of support for generic programming unfortunately means that \verb'BinaryHeap[*packetStore]' is not valid Go code, and must be replaced with a data structure explicitly for that type, such as \verb'PacketStoreBinaryHeap', written specifically for the \verb'*packetStore' type.
\begin{figure}
\begin{minted}{go}
type packetStore struct {
    h [16]byte
    t time.Time
}

type HashStore struct {
    m map[[16]byte]*packetStore
    h BinaryHeap[*packetStore] // ordered by timestamp, oldest first
}

func (s *HashStore) Size() int {
    return len(s.m)
}

func (s *HashStore) RemoveOldest() {
    o := s.h.Pop()
    delete(s.m, o.h)
}

func (s *HashStore) Store(h [16]byte, t time.Time) (existed bool) {
    if p, found := s.m[h]; found {
        // Repeated packet: refresh its timestamp and restore heap order
        p.t = t
        s.h.Update(p)
        return true
    }
    p := &packetStore{h: h, t: t}
    s.m[h] = p
    s.h.Insert(p)
    return false
}
\end{minted}
\caption{Pseudocode for the implementation of HashStore, a mechanism for storing the most recently seen packet hashes}
\label{fig:data-structure-hash-store}
\end{figure}
\begin{figure}
\centering
\begin{tabular}{c|c|c}
Method & Fresh Packet & Repeated Packet \\
\hline
Store & $O(1)$ & $O(n)$ \\
\hline
Size & \multicolumn{2}{c}{$O(1)$} \\
\hline
RemoveOldest & \multicolumn{2}{c}{$O(\log n)$} \\
\hline
\end{tabular}
\caption{Time complexity of the HashStore operations}
\label{fig:time-complexity-hash-store}
\end{figure}
In the stable state, each of \verb'Store', \verb'Size' and \verb'RemoveOldest' is called for each received packet. Each packet first calls Store, before diverging based on freshness. A repeated packet updates the store at a cost of $O(n)$, due to the cost of traversing the heap to find the correct entry to heapify from. Although this is a particularly expensive operation, taking linear time, it is deemed acceptable for two reasons: the average case is far better, and the code can react in other ways to prevent repeated packets. The average case is better because of the layout of a heap: the oldest (smallest) entries can only occupy a small number of positions near the top of the heap, so the traversal to find a repeated packet is usually short.
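For illustration, this per-packet check might be driven as follows, building on the HashStore of figure \ref{fig:data-structure-hash-store}; \mintinline{go}{maxStored} is a hypothetical configuration value bounding the memory used by the store.
\begin{minted}{go}
// Illustrative only: drive the replay check once per received packet.
// maxStored is a hypothetical configuration value bounding memory use.
func isRepeat(s *HashStore, hash [16]byte, now time.Time, maxStored int) bool {
    // Evict the oldest hashes until the store is within its size budget.
    for s.Size() >= maxStored {
        s.RemoveOldest()
    }
    // Store reports whether the hash had been seen recently; if so, the
    // packet is treated as a repeat and dropped.
    return s.Store(hash, now)
}
\end{minted}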

View File

@ -1,43 +0,0 @@
%!TEX root = ../thesis.tex
%*******************************************************************************
%*********************************** First Chapter *****************************
%*******************************************************************************
\chapter{Introduction} %Title of the First Chapter
\ifpdf
\graphicspath{{Introduction/Figs/Raster/}{Introduction/Figs/PDF/}{Introduction/Figs/}}
\else
\graphicspath{{Introduction/Figs/Vector/}{Introduction/Figs/}}
\fi
Most UK residential broadband connections receive speeds advertised at between 30Mbps and 100Mbps download (Ofcom, “UK Home Broadband Performance.”, \cite{ofcom_performance_2020}). However, it is often possible to have multiple lower bandwidth connections installed. More generally, a wider variety of Internet connections for fixed locations is becoming available with time. These include: DSL, Fibre To The Premises, 4G, 5G, Wireless ISPs such as LARIAT, and Low Earth Orbit ISPs such as Starlink. This work focuses on a method of providing an aggregate link from multiple distinct connections, regardless of their likeness.
\section{Existing Work}
\subsection{MultiPath TCP}
MultiPath TCP (Handley et al., “TCP Extensions for Multipath Operation with Multiple Addresses.”, \cite{handley_tcp_2020}) is an extension to the regular Transmission Control Protocol, allowing the creation of subflows. MultiPath TCP was designed with two purposes: increasing resiliency and throughput for multi-homed mobile devices, and providing multi-homed servers with better control over balancing flows between their interfaces.
The first reason that MPTCP does not satisfy the motivation for this project concerns addressing. MPTCP is most effective at creating subflows when the device has distinct interfaces or IP addresses. In the case of an IPv4 home connection, it is often the case that a single IPv4 address is provided to the home, leading to the use of NAT for the devices behind the router. If an MPTCP capable device lies behind a NAT router which has two external IPv4 addresses, the device itself will have no knowledge of this. This can be solved for server devices by giving them an IP address on each NAT, but does not provide a good solution for more standard devices, which range from devices with only a WiFi interface to IP phones and smart televisions.
TODO: IPv6 autoconf wrt. multihoming
Further, it is important to remember legacy devices. Many legacy devices will never support IPv6, and will never support MPTCP. Though it is possible that these devices will not require the performance benefits available from multiple Internet connections, it is likely that they would benefit particularly from a more reliable connection. Being able to apply speed and reliability benefits to an entire network without control over every device on it is a significant benefit to the solution provided in this work.
The second reason that MPTCP may not provide the change to the Internet that was once hoped for is the rise of UDP based protocols. Although MPTCP is now making its way into the Linux kernel, many services are switching to lighter UDP protocols such as QUIC. The most prominent example of this is HTTP/3, previously known as HTTP over QUIC. This shift towards application controlled network connections, which avoid overhead unnecessary for each specific application, appears to be the direction in which the Internet is heading, and suggests that it will be a very long time before even modern applications can benefit from multipath TCP.
TODO: Find a study on how many of the connections on the Internet are TCP or UDP, particularly over time
\section{Aims}
This project aimed to provide a method of combining a variety of Internet connections, such as those listed above.
When combining Internet connections, there are three main measures that one can prioritise: throughput, resilience and latency. This project aimed to provide throughput and resilience at the cost of latency. This is achieved by inserting additional items into the network stack, in order to split/combine over bottlenecks, as seen in figure \ref{fig:combining-bottlenecks}.
\begin{figure}
\centering
\includegraphics[width=\linewidth]{Bottlenecks}
\caption{A high level overview of the bottlenecks that are combined in this solution.}
\label{fig:combining-bottlenecks}
\end{figure}

View File

@ -1,8 +0,0 @@
%!TEX root = ../thesis.tex
% ********************** Thesis Appendix B - Outbound Graphs *************************
\chapter{Outbound Graphs}
\label{appendix:outbound-graphs}
The graphs shown in the evaluation section are inbound to the client (unless otherwise specified).
This appendix contains the same tests, but outbound from the client.

View File

@ -932,7 +932,14 @@ wish to left align your text}
\thispagestyle{empty}
% Author
\vspace*{-5.5em}
\hspace{14em}
{\usebox{\PHD@author}}
\vspace*{6.5em}
\begin{singlespace}
\begin{center}
% University Crest Long if college crest is defined
@ -957,12 +964,6 @@ wish to left align your text}
\vspace{.15\PHD@titlepagespacing}
\fi
% Author
{\usebox{\PHD@author}}
\vspace*{1em}
% Supervisor
\ifPHD@supervisor%
{\usebox{\PHD@supervisor}}

View File

@ -5,7 +5,7 @@
% Set {innerside margin / outerside margin / topmargin / bottom margin} and
% other page dimensions
\ifsetCustomMargin
\RequirePackage[left=37mm,right=30mm,top=35mm,bottom=30mm]{geometry}
\RequirePackage[left=20mm,right=20mm,top=32mm,bottom=20mm]{geometry}
\setFancyHdr % To apply fancy header after geometry package is loaded
\fi
@ -67,7 +67,7 @@
% *************************** Graphics and figures *****************************
%\usepackage{rotating}
\usepackage{rotating}
%\usepackage{wrapfig}
% Uncomment the following two lines to force Latex to place the figure.
@ -85,9 +85,12 @@
\usepackage{bytefield}
\usepackage{rotating}
\usepackage{dpfloat}
\usepackage{dirtree}
\usepackage{svg}
\usepackage{tikz}
\usetikzlibrary{positioning}
\usetikzlibrary{shapes.multipart}
% ********************************** Tables ************************************
\usepackage{booktabs} % For professional looking tables
@ -97,7 +100,6 @@
%\usepackage{longtable}
\usepackage{tabularx}
% *********************************** SI Units *********************************
\usepackage{siunitx} % use this package module for SI units
@ -111,6 +113,7 @@
% \onehalfspacing
% \singlespacing
\setstretch{1.0}
% ************************ Formatting / Footnote *******************************
@ -199,12 +202,15 @@
%\ifsetDraft
% \usepackage[colorinlistoftodos]{todonotes}
% \newcommand{\mynote}[1]{\todo[author=kks32,size=\small,inline,color=green!40]{#1}}
% \newcommand{\mynote}[1]{\todo[author=jsh77,size=\small,inline,color=green!40]{#1}}
%\else
% \newcommand{\mynote}[1]{}
% \newcommand{\listoftodos}{}
%\fi
\usepackage[colorinlistoftodos]{todonotes}
\newcommand{\mynote}[1]{\todo[author=jsh77,size=\small,inline,color=orange!40]{#1}}
% Example todo: \mynote{Hey! I have a note}
% ******************************** Highlighting Changes **********************************

View File

@ -1,379 +0,0 @@
%*******************************************************************************
%****************************** Second Chapter *********************************
%*******************************************************************************
\chapter{Preparation}
\ifpdf
\graphicspath{{Preparation/Figs/Raster/}{Preparation/Figs/PDF/}{Preparation/Figs/}}
\else
\graphicspath{{Preparation/Figs/Vector/}{Preparation/Figs/}}
\fi
Proxying packets is the process of taking packets that arrive at one location and transporting them to leave at another. This chapter focuses on the preparatory work to achieve this practically and securely, given the design outlined in the previous chapter, in which the proxy consolidates multiple connections to appear as one to both the wider Internet and devices on the local network. In sections \ref{section:risk-analysis} to \ref{section:preparation-security}, I discuss the security risks and plans to confront them. In section \ref{section:language-selection}, I present three languages: Go, Rust and C++, and provide context for choosing Go as the implementation language. Finally, in sections \ref{section:requirements-analysis} and \ref{section:engineering-approach}, I present a requirements analysis and a description of the engineering approach for the project.
% ---------------------------- Risk Analysis ------------------------------- %
\section{Risk Analysis}
\label{section:risk-analysis}
Proxying a network connection via a Remote Portal creates an expanded set of security risks compared to connecting directly to the Internet via a modem. In this section, I analyse these risks, both in isolation and in comparison to the case of connecting directly.
Firstly, this analysis focuses on transparent security: the case of the Local Portal and Remote Portal, with everything in between, being viewed simply as an Internet connection. The focus is on how the risks compare to those of a standard Internet connection, and what guarantees must be made to reach the same set of risks.
Secondly, this section focuses on the connection between the Local Portal and the Remote Portal, primarily on the risks of incorrectly validating the authenticity of connections and packets.
These security problems will be considered in the context of the success criteria: to provide security no worse than not using this solution at all. That is, the security should be identical to or stronger than that of a direct connection in the first case, and provide no additional attack vectors in the second.
\subsection{Transparent Security}
A convenient factor of the Internet being an interconnected set of smaller networks is that there are very few guarantees of security. At layer 3, none of anonymity, integrity, privacy or freshness are provided, so it is up to the application to ensure its own security on top of this lack of guarantees. For the purposes of this software, this is very useful: if there are no guarantees to maintain, applications can be expected to protect themselves correctly regardless of how often failures of integrity or privacy occur beneath them.
Therefore, to maintain the same level of security for applications, it is sufficient to guarantee that the set of packets which leave the Remote Portal is a subset of those that entered the Local Portal, and vice versa. In such a case, all of the security implemented above Layer 3 will be maintained. This means that whether a user is accessing insecure websites over HTTP, running a corporate VPN connection or sending encrypted emails, the security of these applications will be unaltered.
\subsection{Portal to Portal Communication}
\subsubsection{Cost}
Many Internet connections have limits or charges for bandwidth usage. In a standard network, control of this cap is physical: if someone wished to increase the load, they would have to physically connect to the modem.
Because of this, care must be taken with regard to cost, as rather than needing physical access, an attacker needs only Internet access to send data through your connection. A conceivable threat is for someone to send packets to your Remote Portal from their own connection, causing the Portal to forward these packets and thus consuming your limited or costly bandwidth.
\subsubsection{Denial of Service}
\label{subsubsection:threats-denial-of-service}
Proxying packets in this way provides a new method of Denial of Service. If an attacker can convince either Portal to send them a portion of the packets due for the other portal, the packet loss of the overall connection increases immensely. Consider a pair of portals with two equal flows between them: if an attacker connects a third equal flow, a third of the packets are now, from the perspective of the clients behind the other portal, lost. Connections that rely on packet loss for congestion control will persistently believe that they are sending too many packets, and thus slow down hugely. Given the quantity of flows on the Internet that use loss based congestion control, this constitutes an effective Denial of Service with very little bandwidth.
\subsection{Privacy}
Though the packets leaving a modem have no reasonable expectation of privacy, having the packets enter the Internet at two points does widen this attack surface. However, this is equivalent to your packets taking a longer route through the Internet, with more hops, so it is not comparatively worse.
Further, if an attacker convinces the Remote Portal that they are a valid connection from the Local Portal, a portion of packets will be sent to them. However, as a fortunate side effect, this attempt at sniffing would cause a significant Denial of Service to any links using loss based congestion control, due to the amount of packet loss caused. Therefore, as long as it is ensured that each packet is not sent to multiple places, privacy should be maintained at a similar level to simple Internet access.
% ----------------------------- Threat Model ------------------------------- %
\section{Threat Model}
\label{section:threat-model}
In this section, we discuss a set of threats that expose the risk discussed in section \ref{section:risk-analysis}.
\subsection{Stealing Packets}
This section focuses on an attacker that exploits the connection between the Local Portal and Remote Portal to prevent packets sent from one side arriving at the other. Recall, as stated in the Risk Analysis section, that this is high risk - taking packets causes significant packet loss and thus effectively denies service for loss based congestion control mechanisms.
Methods such as cutting cables, which would apply equally without this solution, are excluded. This solution prioritises resilience in such a case, and would lose fewer packets than either connection by itself, so the risk is less than or equal to that of a solitary connection.
\subsubsection{Unauthenticated Flows}
If the flows provide no additional authentication, it is trivial for an attacker to create a flow of their own and connect.
\subsubsection{Reflection Attacks}
\label{section:reflection-attacks}
An attack vector for someone attempting to read but not write packets is a reflection attack: an attack in which the attacker passes a challenge it has been given back to the issuer, or to another party holding the same key, in order to obtain the correct challenge response \citep[pp. 76-78]{anderson_security_2008}.
\begin{align*}
A \longrightarrow M &: N \\
M \longrightarrow A &: N \\
A \longrightarrow M &: \{N\}_k \\
M \longrightarrow A &: \{N\}_k
\end{align*}
An attacker, $M$, can use a reflection attack to begin a flow using a shared key system for authentication. This would cause the far side, in the above exchange $A$, to trust $M$ for the remainder of their flow. $M$ would then receive a portion of the packets dispatched by $A$, as a function of $M$'s bandwidth over the total bandwidth of $A$'s receivers.
\subsection{Integrity Attacks}
These threats revolve around an actor sending packets of their own design to one of the portals, such that the portal proxies them. Aimed at the Local Portal, this is a non-issue (anyone could achieve the same by simply sending a packet to the Remote Portal's public interface), so this section focuses on a bad actor sending packets outbound via the Remote Portal.
\subsubsection{Simple Packets with Unauthenticated Flows}
If packets are sent with no additional security data, using either TCP, UDP, or raw IP, it is trivial for a new attacker to create a flow and generate the correct structure to send their own packets.
\subsubsection{Simple Packets with Authenticated Flows}
If packets are sent with no additional security data, but the flows are authenticated, sending packets becomes more complex. However, the threat is still very much open: using a Man in the Middle attack and layer 4 knowledge, one can inject packets into an existing flow. With TCP or UDP packets and an equal length tunnelled packet, it is simply a case of swapping the packet contents and correcting the checksums.
\subsection{Replay Attacks}
An attacker having the ability to cause the Remote Portal to resend a packet relates to the Cost section of the Risk Analysis given above. As each packet forwarded has an essentially fixed cost, repeating these packets one or many times directly costs the connection's owner. This threat exists if a message does not successfully guarantee freshness.
\subsubsection{Man in the Middle}
This threat is based on an attacker wishing to force cost upon the victim. In the example layout given in figure \ref{fig:mitm-middlebox}, the middlebox can sniff packets from somewhere on the path(s) between the Local Portal and the Remote Portal. In this case, by replaying captured packets, the middlebox could increase the traffic leaving the Remote Portal by a factor of 100 at full load, or even more at lower load, increasing the cost by a factor of 100 or more.
\begin{figure}
\centering
\includegraphics[width=12cm]{Middlebox.png}
\caption{A middlebox placed to perform a repeating packet man in the middle attack between a local and remote portal}
\label{fig:mitm-middlebox}
\end{figure}
% ------------------------------- Security --------------------------------- %
\section{Security}
\label{section:preparation-security}
This section provides means of confronting the threats given in section \ref{section:threat-model}, in order to alleviate the additional risk of proxying traffic.
\subsection{Message Authentication}
To provide integrity and authentication for each message, I evaluate two choices: Message Authentication Codes (MACs) and digital signatures. A MAC combines the data with a shared key using a specific method, before using a one-way hash function to generate a message authentication code; the result is therefore only verifiable by someone with the same shared key \citep[pp. 352]{menezes_handbook_1997}. A digital signature is produced using the private key of a public/private keypair, proving that the message was produced by the owner of the private key, and can be verified by anyone with the public key \citep[pp. 147-149]{anderson_security_2008}. In both cases, the authentication data is appended to the message, such that the integrity and authenticity of the message can be verified.
The comparison is as follows: signatures provide non-repudiation, while MACs do not - one can know which private key signed a message, while anyone holding the shared key could have produced a MAC for a message. Secondly, digital signatures are much more computationally complex than MACs. Given that both ends of the connection are controlled by the same party, non-repudiation is unnecessary, and thus MACs are the message authentication method of choice for this project.
\subsection{IP Authentication Header}
The security requirements for this project are equivalent to those provided by the IP Authentication Header \citep{kent_ip_2005}. The IP authentication header operates between IP and the transport layer, using IP protocol number 51. The authentication header uses a hash function and a secret shared key to provide an Integrity Check Value. This check value covers all immutable parts of the IP header, the authentication header itself, and the data below the authentication header. Combined, this provides connectionless integrity and authenticity, as the IP header is authenticated. Further, the header contains a sequence number, which is used to prevent replay attacks.
Unfortunately, there are two reasons why this solution cannot be used: difficulties with NAT traversal, and inaccessibility for user-space programs. As the authentication header provides integrity for the source and destination addresses, any NAT that alters these addresses violates the integrity of the packet. Although NAT traversal is not an explicit success criterion for this project, it is implicit, as flexibility across different network structures is a priority, including those where NAT is unavoidable. The second reason is that IP authentication headers, being an IP protocol rather than a transport layer protocol, would cause issues interacting with user-space programs. Given that the first implementation of transport is completed using TCP, using IP Authentication Headers would require the user-space program to handle the TCP connection without the aid of the kernel, complicating multiplexing and resulting in an unsupported setup.
Overall, using the IP authentication header would function similarly to running over a VPN, described in section \ref{section:layered-security}. Although this will be a supported configuration, the shortfalls mean that it will not be the base implementation. However, inspiration can be taken from the header structure, shown in figure \ref{fig:ip-auth-header-structure}.
\begin{figure}
\centering
\begin{bytefield}[bitwidth=0.8em]{32}
\bitheader{0-31} \\
\bitbox{8}{Next Header} & \bitbox{8}{Payload Len} & \bitbox{16}{Reserved} \\
\wordbox{1}{Security Parameters Index} \\
\wordbox{1}{Sequence Number} \\
\wordbox[tlr]{1}{Integrity Check Value} \\
\wordbox[blr]{1}{$\cdots$}
\end{bytefield}
\caption{IP authentication header structure}
\label{fig:ip-auth-header-structure}
\end{figure}
It is first important to note the differences between the use of IP authentication headers and the security footers used in this application. Firstly, the Next Header field is unnecessary, given that headers are not being chained. Secondly, given that the portals share a fixed, statically configured security configuration, the Payload Length field is unnecessary: the payloads will always be of a predetermined length. Similarly, the Security Parameters Index is unnecessary, as the parameters will be equal.
The difference in security arises from the lack of integrity given to the fields above the application layer. That is, the IP header itself, and the TCP or UDP header. However, there is an important distinction between the TCP and UDP cases: TCP congestion control will not be covered by any application provided security, while the UDP congestion control will. That is, this application can do nothing to authenticate the ACKs of a TCP connection, as these are created outside of the control of the application. As such, the TCP implementation provided by the solution should be used in one of two ways: as a baseline test for the performance of other algorithms, or taking advantage of layered security as given in section \ref{section:layered-security}. The rest of this section will therefore focus on securing the UDP transport.
Further differences arising from the lack of integrity above the application layer still apply to UDP transport. Although the congestion control layer and therefore packet flow is authenticated, the source and destination of packets are not.
\subsubsection{Initial Exchange}
An initial exchange at the beginning of a flow must take place for the security of this application. However, one must be wary of the implications of reflection attacks, mentioned in section \ref{section:reflection-attacks}. Given is the chosen authentication exchange for this project:
\begin{align*}
A \longrightarrow B &: \{N_1, A, T_0\}_k \\
B \longrightarrow A &: \{N_1, A, N_2, B, T_1\}_k \\
A \longrightarrow B &: \{A, B, N_2, T_2\}_k
\end{align*}
The initial message from $A$ to $B$ comprises the following: a nonce, $A$'s identity, and the current time. The nonce is a number used once, ensuring that the initiation message is fresh. $A$ includes its identity to prevent reflection attacks - if $B$ is talking to $C$ and yet the message includes $A$'s identity, it can discard the message. The timestamp is used to cheaply discard messages that are not fresh, and to reduce the storage of nonces, as only nonces within the recency limit need be stored. Further, the timestamp places a time limit on the exchange, allowing flows that have not completed it to be terminated after a fixed time. Finally, the MAC of the message is included, demonstrating that $A$ either possesses the correct key or is replaying a previous message.
The second message is a similarly formatted response. The response from $B$ to $A$ includes $A$'s chosen nonce and $A$'s identity. Then, $B$ chooses a nonce and includes its identity. The current timestamp is again appended, for the same purpose. This demonstrates that $B$ can produce a MAC for the arbitrary nonce $A$ has selected, knows that it is speaking to $A$, knows that it is $B$, and provides a nonce of its own choice for $A$ to demonstrate their ability to authenticate.
$B$ has multiple checks to perform before replying to $A$'s request. If the message is not sufficiently fresh based on its timestamp, $B$ immediately discards it, at very little computational expense. Secondly, the MAC can be verified, and if it's not correct, the flow discarded. Finally, the identity of $A$ can be verified against the identifier that it has provided in the message. It is chosen that the identifier $A$ provides should be either the IP address through which it contacts $B$, or an FQDN that resolves to that. This allows a factor of the connection to be compared to the identity, confirming that the details of the connection align with the provided identity. This demonstrates the cryptographic principle of \emph{silence is a virtue}.
Finally, $A$ responds to $B$ with both their identities, $B$'s chosen nonce and the current time. Prior to sending this message, $A$ can be confident that $B$'s identity is correct. $A$ performs the same checks as $B$ did previously before responding, terminating the flow in the case of a bad response. The timestamp and MAC are checked, the values from the previous message are verified to be equal, and $B$'s identity is compared to that expected. Once these are verified, $A$ is confident that it is talking to $B$, so it responds with enough information to confirm to $B$ that it knows who it is talking to: $B$'s nonce, along with a fresh timestamp, all authenticated with the shared key. This concludes the exchange.
It is possible to create shorter cryptographic exchanges, but for this project it is not necessary. The flows generated by this project are very long lived, and as such, the constant length of the initial exchange is amortised. Therefore, keeping the exchange clear and simple is reasonable.
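As an illustration of the first message and the checks $B$ performs on it, a sketch is given below. The field sizes, ordering and helper names are assumptions made for the example, not the wire format of the implementation.
\begin{minted}{go}
// An illustrative sketch of the first handshake message {N1, A, T0}_k and
// the checks B performs. Field sizes, ordering and helpers are assumptions.
package handshake

import (
    "bytes"
    "crypto/hmac"
    "crypto/rand"
    "encoding/binary"
    "time"
)

const nonceLen, tsLen, macLen = 16, 8, 32

// buildInit concatenates A's nonce, identity and the current time, then
// appends a MAC computed over the whole message with the shared key.
func buildInit(identityA, key []byte, mac func(msg, key []byte) []byte) ([]byte, error) {
    nonce := make([]byte, nonceLen)
    if _, err := rand.Read(nonce); err != nil {
        return nil, err
    }
    ts := make([]byte, tsLen)
    binary.BigEndian.PutUint64(ts, uint64(time.Now().Unix()))
    msg := append(append(nonce, identityA...), ts...)
    return append(msg, mac(msg, key)...), nil
}

// checkInit performs B's checks in cost order: freshness first (cheapest),
// then the MAC, and finally that the claimed identity is the expected one.
func checkInit(data, key, expectedIdentity []byte, maxAge time.Duration,
    mac func(msg, key []byte) []byte) bool {
    if len(data) != nonceLen+len(expectedIdentity)+tsLen+macLen {
        return false
    }
    msg, tag := data[:len(data)-macLen], data[len(data)-macLen:]
    ts := int64(binary.BigEndian.Uint64(msg[len(msg)-tsLen:]))
    if time.Since(time.Unix(ts, 0)) > maxAge {
        return false
    }
    if !hmac.Equal(tag, mac(msg, key)) {
        return false
    }
    return bytes.Equal(msg[nonceLen:len(msg)-tsLen], expectedIdentity)
}
\end{minted}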
\subsubsection{Message Passing}
After authentication, each exchange is then a simple case of providing authenticity for each packet sent. This is as follows, in both directions:
\begin{align*}
A \longrightarrow B &: \{P_0, T_0\}_k \\
B \longrightarrow A &: \{P_1, T_1\}_k
\end{align*}
This simplicity in messages is chosen to provide a good balance of freshness and per-packet efficiency. Further, it is a composable system. As this project supports multiple transport mechanisms, the $P_0$ can be expanded as necessary, to provide integrity, authenticity and freshness to additional data. An example of this is in figure \ref{fig:udp-packet-structure}, where $P$ includes the congestion control of each message, preventing attacks that would require modifying the congestion control header. However, the freshness guarantee provided by the timestamp here is too weak for some attacks, so is discussed in the next section.
\subsection{Replay Attacks}
\label{section:preparation-repeated-packets}
Although a timestamp is included with each packet, the time delay between the packet being dispatched by one side and received by the other is significant. As this is the case, there must be significant flexibility in how old a received packet can be - chosen to be 5 seconds. An attacker, as pictured in figure \ref{fig:mitm-middlebox}, could therefore send a number of packets only limited by their own bandwidth, if they can gain a fresh packet at least once every 5 seconds.
To avoid this case, additional measures must be taken to avoid proxying repeated packets. The solution I have chosen is the \emph{IPsec Anti-Replay Algorithm without Bit Shifting} \citep{tsou_ipsec_2012}, employed in Wireguard \citep{donenfeld_wireguard_2017}. The implementation of this is given in section \ref{section:implementation-repeated-packets}.
The sliding window technique requires each packet to have a strictly increasing sequence number. This takes advantage of the composable structure mentioned above - the sequence number can be placed within the packet sent. The sequence number here must be globally unique within the connection, and thus is not equivalent to the independent sequence number of TCP or UDP flows. This is similar to the issue given in congestion control for multipath TCP, where a second sequence number must be added, named the data sequence number. The data sequence number provides a separation between the loss control of individual subflows and the data transfer of the flow as a whole \citep[pp. 11]{wischik_design_2011}.
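A simplified sketch of such a windowed replay check over data sequence numbers is given below; it illustrates the idea rather than reproducing the exact bit-shift-free algorithm of the cited RFC.
\begin{minted}{go}
// A simplified sliding-window replay check over data sequence numbers.
// Illustrative only; not the exact bit-shift-free algorithm of the RFC.
package replay

const windowSize = 1024 // tracked sequence numbers; must be a multiple of 64

type Window struct {
    highest uint64                  // highest sequence number accepted so far
    bitmap  [windowSize / 64]uint64 // seen/unseen bits for the current window
}

// Check returns true if seq is fresh (and records it), or false if seq has
// been seen before or falls too far behind the highest accepted number.
func (w *Window) Check(seq uint64) bool {
    if seq+windowSize <= w.highest {
        return false // too old to track: reject
    }
    if seq > w.highest {
        if seq-w.highest >= windowSize {
            // Jumped past the entire window: nothing previously seen remains.
            w.bitmap = [windowSize / 64]uint64{}
        } else {
            // Clear the bits belonging to the skipped sequence numbers.
            for s := w.highest + 1; s <= seq; s++ {
                w.bitmap[(s/64)%(windowSize/64)] &^= 1 << (s % 64)
            }
        }
        w.highest = seq
    }
    idx, bit := (seq/64)%(windowSize/64), uint64(1)<<(seq%64)
    if w.bitmap[idx]&bit != 0 {
        return false // replay
    }
    w.bitmap[idx] |= bit
    return true
}
\end{minted}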
\subsection{Layered Security}
\label{section:layered-security}
It was previously mentioned that this solution focuses on providing transparent security for the proxied packets. Further to this, this solution provides transparent security in the other direction. Consider the case of a satellite office that employs both a whole network corporate VPN and this solution. The network can be configured in each of two cases: the multipath proxy runs behind the VPN, or the VPN runs behind the multipath proxy.
These two examples are given in figures \ref{fig:whole-network-vpn-behind} and \ref{fig:whole-network-vpn-infront}, for the VPN Wireguard \citep{donenfeld_wireguard_2017}. In this setup, it is assumed that the portals are only accessible via the VPN protected network. It can be seen that the packet in figure \ref{fig:whole-network-vpn-infront} is shorter, given the removal of the message authentication code and the data sequence number. The data sequence number is unnecessary, given that Wireguard uses the same anti-replay algorithm, and thus replayed packets would have been caught entering the secure network. Further, the message authentication code is unnecessary, as the authenticity of packets is now guaranteed by Wireguard.
Supporting and encouraging this layering of protocols provides a second benefit: if the security in this solution breaks with time, there are two options to repair it. One can either fix the open source application, or compose it with a security solution that is not broken, but perhaps provides extraneous security guarantees and therefore causes reduced performance. To this end, the security features mentioned will all be configurable. This allows for flexibility in implementation.
\begin{figure}
\begin{leftfullpage}
\centering
\begin{bytefield}[bitwidth=0.6em]{32}
\bitheader{0-31} \\
\wordbox[tlr]{1}{IPv4 Header} \\
\wordbox[blr]{1}{$\cdots$} \\
\begin{rightwordgroup}{UDP\\Header}
\bitbox{16}{Source port} & \bitbox{16}{Destination port} \\
\bitbox{16}{Length} & \bitbox{16}{Checksum}
\end{rightwordgroup} \\
\begin{rightwordgroup}{CC\\Header}
\bitbox{32}{Acknowledgement number} \\
\bitbox{32}{Negative acknowledgement number} \\
\bitbox{32}{Sequence number}
\end{rightwordgroup} \\
\begin{rightwordgroup}{Proxied\\Wireguard\\Packet}
\wordbox[tlr]{1}{IPv4 Header} \\
\wordbox[blr]{1}{$\cdots$} \\
\begin{leftwordgroup}{UDP Header}
\bitbox{16}{Source port} & \bitbox{16}{Destination port} \\
\bitbox{16}{Length} & \bitbox{16}{Checksum}
\end{leftwordgroup} \\
\begin{leftwordgroup}{Wireguard\\Header}
\bitbox{8}{type} & \bitbox{24}{reserved} \\
\wordbox{1}{receiver} \\
\wordbox{2}{counter}
\end{leftwordgroup} \\
\wordbox[tlr]{1}{Proxied IP packet} \\
\skippedwords\\
\wordbox[blr]{1}{}
\end{rightwordgroup} \\
\begin{rightwordgroup}{Security\\Footer}
\bitbox{32}{Data sequence number} \\
\wordbox[tlr]{1}{Message authentication code} \\
\wordbox[blr]{1}{$\cdots$}
\end{rightwordgroup}
\end{bytefield}
\caption{A Wireguard client behind the multipath proxy.}
\label{fig:whole-network-vpn-behind}
\end{leftfullpage}
\end{figure}
\begin{figure}
\begin{fullpage}
\centering
\begin{bytefield}[bitwidth=0.6em]{32}
\bitheader{0-31} \\
\wordbox[tlr]{1}{IPv4 Header} \\
\wordbox[blr]{1}{$\cdots$}\\
\begin{rightwordgroup}{UDP\\Header}
\bitbox{16}{Source port} & \bitbox{16}{Destination port} \\
\bitbox{16}{Length} & \bitbox{16}{Checksum}
\end{rightwordgroup} \\
\begin{rightwordgroup}{Wireguard\\Header}
\bitbox{8}{type} & \bitbox{24}{reserved} \\
\wordbox{1}{receiver} \\
\wordbox{2}{counter}
\end{rightwordgroup} \\
\begin{rightwordgroup}{Tunnelled\\Proxy\\Packet}
\wordbox[tlr]{1}{IPv4 Header} \\
\wordbox[blr]{1}{$\cdots$}\\
\begin{leftwordgroup}{UDP Header}
\bitbox{16}{Source port} & \bitbox{16}{Destination port} \\
\bitbox{16}{Length} & \bitbox{16}{Checksum}
\end{leftwordgroup} \\
\begin{leftwordgroup}{CC\\Header}
\bitbox{32}{Acknowledgement number} \\
\bitbox{32}{Negative acknowledgement number} \\
\bitbox{32}{Sequence number}
\end{leftwordgroup} \\
\wordbox[tlr]{1}{Proxied IP packet} \\
\skippedwords\\
\wordbox[blr]{1}{}
\end{rightwordgroup}
\end{bytefield}
\caption{A Wireguard client in front of the multipath proxy.}
\label{fig:whole-network-vpn-infront}
\end{fullpage}
\end{figure}
% -------------------------- Language Selection ---------------------------- %
\section{Language Selection}
\label{section:language-selection}
In this section, I evaluate three potential languages (C++, Rust and Go) for the implementation of this software. To support this evaluation, I have provided a sample program in each language. The sample program is intended to be a minimal example of reading packets from a TUN interface, placing them in a queue from a single thread, and consuming the packets from the queue with multiple threads. These examples are given in figures \ref{fig:cpp-tun-sample} through \ref{fig:go-tun-sample}. The primary considerations are the performance of the language, the clarity of code written in the style this software requires, and the language's ecosystem. This culminates in choosing Go as the implementation language.
Alongside the implementation language, a language is chosen to evaluate the implementation. Two potential languages are considered here, Python and Java. Though Python was initially chosen for rapid development and better ecosystem support, the final result is a combination of both Python and Java - Python for data processing, and Java for systems interaction.
\subsection{Implementation Languages}
\subsubsection{C++}
There are two primary advantages to completing this project in C++: speed of execution, and the language being low level enough to achieve the project's goals. The negatives of using C++ are demonstrated in the sample script, given in figure \ref{fig:cpp-tun-sample}, where it is immediately obvious that, to achieve even the base of this project, the C++ code is multiple times the length of the equivalent code in either Rust or Go, at 93 lines compared to 34 for Rust and 48 for Go. This difference arises from the need to manually implement the required thread-safe queue, which is available as a library for both Rust and Go and can be handled by the respective package managers. This manual implementation gives rise to an additional risk of incorrect implementation, particularly with regard to thread safety, which could cause undefined behaviour and great difficulty in debugging.
The lack of memory safety in C++ is a further significant drawback. Although C++ would provide increased performance over a language with a runtime such as Go, it is avoided due to the significant incidental complexity of manual memory management and the difficulty of manual thread safety.
\subsubsection{Rust}
Rust is memory safe and thread safe, solving both of these issues with C++. Rust also has no runtime, allowing for execution speed comparable to C or C++. The Rust sample is given in figure \ref{fig:rust-tun-sample}, and it is pleasantly concise.
For the purposes of this project, the downsides of Rust come from its relative youth. These are two-faceted: IDE support and crate stability. Firstly, IDE support for Rust in my IDEs of choice is provided via a plugin to IntelliJ, and is not as mature as that for many other languages. Secondly, the crate available for TUN support (tun-tap\footnote{\url{https://docs.rs/tun-tap/}}) does not yet provide a stable API, which became apparent even during the development of this test program. Between writing the program initially and re-testing it to place in this document, the API of the crate had changed to the point where my program no longer type checked. Further, the old version had disappeared, leaving me with a program that neither compiled nor functioned. Although writing my own TUN interaction layer would not be an issue, the safety benefits of Rust would be less pronounced, as the direct systems interaction would require unsafe code, leading to an increased potential for bugs.
\subsubsection{Go}
The final language to evaluate is Go, often written as Golang. The primary difference between Go and the other two languages evaluated is the presence of a runtime. Regardless, it is the language of choice for this project, with a sample provided in figure \ref{fig:go-tun-sample}. Go is significantly higher level than the other two languages, and provides a memory management model that is both simpler than C++'s and more conventional than Rust's.
For the greedy structure of this project, Go's focus on concurrency is extremely beneficial. Go provides channels in the standard runtime, which support any number of producers and consumers. In this project, both SPMC (Single Producer Multi Consumer) and MPSC (Multi Producer Single Consumer) queues are required, so having these available as a first-class feature of the language is beneficial.
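To illustrate, the following is a minimal, self-contained sketch of how a single Go channel can serve as either an MPSC or an SPMC queue. It is not taken from the project code; the names, counts and buffer sizes are purely illustrative.
\begin{minted}{go}
package main

import (
	"fmt"
	"sync"
)

func main() {
	// MPSC: several producers feed one channel; a single consumer drains it.
	mpsc := make(chan int, 16)
	var producers sync.WaitGroup
	for p := 0; p < 3; p++ {
		producers.Add(1)
		go func(id int) {
			defer producers.Done()
			for i := 0; i < 4; i++ {
				mpsc <- id*10 + i
			}
		}(p)
	}
	// Close the channel once every producer has finished.
	go func() {
		producers.Wait()
		close(mpsc)
	}()
	for v := range mpsc {
		fmt.Println("consumed", v)
	}

	// SPMC: one producer feeds the channel; several consumers compete for items.
	spmc := make(chan int, 16)
	var consumers sync.WaitGroup
	for c := 0; c < 3; c++ {
		consumers.Add(1)
		go func() {
			defer consumers.Done()
			for v := range spmc {
				fmt.Println("worker received", v)
			}
		}()
	}
	for i := 0; i < 12; i++ {
		spmc <- i
	}
	close(spmc)
	consumers.Wait()
}
\end{minted}
The same \mintinline{go}{chan} type covers both shapes; only the number of goroutines on each side changes, which is what makes channels a good fit for the queues this project needs.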
Garbage collection and first-class concurrency come together to make the code produced for this project highly readable. The downside of the runtime is that execution speed is negatively affected. However, for the purposes of this first implementation, that compromise is acceptable. By producing code that makes the functionality of the application clear, future implementations can more easily be built to mirror it. Given the sample of speeds displayed in section (Ref Needed: Introduction Comments on Speed), and the performance shown in section \ref{section:performance-evaluation}, the compromise of using a well-suited high-level language is one worth taking.
\subsection{Evaluation Languages}
\subsubsection{Python}
Python is a dynamically typed language, and was chosen as the initial language for the evaluation suite. The first reason for this is \verb'matplotlib'\footnote{\url{https://matplotlib.org/}}, a widely used graphing library that can produce the graphs needed for this evaluation. The second reason is \verb'proxmoxer'\footnote{\url{https://github.com/proxmoxer/proxmoxer}}, a fluent API for interacting with a Proxmox server.
Having the required modules available allowed for a swift initial development sprint, which showed that the method of evaluation was viable and effective. However, the requirements of the evaluation changed with the growth of the software, and an important part of an agile process is adapting to changing requirements. The lack of static typing limits the refactorability of Python, which becomes increasingly challenging as the project grows. Therefore, after the initial proof of concept, it became necessary to explore another language for the Proxmox interaction.
\subsubsection{Java}
Java is statically typed, and became the implementation language for all external interaction. One of the initial reasons for not choosing Java was the lack of an equivalent library to \verb'proxmoxer'. Although two libraries for interacting with Proxmox are available for Java, one was released under an incompatible license, and the other does not have adequate type safety. To develop in Java, I would therefore need to write my own Proxmox library. However, after the initial development in Python, it became clear that this was a valuable use of time, and thus development began. Having learnt from the initial development in Python, a clear path to producing a type-safe Proxmox API library in Java was available.
However, as Python is an extremely popular language for data processing, the solution was not to use Java alone. Given that the graphing already existed in Python and worked perfectly well, a combined solution was chosen: Java gathers the data, and Python processes it.
% ------------------------- Requirements Analysis -------------------------- %
\section{Requirements Analysis}
\label{section:requirements-analysis}
The requirements of the project are detailed in the Success Criteria of the Project Proposal (Appendix \ref{appendix:project-proposal}), and are the primary method of evaluation for project success. They are split into three categories: success criteria, extended goals and stretch goals.
These three categories can be summarised as follows. The success criteria, or must-have elements, are to provide a multi-path proxy that is functional, secure, and improves speed and resilience in specific cases. The extended goals, or should-have elements, focus on increasing the performance and flexibility of the solution. The stretch goals, or could-have elements, aim to increase performance further by reducing overheads, and to support IPv6 alongside IPv4.
% ------------------------- Engineering Approach --------------------------- %
\section{Engineering Approach}
\label{section:engineering-approach}
\subsubsection{Software Development Model}
The development of this software followed an agile methodology. Work was organised into 2--7 day sprints, each aiming to increase the functionality of the software. By focusing on sufficient but not excessive planning, a minimum viable product was quickly established. From there, the remaining features could be delivered in appropriately sized increments. Examples of these sprints are: the initial build, including configuration, TUN adapters and the main program; TCP transport, enabling an end-to-end connection between the two parts; repeatable testing, providing the data to evaluate each iteration of the project against its success criteria; and UDP transport, for performance and control.
One of the most important features of any agile methodology is welcoming changing requirements \citep{beck_manifesto_2001}. As the project grew, it became clear where shortcomings existed, and these could be fixed in short sprints. An example is given in figure \ref{fig:changing-requirements}, in which the type of a variable was changed from \mintinline{go}{string} to \mintinline{go}{func() string}. This allowed for lazy evaluation once it became clear that configuring fixed IP addresses or DNS names could be impractical with certain setups. The static typing of the chosen language enables refactors like this to be completed with ease, particularly with the development tools mentioned in the next section, reducing the incidental complexity of the agile methodology.
\begin{figure}
\centering
\begin{subfigure}[t]{0.45\textwidth}
\centering
\inputminted{go}{Preparation/Samples/string.go}
\caption{The structure with a fixed local address.}
\end{subfigure}
\begin{subfigure}[t]{0.45\textwidth}
\centering
\inputminted{go}{Preparation/Samples/funcstring.go}
\caption{The structure with a dynamic local address.}
\end{subfigure}
\caption{An example of refactoring for changing requirements.}
\label{fig:changing-requirements}
\end{figure}
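As a minimal sketch of the kind of change shown in figure \ref{fig:changing-requirements}, the struct and field names below are illustrative and are not the project's actual definitions.
\begin{minted}{go}
package config

// Before: the local address is fixed when the configuration is loaded.
type peerBefore struct {
	LocalAddr string
}

// After: the local address is a function, so it is resolved lazily each
// time it is needed, allowing DNS names or dynamically assigned addresses
// to be looked up at use rather than at startup.
type peerAfter struct {
	LocalAddr func() string
}

// newStaticPeer adapts a fixed address to the lazy form, so existing
// static configurations keep working after the refactor.
func newStaticPeer(addr string) peerAfter {
	return peerAfter{LocalAddr: func() string { return addr }}
}
\end{minted}
A caller that previously read \mintinline{go}{peer.LocalAddr} now calls \mintinline{go}{peer.LocalAddr()}, deferring the lookup to the moment the address is actually used.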
\subsubsection{Development Tools}
A large part of the language choice focused on development tools. As discussed in section \ref{section:language-selection}, IDE support was important to me. Given that my preferred IDEs are those supplied by JetBrains\footnote{\url{https://jetbrains.com/}}, generously provided for education and academic research free of charge, I used GoLand for the Go development of this project, and PyCharm for the Python evaluation program. Using an intelligent IDE, particularly with the statically typed Go, significantly increases my productivity as a programmer, and thus reduces incidental complexity.
I used Git for version control, with a self-hosted Gitea\footnote{\url{https://gitea.com/}} server as the remote. My repositories have a multitude of on- and off-site backups taken at varying frequencies (2x USB, 2x distinct cloud storage providers, a NAS, and multiple computers).
Alongside my self-hosted Gitea server, I have a self-hosted Drone by Harness\footnote{\url{http://drone.io/}} server for continuous integration. This made it simple to add a Drone file to the repository, allowing the Go tests to be run, formatting to be verified, and artefacts to be built. On a push, after this verification, each artefact is built and uploaded to a central repository, where it is stored under the branch name. This is particularly useful for automated testing, as the relevant artefact can be downloaded automatically from a known location for the branch under test. Further, artefacts can be built for multiple architectures, which is particularly useful when performing real-world testing spread between x86\_64 and ARMv7 architectures.
\subsubsection{Licensing}
I have chosen to license this software under the MIT license. The MIT license is simple and permissive, enabling reuse and modification of the code, subject only to including the license text. Alongside the hope that the code will receive pull requests over time, a permissive license allows others to build upon the given solution. A potential example of a solution that could build on this is a company employing a SaaS (Software as a Service) model to configure remote portals on a customer's behalf, perhaps including the hardware required to convert this fairly involved solution into a plug-and-play option.
% ---------------------------- Starting Point ------------------------------ %
\section{Starting Point}
I had significant experience with the Go language before the start of this project, though it was not formally taught. My knowledge of networking was limited to that of a user, plus the content of the Part IB Tripos courses \emph{Computer Networking} and \emph{Principles of Communication} (the latter given after the start of this project). The security analysis drew from the Part IA course \emph{Software and Security Engineering} and the Part IB course \emph{Security}. As the software is highly concurrent, the Part IB course \emph{Concurrent and Distributed Systems} and the Part II Unit of Assessment \emph{Multicore Semantics and Programming} were also applied.
% -------------------------------- Summary --------------------------------- %
\section{Summary}
Security is a large part of this project, perhaps larger than the single success criterion suggests. This preparation has led to two clear security concepts: the system must be adaptable in code, and flexible in deployment. Being adaptable allows more options to be provided in the future, while deployment flexibility allows the solution to better fit into networks with special security requirements.
Go has a concurrency structure excellently suited to this project, and its large standard library reduces incidental complexity. Using a high-level language allows for more readable code, which future implementations in a lower-level language could build upon. The structure of this project suggests a large initial program base, from which further features can be merged to reach the success criteria.

View File

@ -1,7 +0,0 @@
% ************************** Thesis Proforma **************************
\begin{proforma}
TODO
\end{proforma}

View File

@ -1,86 +1,64 @@
@inproceedings{donenfeld_wireguard_2017,
address = {San Diego, CA},
title = {{WireGuard}: {Next} {Generation} {Kernel} {Network} {Tunnel}},
isbn = {978-1-891562-46-4},
shorttitle = {{WireGuard}},
url = {https://www.ndss-symposium.org/ndss2017/ndss-2017-programme/wireguard-next-generation-kernel-network-tunnel/},
doi = {10.14722/ndss.2017.23160},
abstract = {WireGuard is a secure network tunnel, operating at layer 3, implemented as a kernel virtual network interface for Linux, which aims to replace both IPsec for most use cases, as well as popular user space and/or TLS-based solutions like OpenVPN, while being more secure, more performant, and easier to use. The virtual tunnel interface is based on a proposed fundamental principle of secure tunnels: an association between a peer public key and a tunnel source IP address. It uses a single round trip key exchange, based on NoiseIK, and handles all session creation transparently to the user using a novel timer state machine mechanism. Short pre-shared static keys—Curve25519 points—are used for mutual authentication in the style of OpenSSH. The protocol provides strong perfect forward secrecy in addition to a high degree of identity hiding. Transport speed is accomplished using ChaCha20Poly1305 authenticated-encryption for encapsulation of packets in UDP. An improved take on IP-binding cookies is used for mitigating denial of service attacks, improving greatly on IKEv2 and DTLSs cookie mechanisms to add encryption and authentication. The overall design allows for allocating no resources in response to received packets, and from a systems perspective, there are multiple interesting Linux implementation techniques for queues and parallelism. Finally, WireGuard can be simply implemented for Linux in less than 4,000 lines of code, making it easily audited and verified.},
@misc{postel_user_1980,
title = {User {Datagram} {Protocol}},
url = {https://tools.ietf.org/html/rfc768},
language = {en},
urldate = {2020-11-19},
booktitle = {Proceedings 2017 {Network} and {Distributed} {System} {Security} {Symposium}},
publisher = {Internet Society},
author = {Donenfeld, Jason A.},
year = {2017},
file = {Donenfeld - 2017 - WireGuard Next Generation Kernel Network Tunnel.pdf:/home/jake/Zotero/storage/6MEQYC9J/Donenfeld - 2017 - WireGuard Next Generation Kernel Network Tunnel.pdf:application/pdf},
urldate = {2021-03-01},
author = {Postel, J.},
month = aug,
year = {1980},
file = {Snapshot:/home/jake/Zotero/storage/5N2EGXIC/rfc768.html:text/html},
}
@inproceedings{hacker_effects_2002,
title = {The {Effects} of {Systemic} {Packet} {Loss} on {Aggregate} {TCP} {Flows}},
doi = {10.1109/SC.2002.10029},
abstract = {The use of parallel TCP connections to increase throughput for bulk transfers is common practice within the high performance computing community. However, the effectiveness, fairness, and efficiency of data transfers across parallel connections is unclear. This paper considers the impact of systemic non-congestion related packet loss on the effectiveness, fairness, and efficiency of parallel TCP transmissions. The results indicate that parallel connections are effective at increasing aggregate throughput, and increase the overall efficiency of the network bottleneck. In the presence of congestion related losses, parallel flows steal bandwidth from other single stream flows. A simple modification is presented that reduces the fairness problems when congestion is present, but retains effectiveness and efficiency.},
booktitle = {{SC} '02: {Proceedings} of the 2002 {ACM}/{IEEE} {Conference} on {Supercomputing}},
author = {Hacker, T. J. and Noble, B. D. and Athey, B. D.},
month = nov,
year = {2002},
note = {ISSN: 1063-9535},
keywords = {Aggregates, Bandwidth, Biology computing, Computer hacking, Concurrent computing, High performance computing, Internet, Loss measurement, Robustness, Throughput},
pages = {7--7},
file = {IEEE Xplore Full Text PDF:/home/jake/Zotero/storage/GGX3FAK6/Hacker et al. - 2002 - The Effects of Systemic Packet Loss on Aggregate T.pdf:application/pdf;IEEE Xplore Abstract Record:/home/jake/Zotero/storage/F9XVJNZS/1592843.html:text/html},
}
@misc{ofcom_performance_2020,
title = {The performance of fixed-line broadband delivered to {UK} residential customers},
shorttitle = {{UK} {Home} {Broadband} {Performance}},
url = {https://www.ofcom.org.uk/research-and-data/telecoms-research/broadband-research/home-broadband-performance-2019},
abstract = {Our annual home broadband performance report compares how different broadband packages perform, using data from monitors installed on people's broadband routers.},
@article{kohler_designing_nodate,
title = {Designing {DCCP}: {Congestion} {Control} {Without} {Reliability}},
abstract = {DCCP, the Datagram Congestion Control Protocol, is a new transport protocol in the TCP/UDP family that provides a congestion-controlled flow of unreliable datagrams. Delay-sensitive applications, such as streaming media and telephony, prefer timeliness to reliability. These applications have historically used UDP and implemented their own congestion control mechanisms—a difficult task—or no congestion control at all. DCCP will make it easy to deploy these applications without risking congestion collapse. It aims to add to a UDP-like foundation the minimum mechanisms necessary to support congestion control, such as possibly-reliable transmission of acknowledgement information. This minimal design should make DCCP suitable as a building block for more advanced application semantics, such as selective reliability. We introduce and motivate the protocol and discuss some of its design principles. Those principles particularly shed light on the ways TCPs reliable byte-stream semantics influence its implementation of congestion control.},
language = {en},
urldate = {2020-11-21},
journal = {Ofcom},
author = {Ofcom},
month = may,
year = {2020},
file = {Snapshot:/home/jake/Zotero/storage/437YQTVF/home-broadband-performance-2019.html:text/html;2020 - UK home broadband performance, measurement period .pdf:/home/jake/Zotero/storage/HPR3TALB/2020 - UK home broadband performance, measurement period .pdf:application/pdf},
author = {Kohler, Eddie and Handley, Mark and Floyd, Sally},
pages = {12},
file = {Kohler et al. - Designing DCCP Congestion Control Without Reliabi.pdf:/home/jake/Zotero/storage/KMDTAT3J/Kohler et al. - Designing DCCP Congestion Control Without Reliabi.pdf:application/pdf},
}
@article{peng_multipath_2016,
title = {Multipath {TCP}: {Analysis}, {Design}, and {Implementation}},
volume = {24},
issn = {1558-2566},
shorttitle = {Multipath {TCP}},
doi = {10.1109/TNET.2014.2379698},
abstract = {Multipath TCP (MP-TCP) has the potential to greatly improve application performance by using multiple paths transparently. We propose a fluid model for a large class of MP-TCP algorithms and identify design criteria that guarantee the existence, uniqueness, and stability of system equilibrium. We clarify how algorithm parameters impact TCP-friendliness, responsiveness, and window oscillation and demonstrate an inevitable tradeoff among these properties. We discuss the implications of these properties on the behavior of existing algorithms and motivate our algorithm Balia (balanced linked adaptation), which generalizes existing algorithms and strikes a good balance among TCP-friendliness, responsiveness, and window oscillation. We have implemented Balia in the Linux kernel. We use our prototype to compare the new algorithm to existing MP-TCP algorithms.},
number = {1},
journal = {IEEE/ACM Transactions on Networking},
author = {Peng, Q. and Walid, A. and Hwang, J. and Low, S. H.},
month = feb,
year = {2016},
note = {Conference Name: IEEE/ACM Transactions on Networking},
keywords = {Aggregates, Algorithm design and analysis, Asymptotic stability, balanced linked adaptation, Balia algorithm, Computer networks, convergence, Heuristic algorithms, Linux kernel, MP-TCP algorithms, multipath TCP, nonlinear dynamical systems, Oscillators, Stability analysis, TCPIP, transport protocols, Vectors, window oscillation},
pages = {596--609},
file = {IEEE Xplore Full Text PDF:/home/jake/Zotero/storage/9QTMKA3G/Peng et al. - 2016 - Multipath TCP Analysis, Design, and Implementatio.pdf:application/pdf;IEEE Xplore Abstract Record:/home/jake/Zotero/storage/S2L269MS/7000573.html:text/html},
}
@incollection{hutchison_blake2_2013,
address = {Berlin, Heidelberg},
title = {{BLAKE2}: {Simpler}, {Smaller}, {Fast} as {MD5}},
volume = {7954},
isbn = {978-3-642-38979-5 978-3-642-38980-1},
shorttitle = {{BLAKE2}},
url = {http://link.springer.com/10.1007/978-3-642-38980-1_8},
abstract = {We present the hash function BLAKE2, an improved version of the SHA-3 finalist BLAKE optimized for speed in software. Target applications include cloud storage, intrusion detection, or version control systems. BLAKE2 comes in two main flavors: BLAKE2b is optimized for 64-bit platforms, and BLAKE2s for smaller architectures. On 64bit platforms, BLAKE2 is often faster than MD5, yet provides security similar to that of SHA-3: up to 256-bit collision resistance, immunity to length extension, indifferentiability from a random oracle, etc. We specify parallel versions BLAKE2bp and BLAKE2sp that are up to 4 and 8 times faster, by taking advantage of SIMD and/or multiple cores. BLAKE2 reduces the RAM requirements of BLAKE down to 168 bytes, making it smaller than any of the five SHA-3 finalists, and 32\% smaller than BLAKE. Finally, BLAKE2 provides a comprehensive support for tree-hashing as well as keyed hashing (be it in sequential or tree mode).},
@misc{kent_ip_2005,
title = {{IP} {Authentication} {Header}},
url = {https://tools.ietf.org/html/rfc4302},
language = {en},
urldate = {2020-11-28},
booktitle = {Applied {Cryptography} and {Network} {Security}},
publisher = {Springer Berlin Heidelberg},
author = {Aumasson, Jean-Philippe and Neves, Samuel and Wilcox-OHearn, Zooko and Winnerlein, Christian},
editor = {Hutchison, David and Kanade, Takeo and Kittler, Josef and Kleinberg, Jon M. and Mattern, Friedemann and Mitchell, John C. and Naor, Moni and Nierstrasz, Oscar and Pandu Rangan, C. and Steffen, Bernhard and Sudan, Madhu and Terzopoulos, Demetri and Tygar, Doug and Vardi, Moshe Y. and Weikum, Gerhard and Jacobson, Michael and Locasto, Michael and Mohassel, Payman and Safavi-Naini, Reihaneh},
year = {2013},
doi = {10.1007/978-3-642-38980-1_8},
note = {Series Title: Lecture Notes in Computer Science},
pages = {119--135},
file = {Aumasson et al. - 2013 - BLAKE2 Simpler, Smaller, Fast as MD5.pdf:/home/jake/Zotero/storage/ZG25MG4B/Aumasson et al. - 2013 - BLAKE2 Simpler, Smaller, Fast as MD5.pdf:application/pdf},
urldate = {2021-01-29},
author = {Kent, Stephen},
month = dec,
year = {2005},
file = {Snapshot:/home/jake/Zotero/storage/8DTCGSYY/rfc4302.html:text/html},
}
@article{dolev_security_1983,
title = {On the {Security} of {Public} {Key} {Protocols}},
language = {en},
number = {2},
journal = {IEEE TRANSACTIONS ON INFORMATION THEORY},
author = {Dolev, Danny and Yao, Andrew C.},
year = {1983},
pages = {11},
file = {Dolev - 1983 - On the Security of Public Key Protocols.pdf:/home/jake/Zotero/storage/X6DEMNBM/Dolev - 1983 - On the Security of Public Key Protocols.pdf:application/pdf},
}
@misc{beck_manifesto_2001,
title = {Manifesto for {Agile} {Software} {Development}},
url = {http://agilemanifesto.org/},
urldate = {2021-01-29},
author = {Beck, Kent and Beedle, Mike and van Bekkenum, Arie and Cockburn, Alistair and Cunningham, Ward and Fowler, Martin and Grenning, James and Highsmith, Jim and Hunt, Andrew and Jeffries, Ron and Kern, Jon and Marick, Brian and Martin, Robert C. and Mellor, Steve and Schwaber, Ken and Sutherland, Jeff and Thomas, Dave},
year = {2001},
file = {Manifesto for Agile Software Development:/home/jake/Zotero/storage/93M8RQJR/agilemanifesto.org.html:text/html},
}
@book{menezes_handbook_1997,
address = {Boca Raton},
series = {{CRC} {Press} series on discrete mathematics and its applications},
title = {Handbook of applied cryptography},
isbn = {978-0-8493-8523-0},
publisher = {CRC Press},
author = {Menezes, A. J. and Van Oorschot, Paul C. and Vanstone, Scott A.},
year = {1997},
keywords = {Access control Handbooks, manuals, etc, Computers, Cryptography, Handbooks, manuals, etc},
}
@techreport{dworkin_recommendation_2005,
@ -212,7 +190,7 @@
author = {Wischik, Damon and Handley, Mark and Raiciu, Costin},
editor = {Núñez-Queija, Rudesindo and Resing, Jacques},
year = {2009},
keywords = {multipath TCP, congestion control, fluid model, load balancing, resource pooling},
keywords = {congestion control, fluid model, load balancing, multipath TCP, resource pooling},
pages = {204--218},
file = {Springer Full Text PDF:/home/jake/Zotero/storage/3Y23DZS8/Wischik et al. - 2009 - Control of Multipath TCP and Optimization of Multi.pdf:application/pdf},
}
@ -237,44 +215,283 @@ Section: Proceedings on Privacy Enhancing Technologies},
file = {Snapshot:/home/jake/Zotero/storage/IMQSR22L/journals\$002fpopets\$002f2020\$002f4\$002farticle-p69.html:text/html;Full Text PDF:/home/jake/Zotero/storage/H59PHVNZ/Sharma et al. - 2020 - The Road Not Taken Re-thinking the Feasibility of.pdf:application/pdf},
}
@book{menezes_handbook_1997,
address = {Boca Raton},
series = {{CRC} {Press} series on discrete mathematics and its applications},
title = {Handbook of applied cryptography},
isbn = {978-0-8493-8523-0},
publisher = {CRC Press},
author = {Menezes, A. J. and Van Oorschot, Paul C. and Vanstone, Scott A.},
year = {1997},
keywords = {Access control Handbooks, manuals, etc, Computers, Cryptography, Handbooks, manuals, etc},
}
@misc{beck_manifesto_2001,
title = {Manifesto for {Agile} {Software} {Development}},
url = {http://agilemanifesto.org/},
urldate = {2021-01-29},
author = {Beck, Kent and Beedle, Mike and van Bekkenum, Arie and Cockburn, Alistair and Cunningham, Ward and Fowler, Martin and Grenning, James and Highsmith, Jim and Hunt, Andrew and Jeffries, Ron and Kern, Jon and Marick, Brian and Martin, Robert C. and Mellor, Steve and Schwaber, Ken and Sutherland, Jeff and Thomas, Dave},
year = {2001},
file = {Manifesto for Agile Software Development:/home/jake/Zotero/storage/93M8RQJR/agilemanifesto.org.html:text/html},
}
@article{dolev_security_1983,
title = {On the {Security} of {Public} {Key} {Protocols}},
@incollection{hutchison_blake2_2013,
address = {Berlin, Heidelberg},
title = {{BLAKE2}: {Simpler}, {Smaller}, {Fast} as {MD5}},
volume = {7954},
isbn = {978-3-642-38979-5 978-3-642-38980-1},
shorttitle = {{BLAKE2}},
url = {http://link.springer.com/10.1007/978-3-642-38980-1_8},
abstract = {We present the hash function BLAKE2, an improved version of the SHA-3 finalist BLAKE optimized for speed in software. Target applications include cloud storage, intrusion detection, or version control systems. BLAKE2 comes in two main flavors: BLAKE2b is optimized for 64-bit platforms, and BLAKE2s for smaller architectures. On 64bit platforms, BLAKE2 is often faster than MD5, yet provides security similar to that of SHA-3: up to 256-bit collision resistance, immunity to length extension, indifferentiability from a random oracle, etc. We specify parallel versions BLAKE2bp and BLAKE2sp that are up to 4 and 8 times faster, by taking advantage of SIMD and/or multiple cores. BLAKE2 reduces the RAM requirements of BLAKE down to 168 bytes, making it smaller than any of the five SHA-3 finalists, and 32\% smaller than BLAKE. Finally, BLAKE2 provides a comprehensive support for tree-hashing as well as keyed hashing (be it in sequential or tree mode).},
language = {en},
number = {2},
journal = {IEEE TRANSACTIONS ON INFORMATION THEORY},
author = {Dolev, Danny and Yao, Andrew C.},
year = {1983},
pages = {11},
file = {Dolev - 1983 - On the Security of Public Key Protocols.pdf:/home/jake/Zotero/storage/X6DEMNBM/Dolev - 1983 - On the Security of Public Key Protocols.pdf:application/pdf},
urldate = {2020-11-28},
booktitle = {Applied {Cryptography} and {Network} {Security}},
publisher = {Springer Berlin Heidelberg},
author = {Aumasson, Jean-Philippe and Neves, Samuel and Wilcox-OHearn, Zooko and Winnerlein, Christian},
editor = {Hutchison, David and Kanade, Takeo and Kittler, Josef and Kleinberg, Jon M. and Mattern, Friedemann and Mitchell, John C. and Naor, Moni and Nierstrasz, Oscar and Pandu Rangan, C. and Steffen, Bernhard and Sudan, Madhu and Terzopoulos, Demetri and Tygar, Doug and Vardi, Moshe Y. and Weikum, Gerhard and Jacobson, Michael and Locasto, Michael and Mohassel, Payman and Safavi-Naini, Reihaneh},
year = {2013},
doi = {10.1007/978-3-642-38980-1_8},
note = {Series Title: Lecture Notes in Computer Science},
pages = {119--135},
file = {Aumasson et al. - 2013 - BLAKE2 Simpler, Smaller, Fast as MD5.pdf:/home/jake/Zotero/storage/ZG25MG4B/Aumasson et al. - 2013 - BLAKE2 Simpler, Smaller, Fast as MD5.pdf:application/pdf},
}
@misc{kent_ip_2005,
title = {{IP} {Authentication} {Header}},
url = {https://tools.ietf.org/html/rfc4302},
@article{peng_multipath_2016,
title = {Multipath {TCP}: {Analysis}, {Design}, and {Implementation}},
volume = {24},
issn = {1558-2566},
shorttitle = {Multipath {TCP}},
doi = {10.1109/TNET.2014.2379698},
abstract = {Multipath TCP (MP-TCP) has the potential to greatly improve application performance by using multiple paths transparently. We propose a fluid model for a large class of MP-TCP algorithms and identify design criteria that guarantee the existence, uniqueness, and stability of system equilibrium. We clarify how algorithm parameters impact TCP-friendliness, responsiveness, and window oscillation and demonstrate an inevitable tradeoff among these properties. We discuss the implications of these properties on the behavior of existing algorithms and motivate our algorithm Balia (balanced linked adaptation), which generalizes existing algorithms and strikes a good balance among TCP-friendliness, responsiveness, and window oscillation. We have implemented Balia in the Linux kernel. We use our prototype to compare the new algorithm to existing MP-TCP algorithms.},
number = {1},
journal = {IEEE/ACM Transactions on Networking},
author = {Peng, Q. and Walid, A. and Hwang, J. and Low, S. H.},
month = feb,
year = {2016},
note = {Conference Name: IEEE/ACM Transactions on Networking},
keywords = {multipath TCP, Aggregates, Algorithm design and analysis, Asymptotic stability, balanced linked adaptation, Balia algorithm, Computer networks, convergence, Heuristic algorithms, Linux kernel, MP-TCP algorithms, nonlinear dynamical systems, Oscillators, Stability analysis, TCPIP, transport protocols, Vectors, window oscillation},
pages = {596--609},
file = {IEEE Xplore Abstract Record:/home/jake/Zotero/storage/S2L269MS/7000573.html:text/html;IEEE Xplore Full Text PDF:/home/jake/Zotero/storage/9QTMKA3G/Peng et al. - 2016 - Multipath TCP Analysis, Design, and Implementatio.pdf:application/pdf},
}
@misc{ofcom_performance_2020,
title = {The performance of fixed-line broadband delivered to {UK} residential customers},
shorttitle = {{UK} {Home} {Broadband} {Performance}},
url = {https://www.ofcom.org.uk/research-and-data/telecoms-research/broadband-research/home-broadband-performance-2019},
abstract = {Our annual home broadband performance report compares how different broadband packages perform, using data from monitors installed on people's broadband routers.},
language = {en},
urldate = {2021-01-29},
author = {Kent, Stephen},
month = dec,
urldate = {2020-11-21},
journal = {Ofcom},
author = {Ofcom},
month = may,
year = {2020},
file = {2020 - UK home broadband performance, measurement period .pdf:/home/jake/Zotero/storage/HPR3TALB/2020 - UK home broadband performance, measurement period .pdf:application/pdf;Snapshot:/home/jake/Zotero/storage/437YQTVF/home-broadband-performance-2019.html:text/html},
}
@inproceedings{hacker_effects_2002,
title = {The {Effects} of {Systemic} {Packet} {Loss} on {Aggregate} {TCP} {Flows}},
doi = {10.1109/SC.2002.10029},
abstract = {The use of parallel TCP connections to increase throughput for bulk transfers is common practice within the high performance computing community. However, the effectiveness, fairness, and efficiency of data transfers across parallel connections is unclear. This paper considers the impact of systemic non-congestion related packet loss on the effectiveness, fairness, and efficiency of parallel TCP transmissions. The results indicate that parallel connections are effective at increasing aggregate throughput, and increase the overall efficiency of the network bottleneck. In the presence of congestion related losses, parallel flows steal bandwidth from other single stream flows. A simple modification is presented that reduces the fairness problems when congestion is present, but retains effectiveness and efficiency.},
booktitle = {{SC} '02: {Proceedings} of the 2002 {ACM}/{IEEE} {Conference} on {Supercomputing}},
author = {Hacker, T. J. and Noble, B. D. and Athey, B. D.},
month = nov,
year = {2002},
note = {ISSN: 1063-9535},
keywords = {Aggregates, Bandwidth, Biology computing, Computer hacking, Concurrent computing, High performance computing, Internet, Loss measurement, Robustness, Throughput},
pages = {7--7},
file = {IEEE Xplore Abstract Record:/home/jake/Zotero/storage/F9XVJNZS/1592843.html:text/html;IEEE Xplore Full Text PDF:/home/jake/Zotero/storage/GGX3FAK6/Hacker et al. - 2002 - The Effects of Systemic Packet Loss on Aggregate T.pdf:application/pdf},
}
@inproceedings{donenfeld_wireguard_2017,
address = {San Diego, CA},
title = {{WireGuard}: {Next} {Generation} {Kernel} {Network} {Tunnel}},
isbn = {978-1-891562-46-4},
shorttitle = {{WireGuard}},
url = {https://www.ndss-symposium.org/ndss2017/ndss-2017-programme/wireguard-next-generation-kernel-network-tunnel/},
doi = {10.14722/ndss.2017.23160},
abstract = {WireGuard is a secure network tunnel, operating at layer 3, implemented as a kernel virtual network interface for Linux, which aims to replace both IPsec for most use cases, as well as popular user space and/or TLS-based solutions like OpenVPN, while being more secure, more performant, and easier to use. The virtual tunnel interface is based on a proposed fundamental principle of secure tunnels: an association between a peer public key and a tunnel source IP address. It uses a single round trip key exchange, based on NoiseIK, and handles all session creation transparently to the user using a novel timer state machine mechanism. Short pre-shared static keys—Curve25519 points—are used for mutual authentication in the style of OpenSSH. The protocol provides strong perfect forward secrecy in addition to a high degree of identity hiding. Transport speed is accomplished using ChaCha20Poly1305 authenticated-encryption for encapsulation of packets in UDP. An improved take on IP-binding cookies is used for mitigating denial of service attacks, improving greatly on IKEv2 and DTLSs cookie mechanisms to add encryption and authentication. The overall design allows for allocating no resources in response to received packets, and from a systems perspective, there are multiple interesting Linux implementation techniques for queues and parallelism. Finally, WireGuard can be simply implemented for Linux in less than 4,000 lines of code, making it easily audited and verified.},
language = {en},
urldate = {2020-11-19},
booktitle = {Proceedings 2017 {Network} and {Distributed} {System} {Security} {Symposium}},
publisher = {Internet Society},
author = {Donenfeld, Jason A.},
year = {2017},
file = {Donenfeld - 2017 - WireGuard Next Generation Kernel Network Tunnel.pdf:/home/jake/Zotero/storage/6MEQYC9J/Donenfeld - 2017 - WireGuard Next Generation Kernel Network Tunnel.pdf:application/pdf},
}
@misc{schooler_sip_2002,
title = {{SIP}: {Session} {Initiation} {Protocol}},
shorttitle = {{SIP}},
url = {https://tools.ietf.org/html/rfc3261},
language = {en},
urldate = {2021-04-30},
author = {Schooler, Eve and Camarillo, Gonzalo and Handley, Mark and Peterson, Jon and Rosenberg, Jonathan and Johnston, Alan and Schulzrinne, Henning and Sparks, Robert},
month = jun,
year = {2002},
file = {Snapshot:/home/jake/Zotero/storage/VQKGNJGF/rfc3261.html:text/html},
}
@misc{bishop_hypertext_2021,
title = {Hypertext {Transfer} {Protocol} {Version} 3 ({HTTP}/3)},
url = {https://tools.ietf.org/html/draft-ietf-quic-http-34},
language = {en},
urldate = {2021-04-30},
author = {Bishop, Mike},
month = feb,
year = {2021},
file = {Snapshot:/home/jake/Zotero/storage/SA5YGQGZ/draft-ietf-quic-http-34.html:text/html},
}
@inproceedings{honda_understanding_2005,
title = {Understanding {TCP} over {TCP}: effects of {TCP} tunneling on end-to-end throughput and latency},
volume = {6011},
shorttitle = {Understanding {TCP} over {TCP}},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/6011/60110H/Understanding-TCP-over-TCP--effects-of-TCP-tunneling-on/10.1117/12.630496.short},
doi = {10.1117/12.630496},
abstract = {TCP tunnel is a technology that aggregates and transfers packets sent between end hosts as a single TCP connection. By using a TCP tunnel, the fairness among aggregated flows can be improved and several protocols can be transparently transmitted through a firewall. Currently, many applications such as SSH, VTun, and HTun use a TCP tunnel. However, since most applications running on end hosts generally use TCP, two TCP congestion controls (i.e., end-to-end TCP and tunnel TCP) operate simultaneously and interfere each other. Under certain conditions, it has been known that using a TCP tunnel severely degrades the end-to-end TCP performance. Namely, it has known that using a TCP tunnel drastically degrades the end-to-end TCP throughput for some time, which is called \textit{TCP meltdown} problem. On the contrary, under other conditions, it has been known that using a TCP tunnel significantly improves the end-to-end TCP performance. However, it is still an open issue --- how, when, and why is a TCP tunnel malicious for end-to-end TCP performance? In this paper, we therefore investigate effect of TCP tunnel on end-to-end TCP performance using simulation experiments. Specifically, we quantitatively reveal effects of several factors (e.g., the propagation delay, usage of SACK option, TCP socket buffer size, and sender buffer size of TCP tunnel) on performance of end-to-end TCP and tunnel TCP.},
urldate = {2021-04-30},
booktitle = {Performance, {Quality} of {Service}, and {Control} of {Next}-{Generation} {Communication} and {Sensor} {Networks} {III}},
publisher = {International Society for Optics and Photonics},
author = {Honda, Osamu and Ohsaki, Hiroyuki and Imase, Makoto and Ishizuka, Mika and Murayama, Junichi},
month = oct,
year = {2005},
file = {Snapshot:/home/jake/Zotero/storage/8DTCGSYY/rfc4302.html:text/html},
pages = {60110H},
file = {Full Text PDF:/home/jake/Zotero/storage/HZ4HF793/Honda et al. - 2005 - Understanding TCP over TCP effects of TCP tunneli.pdf:application/pdf;Snapshot:/home/jake/Zotero/storage/VQLH6I65/12.630496.html:text/html},
}
@inproceedings{amin_assessing_2013,
address = {Berlin, Heidelberg},
series = {Lecture {Notes} in {Computer} {Science}},
title = {Assessing the {Impact} of {Latency} and {Jitter} on the {Perceived} {Quality} of {Call} of {Duty} {Modern} {Warfare} 2},
isbn = {978-3-642-39265-8},
doi = {10.1007/978-3-642-39265-8_11},
abstract = {Jane McGonigal stated in her 2010 TED Talk that humans spend 3 billion hours a week playing video games around the planet. Americans alone devote 183 million hours per week to gaming. With numbers like these, its no wonder why end user demands for bandwidth have increased exponentially and the potential for network congestion is always present. We conduct a user study that focuses on the question: “How much network impairment is acceptable before users are dissatisfied?” In particular, the main objective of our study is to measure a gamers perceived Quality of Experience (QoE) for a real-time first person shooter (FPS) online game Call of Duty Modern Warfare 2 in presence of varied levels of network congestion. We develop a Mean Opinion Score (MOS) metric to determine each gamers QoE. We investigate the following hypothesis: The gamers perceived QoE correlates to their skill level.},
language = {en},
booktitle = {Human-{Computer} {Interaction}. {Users} and {Contexts} of {Use}},
publisher = {Springer},
author = {Amin, Rahul and Jackson, France and Gilbert, Juan E. and Martin, Jim and Shaw, Terry},
editor = {Kurosu, Masaaki},
year = {2013},
keywords = {First Person Shooter Games, Network Impairment, Online Gaming, Quality of Experience},
pages = {97--106},
file = {Springer Full Text PDF:/home/jake/Zotero/storage/QASE3YCW/Amin et al. - 2013 - Assessing the Impact of Latency and Jitter on the .pdf:application/pdf},
}
@article{roychoudhuri_impact_2006,
series = {Monitoring and {Measurements} of {IP} {Networks}},
title = {On the impact of loss and delay variation on {Internet} packet audio transmission},
volume = {29},
issn = {0140-3664},
url = {https://www.sciencedirect.com/science/article/pii/S0140366406001381},
doi = {10.1016/j.comcom.2006.04.004},
abstract = {The quality of audio in IP telephony is significantly influenced by various factors, including type of encoder, delay, delay variation, rate and distribution of packet loss, and type of error concealment. Hence, the performance of IP telephony systems is highly dependent on understanding the contribution of these factors to audio quality, and their impact on adaptive transport mechanisms such as error and buffer control. We conducted a large-scale audio transmission experiment over the Internet in a 12-month-period in order to evaluate the effects and the correlation of such parameters on audio transmission over IP. We have noticed that the correlation of loss and delay is not linear, but stronger correlation is observed as the delay approaches certain thresholds. We have made a number of new observations on various delay thresholds that are significant for loss prediction for adaptive audio transmission over IP networks. We also have made new observations to assess the audio quality of PCM μ-law and G.728 codecs under different loss and delay conditions. The paper provides a number of recommendations for implementing efficient adaptive FEC mechanisms based on our measurement observations and analysis.},
language = {en},
number = {10},
urldate = {2021-05-09},
journal = {Computer Communications},
author = {Roychoudhuri, Lopamudra and Al-Shaer, Ehab and Brewster, Gregory B.},
month = jun,
year = {2006},
keywords = {Internet measurement, IP telephony, Monitoring, VoIP},
pages = {1578--1589},
file = {ScienceDirect Full Text PDF:/home/jake/Zotero/storage/JJSEUY94/Roychoudhuri et al. - 2006 - On the impact of loss and delay variation on Inter.pdf:application/pdf},
}
@misc{damjanovic_quic_2021,
title = {{QUIC} and {HTTP}/3 {Support} now in {Firefox} {Nightly} and {Beta} {Mozilla} {Hacks} - the {Web} developer blog},
url = {https://hacks.mozilla.org/2021/04/quic-and-http-3-support-now-in-firefox-nightly-and-beta},
abstract = {Support for QUIC and HTTP/3 is now enabled by default in Firefox Nightly and Firefox Beta. HTTP/3 will be available by the end of May.},
language = {en-US},
urldate = {2021-05-12},
journal = {Mozilla Hacks the Web developer blog},
author = {Damjanovic, Dragana},
month = apr,
year = {2021},
file = {Snapshot:/home/jake/Zotero/storage/M23DUPPY/quic-and-http-3-support-now-in-firefox-nightly-and-beta.html:text/html},
}
@misc{govindan_enabling_2020,
title = {Enabling {QUIC} in tip-of-tree},
url = {https://groups.google.com/a/chromium.org/g/net-dev/c/5M9Z5mtvg_Y/m/iw9co1VrBQAJ?pli=1},
urldate = {2021-05-12},
author = {Govindan, Dharani},
month = apr,
year = {2020},
file = {Enabling QUIC in tip-of-tree:/home/jake/Zotero/storage/YSCGU6UF/iw9co1VrBQAJ.html:text/html},
}
@misc{kinnear_boost_2020,
title = {Boost performance and security with modern networking - {WWDC} 2020 - {Videos}},
url = {https://developer.apple.com/videos/play/wwdc2020/10111/?time=644},
abstract = {Speed up your app and make it more nimble, private and secure with modern networking APIs. Learn about networking protocols like IPv6,...},
language = {en},
urldate = {2021-05-12},
journal = {Apple Developer},
author = {Kinnear, Eric},
month = jun,
year = {2020},
file = {Snapshot:/home/jake/Zotero/storage/E4SHEITG/10111.html:text/html},
}
@misc{pennarun_how_2020,
title = {How {Tailscale} works},
url = {https://tailscale.com/blog/how-tailscale-works/},
abstract = {People often ask us for an overview of how Tailscale works. We\&rsquo;ve been},
language = {en},
urldate = {2021-05-12},
journal = {Tailscale},
author = {Pennarun, Avery},
month = mar,
year = {2020},
file = {Snapshot:/home/jake/Zotero/storage/2CCG9LH2/how-tailscale-works.html:text/html},
}
@misc{torvalds_linux_2020,
title = {Linux 5.6 - {Linus} {Torvalds}},
url = {https://lore.kernel.org/lkml/CAHk-=wi9ZT7Stg-uSpX0UWQzam6OP9Jzz6Xu1CkYu1cicpD5OA@mail.gmail.com/},
urldate = {2021-05-12},
author = {Torvalds, Linus},
month = mar,
year = {2020},
file = {Linux 5.6 - Linus Torvalds:/home/jake/Zotero/storage/33QZKUPE/CAHk-=wi9ZT7Stg-uSpX0UWQzam6OP9Jzz6Xu1CkYu1cicpD5OA@mail.gmail.com.html:text/html},
}
@article{donenfeld_formal_nodate,
title = {Formal {Verification} of the {WireGuard} {Protocol}},
abstract = {WireGuard, the secure network tunnel, uses an interesting DiffieHellman authenticated key exchange protocol based on NoiseIK, custom tailored to suit its unique operational requirements. This paper enumerates the security properties of this key exchange and then explores the formal verification of such properties. The end result is a formally verified secure network tunnel protocol.},
language = {en},
author = {Donenfeld, Jason A and Milner, Kevin},
pages = {11},
file = {Donenfeld - Formal Verification of the WireGuard Protocol.pdf:/home/jake/Zotero/storage/IGVX3ECM/Donenfeld - Formal Verification of the WireGuard Protocol.pdf:application/pdf},
}
@incollection{preneel_cryptographic_2018,
address = {Cham},
title = {A {Cryptographic} {Analysis} of the {WireGuard} {Protocol}},
volume = {10892},
isbn = {978-3-319-93386-3 978-3-319-93387-0},
url = {http://link.springer.com/10.1007/978-3-319-93387-0_1},
abstract = {WireGuard (Donenfeld, NDSS 2017) is a recently proposed secure network tunnel operating at layer 3. WireGuard aims to replace existing tunnelling solutions like IPsec and OpenVPN, while requiring less code, being more secure, more performant, and easier to use. The cryptographic design of WireGuard is based on the Noise framework. It makes use of a key exchange component which combines long-term and ephemeral Diffie-Hellman values (along with optional preshared keys). This is followed by the use of the established keys in an AEAD construction to encapsulate IP packets in UDP. To date, WireGuard has received no rigorous security analysis. In this paper, we, rectify this. We first observe that, in order to prevent Key Compromise Impersonation (KCI) attacks, any analysis of WireGuards key exchange component must take into account the first AEAD ciphertext from initiator to responder. This message effectively acts as a key confirmation and makes the key exchange component of WireGuard a 1.5 RTT protocol. However, the fact that this ciphertext is computed using the established session key rules out a proof of session key indistinguishability for WireGuards key exchange component, limiting the degree of modularity that is achievable when analysing the protocols security. To overcome this proof barrier, and as an alternative to performing a monolithic analysis of the entire WireGuard protocol, we add an extra message to the protocol. This is done in a minimally invasive way that does not increase the number of round trips needed by the overall WireGuard protocol. This change enables us to prove strong authentication and key indistinguishability properties for the key exchange component of WireGuard under standard cryptographic assumptions.},
language = {en},
urldate = {2021-05-12},
booktitle = {Applied {Cryptography} and {Network} {Security}},
publisher = {Springer International Publishing},
author = {Dowling, Benjamin and Paterson, Kenneth G.},
editor = {Preneel, Bart and Vercauteren, Frederik},
year = {2018},
doi = {10.1007/978-3-319-93387-0_1},
note = {Series Title: Lecture Notes in Computer Science},
pages = {3--21},
file = {Dowling and Paterson - 2018 - A Cryptographic Analysis of the WireGuard Protocol.pdf:/home/jake/Zotero/storage/GGI6BMJF/Dowling and Paterson - 2018 - A Cryptographic Analysis of the WireGuard Protocol.pdf:application/pdf},
}
@misc{donenfeld_wireguard_2020,
title = {wireguard fixes for 5.6-rc7},
url = {https://lore.kernel.org/netdev/20200319003047.113501-1-Jason@zx2c4.com/},
urldate = {2021-05-12},
author = {Donenfeld, Jason A},
month = mar,
year = {2020},
file = {[PATCH net 0/5] wireguard fixes for 5.6-rc7 - Jason A. Donenfeld:/home/jake/Zotero/storage/QZLP2EZP/20200319003047.113501-1-Jason@zx2c4.com.html:text/html},
}
@misc{cloudflare_cloudflare_nodate,
title = {Cloudflare - {The} {Web} {Performance} \& {Security} {Company}},
url = {https://www.cloudflare.com/},
abstract = {Here at Cloudflare, we make the Internet work the way it should. Offering CDN, DNS, DDoS protection and security, find out how we can help your site.},
language = {en-us},
urldate = {2021-05-12},
journal = {Cloudflare},
author = {Cloudflare, Inc.},
file = {Snapshot:/home/jake/Zotero/storage/Y9XA4G7L/www.cloudflare.com.html:text/html},
}
@misc{henderson_newreno_2012,
title = {The {NewReno} {Modification} to {TCP}'s {Fast} {Recovery} {Algorithm}},
url = {https://tools.ietf.org/html/rfc6582},
urldate = {2021-05-13},
author = {Henderson, T. and Floyd, S. and Gurtov, A. and Nishida, Y.},
month = apr,
year = {2012},
file = {rfc6582:/home/jake/Zotero/storage/9X85DZDZ/rfc6582.html:text/html},
}

View File

@ -2,18 +2,18 @@
set -e
# Clean up previous code samples
rm /home/jake/repos/dissertation/4-dissertation/Preparation/Samples/* || true
rm /home/jake/repos/dissertation/4-dissertation/Implementation/Samples/* || true
rm /home/jake/repos/dissertation/4-dissertation/2_Preparation/Samples/* || true
rm /home/jake/repos/dissertation/4-dissertation/3_Implementation/Samples/* || true
# Copy code samples for preparation
printf "#include...\\n" > /home/jake/repos/dissertation/4-dissertation/Preparation/Samples/main.cpp
tail -n +17 /home/jake/repos/dissertation/1-language-tests/cpp/main.cpp >> /home/jake/repos/dissertation/4-dissertation/Preparation/Samples/main.cpp
printf "#include...\\n" > /home/jake/repos/dissertation/4-dissertation/2_Preparation/Samples/main.cpp
tail -n +17 /home/jake/repos/dissertation/1-language-tests/cpp/main.cpp >> /home/jake/repos/dissertation/4-dissertation/2_Preparation/Samples/main.cpp
cp /home/jake/repos/dissertation/1-language-tests/go/main.go /home/jake/repos/dissertation/4-dissertation/Preparation/Samples/
cp /home/jake/repos/dissertation/1-language-tests/rust/src/main.rs /home/jake/repos/dissertation/4-dissertation/Preparation/Samples/
cp /home/jake/repos/dissertation/1-language-tests/go/main.go /home/jake/repos/dissertation/4-dissertation/2_Preparation/Samples/
cp /home/jake/repos/dissertation/1-language-tests/rust/src/main.rs /home/jake/repos/dissertation/4-dissertation/2_Preparation/Samples/
# Copy code samples for implementation
cp /home/jake/repos/dissertation/2-code/udp/congestion.go /home/jake/repos/dissertation/4-dissertation/Implementation/Samples/
cp /home/jake/repos/dissertation/2-code/udp/wireshark_dissector.lua /home/jake/repos/dissertation/4-dissertation/Implementation/Samples/
cp /home/jake/repos/dissertation/2-code/proxy/mac.go /home/jake/repos/dissertation/4-dissertation/Implementation/Samples/
cp /home/jake/repos/dissertation/2-code/udp/congestion.go /home/jake/repos/dissertation/4-dissertation/3_Implementation/Samples/
cp /home/jake/repos/dissertation/2-code/udp/wireshark_dissector.lua /home/jake/repos/dissertation/4-dissertation/3_Implementation/Samples/
cp /home/jake/repos/dissertation/2-code/proxy/mac.go /home/jake/repos/dissertation/4-dissertation/3_Implementation/Samples/

View File

@ -1,16 +0,0 @@
#!/bin/bash
set -e
# Clean up previous graphs
rm /home/jake/repos/dissertation/4-dissertation/Evaluation/Figs/graphs/* || true
rm /home/jake/repos/dissertation/4-dissertation/OutboundGraphs/Figs/graphs/* || true
# Copy graphs for evaluation
cp /home/jake/repos/dissertation/3-evaluation/output/I* /home/jake/repos/dissertation/4-dissertation/Evaluation/Figs/graphs/
cp /home/jake/repos/dissertation/3-evaluation/output/TI* /home/jake/repos/dissertation/4-dissertation/Evaluation/Figs/graphs/
cp /home/jake/repos/dissertation/3-evaluation/output/OES0-R0-1R1-1T10S1-R0-1R1-2T10S2-R0-2R1-2T10.png /home/jake/repos/dissertation/4-dissertation/Evaluation/Figs/graphs/
# Copy graphs for outbound graphs appendix
cp /home/jake/repos/dissertation/3-evaluation/output/O* /home/jake/repos/dissertation/4-dissertation/OutboundGraphs/Figs/graphs/
cp /home/jake/repos/dissertation/3-evaluation/output/TO* /home/jake/repos/dissertation/4-dissertation/OutboundGraphs/Figs/graphs/

5376
sty/tikz-uml.sty Normal file

File diff suppressed because it is too large

Binary file not shown.

View File

@ -1,7 +1,7 @@
% ******************************* PhD Thesis Template **************************
% Please have a look at the README.md file for info on how to use the template
\documentclass[a4paper,12pt,times,numbered,print,index]{PhDThesisPSnPDF}
\documentclass[a4paper,12pt,times,oneside,custommargin,numbered,print,index]{PhDThesisPSnPDF}
% ******************************************************************************
% ******************************* Class Options ********************************
@ -114,8 +114,10 @@
\maketitle
%\include{Dedication/dedication}
%TC:ignore
\include{Declaration/declaration}
\include{Proforma/proforma}
\include{0_Proforma/proforma}
%TC:endignore
%\include{Acknowledgement/acknowledgement}
%\include{Abstract/abstract}
@ -123,7 +125,7 @@
\tableofcontents
\listoffigures
% \listoffigures
% \listoftables
@ -135,11 +137,11 @@
% ******************************** Main Matter *********************************
\mainmatter
\include{Introduction/introduction}
\include{Preparation/preparation}
\include{Implementation/implementation}
\include{Evaluation/evaluation}
\include{Conclusions/conclusions}
\include{1_Introduction/introduction}
\include{2_Preparation/preparation}
\include{3_Implementation/implementation}
\include{4_Evaluation/evaluation}
\include{5_Conclusions/conclusions}
% ********************************** Back Matter *******************************
@ -175,9 +177,10 @@
%TC:ignore
\begin{appendices} % Using appendices environment for more functunality
\include{LanguageSamples/languagesamples}
\include{OutboundGraphs/outboundgraphs}
\include{Proposal/proposal}
\include{A1_LanguageSamples/languagesamples}
\include{A2_LayeredSecurity/layeredsecurity}
\include{A3_OutboundGraphs/outboundgraphs}
\include{A4_ProjectProposal/projectproposal}
\end{appendices}