Compare commits

main...gui-prebui (758 commits)
.github/ISSUE_TEMPLATE/bug_report.md (2 changes, vendored)

@@ -1,6 +1,6 @@
 ---
 name: "\U0001F41B Bug report"
-about: Bugs encountered while using Storj DCS or running a storage node.
+about: Bugs encountered while using Storj or running a storage node.
 title: ''
 labels: Bug
 assignees: ''
Earthfile (10 changes)

@@ -3,7 +3,7 @@ FROM golang:1.19
 WORKDIR /go/storj

 multinode-web:
-    FROM node:18
+    FROM node:18.17
     WORKDIR /build
     COPY web/multinode .
     RUN ./build.sh
@@ -21,7 +21,7 @@ wasm:
     SAVE ARTIFACT release/earthly/wasm wasm AS LOCAL web/satellite/static/wasm

 storagenode-web:
-    FROM node:18
+    FROM node:18.17
     WORKDIR /build
     COPY web/storagenode .
     RUN ./build.sh
@@ -29,16 +29,17 @@ storagenode-web:
     SAVE ARTIFACT static AS LOCAL web/storagenode/static

 satellite-web:
-    FROM node:18
+    FROM node:18.17
     WORKDIR /build
     COPY web/satellite .
     RUN ./build.sh
     COPY +wasm/wasm static/wasm
     SAVE ARTIFACT dist AS LOCAL web/satellite/dist
+    SAVE ARTIFACT dist_vuetify_poc AS LOCAL web/satellite/dist_vuetify_poc
     SAVE ARTIFACT static AS LOCAL web/satellite/static

 satellite-admin:
-    FROM node:16
+    FROM node:18.17
     WORKDIR /build
     COPY satellite/admin/ui .
     RUN ./build.sh
@@ -119,6 +120,7 @@ build-tagged-image:
     FROM img.dev.storj.io/storjup/base:20230208-1
     COPY +multinode-web/dist /var/lib/storj/storj/web/multinode/dist
     COPY +satellite-web/dist /var/lib/storj/storj/web/satellite/dist
+    COPY +satellite-web/dist_vuetify_poc /var/lib/storj/storj/web/satellite/dist_vuetify_poc
     COPY +satellite-admin/build /app/satellite-admin/
     COPY +satellite-web/static /var/lib/storj/storj/web/satellite/static
     COPY +storagenode-web/dist /var/lib/storj/storj/web/storagenode/dist
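These Earthfile targets are also convenient for one-off local builds: each `SAVE ARTIFACT ... AS LOCAL` line writes the build output back into the working tree when the target is run locally. A minimal sketch, assuming the `earthly` CLI and a running Docker daemon (neither is part of this diff):

```shell
# Build the satellite web assets; on success earthly materializes
# dist/, dist_vuetify_poc/, and static/ under web/satellite/.
earthly +satellite-web

# The storagenode and multinode bundles work the same way.
earthly +storagenode-web
earthly +multinode-web
```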
Jenkinsfile (37 changes, vendored)

@@ -10,41 +10,6 @@ node('node') {

         echo "Current build result: ${currentBuild.result}"
     }
-    if (env.BRANCH_NAME == "main") {
-        stage('Run Versions Test') {
-            lastStage = env.STAGE_NAME
-            try {
-                echo "Running Versions test"
-
-                env.STORJ_SIM_POSTGRES = 'postgres://postgres@postgres:5432/teststorj?sslmode=disable'
-                env.STORJ_SIM_REDIS = 'redis:6379'
-
-                echo "STORJ_SIM_POSTGRES: $STORJ_SIM_POSTGRES"
-                echo "STORJ_SIM_REDIS: $STORJ_SIM_REDIS"
-                sh 'docker run --rm -d -e POSTGRES_HOST_AUTH_METHOD=trust --name postgres-$BUILD_NUMBER postgres:12.3'
-                sh 'docker run --rm -d --name redis-$BUILD_NUMBER redis:latest'
-
-                sh '''until $(docker logs postgres-$BUILD_NUMBER | grep "database system is ready to accept connections" > /dev/null)
-                    do printf '.'
-                    sleep 5
-                    done
-                '''
-                sh 'docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj'
-                // fetch the remote main branch
-                sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main'
-                sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/testversions/test-sim-versions.sh -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres storjlabs/golang:1.20.3'
-            }
-            catch(err){
-                throw err
-            }
-            finally {
-                sh 'docker stop postgres-$BUILD_NUMBER || true'
-                sh 'docker rm postgres-$BUILD_NUMBER || true'
-                sh 'docker stop redis-$BUILD_NUMBER || true'
-                sh 'docker rm redis-$BUILD_NUMBER || true'
-            }
-        }
-    }

     stage('Run Rolling Upgrade Test') {
         lastStage = env.STAGE_NAME
@@ -69,7 +34,7 @@ node('node') {
             sh 'docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj'
             // fetch the remote main branch
             sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main'
-            sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/rollingupgrade/test-sim-rolling-upgrade.sh -e BRANCH_NAME -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS -e STORJ_MIGRATION_DB --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres storjlabs/golang:1.20.3'
+            sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/rollingupgrade/test-sim-rolling-upgrade.sh -e BRANCH_NAME -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS -e STORJ_MIGRATION_DB --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres storjlabs/golang:1.21.3'
         }
         catch(err){
             throw err
@@ -229,50 +229,64 @@ pipeline {
                 }
             }

-        stage('wasm npm') {
-            steps {
-                dir(".build") {
-                    sh 'cp -r ../satellite/console/wasm/tests/ .'
-                    sh 'cd tests && cp "$(go env GOROOT)/misc/wasm/wasm_exec.js" .'
-                    sh 'cd tests && npm install && npm run test'
-                }
-            }
-        }
-
-        stage('web/satellite') {
-            steps {
-                dir("web/satellite") {
-                    sh 'npm run lint-ci'
-                    sh script: 'npm audit', returnStatus: true
-                    sh 'npm run test'
-                }
-            }
-        }
-
-        stage('web/storagenode') {
-            steps {
-                dir("web/storagenode") {
-                    sh 'npm run lint-ci'
-                    sh script: 'npm audit', returnStatus: true
-                    sh 'npm run test'
-                }
-            }
-        }
-
-        stage('web/multinode') {
-            steps {
-                dir("web/multinode") {
-                    sh 'npm run lint-ci'
-                    sh script: 'npm audit', returnStatus: true
-                    sh 'npm run test'
-                }
-            }
-        }
-
-        stage('satellite/admin/ui') {
-            steps {
-                dir("satellite/admin/ui") {
-                    sh script: 'npm audit', returnStatus: true
-                }
-            }
-        }
+        stage('Test Web') {
+            parallel {
+                stage('wasm npm') {
+                    steps {
+                        dir(".build") {
+                            sh 'cp -r ../satellite/console/wasm/tests/ .'
+                            sh 'cd tests && cp "$(go env GOROOT)/misc/wasm/wasm_exec.js" .'
+                            sh 'cd tests && npm install && npm run test'
+                        }
+                    }
+                }
+
+                stage('web/satellite') {
+                    steps {
+                        dir("web/satellite") {
+                            sh 'npm run lint-ci'
+                            sh script: 'npm audit', returnStatus: true
+                            sh 'npm run test'
+                        }
+                    }
+                }
+
+                stage('web/storagenode') {
+                    steps {
+                        dir("web/storagenode") {
+                            sh 'npm run lint-ci'
+                            sh script: 'npm audit', returnStatus: true
+                            sh 'npm run test'
+                        }
+                    }
+                }
+
+                stage('web/multinode') {
+                    steps {
+                        dir("web/multinode") {
+                            sh 'npm run lint-ci'
+                            sh script: 'npm audit', returnStatus: true
+                            sh 'npm run test'
+                        }
+                    }
+                }
+
+                stage('satellite/admin/ui') {
+                    steps {
+                        dir("satellite/admin/ui") {
+                            sh script: 'npm audit', returnStatus: true
+                        }
+                    }
+                }
+
+                stage('satellite/admin/back-office/ui') {
+                    steps {
+                        dir("satellite/admin/back-office/ui") {
+                            sh 'npm install --prefer-offline --no-audit --loglevel verbose'
+                            sh 'npm run lint-ci'
+                            sh script: 'npm audit', returnStatus: true
+                        }
+                    }
+                }
+            }
+        }
     }
 }
@@ -125,6 +125,7 @@ pipeline {
                 sh 'check-atomic-align ./...'
                 sh 'check-monkit ./...'
                 sh 'check-errs ./...'
+                sh 'check-deferloop ./...'
                 sh 'staticcheck ./...'
                 sh 'golangci-lint --config /go/ci/.golangci.yml -j=2 run'
                 sh 'check-downgrades'
Jenkinsfile.versions (new file, 64 lines)

@@ -0,0 +1,64 @@
+def lastStage = ''
+node('node') {
+    properties([disableConcurrentBuilds()])
+    try {
+        currentBuild.result = "SUCCESS"
+
+        stage('Checkout') {
+            lastStage = env.STAGE_NAME
+            checkout scm
+
+            echo "Current build result: ${currentBuild.result}"
+        }
+
+        stage('Run Versions Test') {
+            lastStage = env.STAGE_NAME
+            try {
+                echo "Running Versions test"
+
+                env.STORJ_SIM_POSTGRES = 'postgres://postgres@postgres:5432/teststorj?sslmode=disable'
+                env.STORJ_SIM_REDIS = 'redis:6379'
+
+                echo "STORJ_SIM_POSTGRES: $STORJ_SIM_POSTGRES"
+                echo "STORJ_SIM_REDIS: $STORJ_SIM_REDIS"
+                sh 'docker run --rm -d -e POSTGRES_HOST_AUTH_METHOD=trust --name postgres-$BUILD_NUMBER postgres:12.3'
+                sh 'docker run --rm -d --name redis-$BUILD_NUMBER redis:latest'
+
+                sh '''until $(docker logs postgres-$BUILD_NUMBER | grep "database system is ready to accept connections" > /dev/null)
+                    do printf '.'
+                    sleep 5
+                    done
+                '''
+                sh 'docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj'
+                // fetch the remote main branch
+                sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main'
+                sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/testversions/test-sim-versions.sh -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres storjlabs/golang:1.21.3'
+            }
+            catch(err){
+                throw err
+            }
+            finally {
+                sh 'docker stop postgres-$BUILD_NUMBER || true'
+                sh 'docker rm postgres-$BUILD_NUMBER || true'
+                sh 'docker stop redis-$BUILD_NUMBER || true'
+                sh 'docker rm redis-$BUILD_NUMBER || true'
+            }
+        }
+    }
+    catch (err) {
+        echo "Caught errors! ${err}"
+        echo "Setting build result to FAILURE"
+        currentBuild.result = "FAILURE"
+
+        slackSend color: 'danger', message: "@build-team ${env.BRANCH_NAME} build failed during stage ${lastStage} ${env.BUILD_URL}"
+
+        throw err
+
+    }
+    finally {
+        stage('Cleanup') {
+            deleteDir()
+        }
+
+    }
+}
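Jenkinsfile.versions reproduces the 'Run Versions Test' stage that the main Jenkinsfile dropped above, just on the newer Go image. A rough sketch of driving the same test by hand, assuming postgres and redis are already reachable under the hostnames the stage wires up via `--link`:

```shell
# Same environment the Jenkins stage exports before invoking the script.
export STORJ_SIM_POSTGRES='postgres://postgres@postgres:5432/teststorj?sslmode=disable'
export STORJ_SIM_REDIS='redis:6379'

# Run the versions test directly from a checkout of the repository.
./scripts/tests/testversions/test-sim-versions.sh
```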
@@ -33,7 +33,7 @@ Here we need to post changes for each topic(storj-sim, Uplink, Sattelite, Storag

 Then its time to cut the release branch:
 `git checkout -b v1.3` - will create and checkout branch v1.3
-`git push origin v1.3`- will push release branch to the repo
+`git push origin v1.3`- will push release branch to the repo\
 Also we need to cut same release branch on tardigrade-satellite-theme repo
 `git checkout -b v1.3` - will create and checkout branch v1.3
 `git push origin v1.3`- will push release branch to the repo
@@ -42,15 +42,22 @@ The next step is to create tag for `storj` repo using `tag-release.sh` which is
 Example:
 `./scripts/tag-release.sh v1.3.0-rc`
 `git push origin v1.3.0-rc`
-Then verify that the Jenkins job of the build Storj V3 for such tag and branch has finished successfully.
+Then verify that the Jenkins job of the build Storj V3 for such tag and branch has finished successfully.\
+Pay attention to tardigrade-satellite-theme job - it should be successfully finished as well.


 ## How to cherry pick

 If you need to cherry-pick something after the release branch has been created then you need to create point release.
 Make sure that you have the latest changes, checkout the release branch and execute cherry-pick:
-`git cherry-pick <your commit hash>`
-You need to create pull request to the release branch with that commit. After the pull request will be approved and merged you should create new release tag:
+```
+git fetch
+git checkout -b <xxx>/cherry-pick-v1.xx
+git cherry-pick <your commit hash>
+```
+You need push and create pull request to the release branch with that commit.
+`git push origin <xxx>/cherry-pick-v1.xx`
+After the pull request will be approved, pass all tests and merged you should create new release tag:
 `./scripts/tag-release.sh v1.3.1`
 and push the tag to the repo:
 `git push origin v1.3.1`
@@ -64,10 +71,25 @@ git push origin release-v1.3
 ```
 Update Jenkins job.

+## Revert from release
+
+If revert needed we proceed with next flow:
+Ask developer to fix problem and push commit to main branch. After that cherry-pick fix to the release branch.
+Why we do use this flow but not revert from the release branch? It's to prevent situation to fix bug in the main.
+
+
 ## Where to find the release binaries

 After Jenkins job for this release finished it will automaticaly post this tag on [GitHub release page](https://github.com/storj/storj/releases). The status will be `Draft`.
-Update this tag with changelog that you previosly created.
+Update this tag with changelog that you previously created.\
+For now changelog is generated automatically, but binaries for darwin not. Darwin binaries should be generated manually and added to tag.\
+Add New Contributors list to the release. To generate it:
+`git shortlog -sn release-v1.2 | cut -f 2 > ../old.txt && git shortlog -sn release-v1.3 | cut -f 2 > ../new.txt && grep -Fxv -f ../old.txt ../new.txt`
+Note, to run this command current and previous release should be on your local machine.
+
+## Setting the 'Latest' release version
+
+After 100% storagenodes rollout is finished -> new release should be set as 'Latest'.
+
 ## Which tests do we want to execute
 Everything that could break production.
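Read end to end, the updated cherry-pick instructions amount to one linear sequence. A worked example for a hypothetical v1.88 point release (the branch name, handle, and versions are illustrative, not from this diff):

```shell
git fetch
# work branch for the cherry-pick; <xxx> is your handle per the convention above
git checkout -b alice/cherry-pick-v1.88 v1.88
git cherry-pick <your commit hash>
git push origin alice/cherry-pick-v1.88   # then open a PR against the release branch

# after the PR is approved, passes tests, and is merged:
./scripts/tag-release.sh v1.88.1
git push origin v1.88.1
```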
Makefile (20 changes)

@@ -1,8 +1,8 @@
-GO_VERSION ?= 1.20.3
+GO_VERSION ?= 1.21.3
 GOOS ?= linux
 GOARCH ?= amd64
 GOPATH ?= $(shell go env GOPATH)
-NODE_VERSION ?= 16.11.1
+NODE_VERSION ?= 18.17.0
 COMPOSE_PROJECT_NAME := ${TAG}-$(shell git rev-parse --abbrev-ref HEAD)
 BRANCH_NAME ?= $(shell git rev-parse --abbrev-ref HEAD | sed "s!/!-!g")
 GIT_TAG := $(shell git rev-parse --short HEAD)
@@ -73,6 +73,8 @@ build-multinode-npm:
 	cd web/multinode && npm ci
 build-satellite-admin-npm:
 	cd satellite/admin/ui && npm ci
+	# Temporary until the new back-office replaces the current admin API & UI
+	cd satellite/admin/back-office/ui && npm ci

 ##@ Simulator

@@ -126,7 +128,7 @@ lint:
 		-v ${GOPATH}/pkg:/go/pkg \
 		-v ${PWD}:/storj \
 		-w /storj \
-		storjlabs/ci-slim \
+		storjlabs/ci:slim \
 		make .lint LINT_TARGET="$(LINT_TARGET)"

 .PHONY: .lint/testsuite/ui
@@ -286,6 +288,14 @@ satellite-admin-ui:
 		-u $(shell id -u):$(shell id -g) \
 		node:${NODE_VERSION} \
 		/bin/bash -c "npm ci && npm run build"
+	# Temporary until the new back-office replaces the current admin API & UI
+	docker run --rm -i \
+		--mount type=bind,src="${PWD}",dst=/go/src/storj.io/storj \
+		-w /go/src/storj.io/storj/satellite/admin/back-office/ui \
+		-e HOME=/tmp \
+		-u $(shell id -u):$(shell id -g) \
+		node:${NODE_VERSION} \
+		/bin/bash -c "npm ci && npm run build"

 .PHONY: satellite-wasm
 satellite-wasm:
@@ -464,7 +474,9 @@ binaries-upload: ## Upload binaries to Google Storage (jenkins)
 		zip -r "$${zipname}.zip" "$${filename}" \
 		; fi \
 	; done
-	cd "release/${TAG}"; gsutil -m cp -r *.zip "gs://storj-v3-alpha-builds/${TAG}/"
+	cd "release/${TAG}" \
+		&& sha256sum *.zip > sha256sums \
+		&& gsutil -m cp -r *.zip sha256sums "gs://storj-v3-alpha-builds/${TAG}/"

 .PHONY: draft-release
 draft-release:
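With the binaries-upload change, a `sha256sums` file now sits next to the zip archives in the bucket, so a downloaded build can be checked with standard tooling. A sketch, with the archive name as a placeholder:

```shell
# Fetch an archive plus the checksum list for a given build tag...
gsutil cp "gs://storj-v3-alpha-builds/${TAG}/<archive>.zip" .
gsutil cp "gs://storj-v3-alpha-builds/${TAG}/sha256sums" .

# ...and verify only the files present locally against the list.
sha256sum -c --ignore-missing sha256sums
```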
@@ -4,7 +4,11 @@
 [![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://pkg.go.dev/storj.io/storj)
 [![Coverage Status](https://img.shields.io/badge/coverage-master-green.svg)](https://build.dev.storj.io/job/storj/job/main/cobertura)

-<img src="https://github.com/storj/storj/raw/main/resources/logo.png" width="100">
+<picture>
+    <source media="(prefers-color-scheme: dark)" srcset="https://github.com/storj/.github/assets/3217669/15b2f86d-e585-430f-83f8-67cccda07f73">
+    <source media="(prefers-color-scheme: light)" srcset="https://github.com/storj/.github/assets/3217669/de7657b7-0497-4b72-8d71-99bf210164dc">
+    <img alt="Storj logo" src="https://github.com/storj/.github/assets/3217669/de7657b7-0497-4b72-8d71-99bf210164dc" height="100">
+</picture>

 Storj is building a distributed cloud storage network.
 [Check out our white paper for more info!](https://storj.io/storj.pdf)
@@ -62,8 +62,6 @@ func RunCommand(runCfg *Config) *cobra.Command {
 		ctx, _ := process.Ctx(cmd)
 		log := zap.L()

-		runCfg.Debug.Address = *process.DebugAddrFlag
-
 		identity, err := runCfg.Identity.Load()
 		if err != nil {
 			log.Error("failed to load identity.", zap.Error(err))
@@ -134,8 +134,6 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)
 	log := zap.L()

-	runCfg.Debug.Address = *process.DebugAddrFlag
-
 	identity, err := getIdentity(ctx, &runCfg)
 	if err != nil {
 		log.Error("failed to load identity", zap.Error(err))
@@ -1,34 +1,51 @@
 ARG DOCKER_ARCH
 # Satellite UI static asset generation
-FROM node:16.11.1 as ui
+FROM node:18.17.0 as ui
 WORKDIR /app
 COPY web/satellite/ /app
 # Need to clean up (or ignore) local folders like node_modules, etc...
 RUN npm install
 RUN npm run build
+RUN npm run build-vuetify

 # Fetch ca-certificates file for arch independent builds below
 FROM debian:buster-slim as ca-cert
 RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates
 RUN update-ca-certificates

+# Install storj-up helper (for local/dev runs)
+FROM --platform=$TARGETPLATFORM golang:1.19 AS storjup
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    --mount=type=cache,target=/go/pkg/mod \
+    go install storj.io/storj-up@latest
+
+# Install dlv (for local/dev runs)
+FROM --platform=$TARGETPLATFORM golang:1.19 AS dlv
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    --mount=type=cache,target=/go/pkg/mod \
+    go install github.com/go-delve/delve/cmd/dlv@latest
+
 FROM ${DOCKER_ARCH:-amd64}/debian:buster-slim
 ARG TAG
 ARG GOARCH
 ENV GOARCH ${GOARCH}
 ENV CONF_PATH=/root/.local/share/storj/satellite \
     STORJ_CONSOLE_STATIC_DIR=/app \
+    STORJ_MAIL_TEMPLATE_PATH=/app/static/emails \
     STORJ_CONSOLE_ADDRESS=0.0.0.0:10100
+ENV PATH=$PATH:/app
 EXPOSE 7777
 EXPOSE 10100
 WORKDIR /app
 COPY --from=ui /app/static /app/static
 COPY --from=ui /app/dist /app/dist
+COPY --from=ui /app/dist_vuetify_poc /app/dist_vuetify_poc
 COPY --from=ca-cert /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
-COPY release/${TAG}/wasm/access.wasm /app/static/wasm/
-COPY release/${TAG}/wasm/wasm_exec.js /app/static/wasm/
-COPY release/${TAG}/wasm/access.wasm.br /app/static/wasm/
-COPY release/${TAG}/wasm/wasm_exec.js.br /app/static/wasm/
+COPY release/${TAG}/wasm /app/static/wasm
 COPY release/${TAG}/satellite_linux_${GOARCH:-amd64} /app/satellite
+COPY --from=storjup /go/bin/storj-up /usr/local/bin/storj-up
+COPY --from=dlv /go/bin/dlv /usr/local/bin/dlv
+# test identities for quick-start
+COPY --from=img.dev.storj.io/storjup/base:20230607-1 /var/lib/storj/identities /var/lib/storj/identities
 COPY cmd/satellite/entrypoint /entrypoint
 ENTRYPOINT ["/entrypoint"]
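A sketch of building this image locally; the Dockerfile path and tag value are placeholders, and `docker buildx` is assumed so the `--platform`-qualified helper stages resolve:

```shell
# TAG must point at an existing release/<TAG>/ directory containing the
# prebuilt satellite binary and wasm assets that the COPY lines expect.
docker buildx build \
  --build-arg TAG=<TAG> \
  --build-arg GOARCH=amd64 \
  -f <path-to-this-Dockerfile> \
  -t satellite:dev .
```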
@@ -11,6 +11,8 @@ import (
 	"storj.io/private/process"
 	"storj.io/private/version"
 	"storj.io/storj/satellite"
+	"storj.io/storj/satellite/accounting"
+	"storj.io/storj/satellite/accounting/live"
 	"storj.io/storj/satellite/metabase"
 	"storj.io/storj/satellite/satellitedb"
 )
@@ -19,8 +21,6 @@ func cmdAdminRun(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)
 	log := zap.L()

-	runCfg.Debug.Address = *process.DebugAddrFlag
-
 	identity, err := runCfg.Identity.Load()
 	if err != nil {
 		log.Error("Failed to load identity.", zap.Error(err))
@@ -47,7 +47,21 @@ func cmdAdminRun(cmd *cobra.Command, args []string) (err error) {
 		err = errs.Combine(err, metabaseDB.Close())
 	}()

-	peer, err := satellite.NewAdmin(log, identity, db, metabaseDB, version.Build, &runCfg.Config, process.AtomicLevel(cmd))
+	accountingCache, err := live.OpenCache(ctx, log.Named("live-accounting"), runCfg.LiveAccounting)
+	if err != nil {
+		if !accounting.ErrSystemOrNetError.Has(err) || accountingCache == nil {
+			return errs.New("Error instantiating live accounting cache: %w", err)
+		}
+
+		log.Warn("Unable to connect to live accounting cache. Verify connection",
+			zap.Error(err),
+		)
+	}
+	defer func() {
+		err = errs.Combine(err, accountingCache.Close())
+	}()
+
+	peer, err := satellite.NewAdmin(log, identity, db, metabaseDB, accountingCache, version.Build, &runCfg.Config, process.AtomicLevel(cmd))
 	if err != nil {
 		return err
 	}
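After this change the admin process opens the live accounting cache itself at startup, so deployments must point it at the same backend the API uses. An illustrative invocation only; the flag spelling is assumed from the `runCfg.LiveAccounting` config section and the backend URL is a placeholder:

```shell
satellite run admin --live-accounting.storage-backend "redis://<redis-host>:6379?db=1"
```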
@@ -26,8 +26,6 @@ func cmdAPIRun(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)
 	log := zap.L()

-	runCfg.Debug.Address = *process.DebugAddrFlag
-
 	identity, err := runCfg.Identity.Load()
 	if err != nil {
 		log.Error("Failed to load identity.", zap.Error(err))
@@ -21,8 +21,6 @@ func cmdAuditorRun(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)
 	log := zap.L()

-	runCfg.Debug.Address = *process.DebugAddrFlag
-
 	identity, err := runCfg.Identity.Load()
 	if err != nil {
 		log.Error("Failed to load identity.", zap.Error(err))
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
|
|
||||||
|
## production helpers
|
||||||
SETUP_PARAMS=""
|
SETUP_PARAMS=""
|
||||||
|
|
||||||
if [ -n "${IDENTITY_ADDR:-}" ]; then
|
if [ -n "${IDENTITY_ADDR:-}" ]; then
|
||||||
@ -21,6 +22,10 @@ if [ "${SATELLITE_API:-}" = "true" ]; then
|
|||||||
exec ./satellite run api $RUN_PARAMS "$@"
|
exec ./satellite run api $RUN_PARAMS "$@"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ "${SATELLITE_UI:-}" = "true" ]; then
|
||||||
|
exec ./satellite run ui $RUN_PARAMS "$@"
|
||||||
|
fi
|
||||||
|
|
||||||
if [ "${SATELLITE_GC:-}" = "true" ]; then
|
if [ "${SATELLITE_GC:-}" = "true" ]; then
|
||||||
exec ./satellite run garbage-collection $RUN_PARAMS "$@"
|
exec ./satellite run garbage-collection $RUN_PARAMS "$@"
|
||||||
fi
|
fi
|
||||||
@ -37,4 +42,63 @@ if [ "${SATELLITE_AUDITOR:-}" = "true" ]; then
|
|||||||
exec ./satellite run auditor $RUN_PARAMS "$@"
|
exec ./satellite run auditor $RUN_PARAMS "$@"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
exec ./satellite run $RUN_PARAMS "$@"
|
## storj-up helpers
|
||||||
|
if [ "${STORJUP_ROLE:-""}" ]; then
|
||||||
|
|
||||||
|
if [ "${STORJ_IDENTITY_DIR:-""}" ]; then
|
||||||
|
#Generate identity if missing
|
||||||
|
if [ ! -f "$STORJ_IDENTITY_DIR/identity.key" ]; then
|
||||||
|
if [ "$STORJ_USE_PREDEFINED_IDENTITY" ]; then
|
||||||
|
# use predictable, pre-generated identity
|
||||||
|
mkdir -p $(dirname $STORJ_IDENTITY_DIR)
|
||||||
|
cp -r /var/lib/storj/identities/$STORJ_USE_PREDEFINED_IDENTITY $STORJ_IDENTITY_DIR
|
||||||
|
else
|
||||||
|
identity --identity-dir $STORJ_IDENTITY_DIR --difficulty 8 create .
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${STORJ_WAIT_FOR_DB:-""}" ]; then
|
||||||
|
storj-up util wait-for-port cockroach:26257
|
||||||
|
storj-up util wait-for-port redis:6379
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${STORJUP_ROLE:-""}" == "satellite-api" ]; then
|
||||||
|
mkdir -p /var/lib/storj/.local
|
||||||
|
|
||||||
|
#only migrate first time
|
||||||
|
if [ ! -f "/var/lib/storj/.local/migrated" ]; then
|
||||||
|
satellite run migration --identity-dir $STORJ_IDENTITY_DIR
|
||||||
|
touch /var/lib/storj/.local/migrated
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# default config generated without arguments is misleading
|
||||||
|
rm /root/.local/share/storj/satellite/config.yaml
|
||||||
|
|
||||||
|
mkdir -p /var/lib/storj/.local/share/storj/satellite || true
|
||||||
|
|
||||||
|
if [ "${GO_DLV:-""}" ]; then
|
||||||
|
echo "Starting with go dlv"
|
||||||
|
|
||||||
|
#absolute file path is required
|
||||||
|
CMD=$(which $1)
|
||||||
|
shift
|
||||||
|
/usr/local/bin/dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec --check-go-version=false -- $CMD "$@"
|
||||||
|
exit $?
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# for backward compatibility reason, we use argument as command, only if it's an executable (and use it as satellite flags oterwise)
|
||||||
|
set +eo nounset
|
||||||
|
which "$1" > /dev/null
|
||||||
|
VALID_EXECUTABLE=$?
|
||||||
|
set -eo nounset
|
||||||
|
|
||||||
|
if [ $VALID_EXECUTABLE -eq 0 ]; then
|
||||||
|
# this is a full command (what storj-up uses)
|
||||||
|
exec "$@"
|
||||||
|
else
|
||||||
|
# legacy, run-only parameters
|
||||||
|
exec ./satellite run $RUN_PARAMS "$@"
|
||||||
|
fi
|
||||||
|
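The entrypoint now recognizes a dedicated UI process plus a storj-up development mode. The environment variables below are the ones the script checks; the image name is a placeholder:

```shell
# Run only the satellite console UI (backed by the new `satellite run ui` subcommand).
docker run --rm -e SATELLITE_UI=true <satellite-image>

# storj-up development mode: provision an identity, wait for the databases,
# run first-time migration, and wrap the process in the bundled Delve debugger.
docker run --rm \
  -e STORJUP_ROLE=satellite-api \
  -e STORJ_IDENTITY_DIR=/var/lib/storj/identity \
  -e STORJ_WAIT_FOR_DB=true \
  -e GO_DLV=true -p 2345:2345 \
  <satellite-image> satellite run api
```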
@@ -20,8 +20,6 @@ func cmdGCBloomFilterRun(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)
 	log := zap.L()

-	runCfg.Debug.Address = *process.DebugAddrFlag
-
 	db, err := satellitedb.Open(ctx, log.Named("db"), runCfg.Database, satellitedb.Options{ApplicationName: "satellite-gc-bloomfilter"})
 	if err != nil {
 		return errs.New("Error starting master database on satellite GC: %+v", err)
@@ -20,8 +20,6 @@ func cmdGCRun(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)
 	log := zap.L()

-	runCfg.Debug.Address = *process.DebugAddrFlag
-
 	identity, err := runCfg.Identity.Load()
 	if err != nil {
 		log.Error("Failed to load identity.", zap.Error(err))
@@ -27,7 +27,7 @@ import (
 )

 // generateGracefulExitCSV creates a report with graceful exit data for exiting or exited nodes in a given period.
-func generateGracefulExitCSV(ctx context.Context, completed bool, start time.Time, end time.Time, output io.Writer) error {
+func generateGracefulExitCSV(ctx context.Context, timeBased bool, completed bool, start time.Time, end time.Time, output io.Writer) error {
 	db, err := satellitedb.Open(ctx, zap.L().Named("db"), reportsGracefulExitCfg.Database, satellitedb.Options{ApplicationName: "satellite-gracefulexit"})
 	if err != nil {
 		return errs.New("error connecting to master database on satellite: %+v", err)
@@ -67,11 +67,14 @@ func generateGracefulExitCSV(ctx context.Context, completed bool, start time.Tim
 		if err != nil {
 			return err
 		}
-		exitProgress, err := db.GracefulExit().GetProgress(ctx, id)
-		if gracefulexit.ErrNodeNotFound.Has(err) {
-			exitProgress = &gracefulexit.Progress{}
-		} else if err != nil {
-			return err
+		exitProgress := &gracefulexit.Progress{}
+		if !timeBased {
+			exitProgress, err = db.GracefulExit().GetProgress(ctx, id)
+			if gracefulexit.ErrNodeNotFound.Has(err) {
+				exitProgress = &gracefulexit.Progress{}
+			} else if err != nil {
+				return err
+			}
 		}

 		exitStatus := node.ExitStatus
|
|||||||
"storj.io/storj/satellite/accounting/live"
|
"storj.io/storj/satellite/accounting/live"
|
||||||
"storj.io/storj/satellite/compensation"
|
"storj.io/storj/satellite/compensation"
|
||||||
"storj.io/storj/satellite/metabase"
|
"storj.io/storj/satellite/metabase"
|
||||||
"storj.io/storj/satellite/overlay"
|
"storj.io/storj/satellite/nodeselection"
|
||||||
"storj.io/storj/satellite/payments/stripe"
|
"storj.io/storj/satellite/payments/stripe"
|
||||||
"storj.io/storj/satellite/satellitedb"
|
"storj.io/storj/satellite/satellitedb"
|
||||||
)
|
)
|
||||||
@ -100,6 +100,11 @@ var (
|
|||||||
Short: "Run the satellite API",
|
Short: "Run the satellite API",
|
||||||
RunE: cmdAPIRun,
|
RunE: cmdAPIRun,
|
||||||
}
|
}
|
||||||
|
runUICmd = &cobra.Command{
|
||||||
|
Use: "ui",
|
||||||
|
Short: "Run the satellite UI",
|
||||||
|
RunE: cmdUIRun,
|
||||||
|
}
|
||||||
runRepairerCmd = &cobra.Command{
|
runRepairerCmd = &cobra.Command{
|
||||||
Use: "repair",
|
Use: "repair",
|
||||||
Short: "Run the repair service",
|
Short: "Run the repair service",
|
||||||
@ -221,6 +226,9 @@ var (
|
|||||||
Long: "Creates stripe invoice line items for stripe customer balances obtained from past invoices and other miscellaneous charges.",
|
Long: "Creates stripe invoice line items for stripe customer balances obtained from past invoices and other miscellaneous charges.",
|
||||||
RunE: cmdCreateCustomerBalanceInvoiceItems,
|
RunE: cmdCreateCustomerBalanceInvoiceItems,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
aggregate = false
|
||||||
|
|
||||||
prepareCustomerInvoiceRecordsCmd = &cobra.Command{
|
prepareCustomerInvoiceRecordsCmd = &cobra.Command{
|
||||||
Use: "prepare-invoice-records [period]",
|
Use: "prepare-invoice-records [period]",
|
||||||
Short: "Prepares invoice project records",
|
Short: "Prepares invoice project records",
|
||||||
@ -235,6 +243,13 @@ var (
|
|||||||
Args: cobra.ExactArgs(1),
|
Args: cobra.ExactArgs(1),
|
||||||
RunE: cmdCreateCustomerProjectInvoiceItems,
|
RunE: cmdCreateCustomerProjectInvoiceItems,
|
||||||
}
|
}
|
||||||
|
createCustomerAggregatedProjectInvoiceItemsCmd = &cobra.Command{
|
||||||
|
Use: "create-aggregated-project-invoice-items [period]",
|
||||||
|
Short: "Creates aggregated stripe invoice line items for project charges",
|
||||||
|
Long: "Creates aggregated stripe invoice line items for not consumed project records.",
|
||||||
|
Args: cobra.ExactArgs(1),
|
||||||
|
RunE: cmdCreateAggregatedCustomerProjectInvoiceItems,
|
||||||
|
}
|
||||||
createCustomerInvoicesCmd = &cobra.Command{
|
createCustomerInvoicesCmd = &cobra.Command{
|
||||||
Use: "create-invoices [period]",
|
Use: "create-invoices [period]",
|
||||||
Short: "Creates stripe invoices from pending invoice items",
|
Short: "Creates stripe invoices from pending invoice items",
|
||||||
@ -255,12 +270,33 @@ var (
|
|||||||
Long: "Finalizes all draft stripe invoices known to satellite's stripe account.",
|
Long: "Finalizes all draft stripe invoices known to satellite's stripe account.",
|
||||||
RunE: cmdFinalizeCustomerInvoices,
|
RunE: cmdFinalizeCustomerInvoices,
|
||||||
}
|
}
|
||||||
payCustomerInvoicesCmd = &cobra.Command{
|
payInvoicesWithTokenCmd = &cobra.Command{
|
||||||
|
Use: "pay-customer-invoices",
|
||||||
|
Short: "pay open finalized invoices for customer",
|
||||||
|
Long: "attempts payment on any open finalized invoices for a specific user.",
|
||||||
|
Args: cobra.ExactArgs(1),
|
||||||
|
RunE: cmdPayCustomerInvoices,
|
||||||
|
}
|
||||||
|
payAllInvoicesCmd = &cobra.Command{
|
||||||
Use: "pay-invoices",
|
Use: "pay-invoices",
|
||||||
Short: "pay finalized invoices",
|
Short: "pay finalized invoices",
|
||||||
Long: "attempts payment on all open finalized invoices according to subscriptions settings.",
|
Long: "attempts payment on all open finalized invoices according to subscriptions settings.",
|
||||||
Args: cobra.ExactArgs(1),
|
Args: cobra.ExactArgs(1),
|
||||||
RunE: cmdPayCustomerInvoices,
|
RunE: cmdPayAllInvoices,
|
||||||
|
}
|
||||||
|
failPendingInvoiceTokenPaymentCmd = &cobra.Command{
|
||||||
|
Use: "fail-token-payment",
|
||||||
|
Short: "fail pending invoice token payment",
|
||||||
|
Long: "attempts to transition the token invoice payments that are stuck in a pending state to failed.",
|
||||||
|
Args: cobra.ExactArgs(1),
|
||||||
|
RunE: cmdFailPendingInvoiceTokenPayments,
|
||||||
|
}
|
||||||
|
completePendingInvoiceTokenPaymentCmd = &cobra.Command{
|
||||||
|
Use: "complete-token-payment",
|
||||||
|
Short: "complete pending invoice token payment",
|
||||||
|
Long: "attempts to transition the token invoice payments that are stuck in a pending state to complete.",
|
||||||
|
Args: cobra.ExactArgs(1),
|
||||||
|
RunE: cmdCompletePendingInvoiceTokenPayments,
|
||||||
}
|
}
|
||||||
stripeCustomerCmd = &cobra.Command{
|
stripeCustomerCmd = &cobra.Command{
|
||||||
Use: "ensure-stripe-customer",
|
Use: "ensure-stripe-customer",
|
||||||
@ -342,6 +378,7 @@ var (
|
|||||||
Database string `help:"satellite database connection string" releaseDefault:"postgres://" devDefault:"postgres://"`
|
Database string `help:"satellite database connection string" releaseDefault:"postgres://" devDefault:"postgres://"`
|
||||||
Output string `help:"destination of report output" default:""`
|
Output string `help:"destination of report output" default:""`
|
||||||
Completed bool `help:"whether to output (initiated and completed) or (initiated and not completed)" default:"false"`
|
Completed bool `help:"whether to output (initiated and completed) or (initiated and not completed)" default:"false"`
|
||||||
|
TimeBased bool `help:"whether the satellite is using time-based graceful exit (and thus, whether to include piece transfer progress in output)" default:"false"`
|
||||||
}
|
}
|
||||||
reportsVerifyGracefulExitReceiptCfg struct {
|
reportsVerifyGracefulExitReceiptCfg struct {
|
||||||
}
|
}
|
||||||
@ -366,6 +403,7 @@ func init() {
|
|||||||
rootCmd.AddCommand(runCmd)
|
rootCmd.AddCommand(runCmd)
|
||||||
runCmd.AddCommand(runMigrationCmd)
|
runCmd.AddCommand(runMigrationCmd)
|
||||||
runCmd.AddCommand(runAPICmd)
|
runCmd.AddCommand(runAPICmd)
|
||||||
|
runCmd.AddCommand(runUICmd)
|
||||||
runCmd.AddCommand(runAdminCmd)
|
runCmd.AddCommand(runAdminCmd)
|
||||||
runCmd.AddCommand(runRepairerCmd)
|
runCmd.AddCommand(runRepairerCmd)
|
||||||
runCmd.AddCommand(runAuditorCmd)
|
runCmd.AddCommand(runAuditorCmd)
|
||||||
@ -394,16 +432,23 @@ func init() {
 	billingCmd.AddCommand(setInvoiceStatusCmd)
 	billingCmd.AddCommand(createCustomerBalanceInvoiceItemsCmd)
 	billingCmd.AddCommand(prepareCustomerInvoiceRecordsCmd)
+	prepareCustomerInvoiceRecordsCmd.Flags().BoolVar(&aggregate, "aggregate", false, "Used to enable creation of to be aggregated project records in case users have many projects (more than 83).")
 	billingCmd.AddCommand(createCustomerProjectInvoiceItemsCmd)
+	billingCmd.AddCommand(createCustomerAggregatedProjectInvoiceItemsCmd)
 	billingCmd.AddCommand(createCustomerInvoicesCmd)
 	billingCmd.AddCommand(generateCustomerInvoicesCmd)
+	generateCustomerInvoicesCmd.Flags().BoolVar(&aggregate, "aggregate", false, "Used to enable invoice items aggregation in case users have many projects (more than 83).")
 	billingCmd.AddCommand(finalizeCustomerInvoicesCmd)
-	billingCmd.AddCommand(payCustomerInvoicesCmd)
+	billingCmd.AddCommand(payInvoicesWithTokenCmd)
+	billingCmd.AddCommand(payAllInvoicesCmd)
+	billingCmd.AddCommand(failPendingInvoiceTokenPaymentCmd)
+	billingCmd.AddCommand(completePendingInvoiceTokenPaymentCmd)
 	billingCmd.AddCommand(stripeCustomerCmd)
 	consistencyCmd.AddCommand(consistencyGECleanupCmd)
 	process.Bind(runCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runMigrationCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runAPICmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(runUICmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runAdminCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runRepairerCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runAuditorCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
@ -429,10 +474,14 @@ func init() {
 	process.Bind(createCustomerBalanceInvoiceItemsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(prepareCustomerInvoiceRecordsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(createCustomerProjectInvoiceItemsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(createCustomerAggregatedProjectInvoiceItemsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(createCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(generateCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(finalizeCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
-	process.Bind(payCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(payInvoicesWithTokenCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(payAllInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(failPendingInvoiceTokenPaymentCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(completePendingInvoiceTokenPaymentCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(stripeCustomerCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(consistencyGECleanupCmd, &consistencyGECleanupCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(fixLastNetsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
@ -448,8 +497,6 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)
 	log := zap.L()
-
-	runCfg.Debug.Address = *process.DebugAddrFlag
 
 	identity, err := runCfg.Identity.Load()
 	if err != nil {
 		log.Error("Failed to load identity.", zap.Error(err))
@ -644,7 +691,7 @@ func cmdReportsGracefulExit(cmd *cobra.Command, args []string) (err error) {
 
 	// send output to stdout
 	if reportsGracefulExitCfg.Output == "" {
-		return generateGracefulExitCSV(ctx, reportsGracefulExitCfg.Completed, start, end, os.Stdout)
+		return generateGracefulExitCSV(ctx, reportsGracefulExitCfg.TimeBased, reportsGracefulExitCfg.Completed, start, end, os.Stdout)
 	}
 
 	// send output to file
@ -657,7 +704,7 @@ func cmdReportsGracefulExit(cmd *cobra.Command, args []string) (err error) {
 		err = errs.Combine(err, file.Close())
 	}()
 
-	return generateGracefulExitCSV(ctx, reportsGracefulExitCfg.Completed, start, end, file)
+	return generateGracefulExitCSV(ctx, reportsGracefulExitCfg.TimeBased, reportsGracefulExitCfg.Completed, start, end, file)
 }
 
 func cmdNodeUsage(cmd *cobra.Command, args []string) (err error) {
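The `TimeBased` flag is threaded into `generateGracefulExitCSV` through both call sites, which share the stdout-or-file pattern above. A condensed sketch of that pattern, using the standard library's `errors.Join` where the satellite code uses `errs.Combine`:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"os"
)

// writeReport mirrors the shape of cmdReportsGracefulExit: send the report
// to stdout when no output path is configured, otherwise to a file, folding
// the Close error into the returned error.
func writeReport(output string, generate func(io.Writer) error) (err error) {
	if output == "" {
		return generate(os.Stdout)
	}
	file, err := os.Create(output)
	if err != nil {
		return err
	}
	defer func() { err = errors.Join(err, file.Close()) }()
	return generate(file)
}

func main() {
	err := writeReport("", func(w io.Writer) error {
		_, err := fmt.Fprintln(w, "node-id,piece-count")
		return err
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```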
@ -808,7 +855,7 @@ func cmdPrepareCustomerInvoiceRecords(cmd *cobra.Command, args []string) (err er
 	}
 
 	return runBillingCmd(ctx, func(ctx context.Context, payments *stripe.Service, _ satellite.DB) error {
-		return payments.PrepareInvoiceProjectRecords(ctx, periodStart)
+		return payments.PrepareInvoiceProjectRecords(ctx, periodStart, aggregate)
 	})
 }
 
@ -825,6 +872,19 @@ func cmdCreateCustomerProjectInvoiceItems(cmd *cobra.Command, args []string) (err error) {
 	})
 }
 
+func cmdCreateAggregatedCustomerProjectInvoiceItems(cmd *cobra.Command, args []string) (err error) {
+	ctx, _ := process.Ctx(cmd)
+
+	periodStart, err := parseYearMonth(args[0])
+	if err != nil {
+		return err
+	}
+
+	return runBillingCmd(ctx, func(ctx context.Context, payments *stripe.Service, _ satellite.DB) error {
+		return payments.InvoiceApplyToBeAggregatedProjectRecords(ctx, periodStart)
+	})
+}
+
 func cmdCreateCustomerInvoices(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)
 
@ -847,7 +907,7 @@ func cmdGenerateCustomerInvoices(cmd *cobra.Command, args []string) (err error)
 	}
 
 	return runBillingCmd(ctx, func(ctx context.Context, payments *stripe.Service, _ satellite.DB) error {
-		return payments.GenerateInvoices(ctx, periodStart)
+		return payments.GenerateInvoices(ctx, periodStart, aggregate)
 	})
 }
 
@ -862,6 +922,18 @@ func cmdFinalizeCustomerInvoices(cmd *cobra.Command, args []string) (err error)
 func cmdPayCustomerInvoices(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)
 
+	return runBillingCmd(ctx, func(ctx context.Context, payments *stripe.Service, _ satellite.DB) error {
+		err := payments.InvoiceApplyCustomerTokenBalance(ctx, args[0])
+		if err != nil {
+			return errs.New("error applying native token payments to invoice for customer: %v", err)
+		}
+		return payments.PayCustomerInvoices(ctx, args[0])
+	})
+}
+
+func cmdPayAllInvoices(cmd *cobra.Command, args []string) (err error) {
+	ctx, _ := process.Ctx(cmd)
+
 	periodStart, err := parseYearMonth(args[0])
 	if err != nil {
 		return err
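All of these billing handlers funnel their single argument through `parseYearMonth`, which is defined outside this diff. A plausible sketch of what such a helper does, assuming a `YYYY-MM` input format:

```go
package main

import (
	"fmt"
	"time"
)

// parseYearMonthSketch is an assumed stand-in for the parseYearMonth helper
// the commands above call: accept "YYYY-MM" and return the UTC start of
// that month.
func parseYearMonthSketch(s string) (time.Time, error) {
	t, err := time.Parse("2006-01", s)
	if err != nil {
		return time.Time{}, err
	}
	return time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, time.UTC), nil
}

func main() {
	start, err := parseYearMonthSketch("2023-09")
	fmt.Println(start, err) // 2023-09-01 00:00:00 +0000 UTC <nil>
}
```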
@ -876,6 +948,20 @@ func cmdPayCustomerInvoices(cmd *cobra.Command, args []string) (err error) {
 	})
 }
 
+func cmdFailPendingInvoiceTokenPayments(cmd *cobra.Command, args []string) (err error) {
+	ctx, _ := process.Ctx(cmd)
+	return runBillingCmd(ctx, func(ctx context.Context, payments *stripe.Service, _ satellite.DB) error {
+		return payments.FailPendingInvoiceTokenPayments(ctx, strings.Split(args[0], ","))
+	})
+}
+
+func cmdCompletePendingInvoiceTokenPayments(cmd *cobra.Command, args []string) (err error) {
+	ctx, _ := process.Ctx(cmd)
+	return runBillingCmd(ctx, func(ctx context.Context, payments *stripe.Service, _ satellite.DB) error {
+		return payments.CompletePendingInvoiceTokenPayments(ctx, strings.Split(args[0], ","))
+	})
+}
+
 func cmdStripeCustomer(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)
 
@ -885,6 +971,9 @@ func cmdStripeCustomer(cmd *cobra.Command, args []string) (err error) {
 func cmdConsistencyGECleanup(cmd *cobra.Command, args []string) error {
 	ctx, _ := process.Ctx(cmd)
 
+	if runCfg.GracefulExit.TimeBased {
+		return errs.New("this command is not supported with time-based graceful exit")
+	}
 	before, err := time.Parse("2006-01-02", consistencyGECleanupCfg.Before)
 	if err != nil {
 		return errs.New("before flag value isn't of the expected format. %+v", err)
@ -932,7 +1021,7 @@ func cmdRestoreTrash(cmd *cobra.Command, args []string) error {
 	successes := new(int64)
 	failures := new(int64)
 
-	undelete := func(node *overlay.SelectedNode) {
+	undelete := func(node *nodeselection.SelectedNode) {
 		log.Info("starting restore trash", zap.String("Node ID", node.ID.String()))
 
 		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
@ -966,9 +1055,9 @@ func cmdRestoreTrash(cmd *cobra.Command, args []string) error {
 		log.Info("successful restore trash", zap.String("Node ID", node.ID.String()))
 	}
 
-	var nodes []*overlay.SelectedNode
+	var nodes []*nodeselection.SelectedNode
 	if len(args) == 0 {
-		err = db.OverlayCache().IterateAllContactedNodes(ctx, func(ctx context.Context, node *overlay.SelectedNode) error {
+		err = db.OverlayCache().IterateAllContactedNodes(ctx, func(ctx context.Context, node *nodeselection.SelectedNode) error {
 			nodes = append(nodes, node)
 			return nil
 		})
@ -985,7 +1074,7 @@ func cmdRestoreTrash(cmd *cobra.Command, args []string) error {
 		if err != nil {
 			return err
 		}
-		nodes = append(nodes, &overlay.SelectedNode{
+		nodes = append(nodes, &nodeselection.SelectedNode{
 			ID:      dossier.Id,
 			Address: dossier.Address,
 			LastNet: dossier.LastNet,
@ -18,8 +18,6 @@ func cmdRangedLoopRun(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)
 	log := zap.L()
-
-	runCfg.Debug.Address = *process.DebugAddrFlag
 
 	db, err := satellitedb.Open(ctx, log.Named("db"), runCfg.Database, satellitedb.Options{ApplicationName: "satellite-rangedloop"})
 	if err != nil {
 		return errs.New("Error starting master database on satellite rangedloop: %+v", err)
@ -16,7 +16,6 @@ import (
 	"time"
 
 	"github.com/spf13/cobra"
-	"github.com/vivint/infectious"
 	"github.com/zeebo/errs"
 	"go.uber.org/zap"
 	"golang.org/x/sync/errgroup"
@ -94,7 +93,12 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
 
 	dialer := rpc.NewDefaultDialer(tlsOptions)
 
-	overlay, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
+	placement, err := config.Placement.Parse()
+	if err != nil {
+		return err
+	}
+
+	overlayService, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), placement.CreateFilters, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
 	if err != nil {
 		return err
 	}
@ -102,8 +106,9 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
 	orders, err := orders.NewService(
 		log.Named("orders"),
 		signing.SignerFromFullIdentity(identity),
-		overlay,
+		overlayService,
 		orders.NewNoopDB(),
+		placement.CreateFilters,
 		config.Orders,
 	)
 	if err != nil {
@ -122,9 +127,10 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
 		log.Named("segment-repair"),
 		metabaseDB,
 		orders,
-		overlay,
+		overlayService,
 		nil, // TODO add noop version
 		ecRepairer,
+		placement.CreateFilters,
 		config.Checker.RepairOverrides,
 		config.Repairer,
 	)
@ -132,7 +138,7 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
 	// TODO reorganize to avoid using peer.
 
 	peer := &satellite.Repairer{}
-	peer.Overlay = overlay
+	peer.Overlay = overlayService
 	peer.Orders.Service = orders
 	peer.EcRepairer = ecRepairer
 	peer.SegmentRepairer = segmentRepairer
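The recurring change in this file is dependency threading: `config.Placement.Parse()` builds the placement rules once, and the resulting `CreateFilters` function is handed to the overlay, orders, and repair services instead of each deriving node filters itself. A toy sketch of the shape of that wiring; the types and the rule are invented for illustration, not the real storj APIs:

```go
package main

import "fmt"

// nodeFilter is a stand-in for the real node filter type.
type nodeFilter func(countryCode string) bool

type ordersService struct{ filterFor func(placement int) nodeFilter }
type segmentRepairer struct{ filterFor func(placement int) nodeFilter }

// createFilters plays the role of placement.CreateFilters: one shared
// source of truth for which nodes a placement constraint accepts.
func createFilters(placement int) nodeFilter {
	if placement == 10 {
		return func(cc string) bool { return cc == "DE" } // toy rule
	}
	return func(string) bool { return true }
}

func main() {
	o := ordersService{filterFor: createFilters}
	r := segmentRepairer{filterFor: createFilters}
	fmt.Println(o.filterFor(10)("DE"), r.filterFor(10)("US")) // true false
}
```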
@ -274,10 +280,8 @@ func reuploadSegment(ctx context.Context, log *zap.Logger, peer *satellite.Repai
 		return errs.New("not enough new nodes were found for repair: min %v got %v", redundancy.RepairThreshold(), len(newNodes))
 	}
 
-	optimalThresholdMultiplier := float64(1) // is this value fine?
-	numHealthyInExcludedCountries := 0
 	putLimits, putPrivateKey, err := peer.Orders.Service.CreatePutRepairOrderLimits(ctx, segment, make([]*pb.AddressedOrderLimit, len(newNodes)),
-		make(map[int32]struct{}), newNodes, optimalThresholdMultiplier, numHealthyInExcludedCountries)
+		make(map[uint16]struct{}), newNodes)
 	if err != nil {
 		return errs.New("could not create PUT_REPAIR order limits: %w", err)
 	}
@ -376,7 +380,7 @@ func downloadSegment(ctx context.Context, log *zap.Logger, peer *satellite.Repairer
 			len(pieceReaders), redundancy.RequiredCount())
 	}
 
-	fec, err := infectious.NewFEC(redundancy.RequiredCount(), redundancy.TotalCount())
+	fec, err := eestream.NewFEC(redundancy.RequiredCount(), redundancy.TotalCount())
 	if err != nil {
 		return nil, failedDownloads, err
 	}
@ -21,8 +21,6 @@ func cmdRepairerRun(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)
 	log := zap.L()
-
-	runCfg.Debug.Address = *process.DebugAddrFlag
 
 	identity, err := runCfg.Identity.Load()
 	if err != nil {
 		log.Error("Failed to load identity.", zap.Error(err))
45  cmd/satellite/ui.go  Normal file
@ -0,0 +1,45 @@
+// Copyright (C) 2023 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package main
+
+import (
+	"github.com/spf13/cobra"
+	"github.com/zeebo/errs"
+	"go.uber.org/zap"
+
+	"storj.io/private/process"
+	"storj.io/storj/satellite"
+)
+
+func cmdUIRun(cmd *cobra.Command, args []string) (err error) {
+	ctx, _ := process.Ctx(cmd)
+	log := zap.L()
+
+	identity, err := runCfg.Identity.Load()
+	if err != nil {
+		log.Error("Failed to load identity.", zap.Error(err))
+		return errs.New("Failed to load identity: %+v", err)
+	}
+
+	satAddr := runCfg.Config.Contact.ExternalAddress
+	if satAddr == "" {
+		return errs.New("cannot run satellite ui if contact.external-address is not set")
+	}
+	apiAddress := runCfg.Config.Console.ExternalAddress
+	if apiAddress == "" {
+		apiAddress = runCfg.Config.Console.Address
+	}
+	peer, err := satellite.NewUI(log, identity, &runCfg.Config, process.AtomicLevel(cmd), satAddr, apiAddress)
+	if err != nil {
+		return err
+	}
+
+	if err := process.InitMetricsWithHostname(ctx, log, nil); err != nil {
+		log.Warn("Failed to initialize telemetry batcher on satellite api", zap.Error(err))
+	}
+
+	runError := peer.Run(ctx)
+	closeError := peer.Close()
+	return errs.Combine(runError, closeError)
+}
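`cmdUIRun` ends with a pattern used across storj peers: run the peer, then close it, and report both errors together so neither masks the other. A standalone sketch of that pattern, with the standard library's `errors.Join` standing in for `errs.Combine` and a fake peer in place of `satellite.NewUI`:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// fakePeer stands in for the UI peer: Run blocks until the context ends,
// Close releases resources, and both may fail independently.
type fakePeer struct{}

func (fakePeer) Run(ctx context.Context) error { <-ctx.Done(); return ctx.Err() }
func (fakePeer) Close() error                  { return nil }

func runAndClose(ctx context.Context, peer fakePeer) error {
	runError := peer.Run(ctx)
	closeError := peer.Close()
	return errors.Join(runError, closeError)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // pretend the process was told to shut down
	fmt.Println(runAndClose(ctx, fakePeer{}))
}
```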
243  cmd/storagenode/cmd_forget_satellite.go  Normal file
@ -0,0 +1,243 @@
+// Copyright (C) 2023 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package main
+
+import (
+	"context"
+
+	"github.com/spf13/cobra"
+	"github.com/zeebo/errs"
+	"go.uber.org/zap"
+
+	"storj.io/common/storj"
+	"storj.io/private/cfgstruct"
+	"storj.io/private/process"
+	"storj.io/storj/storagenode"
+	"storj.io/storj/storagenode/pieces"
+	"storj.io/storj/storagenode/satellites"
+	"storj.io/storj/storagenode/storagenodedb"
+	"storj.io/storj/storagenode/trust"
+)
+
+// forgetSatelliteCfg defines configuration for the forget-satellite command.
+type forgetSatelliteCfg struct {
+	storagenode.Config
+
+	SatelliteIDs []string `internal:"true"`
+
+	AllUntrusted bool `help:"Clean up all untrusted satellites" default:"false"`
+	Force        bool `help:"Force removal of satellite data if not listed in satelliteDB cache or marked as untrusted" default:"false"`
+}
+
+func newForgetSatelliteCmd(f *Factory) *cobra.Command {
+	var cfg forgetSatelliteCfg
+	cmd := &cobra.Command{
+		Use:   "forget-satellite [satellite_IDs...]",
+		Short: "Remove an untrusted satellite from the trust cache and clean up its data",
+		Long: "Forget a satellite.\n" +
+			"The command shows the list of the available untrusted satellites " +
+			"and removes the selected satellites from the trust cache and cleans up the available data",
+		Example: `
+# Specify satellite ID to forget
+$ storagenode forget-satellite --identity-dir /path/to/identityDir --config-dir /path/to/configDir satellite_ID
+
+# Specify multiple satellite IDs to forget
+$ storagenode forget-satellite satellite_ID1 satellite_ID2 --identity-dir /path/to/identityDir --config-dir /path/to/configDir
+
+# Clean up all untrusted satellites
+# This checks for untrusted satellites in both the satelliteDB cache and the excluded satellites list
+# specified in the config.yaml file
+$ storagenode forget-satellite --all-untrusted --identity-dir /path/to/identityDir --config-dir /path/to/configDir
+
+# For force removal of data for untrusted satellites that are not listed in satelliteDB cache or marked as untrusted
+$ storagenode forget-satellite satellite_ID1 satellite_ID2 --force --identity-dir /path/to/identityDir --config-dir /path/to/configDir
+`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			cfg.SatelliteIDs = args
+			if len(args) > 0 && cfg.AllUntrusted {
+				return errs.New("cannot specify both satellite IDs and --all-untrusted")
+			}
+
+			if len(args) == 0 && !cfg.AllUntrusted {
+				return errs.New("must specify either satellite ID(s) as arguments or --all-untrusted flag")
+			}
+
+			if cfg.AllUntrusted && cfg.Force {
+				return errs.New("cannot specify both --all-untrusted and --force")
+			}
+
+			ctx, _ := process.Ctx(cmd)
+			return cmdForgetSatellite(ctx, zap.L(), &cfg)
+		},
+		Annotations: map[string]string{"type": "helper"},
+	}
+
+	process.Bind(cmd, &cfg, f.Defaults, cfgstruct.ConfDir(f.ConfDir), cfgstruct.IdentityDir(f.IdentityDir))
+
+	return cmd
+}
+
+func cmdForgetSatellite(ctx context.Context, log *zap.Logger, cfg *forgetSatelliteCfg) (err error) {
+	// we don't really need the identity, but we load it as a sanity check
+	ident, err := cfg.Identity.Load()
+	if err != nil {
+		log.Fatal("Failed to load identity.", zap.Error(err))
+	} else {
+		log.Info("Identity loaded.", zap.Stringer("Node ID", ident.ID))
+	}
+
+	db, err := storagenodedb.OpenExisting(ctx, log.Named("db"), cfg.DatabaseConfig())
+	if err != nil {
+		return errs.New("Error starting master database on storagenode: %+v", err)
+	}
+	defer func() { err = errs.Combine(err, db.Close()) }()
+
+	satelliteDB := db.Satellites()
+
+	// get list of excluded satellites
+	excludedSatellites := make(map[storj.NodeID]bool)
+	for _, rule := range cfg.Storage2.Trust.Exclusions.Rules {
+		url, err := trust.ParseSatelliteURL(rule.String())
+		if err != nil {
+			log.Warn("Failed to parse satellite URL from exclusions list", zap.Error(err), zap.String("rule", rule.String()))
+			continue
+		}
+		excludedSatellites[url.ID] = false // false means the satellite has not been cleaned up yet.
+	}
+
+	if len(cfg.SatelliteIDs) > 0 {
+		for _, satelliteIDStr := range cfg.SatelliteIDs {
+			satelliteID, err := storj.NodeIDFromString(satelliteIDStr)
+			if err != nil {
+				return err
+			}
+
+			satellite := satellites.Satellite{
+				SatelliteID: satelliteID,
+				Status:      satellites.Untrusted,
+			}
+
+			// check if satellite is excluded
+			cleanedUp, isExcluded := excludedSatellites[satelliteID]
+			if !isExcluded {
+				sat, err := satelliteDB.GetSatellite(ctx, satelliteID)
+				if err != nil {
+					return err
+				}
+				if !sat.SatelliteID.IsZero() {
+					satellite = sat
+				}
+				if sat.SatelliteID.IsZero() {
+					if !cfg.Force {
+						return errs.New("satellite %v not found. Specify --force to force data deletion", satelliteID)
+					}
+					log.Warn("Satellite not found in satelliteDB cache. Forcing removal of satellite data.", zap.Stringer("satelliteID", satelliteID))
+				}
+			}
+
+			if cleanedUp {
+				log.Warn("Satellite already cleaned up", zap.Stringer("satelliteID", satelliteID))
+				continue
+			}
+
+			err = cleanupSatellite(ctx, log, cfg, db, satellite)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		sats, err := satelliteDB.GetSatellites(ctx)
+		if err != nil {
+			return err
+		}
+
+		hasUntrusted := false
+		for _, satellite := range sats {
+			if satellite.Status != satellites.Untrusted {
+				continue
+			}
+			hasUntrusted = true
+			err = cleanupSatellite(ctx, log, cfg, db, satellite)
+			if err != nil {
+				return err
+			}
+			excludedSatellites[satellite.SatelliteID] = true // true means the satellite has been cleaned up.
+		}
+
+		// clean up excluded satellites that might not be in the satelliteDB cache.
+		for satelliteID, cleanedUp := range excludedSatellites {
+			if !cleanedUp {
+				satellite := satellites.Satellite{
+					SatelliteID: satelliteID,
+					Status:      satellites.Untrusted,
+				}
+				hasUntrusted = true
+				err = cleanupSatellite(ctx, log, cfg, db, satellite)
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		if !hasUntrusted {
+			log.Info("No untrusted satellites found. You can add satellites to the exclusions list in the config.yaml file.")
+		}
+	}
+
+	return nil
+}
+
+func cleanupSatellite(ctx context.Context, log *zap.Logger, cfg *forgetSatelliteCfg, db *storagenodedb.DB, satellite satellites.Satellite) error {
+	if satellite.Status != satellites.Untrusted && !cfg.Force {
+		log.Error("Satellite is not untrusted. Skipping", zap.Stringer("satelliteID", satellite.SatelliteID))
+		return nil
+	}
+
+	log.Info("Removing satellite from trust cache.", zap.Stringer("satelliteID", satellite.SatelliteID))
+	cache, err := trust.LoadCache(cfg.Storage2.Trust.CachePath)
+	if err != nil {
+		return err
+	}
+
+	deleted := cache.DeleteSatelliteEntry(satellite.SatelliteID)
+	if deleted {
+		if err := cache.Save(ctx); err != nil {
+			return err
+		}
+		log.Info("Satellite removed from trust cache.", zap.Stringer("satelliteID", satellite.SatelliteID))
+	}
+
+	log.Info("Cleaning up satellite data.", zap.Stringer("satelliteID", satellite.SatelliteID))
+	blobs := pieces.NewBlobsUsageCache(log.Named("blobscache"), db.Pieces())
+	if err := blobs.DeleteNamespace(ctx, satellite.SatelliteID.Bytes()); err != nil {
+		return err
+	}
+
+	log.Info("Cleaning up the trash.", zap.Stringer("satelliteID", satellite.SatelliteID))
+	err = blobs.DeleteTrashNamespace(ctx, satellite.SatelliteID.Bytes())
+	if err != nil {
+		return err
+	}
+
+	log.Info("Removing satellite info from reputation DB.", zap.Stringer("satelliteID", satellite.SatelliteID))
+	err = db.Reputation().Delete(ctx, satellite.SatelliteID)
+	if err != nil {
+		return err
+	}
+
+	// delete v0 pieces for the satellite, if any.
+	log.Info("Removing satellite v0 pieces if any.", zap.Stringer("satelliteID", satellite.SatelliteID))
+	err = db.V0PieceInfo().WalkSatelliteV0Pieces(ctx, db.Pieces(), satellite.SatelliteID, func(access pieces.StoredPieceAccess) error {
+		return db.Pieces().Delete(ctx, access.BlobRef())
+	})
+	if err != nil {
+		return err
+	}
+
+	log.Info("Removing satellite from satellites DB.", zap.Stringer("satelliteID", satellite.SatelliteID))
+	err = db.Satellites().DeleteSatellite(ctx, satellite.SatelliteID)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
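`cmdForgetSatellite` uses the boolean value in `excludedSatellites` as a "cleaned up" marker: entries start at false, flip to true once handled, and a second pass sweeps anything still false. A tiny sketch of that bookkeeping, with plain strings in place of `storj.NodeID` and prints in place of the real cleanup:

```go
package main

import "fmt"

func main() {
	// false means the satellite has not been cleaned up yet.
	excluded := map[string]bool{"sat-A": false, "sat-B": false}

	// First pass: clean up whatever the local cache already knows about.
	for _, id := range []string{"sat-A"} {
		fmt.Println("cleaning", id)
		excluded[id] = true
	}

	// Second pass: sweep excluded satellites the cache never saw.
	for id, cleanedUp := range excluded {
		if !cleanedUp {
			fmt.Println("cleaning (exclusions only)", id)
			excluded[id] = true
		}
	}
}
```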
254  cmd/storagenode/cmd_forget_satellite_test.go  Normal file
@ -0,0 +1,254 @@
+// Copyright (C) 2023 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package main
+
+import (
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"github.com/zeebo/errs"
+	"go.uber.org/zap/zaptest"
+
+	"storj.io/common/identity"
+	"storj.io/common/memory"
+	"storj.io/common/testcontext"
+	"storj.io/common/testrand"
+	"storj.io/storj/private/testplanet"
+	"storj.io/storj/storagenode/blobstore"
+	"storj.io/storj/storagenode/blobstore/filestore"
+	"storj.io/storj/storagenode/reputation"
+	"storj.io/storj/storagenode/satellites"
+)
+
+func Test_newForgetSatelliteCmd_Error(t *testing.T) {
+	tests := []struct {
+		name    string
+		args    string
+		wantErr string
+	}{
+		{
+			name:    "no args",
+			args:    "",
+			wantErr: "must specify either satellite ID(s) as arguments or --all-untrusted flag",
+		},
+		{
+			name:    "Both satellite ID and --all-untrusted flag specified",
+			args:    "--all-untrusted 1234567890123456789012345678901234567890123456789012345678901234",
+			wantErr: "cannot specify both satellite IDs and --all-untrusted",
+		},
+		{
+			name:    "--all-untrusted and --force specified",
+			args:    "--all-untrusted --force",
+			wantErr: "cannot specify both --all-untrusted and --force",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			cmd := newForgetSatelliteCmd(&Factory{})
+			cmd.SetArgs(strings.Fields(tt.args))
+			err := cmd.ExecuteContext(testcontext.New(t))
+			if tt.wantErr == "" {
+				require.NoError(t, err)
+				return
+			}
+			require.Equal(t, tt.wantErr, err.Error())
+		})
+	}
+}
+
+func Test_cmdForgetSatellite(t *testing.T) {
+	t.Skip("The tests and the behavior is currently flaky. See https://github.com/storj/storj/issues/6465")
+
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 2, StorageNodeCount: 1, UplinkCount: 0,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		address := planet.StorageNodes[0].Server.PrivateAddr().String()
+		db := planet.StorageNodes[0].DB
+		log := zaptest.NewLogger(t)
+
+		store, err := filestore.NewAt(log, db.Config().Pieces, filestore.DefaultConfig)
+		require.NoError(t, err)
+		defer ctx.Check(store.Close)
+
+		satelliteID := planet.Satellites[0].ID()
+
+		blobSize := memory.KB
+		blobRef := blobstore.BlobRef{
+			Namespace: satelliteID.Bytes(),
+			Key:       testrand.PieceID().Bytes(),
+		}
+		w, err := store.Create(ctx, blobRef, -1)
+		require.NoError(t, err)
+		_, err = w.Write(testrand.Bytes(blobSize))
+		require.NoError(t, err)
+		require.NoError(t, w.Commit(ctx))
+
+		// create a new satellite reputation
+		timestamp := time.Now().UTC()
+		reputationDB := db.Reputation()
+
+		stats := reputation.Stats{
+			SatelliteID: satelliteID,
+			Audit: reputation.Metric{
+				TotalCount:   6,
+				SuccessCount: 7,
+				Alpha:        8,
+				Beta:         9,
+				Score:        10,
+				UnknownAlpha: 11,
+				UnknownBeta:  12,
+				UnknownScore: 13,
+			},
+			OnlineScore: 14,
+			UpdatedAt:   timestamp,
+			JoinedAt:    timestamp,
+		}
+		err = reputationDB.Store(ctx, stats)
+		require.NoError(t, err)
+		// test that the reputation was stored correctly
+		rstats, err := reputationDB.Get(ctx, satelliteID)
+		require.NoError(t, err)
+		require.NotNil(t, rstats)
+		require.Equal(t, stats, *rstats)
+
+		// insert a new untrusted satellite in the database
+		err = db.Satellites().SetAddressAndStatus(ctx, satelliteID, address, satellites.Untrusted)
+		require.NoError(t, err)
+		// test that the satellite was inserted correctly
+		satellite, err := db.Satellites().GetSatellite(ctx, satelliteID)
+		require.NoError(t, err)
+		require.Equal(t, satellites.Untrusted, satellite.Status)
+
+		// set up the identity
+		ident := planet.StorageNodes[0].Identity
+		identConfig := identity.Config{
+			CertPath: ctx.File("identity", "identity.cert"),
+			KeyPath:  ctx.File("identity", "identity.Key"),
+		}
+		err = identConfig.Save(ident)
+		require.NoError(t, err)
+		planet.StorageNodes[0].Config.Identity = identConfig
+
+		// run the forget satellite command with All flag
+		err = cmdForgetSatellite(ctx, log, &forgetSatelliteCfg{
+			AllUntrusted: true,
+			Config:       planet.StorageNodes[0].Config,
+		})
+		require.NoError(t, err)
+
+		// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+		// TODO: this is for reproducing the bug,
+		// remove it once it's fixed.
+		// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+		time.Sleep(10 * time.Second)
+
+		// check that the blob was deleted
+		blobInfo, err := store.Stat(ctx, blobRef)
+		require.Error(t, err)
+		require.True(t, errs.Is(err, os.ErrNotExist))
+		require.Nil(t, blobInfo)
+		// check that the reputation was deleted
+		rstats, err = reputationDB.Get(ctx, satelliteID)
+		require.NoError(t, err)
+		require.Equal(t, &reputation.Stats{SatelliteID: satelliteID}, rstats)
+		// check that the satellite info was deleted from the database
+		satellite, err = db.Satellites().GetSatellite(ctx, satelliteID)
+		require.NoError(t, err)
+		require.True(t, satellite.SatelliteID.IsZero())
+	})
+}
+
+func Test_cmdForgetSatellite_Exclusions(t *testing.T) {
+	t.Skip("The tests and the behavior is currently flaky. See https://github.com/storj/storj/issues/6465")
+
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 2, StorageNodeCount: 1, UplinkCount: 0,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		address := planet.StorageNodes[0].Server.PrivateAddr().String()
+		db := planet.StorageNodes[0].DB
+		log := zaptest.NewLogger(t)
+
+		store, err := filestore.NewAt(log, db.Config().Pieces, filestore.DefaultConfig)
+		require.NoError(t, err)
+		defer ctx.Check(store.Close)
+
+		satelliteID := planet.Satellites[0].ID()
+
+		blobSize := memory.KB
+		blobRef := blobstore.BlobRef{
+			Namespace: satelliteID.Bytes(),
+			Key:       testrand.PieceID().Bytes(),
+		}
+		w, err := store.Create(ctx, blobRef, -1)
+		require.NoError(t, err)
+		_, err = w.Write(testrand.Bytes(blobSize))
+		require.NoError(t, err)
+		require.NoError(t, w.Commit(ctx))
+
+		// create a new satellite reputation
+		timestamp := time.Now().UTC()
+		reputationDB := db.Reputation()
+
+		stats := reputation.Stats{
+			SatelliteID: satelliteID,
+			Audit: reputation.Metric{
+				TotalCount:   6,
+				SuccessCount: 7,
+				Alpha:        8,
+				Beta:         9,
+				Score:        10,
+				UnknownAlpha: 11,
+				UnknownBeta:  12,
+				UnknownScore: 13,
+			},
+			OnlineScore: 14,
+			UpdatedAt:   timestamp,
+			JoinedAt:    timestamp,
+		}
+		err = reputationDB.Store(ctx, stats)
+		require.NoError(t, err)
+		// test that the reputation was stored correctly
+		rstats, err := reputationDB.Get(ctx, satelliteID)
+		require.NoError(t, err)
+		require.NotNil(t, rstats)
+		require.Equal(t, stats, *rstats)
+
+		// set up the identity
+		ident := planet.StorageNodes[0].Identity
+		identConfig := identity.Config{
+			CertPath: ctx.File("identity", "identity.cert"),
+			KeyPath:  ctx.File("identity", "identity.Key"),
+		}
+		err = identConfig.Save(ident)
+		require.NoError(t, err)
+		planet.StorageNodes[0].Config.Identity = identConfig
+
+		// add the satellite to the exclusion list
+		err = planet.StorageNodes[0].Config.Storage2.Trust.Exclusions.Set(satelliteID.String() + "@" + address)
+		require.NoError(t, err)
+		// run the forget satellite command with All flag
+		err = cmdForgetSatellite(ctx, log, &forgetSatelliteCfg{
+			AllUntrusted: true,
+			Config:       planet.StorageNodes[0].Config,
+		})
+		require.NoError(t, err)
+
+		// check that the blob was deleted
+		blobInfo, err := store.Stat(ctx, blobRef)
+		require.Error(t, err)
+		require.True(t, errs.Is(err, os.ErrNotExist))
+		require.Nil(t, blobInfo)
+		// check that the reputation was deleted
+		rstats, err = reputationDB.Get(ctx, satelliteID)
+		require.NoError(t, err)
+		require.Equal(t, &reputation.Stats{SatelliteID: satelliteID}, rstats)
+		// check that the satellite info was deleted from the database
+		satellite, err := db.Satellites().GetSatellite(ctx, satelliteID)
+		require.NoError(t, err)
+		require.True(t, satellite.SatelliteID.IsZero())
+	})
+}
@ -44,8 +44,6 @@ func cmdRun(cmd *cobra.Command, cfg *runCfg) (err error) {
 	ctx, _ := process.Ctx(cmd)
 	log := zap.L()
-
-	cfg.Debug.Address = *process.DebugAddrFlag
 
 	mapDeprecatedConfigs(log, &cfg.StorageNodeFlags)
 
 	identity, err := cfg.Identity.Load()
@ -59,6 +59,7 @@ func newRootCmd(setDefaults bool) (*cobra.Command, *Factory) {
 		newIssueAPIKeyCmd(factory),
 		newGracefulExitInitCmd(factory),
 		newGracefulExitStatusCmd(factory),
+		newForgetSatelliteCmd(factory),
 		// internal hidden commands
 		internalcmd.NewUsedSpaceFilewalkerCmd().Command,
 		internalcmd.NewGCFilewalkerCmd().Command,
@ -65,11 +65,15 @@ func (ce *consoleEndpoints) Token() string {
 	return ce.appendPath("/api/v0/auth/token")
 }
 
-func (ce *consoleEndpoints) GraphQL() string {
-	return ce.appendPath("/api/v0/graphql")
+func (ce *consoleEndpoints) Projects() string {
+	return ce.appendPath("/api/v0/projects")
 }
 
-func (ce *consoleEndpoints) graphqlDo(request *http.Request, jsonResponse interface{}) error {
+func (ce *consoleEndpoints) APIKeys() string {
+	return ce.appendPath("/api/v0/api-keys")
+}
+
+func (ce *consoleEndpoints) httpDo(request *http.Request, jsonResponse interface{}) error {
 	resp, err := ce.client.Do(request)
 	if err != nil {
 		return err
@ -81,24 +85,24 @@ func (ce *consoleEndpoints) graphqlDo(request *http.Request, jsonResponse interf
 		return err
 	}
 
-	var response struct {
-		Data   json.RawMessage
-		Errors []interface{}
-	}
-
-	if err = json.NewDecoder(bytes.NewReader(b)).Decode(&response); err != nil {
-		return err
-	}
-
-	if response.Errors != nil {
-		return errs.New("inner graphql error: %v", response.Errors)
-	}
-
 	if jsonResponse == nil {
 		return errs.New("empty response: %q", b)
 	}
 
-	return json.NewDecoder(bytes.NewReader(response.Data)).Decode(jsonResponse)
+	if resp.StatusCode >= 200 && resp.StatusCode < 300 {
+		return json.NewDecoder(bytes.NewReader(b)).Decode(jsonResponse)
+	}
+
+	var errResponse struct {
+		Error string `json:"error"`
+	}
+
+	err = json.NewDecoder(bytes.NewReader(b)).Decode(&errResponse)
+	if err != nil {
+		return err
+	}
+
+	return errs.New("request failed with status %d: %s", resp.StatusCode, errResponse.Error)
 }
 
 func (ce *consoleEndpoints) createOrGetAPIKey(ctx context.Context) (string, error) {
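The new `httpDo` reads the body once and branches on the status code: 2xx bodies decode into the caller's struct, anything else decodes into an `{"error": ...}` envelope. A self-contained sketch of the same decode logic against a stub server:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// decode mirrors httpDo's branching: success JSON or an error envelope.
func decode(resp *http.Response, out interface{}) error {
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if resp.StatusCode >= 200 && resp.StatusCode < 300 {
		return json.Unmarshal(b, out)
	}
	var envelope struct {
		Error string `json:"error"`
	}
	if err := json.Unmarshal(b, &envelope); err != nil {
		return err
	}
	return fmt.Errorf("request failed with status %d: %s", resp.StatusCode, envelope.Error)
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusBadRequest)
		_ = json.NewEncoder(w).Encode(map[string]string{"error": "project name is taken"})
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var ignored struct{}
	fmt.Println(decode(resp, &ignored)) // request failed with status 400: project name is taken
}
```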
@ -464,49 +468,41 @@ func (ce *consoleEndpoints) getProject(ctx context.Context, token string) (strin
 	request, err := http.NewRequestWithContext(
 		ctx,
 		http.MethodGet,
-		ce.GraphQL(),
+		ce.Projects(),
 		nil)
 	if err != nil {
 		return "", errs.Wrap(err)
 	}
 
-	q := request.URL.Query()
-	q.Add("query", `query {myProjects{id}}`)
-	request.URL.RawQuery = q.Encode()
-
 	request.AddCookie(&http.Cookie{
 		Name:  ce.cookieName,
 		Value: token,
 	})
 
-	request.Header.Add("Content-Type", "application/graphql")
+	request.Header.Add("Content-Type", "application/json")
 
-	var getProjects struct {
-		MyProjects []struct {
-			ID string
-		}
-	}
-	if err := ce.graphqlDo(request, &getProjects); err != nil {
+	var projects []struct {
+		ID string `json:"id"`
+	}
+	if err := ce.httpDo(request, &projects); err != nil {
 		return "", errs.Wrap(err)
 	}
-	if len(getProjects.MyProjects) == 0 {
+	if len(projects) == 0 {
 		return "", errs.New("no projects")
 	}
 
-	return getProjects.MyProjects[0].ID, nil
+	return projects[0].ID, nil
 }
 
 func (ce *consoleEndpoints) createProject(ctx context.Context, token string) (string, error) {
 	rng := rand.NewSource(time.Now().UnixNano())
-	createProjectQuery := fmt.Sprintf(
-		`mutation {createProject(input:{name:"TestProject-%d",description:""}){id}}`,
-		rng.Int63())
+	body := fmt.Sprintf(`{"name":"TestProject-%d","description":""}`, rng.Int63())
 
 	request, err := http.NewRequestWithContext(
 		ctx,
 		http.MethodPost,
-		ce.GraphQL(),
-		bytes.NewReader([]byte(createProjectQuery)))
+		ce.Projects(),
+		bytes.NewReader([]byte(body)))
 	if err != nil {
 		return "", errs.Wrap(err)
 	}
@ -516,31 +512,27 @@ func (ce *consoleEndpoints) createProject(ctx context.Context, token string) (st
 		Value: token,
 	})
 
-	request.Header.Add("Content-Type", "application/graphql")
+	request.Header.Add("Content-Type", "application/json")
 
-	var createProject struct {
-		CreateProject struct {
-			ID string
-		}
-	}
-	if err := ce.graphqlDo(request, &createProject); err != nil {
+	var createdProject struct {
+		ID string `json:"id"`
+	}
+	if err := ce.httpDo(request, &createdProject); err != nil {
 		return "", errs.Wrap(err)
 	}
 
-	return createProject.CreateProject.ID, nil
+	return createdProject.ID, nil
 }
 
 func (ce *consoleEndpoints) createAPIKey(ctx context.Context, token, projectID string) (string, error) {
 	rng := rand.NewSource(time.Now().UnixNano())
-	createAPIKeyQuery := fmt.Sprintf(
-		`mutation {createAPIKey(projectID:%q,name:"TestKey-%d"){key}}`,
-		projectID, rng.Int63())
+	apiKeyName := fmt.Sprintf("TestKey-%d", rng.Int63())
 
 	request, err := http.NewRequestWithContext(
 		ctx,
 		http.MethodPost,
-		ce.GraphQL(),
-		bytes.NewReader([]byte(createAPIKeyQuery)))
+		ce.APIKeys()+"/create/"+projectID,
+		bytes.NewReader([]byte(apiKeyName)))
 	if err != nil {
 		return "", errs.Wrap(err)
 	}
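These helpers moved from GraphQL queries to plain REST calls: a JSON body, the session cookie, and a flat response object. A minimal sketch of that request shape against a stub server; the cookie name and endpoint path below are illustrative, not necessarily the console's real values:

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Stub standing in for the console's REST API.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_ = json.NewEncoder(w).Encode(map[string]string{"id": "1234"})
	}))
	defer srv.Close()

	body := []byte(`{"name":"TestProject-1","description":""}`)
	req, err := http.NewRequestWithContext(context.Background(), http.MethodPost, srv.URL+"/api/v0/projects", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	// Auth travels in a session cookie, content type is plain JSON.
	req.AddCookie(&http.Cookie{Name: "_tokenKey", Value: "session-token"})
	req.Header.Add("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The response is a flat object, no GraphQL envelope to unwrap.
	var created struct {
		ID string `json:"id"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&created); err != nil {
		panic(err)
	}
	fmt.Println("created project", created.ID)
}
```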
@ -550,18 +542,16 @@ func (ce *consoleEndpoints) createAPIKey(ctx context.Context, token, projectID s
 		Value: token,
 	})
 
-	request.Header.Add("Content-Type", "application/graphql")
+	request.Header.Add("Content-Type", "application/json")
 
-	var createAPIKey struct {
-		CreateAPIKey struct {
-			Key string
-		}
-	}
-	if err := ce.graphqlDo(request, &createAPIKey); err != nil {
+	var createdKey struct {
+		Key string `json:"key"`
+	}
+	if err := ce.httpDo(request, &createdKey); err != nil {
 		return "", errs.Wrap(err)
 	}
 
-	return createAPIKey.CreateAPIKey.Key, nil
+	return createdKey.Key, nil
 }
 
 func generateActivationKey(userID uuid.UUID, email string, createdAt time.Time) (string, error) {
@ -39,6 +39,8 @@ const (
 	maxStoragenodeCount = 200
 
 	folderPermissions = 0744
+
+	gatewayGracePeriod = 10 * time.Second
 )
 
 var defaultAccess = "12edqtGZnqQo6QHwTB92EDqg9B1WrWn34r7ALu94wkqXL4eXjBNnVr6F5W7GhJjVqJCqxpFERmDR1dhZWyMt3Qq5zwrE9yygXeT6kBoS9AfiPuwB6kNjjxepg5UtPPtp4VLp9mP5eeyobKQRD5TsEsxTGhxamsrHvGGBPrZi8DeLtNYFMRTV6RyJVxpYX6MrPCw9HVoDQbFs7VcPeeRxRMQttSXL3y33BJhkqJ6ByFviEquaX5R2wjQT2Kx"
@ -536,11 +538,11 @@ func newNetwork(flags *Flags) (*Processes, error) {
 		return fmt.Errorf("failed to read config string: %w", err)
 	}
 
-	// try with 100ms delays until we hit 3s
+	// try with 100ms delays until we exceed the grace period
 	apiKey, start := "", time.Now()
 	for apiKey == "" {
 		apiKey, err = newConsoleEndpoints(consoleAddress).createOrGetAPIKey(context.Background())
-		if err != nil && time.Since(start) > 3*time.Second {
+		if err != nil && time.Since(start) > gatewayGracePeriod {
 			return fmt.Errorf("failed to create account: %w", err)
 		}
 		time.Sleep(100 * time.Millisecond)
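Raising the cutoff from a literal `3*time.Second` to the named `gatewayGracePeriod` makes the retry policy explicit in one place. The loop itself is a standard poll-until-deadline; a generic sketch of the same shape:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil retries fn every tick until it succeeds or the grace period
// has elapsed, mirroring the createOrGetAPIKey loop above.
func pollUntil(grace, tick time.Duration, fn func() (string, error)) (string, error) {
	start := time.Now()
	for {
		result, err := fn()
		if err == nil {
			return result, nil
		}
		if time.Since(start) > grace {
			return "", fmt.Errorf("gave up after %s: %w", grace, err)
		}
		time.Sleep(tick)
	}
}

func main() {
	calls := 0
	key, err := pollUntil(10*time.Second, 100*time.Millisecond, func() (string, error) {
		calls++
		if calls < 3 {
			return "", errors.New("console not ready yet")
		}
		return "api-key", nil
	})
	fmt.Println(key, err)
}
```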
@ -255,7 +255,8 @@ func (process *Process) Exec(ctx context.Context, command string) (err error) {
 
 	if _, ok := process.Arguments[command]; !ok {
 		fmt.Fprintf(process.processes.Output, "%s running: %s\n", process.Name, command)
-		return
+		//TODO: This doesn't look right, but keeping the same behaviour as before.
+		return nil
 	}
 
 	cmd := exec.CommandContext(ctx, executable, process.Arguments[command]...)
@ -4,10 +4,12 @@
 package main
 
 import (
+	"context"
 	"encoding/hex"
 	"fmt"
 	"os"
 
+	"storj.io/common/identity"
 	"storj.io/common/storj"
 )
 
@ -48,6 +50,17 @@ func main() {
 		}
 	}
 
+	if chain, err := os.ReadFile(os.Args[1]); err == nil {
+		if id, err := identity.PeerIdentityFromPEM(chain); err == nil {
+			output(id.ID)
+			return
+		}
+		if id, err := identity.DecodePeerIdentity(context.Background(), chain); err == nil {
+			output(id.ID)
+			return
+		}
+	}
+
 	fmt.Fprintf(os.Stderr, "unknown argument: %q", os.Args[1])
 	usage()
 }
148  cmd/tools/placement-test/main.go  Normal file
@ -0,0 +1,148 @@
+// Copyright (C) 2023 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+	"github.com/zeebo/errs"
+	"go.uber.org/zap"
+
+	"storj.io/common/storj"
+	"storj.io/common/storj/location"
+	"storj.io/private/process"
+	"storj.io/storj/satellite/nodeselection"
+	"storj.io/storj/satellite/overlay"
+)
+
+var (
+	rootCmd = &cobra.Command{
+		Use:   "placement-test <countrycode:...,lastipport:...,lastnet:...,tag:signer/key/value,tag:signer/key/value...>",
+		Short: "Test placement settings",
+		Long: `This command helps testing placement configuration.
+
+You can define a custom node with attributes, and all available placement configuration will be tested against the node.
+
+Supported node attributes:
+ * countrycode
+ * lastipport
+ * lastnet
+ * tag (value should be in the form of signer/key/value)
+
+EXAMPLES:
+
+placement-test --placement '10:country("GB");12:country("DE")' countrycode=11
+
+placement-test --placement /tmp/proposal.txt countrycode=US,tag=12Q8q2PofHPwycSwAVCpjNxxzWiDJhi8UV4ceZBo4hmNARpYcR7/soc2/true
+
+Where /tmp/proposal.txt contains definitions, for example:
+10:tag("12Q8q2PofHPwycSwAVCpjNxxzWiDJhi8UV4ceZBo4hmNARpYcR7","selected",notEmpty());
+1:country("EU") && exclude(placement(10)) && annotation("location","eu-1");
+2:country("EEA") && exclude(placement(10)) && annotation("location","eea-1");
+3:country("US") && exclude(placement(10)) && annotation("location","us-1");
+4:country("DE") && exclude(placement(10)) && annotation("location","de-1");
+6:country("*","!BY", "!RU", "!NONE") && exclude(placement(10)) && annotation("location","custom-1")
+`,
+		Args: cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx, _ := process.Ctx(cmd)
+			return testPlacement(ctx, args[0])
+		},
+	}
+
+	config Config
+)
+
+func testPlacement(ctx context.Context, fakeNode string) error {
+	node := &nodeselection.SelectedNode{}
+	for _, part := range strings.Split(fakeNode, ",") {
+		kv := strings.SplitN(part, "=", 2)
+		switch strings.ToLower(kv[0]) {
+		case "countrycode":
+			node.CountryCode = location.ToCountryCode(kv[1])
+		case "lastipport":
+			node.LastIPPort = kv[1]
+		case "lastnet":
+			node.LastNet = kv[1]
+		case "tag":
+			tkv := strings.SplitN(kv[1], "/", 3)
+			signer, err := storj.NodeIDFromString(tkv[0])
+			if err != nil {
+				return err
+			}
+			node.Tags = append(node.Tags, nodeselection.NodeTag{
+				Name:     tkv[1],
+				Value:    []byte(tkv[2]),
+				Signer:   signer,
+				SignedAt: time.Now(),
+				NodeID:   node.ID,
+			})
+		default:
+			panic("Unsupported field of SelectedNode: " + kv[0])
+		}
+	}
+
+	placement, err := config.Placement.Parse()
+	if err != nil {
+		return errs.Wrap(err)
+	}
+
+	fmt.Println("Node:")
+	jsonNode, err := json.MarshalIndent(node, " ", " ")
+	if err != nil {
+		return errs.Wrap(err)
+	}
+
+	fmt.Println(string(jsonNode))
+
+	for _, placementNum := range placement.SupportedPlacements() {
+		fmt.Printf("\n--------- Evaluating placement rule %d ---------\n", placementNum)
+		filter := placement.CreateFilters(placementNum)
+
+		fmt.Printf("Placement: %s\n", filter)
+		result := filter.Match(node)
+		fmt.Println("MATCH: ", result)
+		fmt.Println("Annotations: ")
+		if annotated, ok := filter.(nodeselection.NodeFilterWithAnnotation); ok {
+			fmt.Println("   location:", annotated.GetAnnotation("location"))
+			fmt.Println("   "+nodeselection.AutoExcludeSubnet+":", annotated.GetAnnotation(nodeselection.AutoExcludeSubnet))
+		} else {
+			fmt.Println("   no annotation presents")
+		}
+	}
+	return nil
+}
+
+// Config contains configuration of placement.
+type Config struct {
+	Placement overlay.ConfigurablePlacementRule `help:"detailed placement rules in the form 'id:definition;id:definition;...' where id is a 16 bytes integer (use >10 for backward compatibility), definition is a combination of the following functions:country(2 letter country codes,...), tag(nodeId, key, bytes(value)) all(...,...)."`
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
process.Bind(rootCmd, &config)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
process.ExecWithCustomOptions(rootCmd, process.ExecOptions{
|
||||||
|
LoadConfig: func(cmd *cobra.Command, vip *viper.Viper) error {
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
InitTracing: false,
|
||||||
|
LoggerFactory: func(logger *zap.Logger) *zap.Logger {
|
||||||
|
newLogger, level, err := process.NewLogger("placement-test")
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
level.SetLevel(zap.WarnLevel)
|
||||||
|
return newLogger
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
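Note: the core of the tool is the loop at the end of testPlacement: parse the configured rules once, then run every known placement filter against the fake node. A condensed sketch of that evaluation path, reusing only the calls shown in the file above (the map key type is assumed to be storj.PlacementConstraint, which SupportedPlacements is believed to return):

// matchAll reports, for each configured placement, whether the node matches.
func matchAll(cfg Config, node *nodeselection.SelectedNode) (map[storj.PlacementConstraint]bool, error) {
	placement, err := cfg.Placement.Parse()
	if err != nil {
		return nil, errs.Wrap(err)
	}
	matches := map[storj.PlacementConstraint]bool{}
	for _, num := range placement.SupportedPlacements() {
		matches[num] = placement.CreateFilters(num).Match(node)
	}
	return matches, nil
}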
@@ -142,10 +142,13 @@ type ReadCSVConfig struct {
 }
 
 func verifySegments(cmd *cobra.Command, args []string) error {
 	ctx, _ := process.Ctx(cmd)
 	log := zap.L()
 
+	return verifySegmentsInContext(ctx, log, cmd, satelliteCfg, rangeCfg)
+}
+
+func verifySegmentsInContext(ctx context.Context, log *zap.Logger, cmd *cobra.Command, satelliteCfg Satellite, rangeCfg RangeConfig) error {
 	// open default satellite database
 	db, err := satellitedb.Open(ctx, log.Named("db"), satelliteCfg.Database, satellitedb.Options{
 		ApplicationName: "segment-verify",
@@ -203,12 +206,12 @@ func verifySegments(cmd *cobra.Command, args []string) error {
 	dialer := rpc.NewDefaultDialer(tlsOptions)
 
 	// setup dependencies for verification
-	overlay, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), "", "", satelliteCfg.Overlay)
+	overlayService, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), overlay.NewPlacementDefinitions().CreateFilters, "", "", satelliteCfg.Overlay)
 	if err != nil {
 		return Error.Wrap(err)
 	}
 
-	ordersService, err := orders.NewService(log.Named("orders"), signing.SignerFromFullIdentity(identity), overlay, orders.NewNoopDB(), satelliteCfg.Orders)
+	ordersService, err := orders.NewService(log.Named("orders"), signing.SignerFromFullIdentity(identity), overlayService, orders.NewNoopDB(), overlay.NewPlacementDefinitions().CreateFilters, satelliteCfg.Orders)
 	if err != nil {
 		return Error.Wrap(err)
 	}
@@ -243,11 +246,10 @@ func verifySegments(cmd *cobra.Command, args []string) error {
 
 	// setup verifier
 	verifier := NewVerifier(log.Named("verifier"), dialer, ordersService, verifyConfig)
 
-	service, err := NewService(log.Named("service"), metabaseDB, verifier, overlay, serviceConfig)
+	service, err := NewService(log.Named("service"), metabaseDB, verifier, overlayService, serviceConfig)
 	if err != nil {
 		return Error.Wrap(err)
 	}
-	verifier.reportPiece = service.problemPieces.Write
 	defer func() { err = errs.Combine(err, service.Close()) }()
 
 	log.Debug("starting", zap.Any("config", service.config), zap.String("command", cmd.Name()))
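Note: threading ctx, log, and the config structs through verifySegmentsInContext is what makes the new end-to-end test possible: the cobra entry point keeps its old shape, while a test can call the inner function directly, as the new test file below does:

// From the test: inject a test logger and a synthetic command, then run the
// whole verification pipeline in-process.
log := zaptest.NewLogger(t)
err = verifySegmentsInContext(ctx, log, &cobra.Command{Use: "range"}, satelliteCfg, rangeCfg)
require.NoError(t, err)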
cmd/tools/segment-verify/main_test.go (new file, 282 lines)
@@ -0,0 +1,282 @@
+// Copyright (C) 2023 Storj Labs, Inc.
+// See LICENSE for copying information
+
+package main
+
+import (
+	"context"
+	"encoding/csv"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"os"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/jackc/pgx/v5/stdlib"
+	"github.com/spf13/cobra"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap/zaptest"
+
+	"storj.io/common/memory"
+	"storj.io/common/storj"
+	"storj.io/common/testcontext"
+	"storj.io/common/testrand"
+	"storj.io/common/uuid"
+	"storj.io/private/dbutil/cockroachutil"
+	"storj.io/private/tagsql"
+	"storj.io/storj/private/testplanet"
+	"storj.io/storj/satellite/metabase"
+	"storj.io/storj/storagenode/pieces"
+)
+
+func TestCommandLineTool(t *testing.T) {
+	const (
+		nodeCount   = 10
+		uplinkCount = 10
+	)
+
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: nodeCount, UplinkCount: uplinkCount,
+		Reconfigure: testplanet.Reconfigure{
+			Satellite: testplanet.ReconfigureRS(nodeCount, nodeCount, nodeCount, nodeCount),
+		},
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		satellite := planet.Satellites[0]
+
+		// get the db connstrings that we can set in the global config (these are hilariously hard to get,
+		// but we really don't need to get them anywhere else in the codebase)
+		dbConnString := getConnStringFromDBConn(t, ctx, satellite.DB.Testing().RawDB())
+		metaDBConnString := getConnStringFromDBConn(t, ctx, satellite.Metabase.DB.UnderlyingTagSQL())
+
+		notFoundCSV := ctx.File("notfound.csv")
+		retryCSV := ctx.File("retry.csv")
+		problemPiecesCSV := ctx.File("problempieces.csv")
+
+		// set up global config that the main func will use
+		satelliteCfg := satelliteCfg
+		satelliteCfg.Config = satellite.Config
+		satelliteCfg.Database = dbConnString
+		satelliteCfg.Metainfo.DatabaseURL = metaDBConnString
+		satelliteCfg.Identity.KeyPath = ctx.File("identity-key")
+		satelliteCfg.Identity.CertPath = ctx.File("identity-cert")
+		require.NoError(t, satelliteCfg.Identity.Save(satellite.Identity))
+		rangeCfg := rangeCfg
+		rangeCfg.Verify = VerifierConfig{
+			PerPieceTimeout:    time.Second,
+			OrderRetryThrottle: 500 * time.Millisecond,
+			RequestThrottle:    500 * time.Millisecond,
+		}
+		rangeCfg.Service = ServiceConfig{
+			NotFoundPath:           notFoundCSV,
+			RetryPath:              retryCSV,
+			ProblemPiecesPath:      problemPiecesCSV,
+			Check:                  0,
+			BatchSize:              10000,
+			Concurrency:            1000,
+			MaxOffline:             2,
+			OfflineStatusCacheTime: 10 * time.Second,
+			AsOfSystemInterval:     -1 * time.Microsecond,
+		}
+		rangeCfg.Low = strings.Repeat("0", 32)
+		rangeCfg.High = strings.Repeat("f", 32)
+
+		// upload some data
+		data := testrand.Bytes(8 * memory.KiB)
+		for u, up := range planet.Uplinks {
+			for i := 0; i < nodeCount; i++ {
+				err := up.Upload(ctx, satellite, "bucket1", fmt.Sprintf("uplink%d/i%d", u, i), data)
+				require.NoError(t, err)
+			}
+		}
+
+		// take one node offline so there will be some pieces in the retry list
+		offlineNode := planet.StorageNodes[0]
+		require.NoError(t, planet.StopPeer(offlineNode))
+
+		// and delete 10% of pieces at random so there will be some pieces in the not-found list
+		const deleteFrac = 0.10
+		allDeletedPieces := make(map[storj.NodeID]map[storj.PieceID]struct{})
+		numDeletedPieces := 0
+		for nodeNum, node := range planet.StorageNodes {
+			if node.ID() == offlineNode.ID() {
+				continue
+			}
+			deletedPieces, err := deletePiecesRandomly(ctx, satellite.ID(), node, deleteFrac)
+			require.NoError(t, err, nodeNum)
+			allDeletedPieces[node.ID()] = deletedPieces
+			numDeletedPieces += len(deletedPieces)
+		}
+
+		// check that the number of segments we expect are present in the metainfo db
+		result, err := satellite.Metabase.DB.ListVerifySegments(ctx, metabase.ListVerifySegments{
+			CursorStreamID: uuid.UUID{},
+			CursorPosition: metabase.SegmentPosition{},
+			Limit:          10000,
+		})
+		require.NoError(t, err)
+		require.Len(t, result.Segments, uplinkCount*nodeCount)
+
+		// perform the verify!
+		log := zaptest.NewLogger(t)
+		err = verifySegmentsInContext(ctx, log, &cobra.Command{Use: "range"}, satelliteCfg, rangeCfg)
+		require.NoError(t, err)
+
+		// open the CSVs to check that we get the expected results
+		retryCSVHandle, err := os.Open(retryCSV)
+		require.NoError(t, err)
+		defer ctx.Check(retryCSVHandle.Close)
+		retryCSVReader := csv.NewReader(retryCSVHandle)
+
+		notFoundCSVHandle, err := os.Open(notFoundCSV)
+		require.NoError(t, err)
+		defer ctx.Check(notFoundCSVHandle.Close)
+		notFoundCSVReader := csv.NewReader(notFoundCSVHandle)
+
+		problemPiecesCSVHandle, err := os.Open(problemPiecesCSV)
+		require.NoError(t, err)
+		defer ctx.Check(problemPiecesCSVHandle.Close)
+		problemPiecesCSVReader := csv.NewReader(problemPiecesCSVHandle)
+
+		// in the retry CSV, we don't expect any rows, because there would need to be more than 5
+		// nodes offline to produce records here.
+		// TODO: make that 5 configurable so we can override it here and check results
+		header, err := retryCSVReader.Read()
+		require.NoError(t, err)
+		assert.Equal(t, []string{"stream id", "position", "found", "not found", "retry"}, header)
+		for {
+			record, err := retryCSVReader.Read()
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			require.NoError(t, err)
+			assert.Fail(t, "unexpected record in retry.csv", "%v", record)
+		}
+
+		// we do expect plenty of rows in not-found.csv. we don't know exactly what pieces these
+		// pertain to, but we can add up all the reported not-found pieces and expect the total
+		// to match numDeletedPieces. In addition, for each segment, found+notfound+retry should
+		// equal nodeCount.
+		header, err = notFoundCSVReader.Read()
+		require.NoError(t, err)
+		assert.Equal(t, []string{"stream id", "position", "found", "not found", "retry"}, header)
+		identifiedNotFoundPieces := 0
+		for {
+			record, err := notFoundCSVReader.Read()
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			require.NoError(t, err)
+			found, err := strconv.Atoi(record[2])
+			require.NoError(t, err)
+			notFound, err := strconv.Atoi(record[3])
+			require.NoError(t, err)
+			retry, err := strconv.Atoi(record[4])
+			require.NoError(t, err)
+
+			lineNum, _ := notFoundCSVReader.FieldPos(0)
+			assert.Equal(t, nodeCount, found+notFound+retry,
+				"line %d of not-found.csv contains record: %v where found+notFound+retry != %d", lineNum, record, nodeCount)
+			identifiedNotFoundPieces += notFound
+		}
+		assert.Equal(t, numDeletedPieces, identifiedNotFoundPieces)
+
+		// finally, in problem-pieces.csv, we can check results with more precision. we expect
+		// that all deleted pieces were identified, and that no pieces were identified as not found
+		// unless we deleted them specifically.
+		header, err = problemPiecesCSVReader.Read()
+		require.NoError(t, err)
+		assert.Equal(t, []string{"stream id", "position", "node id", "piece number", "outcome"}, header)
+		for {
+			record, err := problemPiecesCSVReader.Read()
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			streamID, err := uuid.FromString(record[0])
+			require.NoError(t, err)
+			position, err := strconv.ParseUint(record[1], 10, 64)
+			require.NoError(t, err)
+			nodeID, err := storj.NodeIDFromString(record[2])
+			require.NoError(t, err)
+			pieceNum, err := strconv.ParseInt(record[3], 10, 16)
+			require.NoError(t, err)
+			outcome := record[4]
+
+			switch outcome {
+			case "NODE_OFFLINE":
+				// expect that this was the node we took offline
+				assert.Equal(t, offlineNode.ID(), nodeID,
+					"record %v said node %s was offline, but we didn't take it offline", record, nodeID)
+			case "NOT_FOUND":
+				segmentPosition := metabase.SegmentPositionFromEncoded(position)
+				segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
+					StreamID: streamID,
+					Position: segmentPosition,
+				})
+				require.NoError(t, err)
+				pieceID := segment.RootPieceID.Derive(nodeID, int32(pieceNum))
+
+				deletedPiecesForNode, ok := allDeletedPieces[nodeID]
+				require.True(t, ok)
+				_, ok = deletedPiecesForNode[pieceID]
+				assert.True(t, ok, "we did not delete piece ID %s, but it was identified as not found", pieceID)
+				delete(deletedPiecesForNode, pieceID)
+			default:
+				assert.Fail(t, "unexpected outcome from problem-pieces.csv", "got %q, but expected \"NODE_OFFLINE\" or \"NOT_FOUND\"", outcome)
+			}
+		}
+
+		for node, deletedPieces := range allDeletedPieces {
+			assert.Empty(t, deletedPieces, "pieces were deleted from %v but were not reported in problem-pieces.csv", node)
+		}
+	})
+}
+
+func deletePiecesRandomly(ctx context.Context, satelliteID storj.NodeID, node *testplanet.StorageNode, rate float64) (deletedPieces map[storj.PieceID]struct{}, err error) {
+	deletedPieces = make(map[storj.PieceID]struct{})
+	err = node.Storage2.FileWalker.WalkSatellitePieces(ctx, satelliteID, func(access pieces.StoredPieceAccess) error {
+		if rand.Float64() < rate {
+			path, err := access.FullPath(ctx)
+			if err != nil {
+				return err
+			}
+			err = os.Remove(path)
+			if err != nil {
+				return err
+			}
+			deletedPieces[access.PieceID()] = struct{}{}
+		}
+		return nil
+	})
+	return deletedPieces, err
+}
+
+func getConnStringFromDBConn(t *testing.T, ctx *testcontext.Context, tagsqlDB tagsql.DB) (dbConnString string) {
+	type dbConnGetter interface {
+		StdlibConn() *stdlib.Conn
+	}
+
+	dbConn, err := tagsqlDB.Conn(ctx)
+	require.NoError(t, err)
+	defer ctx.Check(dbConn.Close)
+	err = dbConn.Raw(ctx, func(driverConn interface{}) error {
+		var stdlibConn *stdlib.Conn
+		switch conn := driverConn.(type) {
+		case dbConnGetter:
+			stdlibConn = conn.StdlibConn()
+		case *stdlib.Conn:
+			stdlibConn = conn
+		}
+		dbConnString = stdlibConn.Conn().Config().ConnString()
+		return nil
+	})
+	require.NoError(t, err)
+	if _, ok := tagsqlDB.Driver().(*cockroachutil.Driver); ok {
+		dbConnString = strings.ReplaceAll(dbConnString, "postgres://", "cockroach://")
+	}
+	return dbConnString
+}
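Note: the three output CSVs share the header convention checked above. A not-found.csv row could look like the following (values illustrative, not from a real run), and the test's invariant is that found + not found + retry always sums to the node count for the segment:

stream id,position,found,not found,retry
5f9a…,0,7,2,1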
@@ -15,6 +15,7 @@ import (
 	"storj.io/common/uuid"
 	"storj.io/private/process"
 	"storj.io/storj/satellite/metabase"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/overlay"
 	"storj.io/storj/satellite/satellitedb"
 )
@@ -78,7 +79,7 @@ type NodeCheckConfig struct {
 
 // NodeCheckOverlayDB contains dependencies from overlay that are needed for the processing.
 type NodeCheckOverlayDB interface {
-	IterateAllContactedNodes(context.Context, func(context.Context, *overlay.SelectedNode) error) error
+	IterateAllContactedNodes(context.Context, func(context.Context, *nodeselection.SelectedNode) error) error
 	IterateAllNodeDossiers(context.Context, func(context.Context, *overlay.NodeDossier) error) error
 }
 
@@ -5,6 +5,7 @@ package main
 
 import (
 	"context"
+	"errors"
 	"sync"
 
 	"go.uber.org/zap"
@@ -82,24 +83,29 @@ func (service *Service) VerifyBatches(ctx context.Context, batches []*Batch) err
 	limiter := sync2.NewLimiter(service.config.Concurrency)
 	for _, batch := range batches {
 		batch := batch
+		log := service.log.With(zap.Int("num pieces", batch.Len()))
 
 		info, err := service.GetNodeInfo(ctx, batch.Alias)
 		if err != nil {
 			if ErrNoSuchNode.Has(err) {
-				service.log.Error("will not verify batch; consider pieces lost",
-					zap.Int("alias", int(batch.Alias)),
-					zap.Error(err))
+				log.Info("node has left the cluster; considering pieces lost",
+					zap.Int("alias", int(batch.Alias)))
+				for _, seg := range batch.Items {
+					seg.Status.MarkNotFound()
+				}
 				continue
 			}
 			return Error.Wrap(err)
 		}
+		log = log.With(zap.Stringer("node ID", info.NodeURL.ID))
 
 		ignoreThrottle := service.priorityNodes.Contains(batch.Alias)
 
 		limiter.Go(ctx, func() {
 			verifiedCount, err := service.verifier.Verify(ctx, batch.Alias, info.NodeURL, info.Version, batch.Items, ignoreThrottle)
 			if err != nil {
-				if ErrNodeOffline.Has(err) {
+				switch {
+				case ErrNodeOffline.Has(err):
 					mu.Lock()
 					if verifiedCount == 0 {
 						service.offlineNodes.Add(batch.Alias)
@@ -110,8 +116,14 @@ func (service *Service) VerifyBatches(ctx context.Context, batches []*Batch) err
 					}
 				}
 				mu.Unlock()
+					log.Info("node is offline; marking pieces as retryable")
+					return
+				case errors.Is(err, context.DeadlineExceeded):
+					log.Info("request to node timed out; marking pieces as retryable")
+					return
+				default:
+					log.Error("verifying a batch failed", zap.Error(err))
 				}
-				service.log.Error("verifying a batch failed", zap.Error(err))
 			} else {
 				mu.Lock()
 				if service.offlineCount[batch.Alias] > 0 {
@@ -128,8 +140,12 @@ func (service *Service) VerifyBatches(ctx context.Context, batches []*Batch) err
 
 // convertAliasToNodeURL converts a node alias to node url, using a cache if needed.
 func (service *Service) convertAliasToNodeURL(ctx context.Context, alias metabase.NodeAlias) (_ storj.NodeURL, err error) {
+	service.mu.RLock()
 	nodeURL, ok := service.aliasToNodeURL[alias]
+	service.mu.RUnlock()
 	if !ok {
+		service.mu.Lock()
+		defer service.mu.Unlock()
 		nodeID, ok := service.aliasMap.Node(alias)
 		if !ok {
 			latest, err := service.metabase.LatestNodesAliasMap(ctx)
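Note: the new sync.RWMutex turns the alias-to-URL map into a concurrency-safe cache: reads take the cheap read lock, and only a miss escalates to the write lock for the whole fill path. A self-contained sketch of the same discipline with simplified types (not the tool's actual ones; assumes the sync import):

type urlCache struct {
	mu sync.RWMutex
	m  map[int]string
}

// get serves hits under the read lock and fills misses under the write lock,
// re-checking the map in case another goroutine filled it first.
func (c *urlCache) get(key int, compute func() string) string {
	c.mu.RLock()
	v, ok := c.m[key]
	c.mu.RUnlock()
	if ok {
		return v
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	if v, ok := c.m[key]; ok {
		return v
	}
	v = compute()
	c.m[key] = v
	return v
}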
@@ -10,10 +10,12 @@ import (
 	"io"
 	"os"
 	"strings"
+	"sync"
 	"sync/atomic"
 	"time"
 
 	"github.com/spacemonkeygo/monkit/v3"
+	"github.com/spf13/pflag"
 	"github.com/zeebo/errs"
 	"go.uber.org/zap"
 
@@ -21,6 +23,7 @@ import (
 	"storj.io/common/uuid"
 	"storj.io/storj/satellite/audit"
 	"storj.io/storj/satellite/metabase"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/overlay"
 )
 
@@ -46,7 +49,7 @@ type Verifier interface {
 type Overlay interface {
 	// Get looks up the node by nodeID
 	Get(ctx context.Context, nodeID storj.NodeID) (*overlay.NodeDossier, error)
-	SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*overlay.SelectedNode, error)
+	SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*nodeselection.SelectedNode, error)
 }
 
 // SegmentWriter allows writing segments to some output.
@@ -70,6 +73,9 @@ type ServiceConfig struct {
 
 	OfflineStatusCacheTime time.Duration `help:"how long to cache a \"node offline\" status" default:"30m"`
 
+	CreatedBefore DateFlag `help:"verify only segments created before specific date (date format 'YYYY-MM-DD')" default:""`
+	CreatedAfter  DateFlag `help:"verify only segments created after specific date (date format 'YYYY-MM-DD')" default:"1970-01-01"`
+
 	AsOfSystemInterval time.Duration `help:"as of system interval" releaseDefault:"-5m" devDefault:"-1us" testDefault:"-1us"`
 }
 
@@ -93,8 +99,9 @@ type Service struct {
 	verifier Verifier
 	overlay  Overlay
 
-	aliasMap       *metabase.NodeAliasMap
+	mu             sync.RWMutex
 	aliasToNodeURL map[metabase.NodeAlias]storj.NodeURL
+	aliasMap       *metabase.NodeAliasMap
 	priorityNodes  NodeAliasSet
 	ignoreNodes    NodeAliasSet
 	offlineNodes   *nodeAliasExpiringSet
@@ -120,6 +127,10 @@ func NewService(log *zap.Logger, metabaseDB Metabase, verifier Verifier, overlay
 		return nil, errs.Combine(Error.Wrap(err), retry.Close(), notFound.Close())
 	}
 
+	if nodeVerifier, ok := verifier.(*NodeVerifier); ok {
+		nodeVerifier.reportPiece = problemPieces.Write
+	}
+
 	return &Service{
 		log:    log,
 		config: config,
@@ -293,6 +304,9 @@ func (service *Service) ProcessRange(ctx context.Context, low, high uuid.UUID) (
 		CursorPosition: cursorPosition,
 		Limit:          service.config.BatchSize,
 
+		CreatedAfter:  service.config.CreatedAfter.time(),
+		CreatedBefore: service.config.CreatedBefore.time(),
+
 		AsOfSystemInterval: service.config.AsOfSystemInterval,
 	})
 	if err != nil {
@@ -485,6 +499,9 @@ func (service *Service) ProcessSegmentsFromCSV(ctx context.Context, segmentSourc
 	}
 	for n, verifySegment := range verifySegments.Segments {
 		segmentsData[n].VerifySegment = verifySegment
+		segmentsData[n].Status.Found = 0
+		segmentsData[n].Status.Retry = 0
+		segmentsData[n].Status.NotFound = 0
 		segments[n] = &segmentsData[n]
 	}
 
@@ -617,3 +634,42 @@ func uuidBefore(v uuid.UUID) uuid.UUID {
 	}
 	return v
 }
+
+// DateFlag flag implementation for date, format YYYY-MM-DD.
+type DateFlag struct {
+	time.Time
+}
+
+// String implements pflag.Value.
+func (t *DateFlag) String() string {
+	return t.Format(time.DateOnly)
+}
+
+// Set implements pflag.Value.
+func (t *DateFlag) Set(s string) error {
+	if s == "" {
+		t.Time = time.Now()
+		return nil
+	}
+
+	parsedTime, err := time.Parse(time.DateOnly, s)
+	if err != nil {
+		return err
+	}
+	t.Time = parsedTime
+	return nil
+}
+
+func (t *DateFlag) time() *time.Time {
+	if t.IsZero() {
+		return nil
+	}
+	return &t.Time
+}
+
+// Type implements pflag.Value.
+func (t *DateFlag) Type() string {
+	return "time-flag"
+}
+
+var _ pflag.Value = &DateFlag{}
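Note: because DateFlag satisfies pflag.Value (the String/Set/Type trio, with the var _ assertion above), it can be bound like any other flag. A quick standalone sketch (the flag set and flag name are illustrative):

fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
var createdBefore DateFlag
fs.Var(&createdBefore, "created-before", "verify only segments created before this date")
_ = fs.Parse([]string{"--created-before", "2023-06-01"})
fmt.Println(createdBefore.String()) // prints 2023-06-01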
@@ -23,6 +23,7 @@ import (
 	segmentverify "storj.io/storj/cmd/tools/segment-verify"
 	"storj.io/storj/private/testplanet"
 	"storj.io/storj/satellite/metabase"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/overlay"
 )
 
@@ -344,10 +345,10 @@ func (db *metabaseMock) Get(ctx context.Context, nodeID storj.NodeID) (*overlay.
 	}, nil
 }
 
-func (db *metabaseMock) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*overlay.SelectedNode, error) {
-	var xs []*overlay.SelectedNode
+func (db *metabaseMock) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*nodeselection.SelectedNode, error) {
+	var xs []*nodeselection.SelectedNode
 	for nodeID := range db.nodeIDToAlias {
-		xs = append(xs, &overlay.SelectedNode{
+		xs = append(xs, &nodeselection.SelectedNode{
 			ID: nodeID,
 			Address: &pb.NodeAddress{
 				Address: fmt.Sprintf("nodeid:%v", nodeID),
@@ -4,7 +4,7 @@
 package main_test
 
 import (
-	"strconv"
+	"fmt"
 	"testing"
 	"time"
 
@@ -23,15 +23,19 @@ import (
 )
 
 func TestVerifier(t *testing.T) {
+	const (
+		nodeCount   = 10
+		uplinkCount = 10
+	)
+
 	testplanet.Run(t, testplanet.Config{
-		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
+		SatelliteCount: 1, StorageNodeCount: nodeCount, UplinkCount: uplinkCount,
 		Reconfigure: testplanet.Reconfigure{
-			Satellite: testplanet.ReconfigureRS(4, 4, 4, 4),
+			Satellite: testplanet.ReconfigureRS(nodeCount, nodeCount, nodeCount, nodeCount),
 		},
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		satellite := planet.Satellites[0]
 
-		snoCount := int32(len(planet.StorageNodes))
 		olderNodeVersion := "v1.68.1" // version without Exists endpoint
 		newerNodeVersion := "v1.69.2" // minimum version with Exists endpoint
 
@@ -46,7 +50,7 @@ func TestVerifier(t *testing.T) {
 		observedZapCore, observedLogs := observer.New(zap.DebugLevel)
 		observedLogger := zap.New(observedZapCore).Named("verifier")
 
-		service := segmentverify.NewVerifier(
+		verifier := segmentverify.NewVerifier(
 			observedLogger,
 			satellite.Dialer,
 			satellite.Orders.Service,
@@ -54,9 +58,9 @@ func TestVerifier(t *testing.T) {
 
 		// upload some data
 		data := testrand.Bytes(8 * memory.KiB)
-		for _, up := range planet.Uplinks {
-			for i := 0; i < 10; i++ {
-				err := up.Upload(ctx, satellite, "bucket1", strconv.Itoa(i), data)
+		for u, up := range planet.Uplinks {
+			for i := 0; i < nodeCount; i++ {
+				err := up.Upload(ctx, satellite, "bucket1", fmt.Sprintf("uplink%d/i%d", u, i), data)
 				require.NoError(t, err)
 			}
 		}
@@ -67,50 +71,57 @@ func TestVerifier(t *testing.T) {
 			Limit: 10000,
 		})
 		require.NoError(t, err)
+		require.Len(t, result.Segments, uplinkCount*nodeCount)
 
-		validSegments := []*segmentverify.Segment{}
-		for _, raw := range result.Segments {
-			validSegments = append(validSegments, &segmentverify.Segment{
-				VerifySegment: raw,
-				Status:        segmentverify.Status{Retry: snoCount},
-			})
+		validSegments := make([]*segmentverify.Segment, len(result.Segments))
+		for i, raw := range result.Segments {
+			validSegments[i] = &segmentverify.Segment{VerifySegment: raw}
 		}
 
+		resetStatuses := func() {
+			for _, seg := range validSegments {
+				seg.Status = segmentverify.Status{Retry: nodeCount}
+			}
+		}
+		resetStatuses()
+
 		aliasMap, err := satellite.Metabase.DB.LatestNodesAliasMap(ctx)
 		require.NoError(t, err)
 
-		nodeWithExistsEndpoint := planet.StorageNodes[testrand.Intn(len(planet.StorageNodes)-1)]
+		t.Run("verify all", func(t *testing.T) {
+			nodeWithExistsEndpoint := planet.StorageNodes[testrand.Intn(len(planet.StorageNodes)-1)]
 
 			var g errgroup.Group
 			for _, node := range planet.StorageNodes {
 				node := node
 				nodeVersion := olderNodeVersion
 				if node == nodeWithExistsEndpoint {
 					nodeVersion = newerNodeVersion
+				}
+				alias, ok := aliasMap.Alias(node.ID())
+				require.True(t, ok)
+				g.Go(func() error {
+					_, err := verifier.Verify(ctx, alias, node.NodeURL(), nodeVersion, validSegments, true)
+					return err
+				})
 			}
-			alias, ok := aliasMap.Alias(node.ID())
-			require.True(t, ok)
-			g.Go(func() error {
-				_, err := service.Verify(ctx, alias, node.NodeURL(), nodeVersion, validSegments, true)
-				return err
-			})
-		}
-		require.NoError(t, g.Wait())
-		require.NotZero(t, len(observedLogs.All()))
+			require.NoError(t, g.Wait())
+			require.NotZero(t, len(observedLogs.All()))
 
 			// check that segments were verified with download method
 			fallbackLogs := observedLogs.FilterMessage("fallback to download method").All()
-			require.Equal(t, 3, len(fallbackLogs))
+			require.Equal(t, nodeCount-1, len(fallbackLogs))
 			require.Equal(t, zap.DebugLevel, fallbackLogs[0].Level)
 
 			// check that segments were verified with exists endpoint
 			existsLogs := observedLogs.FilterMessage("verify segments using Exists method").All()
 			require.Equal(t, 1, len(existsLogs))
 			require.Equal(t, zap.DebugLevel, existsLogs[0].Level)
 
-		for _, seg := range validSegments {
-			require.Equal(t, segmentverify.Status{Found: snoCount, NotFound: 0, Retry: 0}, seg.Status)
-		}
+			for segNum, seg := range validSegments {
+				require.Equal(t, segmentverify.Status{Found: nodeCount, NotFound: 0, Retry: 0}, seg.Status, segNum)
+			}
+		})
 
 		// segment not found
 		alias0, ok := aliasMap.Alias(planet.StorageNodes[0].ID())
@@ -138,7 +149,7 @@ func TestVerifier(t *testing.T) {
 		var count int
 		t.Run("segment not found using download method", func(t *testing.T) {
 			// for older node version
-			count, err = service.Verify(ctx, alias0, planet.StorageNodes[0].NodeURL(), olderNodeVersion,
+			count, err = verifier.Verify(ctx, alias0, planet.StorageNodes[0].NodeURL(), olderNodeVersion,
 				[]*segmentverify.Segment{validSegment0, missingSegment, validSegment1}, true)
 			require.NoError(t, err)
 			require.Equal(t, 3, count)
@@ -153,7 +164,7 @@ func TestVerifier(t *testing.T) {
 		validSegment1.Status = segmentverify.Status{Retry: 1}
 
 		t.Run("segment not found using exists method", func(t *testing.T) {
-			count, err = service.Verify(ctx, alias0, planet.StorageNodes[0].NodeURL(), newerNodeVersion,
+			count, err = verifier.Verify(ctx, alias0, planet.StorageNodes[0].NodeURL(), newerNodeVersion,
 				[]*segmentverify.Segment{validSegment0, missingSegment, validSegment1}, true)
 			require.NoError(t, err)
 			require.Equal(t, 3, count)
@@ -162,31 +173,34 @@ func TestVerifier(t *testing.T) {
 			require.Equal(t, segmentverify.Status{Found: 1}, validSegment1.Status)
 		})
 
+		resetStatuses()
+
 		t.Run("test throttling", func(t *testing.T) {
 			// Test throttling
 			verifyStart := time.Now()
 			const throttleN = 5
-			count, err = service.Verify(ctx, alias0, planet.StorageNodes[0].NodeURL(), olderNodeVersion, validSegments[:throttleN], false)
+			count, err = verifier.Verify(ctx, alias0, planet.StorageNodes[0].NodeURL(), olderNodeVersion, validSegments[:throttleN], false)
 			require.NoError(t, err)
 			verifyDuration := time.Since(verifyStart)
 			require.Equal(t, throttleN, count)
 			require.Greater(t, verifyDuration, config.RequestThrottle*(throttleN-1))
 		})
 
+		resetStatuses()
+
 		// TODO: test download timeout
 		t.Run("Node offline", func(t *testing.T) {
 			err = planet.StopNodeAndUpdate(ctx, planet.StorageNodes[0])
 			require.NoError(t, err)
 
 			// for older node version
-			count, err = service.Verify(ctx, alias0, planet.StorageNodes[0].NodeURL(), olderNodeVersion, validSegments, true)
+			count, err = verifier.Verify(ctx, alias0, planet.StorageNodes[0].NodeURL(), olderNodeVersion, validSegments, true)
 			require.Error(t, err)
 			require.Equal(t, 0, count)
 			require.True(t, segmentverify.ErrNodeOffline.Has(err))
 
 			// for node version with Exists endpoint
-			count, err = service.Verify(ctx, alias0, planet.StorageNodes[0].NodeURL(), newerNodeVersion, validSegments, true)
+			count, err = verifier.Verify(ctx, alias0, planet.StorageNodes[0].NodeURL(), newerNodeVersion, validSegments, true)
 			require.Error(t, err)
 			require.Equal(t, 0, count)
 			require.True(t, segmentverify.ErrNodeOffline.Has(err))
cmd/tools/tag-signer/main.go (new file, 218 lines)
@@ -0,0 +1,218 @@
+// Copyright (C) 2023 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package main
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+	"github.com/zeebo/errs"
+	"go.uber.org/zap"
+
+	"storj.io/common/identity"
+	"storj.io/common/nodetag"
+	"storj.io/common/pb"
+	"storj.io/common/signing"
+	"storj.io/common/storj"
+	"storj.io/private/process"
+)
+
+var (
+	rootCmd = &cobra.Command{
+		Use:   "tag-signer",
+		Short: "Sign key=value pairs with identity",
+		Long: "Node tags are arbitrary key value pairs signed by an authority. If the public key is configured on " +
+			"Satellite side, Satellite will check the signatures and save the tags, which can be used (for example)" +
+			" during node selection. Storagenodes can be configured to send encoded node tags to the Satellite. " +
+			"This utility helps creating/managing the values of this specific configuration value, which is encoded by default.",
+	}
+
+	signCmd = &cobra.Command{
+		Use:   "sign <key=value> <key2=value> ...",
+		Short: "Create signed tagset",
+		Args:  cobra.MinimumNArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx, _ := process.Ctx(cmd)
+			encoded, err := signTags(ctx, config, args)
+			if err != nil {
+				return err
+			}
+			fmt.Println(encoded)
+			return nil
+		},
+	}
+
+	inspectCmd = &cobra.Command{
+		Use:   "inspect <encoded string>",
+		Short: "Print out the details from an encoded node set",
+		Args:  cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx, _ := process.Ctx(cmd)
+			return inspect(ctx, args[0])
+		},
+	}
+
+	config Config
+)
+
+// Config contains configuration required for signing.
+type Config struct {
+	IdentityDir string `help:"location of the identity files" path:"true"`
+	NodeID      string `help:"the ID of the node which will use this tag"`
+	Confirm     bool   `help:"enable comma in tag values" default:"false"`
+}
+
+func init() {
+	rootCmd.AddCommand(signCmd)
+	rootCmd.AddCommand(inspectCmd)
+	process.Bind(signCmd, &config)
+}
+
+func signTags(ctx context.Context, cfg Config, tagPairs []string) (string, error) {
+	if cfg.IdentityDir == "" {
+		return "", errs.New("Please specify the identity, used as a signer with --identity-dir")
+	}
+
+	if cfg.NodeID == "" {
+		return "", errs.New("Please specify the --node-id")
+	}
+
+	identityConfig := identity.Config{
+		CertPath: filepath.Join(cfg.IdentityDir, "identity.cert"),
+		KeyPath:  filepath.Join(cfg.IdentityDir, "identity.key"),
+	}
+
+	fullIdentity, err := identityConfig.Load()
+	if err != nil {
+		return "", err
+	}
+
+	signer := signing.SignerFromFullIdentity(fullIdentity)
+
+	nodeID, err := storj.NodeIDFromString(cfg.NodeID)
+	if err != nil {
+		return "", errs.New("Wrong NodeID format: %v", err)
+	}
+	tagSet := &pb.NodeTagSet{
+		NodeId:   nodeID.Bytes(),
+		SignedAt: time.Now().Unix(),
+	}
+
+	tagSet.Tags, err = parseTagPairs(tagPairs, cfg.Confirm)
+	if err != nil {
+		return "", err
+	}
+
+	signedMessage, err := nodetag.Sign(ctx, tagSet, signer)
+	if err != nil {
+		return "", err
+	}
+
+	all := &pb.SignedNodeTagSets{
+		Tags: []*pb.SignedNodeTagSet{
+			signedMessage,
+		},
+	}
+
+	raw, err := proto.Marshal(all)
+	if err != nil {
+		return "", errs.Wrap(err)
+	}
+	return base64.StdEncoding.EncodeToString(raw), nil
+}
+
+func inspect(ctx context.Context, s string) error {
+	raw, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		return errs.New("Input is not in base64 format")
+	}
+
+	sets := &pb.SignedNodeTagSets{}
+	err = proto.Unmarshal(raw, sets)
+	if err != nil {
+		return errs.New("Input is not a protobuf encoded *pb.SignedNodeTagSets message")
+	}
+
+	for _, msg := range sets.Tags {
+		signerNodeID, err := storj.NodeIDFromBytes(msg.SignerNodeId)
+		if err != nil {
+			return err
+		}
+
+		fmt.Println("Signer:    ", signerNodeID.String())
+		fmt.Println("Signature: ", hex.EncodeToString(msg.Signature))
+
+		tags := &pb.NodeTagSet{}
+		err = proto.Unmarshal(msg.SerializedTag, tags)
+		if err != nil {
+			return err
+		}
+		nodeID, err := storj.NodeIDFromBytes(tags.NodeId)
+		if err != nil {
+			return err
+		}
+
+		fmt.Println("SignedAt:  ", time.Unix(tags.SignedAt, 0).Format(time.RFC3339))
+		fmt.Println("NodeID:    ", nodeID.String())
+		fmt.Println("Tags:")
+		for _, tag := range tags.Tags {
+			fmt.Printf("   %s=%s\n", tag.Name, string(tag.Value))
+		}
+		fmt.Println()
+	}
+	return nil
+}
+
+func parseTagPairs(tagPairs []string, allowCommaValues bool) ([]*pb.Tag, error) {
+	tags := make([]*pb.Tag, 0, len(tagPairs))
+
+	for _, tag := range tagPairs {
+		tag = strings.TrimSpace(tag)
+		if len(tag) == 0 {
+			continue
+		}
+
+		if !allowCommaValues && strings.ContainsRune(tag, ',') {
+			return nil, errs.New("multiple tags should be separated by spaces instead of commas, or specify --confirm to enable commas in tag values")
+		}
+
+		parts := strings.SplitN(tag, "=", 2)
+		if len(parts) != 2 {
+			return nil, errs.New("tags should be in KEY=VALUE format, but it was %s", tag)
+		}
+		tags = append(tags, &pb.Tag{
+			Name:  parts[0],
+			Value: []byte(parts[1]),
+		})
+	}
+
+	return tags, nil
+}
+
+func main() {
+	process.ExecWithCustomOptions(rootCmd, process.ExecOptions{
+		LoadConfig: func(cmd *cobra.Command, vip *viper.Viper) error {
+			return nil
+		},
+		InitTracing: false,
+		LoggerFactory: func(logger *zap.Logger) *zap.Logger {
+			newLogger, level, err := process.NewLogger("tag-signer")
+			if err != nil {
+				panic(err)
+			}
+			level.SetLevel(zap.WarnLevel)
+			return newLogger
+		},
+	})
+}
cmd/tools/tag-signer/main_test.go (new file, 99 lines)
@@ -0,0 +1,99 @@
+// Copyright (C) 2023 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package main
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"storj.io/common/pb"
+)
+
+func Test_parseTagPairs(t *testing.T) {
+	tests := []struct {
+		name          string
+		args          []string
+		confirm       bool
+		expected      []*pb.Tag
+		expectedError string
+	}{
+		{
+			name:          "comma separated tag pairs without confirm flag",
+			args:          []string{"key1=value1,key2=value2"},
+			expectedError: "multiple tags should be separated by spaces instead of commas, or specify --confirm to enable commas in tag values",
+		},
+		{
+			name:    "comma separated tag pairs with confirm flag",
+			args:    []string{"key1=value1,key2=value2"},
+			confirm: true,
+			expected: []*pb.Tag{
+				{
+					Name:  "key1",
+					Value: []byte("value1,key2=value2"),
+				},
+			},
+		},
+		{
+			name:    "single tag pair",
+			args:    []string{"key1=value1"},
+			confirm: true,
+			expected: []*pb.Tag{
+				{
+					Name:  "key1",
+					Value: []byte("value1"),
+				},
+			},
+		},
+		{
+			name:    "multiple tag pairs",
+			args:    []string{"key1=value1", "key2=value2"},
+			confirm: true,
+			expected: []*pb.Tag{
+				{
+					Name:  "key1",
+					Value: []byte("value1"),
+				},
+				{
+					Name:  "key2",
+					Value: []byte("value2"),
+				},
+			},
+		},
+		{
+			name:    "multiple tag pairs with comma values and confirm flag",
+			args:    []string{"key1=value1", "key2=value2,value3"},
+			confirm: true,
+			expected: []*pb.Tag{
+				{
+					Name:  "key1",
+					Value: []byte("value1"),
+				},
+				{
+					Name:  "key2",
+					Value: []byte("value2,value3"),
+				},
+			},
+		},
+		{
+			name:          "multiple tag pairs with comma values without confirm flag",
+			args:          []string{"key1=value1", "key2=value2,value3"},
+			expectedError: "multiple tags should be separated by spaces instead of commas, or specify --confirm to enable commas in tag values",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := parseTagPairs(tt.args, tt.confirm)
+			if tt.expectedError != "" {
+				require.Error(t, err)
+				require.Contains(t, err.Error(), tt.expectedError)
+				return
+			}
+
+			require.NoError(t, err)
+			require.Equal(t, tt.expected, got)
+		})
+	}
+}
@ -29,6 +29,8 @@ type accessPermissions struct {
|
|||||||
|
|
||||||
notBefore *time.Time
|
notBefore *time.Time
|
||||||
notAfter *time.Time
|
notAfter *time.Time
|
||||||
|
|
||||||
|
maxObjectTTL *time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ap *accessPermissions) Setup(params clingy.Parameters, prefixFlags bool) {
|
func (ap *accessPermissions) Setup(params clingy.Parameters, prefixFlags bool) {
|
||||||
@ -65,6 +67,12 @@ func (ap *accessPermissions) Setup(params clingy.Parameters, prefixFlags bool) {
|
|||||||
"Disallow access after this time (e.g. '+2h', 'now', '2020-01-02T15:04:05Z0700', 'none')",
|
"Disallow access after this time (e.g. '+2h', 'now', '2020-01-02T15:04:05Z0700', 'none')",
|
||||||
nil, clingy.Transform(parseHumanDateNotAfter), clingy.Type("relative_date"), clingy.Optional).(*time.Time)
|
nil, clingy.Transform(parseHumanDateNotAfter), clingy.Type("relative_date"), clingy.Optional).(*time.Time)
|
||||||
|
|
||||||
|
params.Break()
|
||||||
|
|
||||||
|
ap.maxObjectTTL = params.Flag("max-object-ttl",
|
||||||
|
"The object is automatically deleted after this period. (e.g. '1h30m', '24h', '720h')",
|
||||||
|
nil, clingy.Transform(time.ParseDuration), clingy.Type("period"), clingy.Optional).(*time.Duration)
|
||||||
|
|
||||||
if !prefixFlags {
|
if !prefixFlags {
|
||||||
ap.prefixes = params.Arg("prefix", "Key prefix access will be restricted to",
|
ap.prefixes = params.Arg("prefix", "Key prefix access will be restricted to",
|
||||||
clingy.Transform(ulloc.Parse),
|
clingy.Transform(ulloc.Parse),
|
||||||
@ -93,6 +101,7 @@ func (ap *accessPermissions) Apply(access *uplink.Access) (*uplink.Access, error
|
|||||||
AllowUpload: ap.AllowUpload(),
|
AllowUpload: ap.AllowUpload(),
|
||||||
NotBefore: ap.NotBefore(),
|
NotBefore: ap.NotBefore(),
|
||||||
NotAfter: ap.NotAfter(),
|
NotAfter: ap.NotAfter(),
|
||||||
|
MaxObjectTTL: ap.MaxObjectTTL(),
|
||||||
}
|
}
|
||||||
|
|
||||||
// if we aren't actually restricting anything, then we don't need to Share.
|
// if we aren't actually restricting anything, then we don't need to Share.
|
||||||
@ -120,9 +129,10 @@ func defaulted[T any](val *T, def T) T {
 	return def
 }
 
 func (ap *accessPermissions) NotBefore() time.Time { return defaulted(ap.notBefore, time.Time{}) }
 func (ap *accessPermissions) NotAfter() time.Time  { return defaulted(ap.notAfter, time.Time{}) }
 func (ap *accessPermissions) AllowDelete() bool    { return !defaulted(ap.disallowDeletes, ap.readonly) }
 func (ap *accessPermissions) AllowList() bool      { return !defaulted(ap.disallowLists, ap.writeonly) }
 func (ap *accessPermissions) AllowDownload() bool  { return !defaulted(ap.disallowReads, ap.writeonly) }
 func (ap *accessPermissions) AllowUpload() bool    { return !defaulted(ap.disallowWrites, ap.readonly) }
+func (ap *accessPermissions) MaxObjectTTL() *time.Duration { return ap.maxObjectTTL }
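For readers skimming the accessors: `defaulted` returns the pointed-to value when the corresponding flag was set and the fallback otherwise, which is how unset optional flags keep zero-value semantics. A small illustration (values invented):

```go
ttl := 24 * time.Hour
fmt.Println(defaulted[time.Duration](nil, 0)) // "0s" — flag unset, fallback wins
fmt.Println(defaulted(&ttl, 0))               // "24h0m0s" — flag set, pointer wins
```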
@ -8,7 +8,6 @@ import (
 	"fmt"
 
 	"github.com/zeebo/clingy"
-	"github.com/zeebo/errs"
 
 	"storj.io/storj/cmd/uplink/ulext"
 )
@ -33,7 +32,7 @@ func (c *cmdAccessUse) Execute(ctx context.Context) error {
 		return err
 	}
 	if _, ok := accesses[c.access]; !ok {
-		return errs.New("unknown access: %q", c.access)
+		return fmt.Errorf("ERROR: access %q does not exist. Use 'uplink access list' to see existing accesses", c.access)
 	}
 	if err := c.ex.SaveAccessInfo(c.access, accesses); err != nil {
 		return err
@ -15,7 +15,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/VividCortex/ewma"
 	"github.com/vbauerster/mpb/v8"
 	"github.com/vbauerster/mpb/v8/decor"
 	"github.com/zeebo/clingy"
@ -85,8 +84,7 @@ func (c *cmdCp) Setup(params clingy.Parameters) {
 	).(bool)
 	c.byteRange = params.Flag("range", "Downloads the specified range bytes of an object. For more information about the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35", "").(string)
 
-	parallelism := params.Flag("parallelism", "Controls how many parallel chunks to upload/download from a file", nil,
-		clingy.Optional,
+	c.parallelism = params.Flag("parallelism", "Controls how many parallel parts to upload/download from a file", 1,
 		clingy.Short('p'),
 		clingy.Transform(strconv.Atoi),
 		clingy.Transform(func(n int) (int, error) {
@ -95,8 +93,8 @@ func (c *cmdCp) Setup(params clingy.Parameters) {
 			}
 			return n, nil
 		}),
-	).(*int)
-	c.parallelismChunkSize = params.Flag("parallelism-chunk-size", "Set the size of the chunks for parallelism, 0 means automatic adjustment", memory.Size(0),
+	).(int)
+	c.parallelismChunkSize = params.Flag("parallelism-chunk-size", "Set the size of the parts for parallelism, 0 means automatic adjustment", memory.Size(0),
 		clingy.Transform(memory.ParseString),
 		clingy.Transform(func(n int64) (memory.Size, error) {
 			if n < 0 {
@ -107,17 +105,16 @@ func (c *cmdCp) Setup(params clingy.Parameters) {
 	).(memory.Size)
 
 	c.uploadConfig = testuplink.DefaultConcurrentSegmentUploadsConfig()
-	maxConcurrent := params.Flag(
+	c.uploadConfig.SchedulerOptions.MaximumConcurrent = params.Flag(
 		"maximum-concurrent-pieces",
-		"Maximum concurrent pieces to upload at once per transfer",
-		nil,
-		clingy.Optional,
+		"Maximum concurrent pieces to upload at once per part",
+		c.uploadConfig.SchedulerOptions.MaximumConcurrent,
 		clingy.Transform(strconv.Atoi),
 		clingy.Advanced,
-	).(*int)
+	).(int)
 	c.uploadConfig.SchedulerOptions.MaximumConcurrentHandles = params.Flag(
 		"maximum-concurrent-segments",
-		"Maximum concurrent segments to upload at once per transfer",
+		"Maximum concurrent segments to upload at once per part",
 		c.uploadConfig.SchedulerOptions.MaximumConcurrentHandles,
 		clingy.Transform(strconv.Atoi),
 		clingy.Advanced,
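With the compatibility shim removed in the next hunk, the parallelism flags apply directly; an illustrative invocation (paths invented, size syntax per the memory package):

```
uplink cp --parallelism 4 --parallelism-chunk-size 256MiB big.file sj://bucket/big.file
```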
@ -133,28 +130,6 @@ func (c *cmdCp) Setup(params clingy.Parameters) {
 		clingy.Advanced,
 	).(string)
 
-	{ // handle backwards compatibility around parallelism and maximum concurrent pieces
-		addr := func(x int) *int { return &x }
-
-		switch {
-		// if neither are actively set, use defaults
-		case parallelism == nil && maxConcurrent == nil:
-			parallelism = addr(1)
-			maxConcurrent = addr(c.uploadConfig.SchedulerOptions.MaximumConcurrent)
-
-		// if parallelism is not set, use a value based on maxConcurrent
-		case parallelism == nil:
-			parallelism = addr((*maxConcurrent + 99) / 100)
-
-		// if maxConcurrent is not set, use a value based on parallelism
-		case maxConcurrent == nil:
-			maxConcurrent = addr(100 * *parallelism)
-		}
-
-		c.uploadConfig.SchedulerOptions.MaximumConcurrent = *maxConcurrent
-		c.parallelism = *parallelism
-	}
-
 	c.inmemoryEC = params.Flag("inmemory-erasure-coding", "Keep erasure-coded pieces in-memory instead of writing them on the disk during upload", false,
 		clingy.Transform(strconv.ParseBool),
 		clingy.Boolean,
@ -194,9 +169,10 @@ func (c *cmdCp) Execute(ctx context.Context) error {
 	fs, err := c.ex.OpenFilesystem(ctx, c.access,
 		ulext.ConcurrentSegmentUploadsConfig(c.uploadConfig),
 		ulext.ConnectionPoolOptions(rpcpool.Options{
-			// Add a bit more capacity for connections to the satellite
-			Capacity:       c.uploadConfig.SchedulerOptions.MaximumConcurrent + 5,
-			KeyCapacity:    5,
+			// Allow at least as many connections as the maximum concurrent pieces per
+			// parallel part per transfer, plus a few extra for the satellite.
+			Capacity:       c.transfers*c.parallelism*c.uploadConfig.SchedulerOptions.MaximumConcurrent + 5,
+			KeyCapacity:    2,
 			IdleExpiration: 2 * time.Minute,
 		}))
 	if err != nil {
@ -419,17 +395,6 @@ func (c *cmdCp) copyFile(ctx context.Context, fs ulfs.Filesystem, source, dest u
 	}
 	defer func() { _ = mwh.Abort(ctx) }()
 
-	// if we're uploading, do a single part of maximum size
-	if dest.Remote() {
-		return errs.Wrap(c.singleCopy(
-			ctx,
-			source, dest,
-			mrh, mwh,
-			offset, length,
-			bar,
-		))
-	}
-
 	partSize, err := c.calculatePartSize(mrh.Length(), c.parallelismChunkSize.Int64())
 	if err != nil {
 		return err
@ -448,13 +413,15 @@ func (c *cmdCp) copyFile(ctx context.Context, fs ulfs.Filesystem, source, dest u
 // calculatePartSize returns the needed part size in order to upload the file with size of 'length'.
 // It hereby respects if the client requests/prefers a certain size and only increases if needed.
 func (c *cmdCp) calculatePartSize(length, preferredSize int64) (requiredSize int64, err error) {
-	segC := (length / maxPartCount / (memory.MiB * 64).Int64()) + 1
-	requiredSize = segC * (memory.MiB * 64).Int64()
+	segC := (length / maxPartCount / memory.GiB.Int64()) + 1
+	requiredSize = segC * memory.GiB.Int64()
 	switch {
 	case preferredSize == 0:
 		return requiredSize, nil
 	case requiredSize <= preferredSize:
 		return preferredSize, nil
+	case length < 0: // let the user pick their size if we don't have a length to know better
+		return preferredSize, nil
 	default:
 		return 0, errs.New(fmt.Sprintf("the specified chunk size %s is too small, requires %s or larger",
 			memory.FormatBytes(preferredSize), memory.FormatBytes(requiredSize)))
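To make the new sizing concrete: required part sizes now grow in 1 GiB steps instead of 64 MiB steps. A worked check of the expression above, with maxPartCount = 10000 inferred from the updated tests below (the constant is not visible in this hunk):

```go
// For a 10 TiB upload, assuming maxPartCount = 10000 and storj.io/common/memory:
length := 10 * memory.TiB.Int64()
segC := (length/10000)/memory.GiB.Int64() + 1 // 1 + 1 = 2
partSize := segC * memory.GiB.Int64()         // 2 GiB, matching TestCpPartSize below
```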
@ -535,8 +502,8 @@ func (c *cmdCp) parallelCopy(
 	}
 
 	var readBufs *ulfs.BytesPool
-	if p > 1 && chunkSize > 0 && (source.Std() || dest.Std()) {
-		// Create the read buffer pool only for uploads from stdin and downloads to stdout with parallelism > 1.
+	if p > 1 && chunkSize > 0 && source.Std() {
+		// Create the read buffer pool only for uploads from stdin with parallelism > 1.
 		readBufs = ulfs.NewBytesPool(int(chunkSize))
 	}
 
@ -557,6 +524,14 @@ func (c *cmdCp) parallelCopy(
 			break
 		}
 
+		if i == 0 && bar != nil {
+			info, err := src.Info(ctx)
+			if err == nil {
+				bar.SetTotal(info.ContentLength, false)
+				bar.EnableTriggerComplete()
+			}
+		}
+
 		wh, err := dst.NextPart(ctx, chunk)
 		if err != nil {
 			_ = rh.Close()
@ -578,12 +553,8 @@ func (c *cmdCp) parallelCopy(
 
 		var w io.Writer = wh
 		if bar != nil {
-			bar.SetTotal(rh.Info().ContentLength, false)
-			bar.EnableTriggerComplete()
 			pw := bar.ProxyWriter(w)
-			defer func() {
-				_ = pw.Close()
-			}()
+			defer func() { _ = pw.Close() }()
 			w = pw
 		}
 
@ -619,65 +590,9 @@ func (c *cmdCp) parallelCopy(
 	return errs.Wrap(combineErrs(es))
 }
 
-func (c *cmdCp) singleCopy(
-	ctx context.Context,
-	source, dest ulloc.Location,
-	src ulfs.MultiReadHandle,
-	dst ulfs.MultiWriteHandle,
-	offset, length int64,
-	bar *mpb.Bar) error {
-
-	if offset != 0 {
-		if err := src.SetOffset(offset); err != nil {
-			return err
-		}
-	}
-
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
-
-	rh, err := src.NextPart(ctx, length)
-	if err != nil {
-		return errs.Wrap(err)
-	}
-	defer func() { _ = rh.Close() }()
-
-	wh, err := dst.NextPart(ctx, length)
-	if err != nil {
-		return errs.Wrap(err)
-	}
-	defer func() { _ = wh.Abort() }()
-
-	var w io.Writer = wh
-	if bar != nil {
-		bar.SetTotal(rh.Info().ContentLength, false)
-		bar.EnableTriggerComplete()
-		pw := bar.ProxyWriter(w)
-		defer func() { _ = pw.Close() }()
-		w = pw
-	}
-
-	if _, err := sync2.Copy(ctx, w, rh); err != nil {
-		return errs.Wrap(err)
-	}
-
-	if err := wh.Commit(); err != nil {
-		return errs.Wrap(err)
-	}
-
-	if err := dst.Commit(ctx); err != nil {
-		return errs.Wrap(err)
-	}
-
-	return nil
-}
-
 func newProgressBar(progress *mpb.Progress, name string, which, total int) *mpb.Bar {
 	const counterFmt = " % .2f / % .2f"
 	const percentageFmt = "%.2f "
-	const speedFmt = "% .2f"
-
-	movingAverage := ewma.NewMovingAverage()
 
 	prepends := []decor.Decorator{decor.Name(name + " ")}
 	if total > 1 {
@ -687,7 +602,6 @@ func newProgressBar(progress *mpb.Progress, name string, which, total int) *mpb.
 
 	appends := []decor.Decorator{
 		decor.NewPercentage(percentageFmt),
-		decor.MovingAverageSpeed(decor.SizeB1024(1024), speedFmt, movingAverage),
 	}
 
 	return progress.AddBar(0,
@ -99,46 +99,51 @@ func TestCpDownload(t *testing.T) {
 func TestCpPartSize(t *testing.T) {
 	c := newCmdCp(nil)
 
-	// 1GiB file, should return 64MiB
-	partSize, err := c.calculatePartSize(memory.GiB.Int64(), c.parallelismChunkSize.Int64())
+	// 10 GiB file, should return 1 GiB
+	partSize, err := c.calculatePartSize(10*memory.GiB.Int64(), c.parallelismChunkSize.Int64())
 	require.NoError(t, err)
-	require.EqualValues(t, memory.MiB*64, partSize)
+	require.EqualValues(t, 1*memory.GiB, partSize)
 
-	// 640 GB file, should return 64MiB.
-	partSize, err = c.calculatePartSize(memory.GB.Int64()*640, c.parallelismChunkSize.Int64())
+	// 10000 GB file, should return 1 GiB.
+	partSize, err = c.calculatePartSize(10000*memory.GB.Int64(), c.parallelismChunkSize.Int64())
 	require.NoError(t, err)
-	require.EqualValues(t, memory.MiB*64, partSize)
+	require.EqualValues(t, 1*memory.GiB, partSize)
 
-	// 640GiB file, should return 128MiB.
-	partSize, err = c.calculatePartSize(memory.GiB.Int64()*640, c.parallelismChunkSize.Int64())
+	// 10000 GiB file, should return 2 GiB.
+	partSize, err = c.calculatePartSize(10000*memory.GiB.Int64(), c.parallelismChunkSize.Int64())
 	require.NoError(t, err)
-	require.EqualValues(t, memory.MiB*128, partSize)
+	require.EqualValues(t, 2*memory.GiB, partSize)
 
-	// 1TiB file, should return 128MiB.
-	partSize, err = c.calculatePartSize(memory.TiB.Int64(), c.parallelismChunkSize.Int64())
+	// 10 TiB file, should return 2 GiB.
+	partSize, err = c.calculatePartSize(10*memory.TiB.Int64(), c.parallelismChunkSize.Int64())
 	require.NoError(t, err)
-	require.EqualValues(t, memory.MiB*128, partSize)
+	require.EqualValues(t, 2*memory.GiB, partSize)
 
-	// 1.3TiB file, should return 192MiB.
-	partSize, err = c.calculatePartSize(memory.GiB.Int64()*1300, c.parallelismChunkSize.Int64())
+	// 20001 GiB file, should return 3 GiB.
+	partSize, err = c.calculatePartSize(20001*memory.GiB.Int64(), c.parallelismChunkSize.Int64())
 	require.NoError(t, err)
-	require.EqualValues(t, memory.MiB*192, partSize)
+	require.EqualValues(t, 3*memory.GiB, partSize)
 
 	// should return 1GiB as requested.
 	partSize, err = c.calculatePartSize(memory.GiB.Int64()*1300, memory.GiB.Int64())
 	require.NoError(t, err)
 	require.EqualValues(t, memory.GiB, partSize)
 
-	// should return 192 MiB and error, since preferred is too low.
-	partSize, err = c.calculatePartSize(memory.GiB.Int64()*1300, memory.MiB.Int64())
+	// should error, since the preferred size is too low; 1 GiB would be required.
+	partSize, err = c.calculatePartSize(1300*memory.GiB.Int64(), memory.MiB.Int64())
 	require.Error(t, err)
-	require.Equal(t, "the specified chunk size 1.0 MiB is too small, requires 192.0 MiB or larger", err.Error())
+	require.Equal(t, "the specified chunk size 1.0 MiB is too small, requires 1.0 GiB or larger", err.Error())
 	require.Zero(t, partSize)
 
-	// negative length should return 64MiB part size
-	partSize, err = c.calculatePartSize(-1, c.parallelismChunkSize.Int64())
+	// negative length should return the asked-for amount
+	partSize, err = c.calculatePartSize(-1, 1*memory.GiB.Int64())
 	require.NoError(t, err)
-	require.EqualValues(t, memory.MiB*64, partSize)
+	require.EqualValues(t, 1*memory.GiB, partSize)
+
+	// negative length should return the specified amount
+	partSize, err = c.calculatePartSize(-1, 100)
+	require.NoError(t, err)
+	require.EqualValues(t, 100, partSize)
 }
 
 func TestCpUpload(t *testing.T) {
@ -104,15 +104,16 @@ func (c *cmdShare) Execute(ctx context.Context) error {
 
 	fmt.Fprintf(clingy.Stdout(ctx), "Sharing access to satellite %s\n", access.SatelliteAddress())
 	fmt.Fprintf(clingy.Stdout(ctx), "=========== ACCESS RESTRICTIONS ==========================================================\n")
 	fmt.Fprintf(clingy.Stdout(ctx), "Download : %s\n", formatPermission(c.ap.AllowDownload()))
 	fmt.Fprintf(clingy.Stdout(ctx), "Upload : %s\n", formatPermission(c.ap.AllowUpload()))
 	fmt.Fprintf(clingy.Stdout(ctx), "Lists : %s\n", formatPermission(c.ap.AllowList()))
 	fmt.Fprintf(clingy.Stdout(ctx), "Deletes : %s\n", formatPermission(c.ap.AllowDelete()))
 	fmt.Fprintf(clingy.Stdout(ctx), "NotBefore : %s\n", formatTimeRestriction(c.ap.NotBefore()))
 	fmt.Fprintf(clingy.Stdout(ctx), "NotAfter : %s\n", formatTimeRestriction(c.ap.NotAfter()))
-	fmt.Fprintf(clingy.Stdout(ctx), "Paths : %s\n", formatPaths(c.ap.prefixes))
+	fmt.Fprintf(clingy.Stdout(ctx), "MaxObjectTTL : %s\n", formatDuration(c.ap.maxObjectTTL))
+	fmt.Fprintf(clingy.Stdout(ctx), "Paths : %s\n", formatPaths(c.ap.prefixes))
 	fmt.Fprintf(clingy.Stdout(ctx), "=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========\n")
 	fmt.Fprintf(clingy.Stdout(ctx), "Access : %s\n", newAccessData)
 
 	if c.register {
 		credentials, err := RegisterAccess(ctx, access, c.authService, c.public, c.caCert)
@ -182,6 +183,13 @@ func formatTimeRestriction(t time.Time) string {
 	return formatTime(true, t)
 }
 
+func formatDuration(d *time.Duration) string {
+	if d == nil {
+		return "Not set"
+	}
+	return d.String()
+}
+
 func formatPaths(sharePrefixes []uplink.SharePrefix) string {
 	if len(sharePrefixes) == 0 {
 		return "WARNING! The entire project is shared!"
@ -33,15 +33,16 @@ func TestShare(t *testing.T) {
 		state.Succeed(t, "share", "sj://some/prefix").RequireStdoutGlob(t, `
 			Sharing access to satellite *
 			=========== ACCESS RESTRICTIONS ==========================================================
 			Download : Allowed
 			Upload : Disallowed
 			Lists : Allowed
 			Deletes : Disallowed
 			NotBefore : No restriction
 			NotAfter : No restriction
+			MaxObjectTTL : Not set
 			Paths : sj://some/prefix
 			=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
 			Access : *
 		`)
 	})
@ -51,15 +52,16 @@ func TestShare(t *testing.T) {
 		state.Succeed(t, "share", "--readonly", "sj://some/prefix").RequireStdoutGlob(t, `
 			Sharing access to satellite *
 			=========== ACCESS RESTRICTIONS ==========================================================
 			Download : Allowed
 			Upload : Disallowed
 			Lists : Allowed
 			Deletes : Disallowed
 			NotBefore : No restriction
 			NotAfter : No restriction
+			MaxObjectTTL : Not set
 			Paths : sj://some/prefix
 			=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
 			Access : *
 		`)
 	})
@ -69,15 +71,16 @@ func TestShare(t *testing.T) {
 		state.Succeed(t, "share", "--disallow-lists", "sj://some/prefix").RequireStdoutGlob(t, `
 			Sharing access to satellite *
 			=========== ACCESS RESTRICTIONS ==========================================================
 			Download : Allowed
 			Upload : Disallowed
 			Lists : Disallowed
 			Deletes : Disallowed
 			NotBefore : No restriction
 			NotAfter : No restriction
+			MaxObjectTTL : Not set
 			Paths : sj://some/prefix
 			=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
 			Access : *
 		`)
 	})
@ -87,15 +90,16 @@ func TestShare(t *testing.T) {
 		state.Succeed(t, "share", "--disallow-reads", "sj://some/prefix").RequireStdoutGlob(t, `
 			Sharing access to satellite *
 			=========== ACCESS RESTRICTIONS ==========================================================
 			Download : Disallowed
 			Upload : Disallowed
 			Lists : Allowed
 			Deletes : Disallowed
 			NotBefore : No restriction
 			NotAfter : No restriction
+			MaxObjectTTL : Not set
 			Paths : sj://some/prefix
 			=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
 			Access : *
 		`)
 	})
@ -116,33 +120,54 @@ func TestShare(t *testing.T) {
 		state.Succeed(t, "share", "--public", "--not-after=none", "sj://some/prefix").RequireStdoutGlob(t, `
 			Sharing access to satellite *
 			=========== ACCESS RESTRICTIONS ==========================================================
 			Download : Allowed
 			Upload : Disallowed
 			Lists : Allowed
 			Deletes : Disallowed
 			NotBefore : No restriction
 			NotAfter : No restriction
+			MaxObjectTTL : Not set
 			Paths : sj://some/prefix
 			=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
 			Access : *
 		`)
 	})
 
-	t.Run("share access with --not-after time restriction parameter", func(t *testing.T) {
+	t.Run("share access with --not-after", func(t *testing.T) {
 		state := ultest.Setup(commands)
 
 		state.Succeed(t, "share", "--not-after", "2022-01-01T15:01:01-01:00", "sj://some/prefix").RequireStdoutGlob(t, `
 			Sharing access to satellite *
 			=========== ACCESS RESTRICTIONS ==========================================================
 			Download : Allowed
 			Upload : Disallowed
 			Lists : Allowed
 			Deletes : Disallowed
 			NotBefore : No restriction
 			NotAfter : 2022-01-01 16:01:01
+			MaxObjectTTL : Not set
 			Paths : sj://some/prefix
 			=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
 			Access : *
+		`)
+	})
+
+	t.Run("share access with --max-object-ttl", func(t *testing.T) {
+		state := ultest.Setup(commands)
+
+		state.Succeed(t, "share", "--max-object-ttl", "720h", "--readonly=false", "sj://some/prefix").RequireStdoutGlob(t, `
+			Sharing access to satellite *
+			=========== ACCESS RESTRICTIONS ==========================================================
+			Download : Allowed
+			Upload : Allowed
+			Lists : Allowed
+			Deletes : Allowed
+			NotBefore : No restriction
+			NotAfter : No restriction
+			MaxObjectTTL : 720h0m0s
+			Paths : sj://some/prefix
+			=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
+			Access : *
 		`)
 	})
@ -184,15 +209,16 @@ func TestShare(t *testing.T) {
 		expected := `
 			Sharing access to satellite *
 			=========== ACCESS RESTRICTIONS ==========================================================
 			Download : Allowed
 			Upload : Disallowed
 			Lists : Allowed
 			Deletes : Disallowed
 			NotBefore : No restriction
 			NotAfter : No restriction
+			MaxObjectTTL : Not set
 			Paths : sj://some/prefix
 			=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
 			Access : *
 			========== GATEWAY CREDENTIALS ===========================================================
 			Access Key ID: accesskeyid
 			Secret Key : secretkey
@ -21,6 +21,8 @@ import (
 	"github.com/jtolio/eventkit"
 	"github.com/spacemonkeygo/monkit/v3"
+	"github.com/spacemonkeygo/monkit/v3/collect"
+	"github.com/spacemonkeygo/monkit/v3/present"
 	"github.com/zeebo/clingy"
 	"github.com/zeebo/errs"
 	"go.uber.org/zap"
@ -28,6 +30,7 @@ import (
 	"storj.io/common/experiment"
 	"storj.io/common/rpc/rpctracing"
+	"storj.io/common/sync2/mpscqueue"
 	"storj.io/common/tracing"
 	jaeger "storj.io/monkit-jaeger"
 	"storj.io/private/version"
@ -68,8 +71,9 @@ type external struct {
 	}
 
 	debug struct {
 		pprofFile string
 		traceFile string
+		monkitTraceFile string
 	}
 
 	events struct {
@ -124,7 +128,7 @@ func (ex *external) Setup(f clingy.Flags) {
 	).(string)
 
 	ex.tracing.tags = f.Flag(
-		"trace-tags", "coma separated k=v pairs to be added to distributed traces", map[string]string{},
+		"trace-tags", "comma separated k=v pairs to be added to distributed traces", map[string]string{},
 		clingy.Advanced,
 		clingy.Transform(func(val string) (map[string]string, error) {
 			res := map[string]string{}
@ -151,6 +155,11 @@ func (ex *external) Setup(f clingy.Flags) {
 		clingy.Advanced,
 	).(string)
 
+	ex.debug.monkitTraceFile = f.Flag(
+		"debug-monkit-trace", "File to collect Monkit trace data. Understands file extensions .json and .svg", "",
+		clingy.Advanced,
+	).(string)
+
 	ex.analytics = f.Flag(
 		"analytics", "Whether to send usage information to Storj", nil,
 		clingy.Transform(strconv.ParseBool), clingy.Optional, clingy.Boolean,
@ -371,8 +380,60 @@ func (ex *external) Wrap(ctx context.Context, cmd clingy.Command) (err error) {
 		eventkit.DefaultRegistry.Scope("init").Event("init")
 	}
 
-	defer mon.Task()(&ctx)(&err)
-	return cmd.Execute(ctx)
+	var workErr error
+	work := func(ctx context.Context) {
+		defer mon.Task()(&ctx)(&err)
+		workErr = cmd.Execute(ctx)
+	}
+
+	var formatter func(io.Writer, []*collect.FinishedSpan) error
+	switch {
+	default:
+		work(ctx)
+		return workErr
+	case strings.HasSuffix(strings.ToLower(ex.debug.monkitTraceFile), ".svg"):
+		formatter = present.SpansToSVG
+	case strings.HasSuffix(strings.ToLower(ex.debug.monkitTraceFile), ".json"):
+		formatter = present.SpansToJSON
+	}
+
+	spans := mpscqueue.New[collect.FinishedSpan]()
+	collector := func(s *monkit.Span, err error, panicked bool, finish time.Time) {
+		spans.Enqueue(collect.FinishedSpan{
+			Span:     s,
+			Err:      err,
+			Panicked: panicked,
+			Finish:   finish,
+		})
+	}
+
+	defer collect.ObserveAllTraces(monkit.Default, spanCollectorFunc(collector))()
+	work(ctx)
+
+	fh, err := os.Create(ex.debug.monkitTraceFile)
+	if err != nil {
+		return errs.Combine(workErr, err)
+	}
+
+	var spanSlice []*collect.FinishedSpan
+	for {
+		next, ok := spans.Dequeue()
+		if !ok {
+			break
+		}
+		spanSlice = append(spanSlice, &next)
+	}
+
+	err = formatter(fh, spanSlice)
+	return errs.Combine(workErr, err, fh.Close())
+}
+
+type spanCollectorFunc func(*monkit.Span, error, bool, time.Time)
+
+func (f spanCollectorFunc) Start(*monkit.Span) {}
+
+func (f spanCollectorFunc) Finish(s *monkit.Span, err error, panicked bool, finish time.Time) {
+	f(s, err, panicked, finish)
 }
 
 func tracked(ctx context.Context, cb func(context.Context)) (done func()) {
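With the flag and collector in place, a trace can be captured around any command run; an illustrative invocation (subcommand and filename invented, flag placement per clingy's global flag handling):

```
uplink --debug-monkit-trace uplink-trace.svg ls sj://bucket
```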
@ -43,6 +43,16 @@ func (ex *external) OpenProject(ctx context.Context, accessName string, options
 		UserAgent: uplinkCLIUserAgent,
 	}
 
+	userAgents, err := ex.Dynamic("client.user-agent")
+	if err != nil {
+		return nil, err
+	}
+	if len(userAgents) > 0 {
+		if ua := userAgents[len(userAgents)-1]; ua != "" {
+			config.UserAgent = ua
+		}
+	}
+
 	if opts.ConnectionPoolOptions != (rpcpool.Options{}) {
 		if err := transport.SetConnectionPool(ctx, &config, rpcpool.New(opts.ConnectionPoolOptions)); err != nil {
 			return nil, err
273	docs/blueprints/certified-nodes.md	Normal file
@ -0,0 +1,273 @@
# Node and operator certification

## Abstract

This is a proposal for a small feature and service that allows for nodes and
operators to have signed tags of certain kinds for use in project-specific or
Satellite-specific node selection.

## Background/context

We have a couple of ongoing needs:

* 1099 KYC
* Private storage node networks
* SOC2/HIPAA/etc node certification
* Voting and operator signaling

### 1099 KYC

The United States has a rule that if node operators earn more than $600/year,
we need to file a 1099 for each of them. Our current way of dealing with this
is manual and time consuming, so it would be nice to automate it.

Ultimately, we should be able to automatically:

1) keep track of which nodes are run by operators under or over the $600
   threshold.
2) keep track of whether an automated KYC service has signed off that we have
   the necessary information to file a 1099.
3) automatically suspend nodes that have earned more than $600 but have not
   provided legally required information.

### Private storage node networks

We have seen growing interest from customers that want to bring their own
hard drives, or be extremely choosy about the nodes they are willing to work
with. The current way we are solving this is spinning up private Satellites
that are configured to only work with the nodes those customers provide, but
it would be better if we didn't have to start custom Satellites for this.

Instead, it would be nice to have a per-project configuration on an existing
Satellite that allowed that project to specify a specific subset of verified
or validated nodes, e.g., Project A should be able to say only nodes from
node providers B and C should be selected. Symmetrically, nodes from providers
B and C may only want to accept data from certain projects, like Project A.

When nodes from providers B and C are added to the Satellite, they should be
able to provide a provider-specific signature, along with any
customer-specific requirements.

### SOC2/HIPAA/etc node certification

This is actually just a slightly different shape of the private storage node
network problem, but instead of being provider-specific, it is property
specific.

Perhaps Project D has a compliance requirement. They can only store data
on nodes that meet specific requirements.

Node operators E and F are willing to conform and attest to these compliance
requirements, but don't know about Project D. It would be nice if node
operators E and F could navigate to a compliance portal and see a list of
potential compliance attestations available. For possible compliance
attestations, node operators could sign agreements for these, and then receive
a verified signature that shows their selected compliance options.

Then, Project D's node selection process would filter by nodes that had been
approved for the necessary compliance requirements.

### Voting and operator signaling

As Satellite operators ourselves, we are currently engaged in a discussion about
pricing changes with storage node operators. Future Satellite operators may find
themselves in similar situations. It would be nice if storage node operators
could indicate votes for values. This would potentially be more representative
of network sentiment than posts on a forum.

Note that this isn't a transparent voting scheme, where other voters can see
the votes made, so this may not be a great voting solution in general.

## Design and implementation

I believe there are two basic building blocks that solve all of the above
issues:

* Signed node tags (with potential values)
* A document signing service

### Signed node tags

The network representation:

```
message Tag {
    // Note that there is a single flat namespace of all names per
    // signer node id. Signers should be careful to make sure that
    // there are no name collisions. For self-signed content-hash
    // based values, the name should have the prefix of the content
    // hash.
    string name = 1;
    bytes value = 2; // optional, representation dependent on name.
}

message TagSet {
    // must always be set. this is the node the signer is signing for.
    bytes node_id = 1;

    repeated Tag tags = 2;

    // must always be set. this makes sure the signature is signing the
    // timestamp inside.
    int64 timestamp = 3;
}

message SignedTagSet {
    // this is the serialized form of TagSet, serialized so that
    // the signature process has something stable to work with.
    bytes serialized_tag = 1;

    // this is who signed (could be self signed, could be well known).
    bytes signer_node_id = 3;
    bytes signature = 4;
}

message SignedTagSets {
    repeated SignedTagSet tags = 1;
}
```
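To make the signing flow concrete, here is a rough sketch of how a node might self-sign a `TagSet`. None of this is in the proposal yet — `ed25519` and `proto.Marshal` are stand-ins for whatever identity keys and serialization the real implementation uses, and the Go types are assumed to be generated from the messages above:

```go
// Sketch only: assumes Go types generated from the proto messages above,
// and uses ed25519 as a placeholder for the node identity key.
func selfSign(set *TagSet, nodeID []byte, key ed25519.PrivateKey) (*SignedTagSet, error) {
	set.NodeId = nodeID
	set.Timestamp = time.Now().Unix()

	// Serialize first so the signature covers stable bytes.
	serialized, err := proto.Marshal(set)
	if err != nil {
		return nil, err
	}
	return &SignedTagSet{
		SerializedTag: serialized,
		SignerNodeId:  nodeID, // self-signed
		Signature:     ed25519.Sign(key, serialized),
	}, nil
}
```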

Note that every tag is signing a name/value pair (value optional) against
a specific node id.

Note also that names are only unique within the namespace of a given signer.

The database representation on the Satellite. N.B.: nothing should be entered
into this database without validation:

```
model signed_tags (
    field node_id         blob
    field name            text
    field value           blob
    field timestamp       int64
    field signer_node_id  blob
)
```

The "signer_node_id" is worth more explanation. Every signer should have a
stable node id. Satellites and storage nodes already have one, but any other
service that validates node tags would also need one.
In particular, the document signing service (below) would have its own unique
node id for signing tags, whereas for voting-style tags or tags based on a
content-addressed identifier (e.g. a hash of a document), the nodes would
self-sign.

### Document signing service

We would start a small web service, where users can log in and sign and fill
out documents. This web service would then create a unique activation code
that storage node operators could run on their storage nodes for activation and
signing. They could run `storagenode activate <code>` and then the node would
reach out to the signing service and get a `SignedTag` related to that node
given the information the user provided. The node could then present these
to the satellite.

Ultimately, the document signing service will require a separate design doc,
but here are some considerations for it:

Activation codes must expire shortly. Even Netflix has two hours of validity
for their service code - for a significantly less critical use case. What would
be a usable validity time for our use case? 15 minutes? 1 hour? Should we make
it configurable?

We want to still keep usability in mind for a SNO who needs to activate 500
nodes.

It would be even better if the SNO could force invalidating the activation code
when they are done with it.

As activation codes expire, the SNO should be able to generate a new activation
code if they want to associate a new node with an already signed document.

It should be hard to brute-force activation codes. They shouldn't be simple
numbers (4-digit or 6-digit) but something as complex as a UUID.

It's also possible that a SNO uses some signature mechanism during signing
service authentication, and the same signature is used for activation. If the
same signature mechanism is used during activation then no token is necessary.

### Update node selection

Once the above two building blocks exist, many problems become much more easily
solvable.

We would want to extend node selection to be able to do queries,
given project-specific configuration, based on these signed_tag values.

Because node selection mostly happens in memory from cached node table data,
it should be easy to add some denormalized data for certain selected cases,
such as:

* Document hashes nodes have self signed.
* Approval states based on well-known third party signer nodes (a KYC service).

Once these fields exist, node selection can happen as before, filtering
for the appropriate value given project settings.
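As an illustration of the kind of in-memory filter this enables — every type here is invented for the sketch, and the real cache structures will differ:

```go
import "bytes"

type NodeID [32]byte

type Tag struct {
	Name  string
	Value []byte
}

// TagIndex is a hypothetical denormalized view of validated signed tags,
// keyed by signer and tag name.
type TagIndex map[NodeID]map[string]Tag

type CachedNode struct {
	ID         NodeID
	SignedTags TagIndex
}

func (idx TagIndex) Lookup(signer NodeID, name string) (Tag, bool) {
	t, ok := idx[signer][name]
	return t, ok
}

// eligible keeps only nodes carrying the given tag value from a trusted signer,
// mirroring how a project-specific filter could run over the selection cache.
func eligible(nodes []CachedNode, signer NodeID, name string, want []byte) []CachedNode {
	var out []CachedNode
	for _, n := range nodes {
		if tag, ok := n.SignedTags.Lookup(signer, name); ok && bytes.Equal(tag.Value, want) {
			out = append(out, n)
		}
	}
	return out
}
```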
## How these building blocks work for the example use cases

### 1099 KYC

The document signing service would have a KYC (Know Your Customer) form. Once
filled out, the document signing service would make a `TagSet` that includes all
of the answers to the KYC questions, for the given node id, signed by the
document signing service's node id.

The node would hang on to this `SignedTagSet` and submit it along with others
in a `SignedTagSets` to Satellites occasionally (maybe once a month during
node CheckIn).

### Private storage node networks

Storage node provisioning would provide nodes with a signed `SignedTagSet`
from a provisioning service that had its own node id. Then a private Satellite
could be configured to require that all nodes present a `SignedTagSet` signed
by the configured provisioning service that has that node's id in it.

Notably - this functionality could also be solved by the older waitlist node
identity signing certificate process, but we are slowly removing what remains
of that feature over time.

This functionality could also be solved by setting the Satellite's minimum
allowable node id difficulty to the maximum possible difficulty, thus preventing
any automatic node registration, and manually inserting node ids into the
database. This is what we are currently doing for private network trials, but
if `SignedTagSet`s existed, that would be easier.

### SOC2/HIPAA/etc node certification

For any type of document that doesn't require any third party service
(such as government id validation, etc), the document and its fields can be
filled out and self signed by the node, along with a content hash of the
document in question.

The node would create a `TagSet`, where one field is the hash of the legal
document that was agreed upon, and the remaining fields (with names prefixed
by the document's content hash) would be form fields
that the node operator filled in and ascribed to the document. Then, the
`TagSet` would be signed by the node itself. The cryptographic nature of the
content hash inside the `TagSet` would validate what the node operator had
agreed to.

### Voting and operator signaling

Node operators could self sign additional `Tag`s inside of a miscellaneous
`TagSet`, including `Tag`s such as

```
"storage-node-vote-20230611-network-change": "yes"
```

Or similar.

## Open problems

* Revocation? - `TagSet`s have a timestamp inside that must be filled out. In
  the future, certain tags could have an expiry or updated values or similar.

## Other options

## Wrapup

## Related work
|
163
docs/blueprints/fix-deletes-and-server-side-copy.md
Normal file
163
docs/blueprints/fix-deletes-and-server-side-copy.md
Normal file
@ -0,0 +1,163 @@
|
|||||||
|
# Fix deletes (and server side copy!)

## Abstract

Hey, let's make deletes faster by relying on GC. If we do this, there are some
additional fun implications.

## Background/context

We are having a lot of trouble with deletes with customers. In the last month
we have received critical feedback from a couple of customers (ask if you want
to know) about how hard it is to delete a bucket. A customer wants to stop
paying us for a bucket they no longer want, maybe due to the high per-segment
fee or otherwise.

The main thing customers want is to be able to issue a delete and have us
manage the delete process in the background.

There are two kinds of deletes right now (besides setting a TTL on objects):
explicit deletes and garbage collection. Explicit deletes are supposed to
happen immediately and not result in unpaid data for the storage node (though
they don't right now), whereas garbage is generated due to long tail
cancelation or other reasons, and is unfortunately a cost to storage node
operators in that they are not paid for data that is considered garbage.
Garbage is cleaned up by a garbage collection process that stores data for an
additional week after being identified as garbage in the trash for recovery
purposes. We have long desired to have as many deletes be explicit deletes as
possible for the above reasons.

The way explicit deletes work right now is that the Uplink sends the Satellite a
delete request. The Satellite, in an attempt to both provide backpressure and
reduce garbage, then issues delete requests to the storage nodes, while keeping
the Uplink waiting. The benefit of the Satellite doing this is that the
Satellite attempts to batch some of these delete requests.

Unfortunately, because backups are snapshots at points in time, and Satellites
might be recovered from backup, storage nodes are currently unable to fully
delete these explicitly deleted objects. The process for recovering a Satellite
from backup is to first recover its backed up metadata, and then to issue a
restore-from-trash to all storage nodes. So, as a result, any of the gains we've
tried to get from explicit deletes are illusory because explicitly deleted data
goes into the trash just like any other garbage.

It has been our intention to eventually restore the functionality of storage
nodes being able to explicitly delete data through some sort of proof-of-delete
system that storage nodes can present to amnesiac Satellites, or to improve
the Satellite backup system to have a write-ahead log so that backups don't
forget anything. But this has remained a low priority for years, and the
costs of doing so might outweigh the benefits.

One additional consideration is that explicit deletes complicate
server-side copy. Server-side copy must keep track of reference counting or
reference lists so that explicit deletes are not errantly issued too soon.
Keeping track of reference counting or reference lists is a significant burden
of bookkeeping. It adds many additional corner cases in nearly every object
interaction path, and reduces the overall performance of copied objects by
increasing the number of database requests for them.

Consider instead another option! We don't do any of this!

## Design and implementation

No explicit deletes. When an uplink deletes data, it deletes it from the
Satellite only.

The Satellite will clean the data up on the storage nodes through the standard
garbage collection process.

That's it!

In case you're wondering, here are stats about optimal bloom filter sizing:

```
pieces     size (10% false positives)
100000     58.4 KiB
1000000    583.9 KiB
10000000   5.7 MiB
100000000  57.0 MiB
```
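These figures are consistent with the standard optimal bloom filter sizing formula, m = -n * ln(p) / (ln 2)^2 bits for n items at false-positive rate p. A quick check (a sketch, not code from this proposal):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	p := 0.10 // target false-positive rate
	for _, n := range []float64{1e5, 1e6, 1e7, 1e8} {
		bits := -n * math.Log(p) / (math.Ln2 * math.Ln2)
		fmt.Printf("%.0f pieces -> %.1f KiB\n", n, bits/8/1024)
	}
	// Prints roughly 58.5 KiB, 585 KiB, 5850 KiB (~5.7 MiB), and
	// 58503 KiB (~57 MiB) — matching the table above.
}
```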
### BUT WAIT, THERE'S MORE

If we no longer have explicit deletes, we can dramatically simplify server-side
copy! Instead of having many other tables with backreferences, and keeping track
of copied objects separately and differently from uncopied objects and ancestor
objects and so on, we don't need any of that.

Copied objects can simply be full copies of the metadata, and we don't need to
keep track of when the last copy of a specific stream disappears.

This would considerably improve Satellite performance, load, and overhead on
copied objects.

This would considerably reduce the complexity of the Satellite codebase and data
model, which itself would reduce the challenges developers face when interacting
with our object model.
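A minimal sketch of what "full copies of the metadata" means, with hypothetical types rather than the actual metainfo schema:

```go
package main

import "fmt"

// SegmentMeta is an illustrative stand-in for per-segment metadata, including
// the piece list that previously lived only on the ancestor object.
type SegmentMeta struct {
	StreamID string
	Pieces   []string
}

// copyObject sketches the simplified server-side copy: the destination gets a
// complete, independent copy of the metadata under a fresh stream ID, so no
// backreferences or reference counts are needed, and either object can later
// be garbage-collected entirely on its own.
func copyObject(src []SegmentMeta, newStreamID string) []SegmentMeta {
	dst := make([]SegmentMeta, len(src))
	for i, seg := range src {
		dst[i] = SegmentMeta{
			StreamID: newStreamID,
			Pieces:   append([]string(nil), seg.Pieces...), // deep copy
		}
	}
	return dst
}

func main() {
	src := []SegmentMeta{{StreamID: "s1", Pieces: []string{"p1", "p2"}}}
	fmt.Println(copyObject(src, "s2"))
}
```

The design trade-off is storing the piece lists twice in exchange for removing every copied-object special case from the object interaction paths.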
## Other options

Stick with the current plan.

## Migration

Migration can happen in the following order:

* We will first need to stop doing explicit deletes everywhere, so that
  we don't accidentally delete anything.
* Then we will need to remove the server-side copy code, so that object
  copies simply copy the metadata outright, without any of the copied-object
  bookkeeping.
* Once there is no risk and no incoming queue, we can run a job that
  iterates through all existing copied objects and denormalizes them to
  get rid of the copied-object bookkeeping (sketched below).
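A sketch of that denormalization job, using illustrative in-memory stand-ins for the segments and segment_copies tables (the real job would of course run against the database):

```go
package main

import "fmt"

// Copy records that a stream is a copy of an ancestor stream; it stands in
// for a segment_copies row.
type Copy struct {
	StreamID         string
	AncestorStreamID string
}

// denormalize sketches the one-shot migration: for every copied object,
// duplicate the ancestor's piece metadata into the copy itself, after which
// the segment_copies row can be dropped.
func denormalize(copies []Copy, ancestors map[string][]string) map[string][]string {
	segments := make(map[string][]string)
	for _, c := range copies {
		pieces := ancestors[c.AncestorStreamID]
		segments[c.StreamID] = append([]string(nil), pieces...)
	}
	return segments
}

func main() {
	copies := []Copy{{StreamID: "copy-1", AncestorStreamID: "orig-1"}}
	ancestors := map[string][]string{"orig-1": {"p1", "p2"}}
	fmt.Println(denormalize(copies, ancestors))
}
```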
## Wrapup

We should just do this. It feels painful to give up on explicit deletes, but
considering we have not had them actually working for years, everyone seems
happy, and fixing them has never been a priority, we could bite the bullet,
commit to this, and dramatically improve lots of other things.

It also feels painful to give up on the existing server-side copy design, but
that is a sunk cost.

## Additional Notes

1. With this proposal, storage nodes will store data for longer (until GC cleans up the files). I think this is acceptable:

   * For objects stored for a long period, it makes little difference (1 year vs. 1 year + 1 day...)
   * For objects uploaded and downloaded over a short period: upload + delete alone doesn't make sense as a workload. For upload + download + delete, it's good business anyway, as the big money is in egress, not in storage. As an SNO, I am fine with this.

2. GDPR includes the 'right to be forgotten'. I think this proposal should be compatible (but IANAL): if the metadata (including the encryption key) is no longer available, there isn't any way to read the data.

3. There is one exception: say I start to download some data, but meanwhile the owner deletes it. An explicit delete may break the read (pieces are disappearing, remaining segments might be missing...),

   while under this proposal I can finish the download if I already have the order limits from the satellite (pieces remain until the next GC).

   I don't know whether this difference matters or not.

One other point on objects that are stored for a short amount of time, as noted above: we could potentially introduce a minimum storage duration to help cover costs.

## Q&A

> 1. What about node tallies? Without additional bookkeeping it may be hard to avoid paying an SNO for copies: the SNO will be paid for storing a single piece multiple times, because we just collect pieces from segments to calculate node tallies.

> 2. How will we handle repairs? Will we leave it as is, so that a copy and its original are repaired on their own?

> 3. Do we plan to pay for one week of additional storage? The data won't be in the trash.

> 4. We need to remember that currently a segment copy doesn't keep pieces. Pieces are the main size factor for the segments table. We need to take into account that with duplication the table size will grow. Not a blocker, but worth remembering.

These are good questions!

Ultimately, I think these are maybe questions for the product team to figure out, but my gut reaction is:

* According to the stats, there are very few copied objects: copied objects form a fraction of a percent of all data.
* So, what if we just take questions one and three together and call it a wash? We overpay nodes by paying individually for each copy, and then don't pay nodes for the additional time before GC moves the deleted object to the trash. If we go this route, it also seems fine to let repair do multiple individual repairs.

I think my opinion would change if copies became a nontrivial amount of our data, of course, and this may need to be revisited.

## Related work
@@ -11,7 +11,7 @@ This testplan is going to cover the Access Grants Page. This page lists access g
 | Test Scenario | Test Case | Description | Comments |
 |---------------|-----------|-------------|----------|
-| Access Grant Management Page UI | Click on the Access Management Section of Storj DCS Sidebar | While the sidebar is present, if the user clicks on the access management section of the sidebar then the user should be redirected to the access grant management page | |
+| Access Grant Management Page UI | Click on the Access Management Section of Storj Sidebar | While the sidebar is present, if the user clicks on the access management section of the sidebar then the user should be redirected to the access grant management page | |
 | | Confirm Access Grant Management Page | While the user is in their access grant management page, the user should be able to see the Access Management Header and a header named My Access Keys with a list of access keys if the user has created any, a button for a new access grant and a search bar to search for any access grants | |
 | | Access Grant More Info Button | Under the access management header, there is a more info button that leads to an explanation of access grants, so if it is clicked user should be redirected to storj-labs access grants concepts page | |
 | | Click More Info Button on Access Grant with Limited Permissions | When a user clicks on the more info button for said access grant with limited permissions, it should show the stated permissions | |
@@ -20,7 +20,7 @@ This testplan is going to cover the Access Grants Page. This page lists access g
 | | Access Grants Shortcuts- Learn More Button | If user clicks on learn more button on the access grants shortcuts, then user should be redirected to Storj-labs page with more information about access grants | |
 | | API Keys Shortcuts- Create API Keys Button | If user clicks on create API keys button on the API keys shortcut, then user should be presented with a modal allowing user to create API keys (at the end user should also be able to copy said API key and Satellite Address or save it in a text file) | |
 | | API Keys Shortcuts- Learn More Button | If user clicks on learn more button on the API keys shortcut, then user should be redirected to Storj-labs page with more information about API keys | |
-| | S3 Credentials Shortcuts- Create S3 Credentials Button | If user clicks on create S3 credentials button on the S3 credentials shortcuts, then user should be presented with a modal to create S3 credentials to switch backend of an app using S3 compatible object storage to Storj DCS (at the end user should also be able to copy said S3 credentials; secret key, access key and endpoint on clipboard or download as a text file) | |
+| | S3 Credentials Shortcuts- Create S3 Credentials Button | If user clicks on create S3 credentials button on the S3 credentials shortcuts, then user should be presented with a modal to create S3 credentials to switch backend of an app using S3 compatible object storage to Storj (at the end user should also be able to copy said S3 credentials; secret key, access key and endpoint on clipboard or download as a text file) | |
 | | S3 Credentials Shortcuts- Learn More Button | If user clicks on learn more button on the S3 credentials shortcut, then user should be redirected to Storj-labs page with more information on S3 credentials | |
 | | First Visit Check for About Access Grants | If user visits access management page for the first time, the user should see an about access grant message explaining what access grants are (this message should also be dismissible) | |
 | | Check for About Access Grants after First Visit | If user visits access management page again after their first time (and presses dismiss), then for every subsequent visit to this page the user should not be presented with this access grant message | |
17  docs/testplan/graceful-exit-revamp-testplan.md (new file)
@@ -0,0 +1,17 @@
# Graceful Exit Revamp

## Background

This testplan covers the Graceful Exit Revamp.

| Test Scenario | Test Case | Description | Comments |
|---------------|-----------|-------------|----------|
| Graceful Exit | Happy path | Perform GE on the node; the satellite does not send any new pieces to this node. Pieces on this node are marked as "retrievable but unhealthy". After one month (with an appropriately high online score), the node will be considered exited. | Covered |
| | GE on Disqualified Node | Make sure GE is not initiated for a disqualified node. | Covered |
| | Double exit | Perform GE on the node and, after receiving the success message, do it once again. Make sure the node cannot do it twice. | Covered |
| | Low online score | Perform GE on a node with less than 50% online score. The node should fail to GE. | Covered |
| | Too many nodes call GE at the same time | We should transfer all the pieces to available nodes anyway. Example: start with 8 nodes (RS settings 2,3,4,4) and call GE on 4 nodes at the same time. | |
| | Audits | The SN should receive audits even if it is performing GE at the moment. | Covered? |
| | GE on Suspended node | Make sure GE is not initiated for a suspended node (unknown audit errors). | |
| | GE started before feature deployment | The node should stop transferring new pieces and should be treated by the new rules. | |
46  docs/testplan/object-versioning-testplan.md (new file)
@@ -0,0 +1,46 @@
# Object Versioning

## Background

This testplan covers Object Versioning.

| Test Scenario | Test Case | Description | Comments |
|---------------|-----------|-------------|----------|
| Copy | To a bucket that has versioning enabled | Should add one version to it (make a new version and make it the latest) | check the column "versioning_state" in "bucket_metainfo" |
| | To a bucket that has versioning disabled | The destination object should get a null version ID and overwrite any existing object of the same name | |
| | Copy object | Should support copying a specific version; should copy the latest version of an object if not specified | |
| Move | To a bucket that has versioning enabled | Should add one version to it | check the column "versioning_state" in "bucket_metainfo" |
| | To a bucket that has versioning disabled | The moved object should get a null version ID and overwrite any existing object of the same name | |
| Delete | Delete one from many versions | Create 3 versions of the same file and delete the middle one, indicating the version id | |
| | All versions | Unconditionally deletes all versions of an object | |
| | Delete bucket | Force delete a bucket with files that have versioning. We should keep all versions of the files unless manually deleted | |
| Restore | Delete and restore | Delete a version of the file and restore from that version | |
| | Restore | Create a few versions of the file and restore from the latest to an older version | |
| Create | Create new bucket | Versioning should be inherited from the project level | |
| Suspend | Suspend versioning | Suspend versioning on a bucket that had versioning enabled. 3 versions of a file exist. Try to upload the same file again -> the newest file gets overridden. The older 2 versions stay intact | |
| Update | Update metadata | A metadata update should not create a new version. Takes the version as input but does not use it. Only updates the metadata for the highest committed object version. | |
| List | All versions | Unconditionally returns all object versions. Listing all versions should include delete markers. Versions come out created last to first | |
| UI | UI | The UI should always show the latest version of each object | |
| Buckets | Old | Old buckets created before the feature should be in "unsupported" state | |
| | Enable versioning after upload | Upload an object to a bucket with versioning disabled and then enable versioning. Check the version of the object | |
| PutObject | Versioning enabled | When an object with the same name is uploaded to a bucket, we should create a new unique version of the object | |
| | Versioning disabled | The latest version of the object is overwritten by the new object; the new object has a version ID of null | |
| | Multipart | Multipart upload with versioning enabled | |
| | Expiration | Create an object with expiration in a versioned bucket; a delete marker should be applied to it | |

## Third-party test suite

These test suites have good tests inside, so we should run all versioning
related tests in them:

* https://github.com/ceph/s3-tests/blob/master/s3tests_boto3/functional/test_s3.py
* https://github.com/snowflakedb/snowflake-s3compat-api-test-suite

## Questions

* Can a customer set a maximum number of versions?
* Can a customer pin specific versions to make sure they can't be deleted
  by malware?
* Can a project member with a restricted access grant modify the version
  flag on a bucket? Which permissions does the access grant need?
25  docs/testplan/project-cowbell-testplan.md (new file)
@@ -0,0 +1,25 @@
# Mini Cowbell Testplan

## Background

We want to deploy the entire Storj stack on environments that have Kubernetes running on 5 NUCs.

## Pre-condition

Configuration for satellites that have only 5 nodes; the recommended RS scheme is [2,3,4,4], where (see the sketch after this list):

- 2 is the number of required pieces to reconstitute the segment.
- 3 is the repair threshold, i.e. if a segment remains with only 3 healthy pieces, it will be repaired.
- 4 is the success threshold, i.e. the number of pieces required for a successful upload or repair.
- 4 is the number of total erasure-coded pieces that will be generated.
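For quick reference, a small sketch deriving the headline numbers implied by this [2,3,4,4] scheme; the struct is illustrative, not the satellite's actual configuration type:

```go
package main

import "fmt"

// RS holds the Reed-Solomon thresholds described above.
type RS struct {
	Required, Repair, Success, Total int
}

func main() {
	rs := RS{Required: 2, Repair: 3, Success: 4, Total: 4}

	// Expansion factor: every stored byte costs Total/Required bytes on
	// the network, hence the 2x expansion cited in the table below.
	fmt.Printf("expansion factor: %.1fx\n", float64(rs.Total)/float64(rs.Required))

	// Losing this many pieces drops a segment to the repair threshold;
	// with one of five nodes offline, 80% of segments lose one piece.
	fmt.Println("pieces lost before repair triggers:", rs.Total-rs.Repair)

	// A segment is unrecoverable once fewer than Required pieces remain.
	fmt.Println("pieces lost before data loss:", rs.Total-rs.Required+1)
}
```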
| Test Scenario | Test Case | Description | Comments |
|---------------|-----------|-------------|----------|
| Upload | Upload with all nodes online | Every file is uploaded to 4 nodes with a 2x expansion factor, so one node has no pieces of it. | Happy path scenario |
| | Upload with one node offline | If one of five nodes fails and goes offline, 80% of the stored data will lose one erasure-coded piece. The health status of these segments will be reduced from 4 pieces to 3 pieces, and these segments will be marked for repair. (overlay.node.online-window: 4h0m0s -> for about 4 hours the node will still be selected for uploads) | Uploads will continue uninterrupted if the client uses the new refactored upload path. This improved upload logic will request a new node from the satellite if the satellite selects the offline node for the upload, unaware it is already offline. If the client uses the old upload logic, uploads may fail if the satellite selects the offline node (20% chance). Once the satellite detects the offline node, all uploads will be successful. |
| Download | Download with one node offline | If one of five nodes fails and goes offline, 80% of the stored data will lose one erasure-coded piece. The health status of these segments will be reduced from 4 pieces to 3 pieces, and these segments will be marked for repair. (overlay.node.online-window: 4h0m0s -> for about 4 hours the node will still be selected for downloads) | |
| Repair | Repair with 2 nodes disqualified | Disqualify 2 nodes so that repair downloads are still possible but no node is available for an upload; repair shouldn't consume download bandwidth and should error out early. Only spend download bandwidth when there is at least one node available for an upload. | If two nodes go offline, only 2 pieces remain in the worst case, which cannot be repaired; this is de facto data loss if the offline nodes are damaged. |
| Audit | | Audits can't identify corrupted pieces with just the minimum number of pieces; reputation should not increase. Audits should be able to identify corrupted pieces with minimum + 1 pieces; reputation should decrease. | |
| Upgrades | Nodes restart for upgrades | No more than a single node may go offline for maintenance; otherwise, normal operation of the network cannot be ensured. | Occasionally, nodes may need to restart due to software updates, which brings the node offline for some period of time. |
58  docs/testplan/storj-private-cloud-testplan.md (new file)
@@ -0,0 +1,58 @@
## Storj Private Cloud - Test Plan

## Test Scenarios

Some test ideas:

- Upload and download some data
- Server-side copy and server-side move
- Multipart uploads
- Versioning (replace an existing file)
- Audit identifies a bad node, and Repair finds new good nodes for the pieces (integration test including audit reservoir sampling, the audit job, the reverifier, the repair checker, and the repair worker)
- Repair checker and repair worker performance with a million segments in the repair queue (the repair queue needs to be ordered by null values first)
- Ranged loop performance (do we get better performance from running 2 range loops vs. a single range?)
- Upload, Download, List, Delete performance with a million segments in the DB
- Garbage collection, especially the bloom filter creation (needs to be run from a backup DB and can't be run from the live DB)
- Storage node and customer accounting
- Account upload and download limits (redis cache)
- Customer signup with onboarding, including creating an access grant
- Token payments
- Graceful exit
- Node selection with geofencing, suspended nodes, disqualified nodes, offline nodes, nodes running outdated versions, and nodes out of disk space

Bonus section (technically out of scope but still interesting questions for other tickets):

- Should a private satellite require a Stripe account for the billing section? What does the UI look like without a Stripe account? How can the customer upgrade to a pro account without having to add a credit card?
- Does the satellite need to be able to send out emails? For signup we have a simulation mode, but for other features like project member invites we can't currently skip the email. (Other features with similar issues: storage node notifications, account freeze, password reset.)
- What is the plan for the initial vetting period? A brand new satellite with brand new nodes will not be able to upload any data because there are not enough vetted nodes -> config change to upload to unvetted nodes -> risk of uploading too much data to unvetted nodes by keeping this setting longer than necessary.
## Test Plan Table

| Test Scenario | Test Case | Description | Comments |
|---------------|-----------|-------------|----------|
| Upload | Small file | Do the upload for 1 KiB, 5 KiB, 1 MiB, and 64 MiB files. | |
| | Big file | Upload 1024 MiB files. | |
| | Multipart upload | Upload a big file to check the multipart upload. | |
| Download | Inline segment | User should download an inline segment without any errors | |
| | Remote segment | User should download a remote segment without any errors | |
| | Copy 10000 Or More Segments | If a user uploads an object with 10000 segments or more and server-side copies it from the source object to the destination object, it should be possible | |
| | Copy inline segment | User should copy an inline segment without any errors | |
| | Copy remote segment | User should copy a remote segment without any errors | |
| Move | Move object | Move an object from one bucket to another bucket | |
| Versioning | Replace an existing file | User should be able to update an existing file | |
| DB - Table `segments` | Expiration Date | If a user uses server-side copy, then the source object and the destination object must have the same expiration date | Might be a redundant test because of the segment table removal |
| DB - Table `segment_copies` | Ancestor_stream_id negative | If a segment with `stream_id = S` hasn't been copied, then the `segment_copies` table has no row having `ancestor_stream_id = S` | Might be a redundant test because of the segment table removal |
| | Ancestor_stream_id positive | If a segment with `stream_id = S` has been copied, then the `segment_copies` table has at least one row having `ancestor_stream_id = S` | Might be a redundant test because of the segment table removal |
| Repair | Data repair | Upload some data, then kill some nodes and disqualify 1 node (there should be enough storage nodes left to upload repaired segments). The repaired segment should not contain any piece on the killed and DQ'd nodes. Download the data from the new nodes and check that it is the same as what was uploaded. | This test should be in the code |
| Token payments | Multiple Transactions | If a user has a pending transaction and then performs another transaction with a higher nonce using the same address, the new transaction has to wait until the previous transaction with the lower nonce is confirmed (standard behavior of geth, nothing to test for us) | |
| | Invoice Generation | When an invoice is generated and "paid", coupons should be used first, followed by STORJ balance, and then lastly the credit card | |
| Performance | Repair queue index has to be null value first. | https://storj.slack.com/archives/C01427KSZ1P/p1589815803066100 | |
| Garbage Collection | Garbage Collection | Needs to be run from a backup DB and can't be run from the live DB | |
| Accounting | Customer | Generate the full invoice cycle | |
| | Storage node | Generate the invoice | |
| Account limits | Upload | Verify that limits are working | |
| | Download | Verify that limits are working | |
| Signup | Customer signup | Customer signup with onboarding, including creating an access grant | |
61  go.mod
@@ -1,9 +1,8 @@
 module storj.io/storj

-go 1.18
+go 1.19

 require (
-    github.com/VividCortex/ewma v1.2.0
     github.com/alessio/shellescape v1.2.2
     github.com/alicebob/miniredis/v2 v2.13.3
     github.com/blang/semver v3.5.1+incompatible
@@ -22,49 +21,50 @@ require (
     github.com/jackc/pgx/v5 v5.3.1
     github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3
     github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d
+    github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b
     github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6
     github.com/loov/hrtime v1.0.3
     github.com/mattn/go-sqlite3 v1.14.12
     github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce
     github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1
-    github.com/oschwald/maxminddb-golang v1.8.0
+    github.com/oschwald/maxminddb-golang v1.12.0
     github.com/pquerna/otp v1.3.0
     github.com/redis/go-redis/v9 v9.0.3
     github.com/shopspring/decimal v1.2.0
-    github.com/spacemonkeygo/monkit/v3 v3.0.20-0.20230419135619-fb89f20752cb
+    github.com/spacemonkeygo/monkit/v3 v3.0.22
     github.com/spacemonkeygo/tlshowdy v0.0.0-20160207005338-8fa2cec1d7cd
     github.com/spf13/cobra v1.1.3
     github.com/spf13/pflag v1.0.5
     github.com/spf13/viper v1.7.1
-    github.com/stretchr/testify v1.8.2
+    github.com/stretchr/testify v1.8.4
-    github.com/stripe/stripe-go/v72 v72.90.0
+    github.com/stripe/stripe-go/v75 v75.8.0
     github.com/vbauerster/mpb/v8 v8.4.0
-    github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3
     github.com/zeebo/assert v1.3.1
     github.com/zeebo/blake3 v0.2.3
     github.com/zeebo/clingy v0.0.0-20230602044025-906be850f10d
     github.com/zeebo/errs v1.3.0
     github.com/zeebo/errs/v2 v2.0.3
     github.com/zeebo/ini v0.0.0-20210514163846-cc8fbd8d9599
+    github.com/zeebo/structs v1.0.3-0.20230601144555-f2db46069602
     github.com/zyedidia/generic v1.2.1
     go.etcd.io/bbolt v1.3.5
     go.uber.org/zap v1.16.0
-    golang.org/x/crypto v0.7.0
+    golang.org/x/crypto v0.12.0
     golang.org/x/exp v0.0.0-20221205204356-47842c84f3db
-    golang.org/x/net v0.9.0
+    golang.org/x/net v0.10.0
     golang.org/x/oauth2 v0.7.0
-    golang.org/x/sync v0.1.0
+    golang.org/x/sync v0.3.0
-    golang.org/x/sys v0.7.0
+    golang.org/x/sys v0.13.0
-    golang.org/x/term v0.7.0
+    golang.org/x/term v0.11.0
-    golang.org/x/text v0.9.0
+    golang.org/x/text v0.12.0
     golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
     gopkg.in/segmentio/analytics-go.v3 v3.1.0
     gopkg.in/yaml.v3 v3.0.1
-    storj.io/common v0.0.0-20230602145716-d6ea82d58b3d
+    storj.io/common v0.0.0-20231130134106-1fa84867e323
     storj.io/drpc v0.0.33
-    storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41
+    storj.io/monkit-jaeger v0.0.0-20230707083646-f15e6e8b7e8c
-    storj.io/private v0.0.0-20230627140631-807a2f00d0e1
+    storj.io/private v0.0.0-20231127092015-c439a594bc1d
-    storj.io/uplink v1.10.1-0.20230626081029-035890d408c2
+    storj.io/uplink v1.12.3-0.20231130143633-4a092fa01b98
 )

 require (
@@ -72,6 +72,7 @@ require (
     cloud.google.com/go/compute v1.19.0 // indirect
     cloud.google.com/go/compute/metadata v0.2.3 // indirect
     cloud.google.com/go/profiler v0.3.1 // indirect
+    github.com/VividCortex/ewma v1.2.0 // indirect
     github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
     github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
     github.com/apache/thrift v0.12.0 // indirect
@@ -83,7 +84,7 @@ require (
     github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
     github.com/flynn/noise v1.0.0 // indirect
     github.com/fsnotify/fsnotify v1.5.4 // indirect
-    github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
+    github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
     github.com/golang-jwt/jwt v3.2.1+incompatible // indirect
     github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
     github.com/golang/protobuf v1.5.3 // indirect
@@ -100,22 +101,25 @@ require (
     github.com/jackc/pgproto3/v2 v2.3.2 // indirect
     github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
     github.com/jtolds/tracetagger/v2 v2.0.0-rc5 // indirect
+    github.com/jtolio/crawlspace v0.0.0-20231116162947-3ec5cc6b36c5 // indirect
+    github.com/jtolio/crawlspace/tools v0.0.0-20231115161146-57d90b78ce62 // indirect
     github.com/klauspost/compress v1.15.10 // indirect
     github.com/klauspost/cpuid/v2 v2.0.12 // indirect
+    github.com/kr/pretty v0.3.1 // indirect
+    github.com/kr/text v0.2.0 // indirect
     github.com/magiconair/properties v1.8.5 // indirect
     github.com/mattn/go-colorable v0.1.7 // indirect
     github.com/mattn/go-isatty v0.0.12 // indirect
     github.com/mattn/go-runewidth v0.0.14 // indirect
     github.com/mitchellh/mapstructure v1.4.1 // indirect
-    github.com/onsi/ginkgo/v2 v2.2.0 // indirect
+    github.com/onsi/ginkgo/v2 v2.9.5 // indirect
     github.com/pelletier/go-toml v1.9.0 // indirect
     github.com/pkg/errors v0.9.1 // indirect
     github.com/pmezard/go-difflib v1.0.0 // indirect
-    github.com/quic-go/qtls-go1-18 v0.2.0 // indirect
-    github.com/quic-go/qtls-go1-19 v0.2.0 // indirect
-    github.com/quic-go/qtls-go1-20 v0.1.0 // indirect
-    github.com/quic-go/quic-go v0.32.0 // indirect
+    github.com/quic-go/qtls-go1-20 v0.4.1 // indirect
+    github.com/quic-go/quic-go v0.40.0 // indirect
     github.com/rivo/uniseg v0.4.4 // indirect
+    github.com/rogpeppe/go-internal v1.10.0 // indirect
     github.com/segmentio/backo-go v0.0.0-20200129164019-23eae7c10bd3 // indirect
     github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
     github.com/spf13/afero v1.6.0 // indirect
@@ -126,14 +130,16 @@ require (
     github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect
     github.com/zeebo/admission/v3 v3.0.3 // indirect
     github.com/zeebo/float16 v0.1.0 // indirect
+    github.com/zeebo/goof v0.0.0-20230830143729-8a73f2ee257d // indirect
     github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54 // indirect
     github.com/zeebo/mwc v0.0.4 // indirect
-    github.com/zeebo/structs v1.0.3-0.20230601144555-f2db46069602 // indirect
+    github.com/zeebo/sudo v1.0.2 // indirect
     go.opencensus.io v0.24.0 // indirect
     go.uber.org/atomic v1.7.0 // indirect
+    go.uber.org/mock v0.3.0 // indirect
     go.uber.org/multierr v1.6.0 // indirect
-    golang.org/x/mod v0.8.0 // indirect
+    golang.org/x/mod v0.11.0 // indirect
-    golang.org/x/tools v0.6.0 // indirect
+    golang.org/x/tools v0.9.1 // indirect
     google.golang.org/api v0.118.0 // indirect
     google.golang.org/appengine v1.6.7 // indirect
     google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
@@ -141,5 +147,6 @@ require (
     google.golang.org/protobuf v1.30.0 // indirect
     gopkg.in/ini.v1 v1.62.0 // indirect
     gopkg.in/yaml.v2 v2.4.0 // indirect
-    storj.io/picobuf v0.0.1 // indirect
+    storj.io/infectious v0.0.2 // indirect
+    storj.io/picobuf v0.0.2-0.20230906122608-c4ba17033c6c // indirect
 )
|
114
go.sum
114
go.sum
@ -103,6 +103,7 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7
|
|||||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||||
|
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
@ -143,12 +144,14 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb
|
|||||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||||
|
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
|
||||||
github.com/go-oauth2/oauth2/v4 v4.4.2 h1:tWQlR5I4/qhWiyOME67BAFmo622yi+2mm7DMm8DpMdg=
|
github.com/go-oauth2/oauth2/v4 v4.4.2 h1:tWQlR5I4/qhWiyOME67BAFmo622yi+2mm7DMm8DpMdg=
|
||||||
github.com/go-oauth2/oauth2/v4 v4.4.2/go.mod h1:K4DemYzNwwYnIDOPdHtX/7SlO0AHdtlphsTgE7lA3PA=
|
github.com/go-oauth2/oauth2/v4 v4.4.2/go.mod h1:K4DemYzNwwYnIDOPdHtX/7SlO0AHdtlphsTgE7lA3PA=
|
||||||
github.com/go-session/session v3.1.2+incompatible/go.mod h1:8B3iivBQjrz/JtC68Np2T1yBBLxTan3mn/3OM0CyRt0=
|
github.com/go-session/session v3.1.2+incompatible/go.mod h1:8B3iivBQjrz/JtC68Np2T1yBBLxTan3mn/3OM0CyRt0=
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
|
|
||||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||||
|
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||||
|
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||||
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||||
@ -322,8 +325,14 @@ github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3 h1:dITCBge70U9
|
|||||||
github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3/go.mod h1:eo5po8nCwRcvZIIR8eGi7PKthzXuunpXzUmXzxCBfBc=
|
github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3/go.mod h1:eo5po8nCwRcvZIIR8eGi7PKthzXuunpXzUmXzxCBfBc=
|
||||||
github.com/jtolds/tracetagger/v2 v2.0.0-rc5 h1:SriMFVtftPsQmG+0xaABotz9HnoKoo1QM/oggqfpGh8=
|
github.com/jtolds/tracetagger/v2 v2.0.0-rc5 h1:SriMFVtftPsQmG+0xaABotz9HnoKoo1QM/oggqfpGh8=
|
||||||
github.com/jtolds/tracetagger/v2 v2.0.0-rc5/go.mod h1:61Fh+XhbBONy+RsqkA+xTtmaFbEVL040m9FAF/hTrjQ=
|
github.com/jtolds/tracetagger/v2 v2.0.0-rc5/go.mod h1:61Fh+XhbBONy+RsqkA+xTtmaFbEVL040m9FAF/hTrjQ=
|
||||||
|
github.com/jtolio/crawlspace v0.0.0-20231116162947-3ec5cc6b36c5 h1:RSt5K+VT7bPr6A9DW/8Kav6V6aYB+8Vqn6ygqp6S0UM=
|
||||||
|
github.com/jtolio/crawlspace v0.0.0-20231116162947-3ec5cc6b36c5/go.mod h1:ruaBEBN4k5AmKzmI6K2LsfLno2t5tPgvSUB2dyiHHqo=
|
||||||
|
github.com/jtolio/crawlspace/tools v0.0.0-20231115161146-57d90b78ce62 h1:51cqrrnWE0zKhZFepIgnY7JSHgN5uGMX1aVFHjtc1ek=
|
||||||
|
github.com/jtolio/crawlspace/tools v0.0.0-20231115161146-57d90b78ce62/go.mod h1:Fa/Qz4+Sh0xCARqEKUdF7RCGMZcF3ilqBIfS2eVfA/Y=
|
||||||
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d h1:MAGZUXA8MLSA5oJT1Gua3nLSyTYF2uvBgM4Sfs5+jts=
|
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d h1:MAGZUXA8MLSA5oJT1Gua3nLSyTYF2uvBgM4Sfs5+jts=
|
||||||
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d/go.mod h1:PXFUrknJu7TkBNyL8t7XWDPtDFFLFrNQQAdsXv9YfJE=
|
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d/go.mod h1:PXFUrknJu7TkBNyL8t7XWDPtDFFLFrNQQAdsXv9YfJE=
|
||||||
|
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b h1:HKvXTXZTeUHXRibg2ilZlkGSQP6A3cs0zXrBd4xMi6M=
|
||||||
|
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b/go.mod h1:Mrym6OnPMkBKvN8/uXSkyhFSh6ndKKYE+Q4kxCfQ4V0=
|
||||||
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6 h1:iVMQyk78uOpX/UKjEbzyBdptXgEz6jwGwo7kM9IQ+3U=
|
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6 h1:iVMQyk78uOpX/UKjEbzyBdptXgEz6jwGwo7kM9IQ+3U=
|
||||||
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6/go.mod h1:MEkhEPFwP3yudWO0lj6vfYpLIB+3eIcuIW+e0AZzUQk=
|
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6/go.mod h1:MEkhEPFwP3yudWO0lj6vfYpLIB+3eIcuIW+e0AZzUQk=
|
||||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||||
@ -344,11 +353,13 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
|
|||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
@ -420,19 +431,20 @@ github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvw
|
|||||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||||
github.com/onsi/ginkgo/v2 v2.2.0 h1:3ZNA3L1c5FYDFTTxbFeVGGD8jYvjYauHD30YgLxVsNI=
|
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
|
||||||
github.com/onsi/ginkgo/v2 v2.2.0/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
|
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
|
||||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||||
github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
|
github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
|
||||||
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
|
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
|
||||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||||
github.com/oschwald/maxminddb-golang v1.8.0 h1:Uh/DSnGoxsyp/KYbY1AuP0tYEwfs0sCph9p/UMXK/Hk=
|
github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs=
|
||||||
github.com/oschwald/maxminddb-golang v1.8.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis=
|
github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY=
|
||||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||||
github.com/pelletier/go-toml v1.9.0 h1:NOd0BRdOKpPf0SxkL3HxSQOG7rNh+4kl6PHcBPFs7Q0=
|
github.com/pelletier/go-toml v1.9.0 h1:NOd0BRdOKpPf0SxkL3HxSQOG7rNh+4kl6PHcBPFs7Q0=
|
||||||
github.com/pelletier/go-toml v1.9.0/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
github.com/pelletier/go-toml v1.9.0/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||||
|
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
@ -456,14 +468,10 @@ github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R
|
|||||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||||
github.com/quic-go/qtls-go1-18 v0.2.0 h1:5ViXqBZ90wpUcZS0ge79rf029yx0dYB0McyPJwqqj7U=
|
github.com/quic-go/qtls-go1-20 v0.4.1 h1:D33340mCNDAIKBqXuAvexTNMUByrYmFYVfKfDN5nfFs=
|
||||||
github.com/quic-go/qtls-go1-18 v0.2.0/go.mod h1:moGulGHK7o6O8lSPSZNoOwcLvJKJ85vVNc7oJFD65bc=
|
github.com/quic-go/qtls-go1-20 v0.4.1/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
|
||||||
github.com/quic-go/qtls-go1-19 v0.2.0 h1:Cvn2WdhyViFUHoOqK52i51k4nDX8EwIh5VJiVM4nttk=
|
github.com/quic-go/quic-go v0.40.0 h1:GYd1iznlKm7dpHD7pOVpUvItgMPo/jrMgDWZhMCecqw=
|
||||||
github.com/quic-go/qtls-go1-19 v0.2.0/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI=
|
github.com/quic-go/quic-go v0.40.0/go.mod h1:PeN7kuVJ4xZbxSv/4OX6S1USOX8MJvydwpTx31vx60c=
|
||||||
github.com/quic-go/qtls-go1-20 v0.1.0 h1:d1PK3ErFy9t7zxKsG3NXBJXZjp/kMLoIb3y/kV54oAI=
|
|
||||||
github.com/quic-go/qtls-go1-20 v0.1.0/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
|
|
||||||
github.com/quic-go/quic-go v0.32.0 h1:lY02md31s1JgPiiyfqJijpu/UX/Iun304FI3yUqX7tA=
|
|
||||||
github.com/quic-go/quic-go v0.32.0/go.mod h1:/fCsKANhQIeD5l76c2JFU+07gVE3KaA0FP+0zMWwfwo=
|
|
||||||
github.com/redis/go-redis/v9 v9.0.3 h1:+7mmR26M0IvyLxGZUHxu4GiBkJkVDid0Un+j4ScYu4k=
|
github.com/redis/go-redis/v9 v9.0.3 h1:+7mmR26M0IvyLxGZUHxu4GiBkJkVDid0Un+j4ScYu4k=
|
||||||
github.com/redis/go-redis/v9 v9.0.3/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
|
github.com/redis/go-redis/v9 v9.0.3/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
|
||||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||||
@ -472,7 +480,9 @@ github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc
|
|||||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
|
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||||
|
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||||
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
||||||
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
||||||
@ -526,8 +536,8 @@ github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod
|
|||||||
github.com/spacemonkeygo/monkit/v3 v3.0.0-20191108235033-eacca33b3037/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
|
github.com/spacemonkeygo/monkit/v3 v3.0.0-20191108235033-eacca33b3037/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
|
||||||
github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
|
github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
|
||||||
github.com/spacemonkeygo/monkit/v3 v3.0.18/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
|
github.com/spacemonkeygo/monkit/v3 v3.0.18/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
|
||||||
github.com/spacemonkeygo/monkit/v3 v3.0.20-0.20230419135619-fb89f20752cb h1:kWLHxcYDcloMFEJMngxuKh8wcLl9RjjeAN2a9AtTtCg=
|
github.com/spacemonkeygo/monkit/v3 v3.0.22 h1:4/g8IVItBDKLdVnqrdHZrCVPpIrwDBzl1jrV0IHQHDU=
|
||||||
github.com/spacemonkeygo/monkit/v3 v3.0.20-0.20230419135619-fb89f20752cb/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
|
github.com/spacemonkeygo/monkit/v3 v3.0.22/go.mod h1:XkZYGzknZwkD0AKUnZaSXhRiVTLCkq7CWVa3IsE72gA=
|
||||||
github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo=
|
github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo=
|
||||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=
|
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=
|
||||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
|
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
|
||||||
@ -565,10 +575,10 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||||
github.com/stripe/stripe-go/v72 v72.90.0 h1:fvJ/aL1rHHWRj5buuayb/2ufJued1UR1HEVavsoZoFs=
|
github.com/stripe/stripe-go/v75 v75.8.0 h1:kXdHvihp03v64L0C+xXGjolsdzdOmCqwKLnK2wA6bio=
|
||||||
github.com/stripe/stripe-go/v72 v72.90.0/go.mod h1:QwqJQtduHubZht9mek5sds9CtQcKFdsykV9ZepRWwo0=
|
github.com/stripe/stripe-go/v75 v75.8.0/go.mod h1:wT44gah+eCY8Z0aSpY/vQlYYbicU9uUAbAqdaUxxDqE=
|
||||||
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
||||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||||
@ -600,8 +610,6 @@ github.com/vbauerster/mpb/v8 v8.4.0 h1:Jq2iNA7T6SydpMVOwaT+2OBWlXS9Th8KEvBqeu5ee
|
|||||||
github.com/vbauerster/mpb/v8 v8.4.0/go.mod h1:vjp3hSTuCtR+x98/+2vW3eZ8XzxvGoP8CPseHMhiPyc=
|
github.com/vbauerster/mpb/v8 v8.4.0/go.mod h1:vjp3hSTuCtR+x98/+2vW3eZ8XzxvGoP8CPseHMhiPyc=
|
||||||
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
||||||
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
||||||
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k=
|
|
||||||
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc=
|
|
||||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
|
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
|
||||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||||
@ -644,6 +652,8 @@ github.com/zeebo/errs/v2 v2.0.3 h1:WwqAmopgot4ZC+CgIveP+H91Nf78NDEGWjtAXen45Hw=
|
|||||||
github.com/zeebo/errs/v2 v2.0.3/go.mod h1:OKmvVZt4UqpyJrYFykDKm168ZquJ55pbbIVUICNmLN0=
|
github.com/zeebo/errs/v2 v2.0.3/go.mod h1:OKmvVZt4UqpyJrYFykDKm168ZquJ55pbbIVUICNmLN0=
|
||||||
github.com/zeebo/float16 v0.1.0 h1:kRqxv5og6z1emEyz5FpW0/BVHe5VfxEAw6b1ljCZlUc=
|
github.com/zeebo/float16 v0.1.0 h1:kRqxv5og6z1emEyz5FpW0/BVHe5VfxEAw6b1ljCZlUc=
|
||||||
github.com/zeebo/float16 v0.1.0/go.mod h1:fssGvvXu+XS8MH57cKmyrLB/cqioYeYX/2mXCN3a5wo=
|
github.com/zeebo/float16 v0.1.0/go.mod h1:fssGvvXu+XS8MH57cKmyrLB/cqioYeYX/2mXCN3a5wo=
|
||||||
|
github.com/zeebo/goof v0.0.0-20230830143729-8a73f2ee257d h1:BcGKO/7ni6YuQHLTEy5I9ujNb7Z3Xw5edcQRpZnCwSg=
|
||||||
|
github.com/zeebo/goof v0.0.0-20230830143729-8a73f2ee257d/go.mod h1:nbQ8jtLiWGVGehuiqVKJp/Oc9FnzA56AZ0tG/srGTGY=
|
||||||
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54 h1:+cwNE5KJ3pika4HuzmDHkDlK5myo0G9Sv+eO7WWxnUQ=
|
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54 h1:+cwNE5KJ3pika4HuzmDHkDlK5myo0G9Sv+eO7WWxnUQ=
|
||||||
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54/go.mod h1:EI8LcOBDlSL3POyqwC1eJhOYlMBMidES+613EtmmT5w=
|
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54/go.mod h1:EI8LcOBDlSL3POyqwC1eJhOYlMBMidES+613EtmmT5w=
|
||||||
github.com/zeebo/ini v0.0.0-20210514163846-cc8fbd8d9599 h1:aYOFLPl7mY7PFFuLuYoBqlP46yJ7rZONGlXMS4/6QpA=
|
github.com/zeebo/ini v0.0.0-20210514163846-cc8fbd8d9599 h1:aYOFLPl7mY7PFFuLuYoBqlP46yJ7rZONGlXMS4/6QpA=
|
||||||
@ -654,6 +664,8 @@ github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
|
|||||||
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
|
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
|
||||||
github.com/zeebo/structs v1.0.3-0.20230601144555-f2db46069602 h1:nMxsvi3pTJapmPpdShLdCO8sbCqd8XkjKYMssSJrfiM=
|
github.com/zeebo/structs v1.0.3-0.20230601144555-f2db46069602 h1:nMxsvi3pTJapmPpdShLdCO8sbCqd8XkjKYMssSJrfiM=
|
||||||
github.com/zeebo/structs v1.0.3-0.20230601144555-f2db46069602/go.mod h1:hthZGQud7FXSu0Rd7Q6LRMmJ2pvvBvCkZ/LAmpkn5u4=
|
github.com/zeebo/structs v1.0.3-0.20230601144555-f2db46069602/go.mod h1:hthZGQud7FXSu0Rd7Q6LRMmJ2pvvBvCkZ/LAmpkn5u4=
|
||||||
|
github.com/zeebo/sudo v1.0.2 h1:6RpQNYeWtd7ycPwYSRgceNdbjodamyyuapNB8mQ1V0M=
|
||||||
|
github.com/zeebo/sudo v1.0.2/go.mod h1:bO8DB2LXZchv4WMBzo1sCYp24BxAtwa0Lp0XTXU3cU4=
|
||||||
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||||
github.com/zyedidia/generic v1.2.1 h1:Zv5KS/N2m0XZZiuLS82qheRG4X1o5gsWreGb0hR7XDc=
|
github.com/zyedidia/generic v1.2.1 h1:Zv5KS/N2m0XZZiuLS82qheRG4X1o5gsWreGb0hR7XDc=
|
||||||
github.com/zyedidia/generic v1.2.1/go.mod h1:ly2RBz4mnz1yeuVbQA/VFwGjK3mnHGRj1JuoG336Bis=
|
github.com/zyedidia/generic v1.2.1/go.mod h1:ly2RBz4mnz1yeuVbQA/VFwGjK3mnHGRj1JuoG336Bis=
|
||||||
@ -672,6 +684,8 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
|||||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||||
|
go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo=
|
||||||
|
go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
|
||||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||||
@ -705,8 +719,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
|
|||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
|
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
|
||||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||||
@ -736,8 +750,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
|
golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
|
||||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
@ -769,12 +783,13 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
|
|||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||||
|
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20220526153639-5463443f8c37/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.0.0-20220526153639-5463443f8c37/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
|
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
|
||||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
@ -794,8 +809,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
|||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
@ -821,7 +836,6 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
@ -841,13 +855,13 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
|
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
|
||||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
|
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
|
||||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
@ -856,8 +870,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
||||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
@ -896,8 +910,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
|
|||||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
|
golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
|
||||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
|
||||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
@ -1013,16 +1027,18 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
|
|||||||
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
||||||
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
||||||
storj.io/common v0.0.0-20220719163320-cd2ef8e1b9b0/go.mod h1:mCYV6Ud5+cdbuaxdPD5Zht/HYaIn0sffnnws9ErkrMQ=
|
storj.io/common v0.0.0-20220719163320-cd2ef8e1b9b0/go.mod h1:mCYV6Ud5+cdbuaxdPD5Zht/HYaIn0sffnnws9ErkrMQ=
|
||||||
storj.io/common v0.0.0-20230602145716-d6ea82d58b3d h1:AXdJxmg4Jqdz1nmogSrImKOHAU+bn8JCy8lHYnTwP0Y=
|
storj.io/common v0.0.0-20231130134106-1fa84867e323 h1:0+vWHYPJyjZABb8Qyj1H2tCqpvyXMrN0GwTWu7vZ9nA=
|
||||||
storj.io/common v0.0.0-20230602145716-d6ea82d58b3d/go.mod h1:zu2L8WdpvfIBrCbBTgPsz4qhHSArYSiDgRcV1RLlIF8=
|
storj.io/common v0.0.0-20231130134106-1fa84867e323/go.mod h1:qjHfzW5RlGg5z04CwIEjJd1eQ3HCGhUNtxZ6K/W7yqM=
|
||||||
storj.io/drpc v0.0.32/go.mod h1:6rcOyR/QQkSTX/9L5ZGtlZaE2PtXTTZl8d+ulSeeYEg=
|
storj.io/drpc v0.0.32/go.mod h1:6rcOyR/QQkSTX/9L5ZGtlZaE2PtXTTZl8d+ulSeeYEg=
|
||||||
storj.io/drpc v0.0.33 h1:yCGZ26r66ZdMP0IcTYsj7WDAUIIjzXk6DJhbhvt9FHI=
|
storj.io/drpc v0.0.33 h1:yCGZ26r66ZdMP0IcTYsj7WDAUIIjzXk6DJhbhvt9FHI=
|
||||||
storj.io/drpc v0.0.33/go.mod h1:vR804UNzhBa49NOJ6HeLjd2H3MakC1j5Gv8bsOQT6N4=
|
storj.io/drpc v0.0.33/go.mod h1:vR804UNzhBa49NOJ6HeLjd2H3MakC1j5Gv8bsOQT6N4=
|
||||||
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41 h1:SVuEocEhZfFc13J1AmlVLitdGXTVrvmbzN4Z9C9Ms40=
|
storj.io/infectious v0.0.2 h1:rGIdDC/6gNYAStsxsZU79D/MqFjNyJc1tsyyj9sTl7Q=
|
||||||
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41/go.mod h1:iK+dmHZZXQlW7ahKdNSOo+raMk5BDL2wbD62FIeXLWs=
|
storj.io/infectious v0.0.2/go.mod h1:QEjKKww28Sjl1x8iDsjBpOM4r1Yp8RsowNcItsZJ1Vs=
|
||||||
storj.io/picobuf v0.0.1 h1:ekEvxSQCbEjTVIi/qxj2za13SJyfRE37yE30IBkZeT0=
|
storj.io/monkit-jaeger v0.0.0-20230707083646-f15e6e8b7e8c h1:92Hl7mBzjfMNNkkO3uVp62ZC8yZuBNcz20EVcKNzpkQ=
|
||||||
storj.io/picobuf v0.0.1/go.mod h1:7ZTAMs6VesgTHbbhFU79oQ9hDaJ+MD4uoFQZ1P4SEz0=
|
storj.io/monkit-jaeger v0.0.0-20230707083646-f15e6e8b7e8c/go.mod h1:iK+dmHZZXQlW7ahKdNSOo+raMk5BDL2wbD62FIeXLWs=
|
||||||
storj.io/private v0.0.0-20230627140631-807a2f00d0e1 h1:O2+Xjq8H4TKad2cnhvjitK3BtwkGtJ2TfRCHOIN8e7w=
|
storj.io/picobuf v0.0.2-0.20230906122608-c4ba17033c6c h1:or/DtG5uaZpzimL61ahlgAA+MTYn/U3txz4fe+XBFUg=
|
||||||
storj.io/private v0.0.0-20230627140631-807a2f00d0e1/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
|
storj.io/picobuf v0.0.2-0.20230906122608-c4ba17033c6c/go.mod h1:JCuc3C0gzCJHQ4J6SOx/Yjg+QTpX0D+Fvs5H46FETCk=
|
||||||
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2 h1:XnJR9egrqvAqx5oCRu2b13ubK0iu0qTX12EAa6lAPhg=
|
storj.io/private v0.0.0-20231127092015-c439a594bc1d h1:snE4Ec2k4bLNRsNq5YcKH6njS56zF30SR8u4Fgeksy4=
|
||||||
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=
|
storj.io/private v0.0.0-20231127092015-c439a594bc1d/go.mod h1:vLbKaAmrBdkrFd8ZvTgNUJ+kLKl25Y4kkwii7K2gWMI=
|
||||||
|
storj.io/uplink v1.12.3-0.20231130143633-4a092fa01b98 h1:EZ8MPk01yvDqwP8x2oI5Q3zkE4ef6K+GXpI6kJjBfxY=
|
||||||
|
storj.io/uplink v1.12.3-0.20231130143633-4a092fa01b98/go.mod h1:w+dXLZ8X3vtK3xis9jsMiBS0bzw4kU5foo5GOsIW7QM=
|
||||||
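These are the usual paired `h1:`/`go.mod` hash entries that the Go toolchain rewrites whenever go.mod is bumped. As a sketch of how such bumps are typically produced (module versions taken from the diff above; the commands are the standard Go toolchain workflow, not something recorded in this changeset):

	go get github.com/stretchr/testify@v1.8.4
	go get storj.io/uplink@v1.12.3-0.20231130143633-4a092fa01b98
	go mod tidy   # rewrites go.sum; drops now-unused entries such as the removed qtls-go1-18/19 shims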
@@ -128,7 +128,6 @@ storj.io/storj/satellite/repair/repairer."repair_too_many_nodes_failed" Meter
 storj.io/storj/satellite/repair/repairer."repair_unnecessary" Meter
 storj.io/storj/satellite/repair/repairer."repairer_segments_below_min_req" Counter
 storj.io/storj/satellite/repair/repairer."segment_deleted_before_repair" Meter
-storj.io/storj/satellite/repair/repairer."segment_repair_count" IntVal
 storj.io/storj/satellite/repair/repairer."segment_time_until_repair" IntVal
 storj.io/storj/satellite/repair/repairer."time_for_repair" FloatVal
 storj.io/storj/satellite/repair/repairer."time_since_checker_queue" FloatVal
@@ -202,10 +202,6 @@ func (obj *DB) Open(ctx context.Context) (*Tx, error) {
 	}, nil
 }
 
-func (obj *DB) NewRx() *Rx {
-	return &Rx{db: obj}
-}
-
 func DeleteAll(ctx context.Context, db *DB) (int64, error) {
 	tx, err := db.Open(ctx)
 	if err != nil {
@@ -1365,132 +1361,6 @@ func (obj *sqlite3Impl) deleteAll(ctx context.Context) (count int64, err error)
 
 }
 
-type Rx struct {
-	db *DB
-	tx *Tx
-}
-
-func (rx *Rx) UnsafeTx(ctx context.Context) (unsafe_tx tagsql.Tx, err error) {
-	tx, err := rx.getTx(ctx)
-	if err != nil {
-		return nil, err
-	}
-	return tx.Tx, nil
-}
-
-func (rx *Rx) getTx(ctx context.Context) (tx *Tx, err error) {
-	if rx.tx == nil {
-		if rx.tx, err = rx.db.Open(ctx); err != nil {
-			return nil, err
-		}
-	}
-	return rx.tx, nil
-}
-
-func (rx *Rx) Rebind(s string) string {
-	return rx.db.Rebind(s)
-}
-
-func (rx *Rx) Commit() (err error) {
-	if rx.tx != nil {
-		err = rx.tx.Commit()
-		rx.tx = nil
-	}
-	return err
-}
-
-func (rx *Rx) Rollback() (err error) {
-	if rx.tx != nil {
-		err = rx.tx.Rollback()
-		rx.tx = nil
-	}
-	return err
-}
-
-func (rx *Rx) All_Node(ctx context.Context) (
-	rows []*Node, err error) {
-	var tx *Tx
-	if tx, err = rx.getTx(ctx); err != nil {
-		return
-	}
-	return tx.All_Node(ctx)
-}
-
-func (rx *Rx) Count_Node(ctx context.Context) (
-	count int64, err error) {
-	var tx *Tx
-	if tx, err = rx.getTx(ctx); err != nil {
-		return
-	}
-	return tx.Count_Node(ctx)
-}
-
-func (rx *Rx) Create_Node(ctx context.Context,
-	node_id Node_Id_Field,
-	node_name Node_Name_Field,
-	node_public_address Node_PublicAddress_Field,
-	node_api_secret Node_ApiSecret_Field) (
-	node *Node, err error) {
-	var tx *Tx
-	if tx, err = rx.getTx(ctx); err != nil {
-		return
-	}
-	return tx.Create_Node(ctx, node_id, node_name, node_public_address, node_api_secret)
-
-}
-
-func (rx *Rx) Delete_Node_By_Id(ctx context.Context,
-	node_id Node_Id_Field) (
-	deleted bool, err error) {
-	var tx *Tx
-	if tx, err = rx.getTx(ctx); err != nil {
-		return
-	}
-	return tx.Delete_Node_By_Id(ctx, node_id)
-}
-
-func (rx *Rx) Get_Node_By_Id(ctx context.Context,
-	node_id Node_Id_Field) (
-	node *Node, err error) {
-	var tx *Tx
-	if tx, err = rx.getTx(ctx); err != nil {
-		return
-	}
-	return tx.Get_Node_By_Id(ctx, node_id)
-}
-
-func (rx *Rx) Limited_Node(ctx context.Context,
-	limit int, offset int64) (
-	rows []*Node, err error) {
-	var tx *Tx
-	if tx, err = rx.getTx(ctx); err != nil {
-		return
-	}
-	return tx.Limited_Node(ctx, limit, offset)
-}
-
-func (rx *Rx) UpdateNoReturn_Node_By_Id(ctx context.Context,
-	node_id Node_Id_Field,
-	update Node_Update_Fields) (
-	err error) {
-	var tx *Tx
-	if tx, err = rx.getTx(ctx); err != nil {
-		return
-	}
-	return tx.UpdateNoReturn_Node_By_Id(ctx, node_id, update)
-}
-
-func (rx *Rx) Update_Node_By_Id(ctx context.Context,
-	node_id Node_Id_Field,
-	update Node_Update_Fields) (
-	node *Node, err error) {
-	var tx *Tx
-	if tx, err = rx.getTx(ctx); err != nil {
-		return
-	}
-	return tx.Update_Node_By_Id(ctx, node_id, update)
-}
-
 type Methods interface {
 	All_Node(ctx context.Context) (
 		rows []*Node, err error)
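The deleted Rx wrapper above lazily opened a single transaction on first use and reused it until Commit or Rollback. A minimal caller-side sketch of the equivalent explicit pattern after the removal; the helper name and the assumption that callers now drive *Tx directly are illustrative, not taken from this changeset:

	// updateNodeName is a hypothetical helper showing explicit transaction
	// handling now that the lazily-opened Rx wrapper is gone.
	func updateNodeName(ctx context.Context, db *DB, id Node_Id_Field, update Node_Update_Fields) (err error) {
		// Open the transaction explicitly where Rx.getTx used to do it lazily.
		tx, err := db.Open(ctx)
		if err != nil {
			return err
		}
		defer func() {
			// Mirror Rx.Commit/Rx.Rollback: commit on success, roll back on error.
			if err != nil {
				_ = tx.Rollback()
				return
			}
			err = tx.Commit()
		}()

		_, err = tx.Update_Node_By_Id(ctx, id, update)
		return err
	}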
@@ -69,7 +69,9 @@ type DiskSpace struct {
 	Allocated int64 `json:"allocated"`
 	Used int64 `json:"usedPieces"`
 	Trash int64 `json:"usedTrash"`
-	Free int64 `json:"free"`
+	// Free is the actual amount of free space on the whole disk, not just allocated disk space, in bytes.
+	Free int64 `json:"free"`
+	// Available is the amount of free space on the allocated disk space, in bytes.
 	Available int64 `json:"available"`
 	Overused int64 `json:"overused"`
 }
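Read together, the new comments distinguish disk-wide free space (Free) from free space inside the node's allocation (Available). A tiny sketch of how a caller might combine the two; the arithmetic is an illustration of the documented meanings, not code from this changeset:

	// usableNow is an illustrative helper: bytes that can still be written,
	// bounded both by the allocation and by what the physical disk has left.
	func usableNow(d DiskSpace) int64 {
		if d.Available < d.Free {
			return d.Available
		}
		return d.Free
	}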
@@ -5,23 +5,76 @@ package apigen
 
 import (
 	"fmt"
+	"path"
 	"reflect"
+	"regexp"
+	"sort"
 	"strings"
+	"unicode"
+	"unicode/utf8"
 
 	"storj.io/storj/private/api"
 )
 
+// groupNameAndPrefixRegExp guarantees that Group name and prefix are empty or are only formed
+// by ASCII letters or digits and don't start with a digit.
+var groupNameAndPrefixRegExp = regexp.MustCompile(`^([A-Za-z][0-9A-Za-z]*)?$`)
+
 // API represents specific API's configuration.
 type API struct {
-	Version     string
-	Description string
-	PackageName string
+	// Version is the corresponding version of the API.
+	// It's concatenated to the BasePath, so assuming the base path is "/api" and the version is "v1"
+	// the API paths will begin with `/api/v1`.
+	// When empty, the version doesn't appear in the API paths. If it starts or ends with one or more
+	// "/", they are stripped from the API endpoint paths.
+	Version     string
+	Description string
+	// The package name to use for the Go generated code.
+	// If omitted, the last segment of the PackagePath will be used as the package name.
+	PackageName string
+	// The path of the package that will use the generated Go code.
+	// This is used to prevent the code from importing its own package.
+	PackagePath string
+	// BasePath is the base path for the API endpoints. E.g. "/api".
+	// It doesn't have to begin with "/". When empty, "/" is used.
+	BasePath string
 	Auth           api.Auth
 	EndpointGroups []*EndpointGroup
 }
 
 // Group adds new endpoints group to API.
+// name must be `^([A-Z0-9]\w*)?$`.
+// prefix must be `^\w*$`.
 func (a *API) Group(name, prefix string) *EndpointGroup {
+	if !groupNameAndPrefixRegExp.MatchString(name) {
+		panic(
+			fmt.Sprintf(
+				"invalid name for API Endpoint Group. name must fulfill the regular expression %q, got %q",
+				groupNameAndPrefixRegExp,
+				name,
+			),
+		)
+	}
+	if !groupNameAndPrefixRegExp.MatchString(prefix) {
+		panic(
+			fmt.Sprintf(
+				"invalid prefix for API Endpoint Group %q. prefix must fulfill the regular expression %q, got %q",
+				name,
+				groupNameAndPrefixRegExp,
+				prefix,
+			),
+		)
+	}
+
+	for _, g := range a.EndpointGroups {
+		if strings.EqualFold(g.Name, name) {
+			panic(fmt.Sprintf("name has to be case-insensitive unique across all the groups. name=%q", name))
+		}
+		if strings.EqualFold(g.Prefix, prefix) {
+			panic(fmt.Sprintf("prefix has to be case-insensitive unique across all the groups. prefix=%q", prefix))
+		}
+	}
+
 	group := &EndpointGroup{
 		Name:   name,
 		Prefix: prefix,
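A short usage sketch of the new checks (group names here are illustrative):

	func ExampleAPI_Group() {
		a := &API{}
		// OK: ASCII letters/digits only, not starting with a digit.
		_ = a.Group("Projects", "projects")

		// Each of the following now panics:
		// a.Group("1Projects", "p1")   // name starts with a digit
		// a.Group("Pro-jects", "p2")   // '-' is rejected by groupNameAndPrefixRegExp
		// a.Group("PROJECTS", "p3")    // name collides case-insensitively with "Projects"
		// a.Group("Other", "PROJECTS") // prefix collides case-insensitively with "projects"
	}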
@@ -32,6 +85,14 @@ func (a *API) Group(name, prefix string) *EndpointGroup {
 	return group
 }
 
+func (a *API) endpointBasePath() string {
+	if strings.HasPrefix(a.BasePath, "/") {
+		return path.Join(a.BasePath, a.Version)
+	}
+
+	return "/" + path.Join(a.BasePath, a.Version)
+}
+
 // StringBuilder is an extension of strings.Builder that allows for writing formatted lines.
 type StringBuilder struct{ strings.Builder }
 
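Because path.Join also cleans its result, stray slashes in BasePath and Version collapse. Two concrete evaluations (the expected values match the test table added later in this changeset):

	func demoEndpointBasePath() (string, string) {
		a := API{BasePath: "api", Version: "v2"}
		b := API{BasePath: "/api/test/", Version: "/v3///"}
		// "/api/v2": BasePath has no leading "/", so one is prepended.
		// "/api/test/v3": path.Join drops the redundant slashes.
		return a.endpointBasePath(), b.endpointBasePath()
	}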
@@ -51,17 +112,6 @@ func getElementaryType(t reflect.Type) reflect.Type {
 	}
 }
 
-// filter returns a new slice of reflect.Type values that satisfy the given keep function.
-func filter(types []reflect.Type, keep func(reflect.Type) bool) []reflect.Type {
-	filtered := make([]reflect.Type, 0, len(types))
-	for _, t := range types {
-		if keep(t) {
-			filtered = append(filtered, t)
-		}
-	}
-	return filtered
-}
-
 // isNillableType returns whether instances of the given type can be nil.
 func isNillableType(t reflect.Type) bool {
 	switch t.Kind() {
@@ -70,3 +120,100 @@ func isNillableType(t reflect.Type) bool {
 	}
 	return false
 }
+
+// isJSONOmittableType returns whether the "omitempty" JSON tag option works with struct fields of this type.
+func isJSONOmittableType(t reflect.Type) bool {
+	switch t.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String,
+		reflect.Bool,
+		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+		reflect.Float32, reflect.Float64,
+		reflect.Interface, reflect.Pointer:
+		return true
+	}
+	return false
+}
+
+func capitalize(s string) string {
+	r, size := utf8.DecodeRuneInString(s)
+	if size <= 0 {
+		return s
+	}
+
+	return string(unicode.ToTitle(r)) + s[size:]
+}
+
+func uncapitalize(s string) string {
+	r, size := utf8.DecodeRuneInString(s)
+	if size <= 0 {
+		return s
+	}
+
+	return string(unicode.ToLower(r)) + s[size:]
+}
+
+type typeAndName struct {
+	Type reflect.Type
+	Name string
+}
+
+func mapToSlice(typesAndNames map[reflect.Type]string) []typeAndName {
+	list := make([]typeAndName, 0, len(typesAndNames))
+	for t, n := range typesAndNames {
+		list = append(list, typeAndName{Type: t, Name: n})
+	}
+
+	sort.SliceStable(list, func(i, j int) bool {
+		return list[i].Name < list[j].Name
+	})
+
+	return list
+}
+
+// filter returns a new slice of typeAndName values that satisfy the given keep function.
+func filter(types []typeAndName, keep func(typeAndName) bool) []typeAndName {
+	filtered := make([]typeAndName, 0, len(types))
+	for _, t := range types {
+		if keep(t) {
+			filtered = append(filtered, t)
+		}
+	}
+	return filtered
+}
+
+type jsonTagInfo struct {
+	FieldName string
+	OmitEmpty bool
+	Skip      bool
+}
+
+func parseJSONTag(structType reflect.Type, field reflect.StructField) jsonTagInfo {
+	tag, ok := field.Tag.Lookup("json")
+	if !ok {
+		panic(fmt.Sprintf("(%s).%s missing json tag", structType.String(), field.Name))
+	}
+
+	options := strings.Split(tag, ",")
+	for i, opt := range options {
+		options[i] = strings.TrimSpace(opt)
+	}
+
+	fieldName := options[0]
+	if fieldName == "" {
+		panic(fmt.Sprintf("(%s).%s missing json field name", structType.String(), field.Name))
+	}
+	if fieldName == "-" && len(options) == 1 {
+		return jsonTagInfo{Skip: true}
+	}
+
+	info := jsonTagInfo{FieldName: fieldName}
+	for _, opt := range options[1:] {
+		if opt == "omitempty" {
+			info.OmitEmpty = isJSONOmittableType(field.Type)
+			break
+		}
+	}
+
+	return info
+}
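A small usage sketch of the new tag helper (the sample struct is illustrative):

	type sample struct {
		ID   string `json:"id"`
		Note string `json:"note,omitempty"`
		Skip string `json:"-"`
	}

	func demoParseJSONTag() {
		t := reflect.TypeOf(sample{})
		_ = parseJSONTag(t, t.Field(0)) // {FieldName: "id"}
		_ = parseJSONTag(t, t.Field(1)) // {FieldName: "note", OmitEmpty: true}; string is omittable
		_ = parseJSONTag(t, t.Field(2)) // {Skip: true}; a field without any json tag would panic instead
	}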
private/apigen/common_test.go (new file, 118 lines)
@@ -0,0 +1,118 @@
+// Copyright (C) 2023 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package apigen
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestAPI_endpointBasePath(t *testing.T) {
+	cases := []struct {
+		version  string
+		basePath string
+		expected string
+	}{
+		{version: "", basePath: "", expected: "/"},
+		{version: "v1", basePath: "", expected: "/v1"},
+		{version: "v0", basePath: "/", expected: "/v0"},
+		{version: "", basePath: "api", expected: "/api"},
+		{version: "v2", basePath: "api", expected: "/api/v2"},
+		{version: "v2", basePath: "/api", expected: "/api/v2"},
+		{version: "v2", basePath: "api/", expected: "/api/v2"},
+		{version: "v2", basePath: "/api/", expected: "/api/v2"},
+		{version: "/v3", basePath: "api", expected: "/api/v3"},
+		{version: "/v3/", basePath: "api", expected: "/api/v3"},
+		{version: "v3/", basePath: "api", expected: "/api/v3"},
+		{version: "//v3/", basePath: "api", expected: "/api/v3"},
+		{version: "v3///", basePath: "api", expected: "/api/v3"},
+		{version: "/v3///", basePath: "/api/test/", expected: "/api/test/v3"},
+		{version: "/v4.2", basePath: "api/test", expected: "/api/test/v4.2"},
+		{version: "/v4/2", basePath: "/api/test", expected: "/api/test/v4/2"},
+	}
+
+	for _, c := range cases {
+		t.Run(fmt.Sprintf("version:%s basePath: %s", c.version, c.basePath), func(t *testing.T) {
+			a := API{
+				Version:  c.version,
+				BasePath: c.basePath,
+			}
+
+			assert.Equal(t, c.expected, a.endpointBasePath())
+		})
+	}
+}
+
+func TestAPI_Group(t *testing.T) {
+	t.Run("valid name and prefix", func(t *testing.T) {
+		api := API{}
+
+		require.NotPanics(t, func() {
+			api.Group("testName", "tName")
+		})
+
+		require.NotPanics(t, func() {
+			api.Group("TestName1", "TName1")
+		})
+	})
+
+	t.Run("invalid name", func(t *testing.T) {
+		api := API{}
+
+		require.Panics(t, func() {
+			api.Group("1testName", "tName")
+		})
+
+		require.Panics(t, func() {
+			api.Group("test-name", "tName")
+		})
+	})
+
+	t.Run("invalid prefix", func(t *testing.T) {
+		api := API{}
+
+		require.Panics(t, func() {
+			api.Group("testName", "5tName")
+		})
+
+		require.Panics(t, func() {
+			api.Group("testname", "t_name")
+		})
+	})
+
+	t.Run("group with repeated name", func(t *testing.T) {
+		api := API{}
+
+		require.NotPanics(t, func() {
+			api.Group("testName", "tName")
+		})
+
+		require.Panics(t, func() {
+			api.Group("TESTNAME", "tName2")
+		})
+
+		require.Panics(t, func() {
+			api.Group("testname", "tName3")
+		})
+	})
+
+	t.Run("group with repeated prefix", func(t *testing.T) {
+		api := API{}
+
+		require.NotPanics(t, func() {
+			api.Group("testName", "tName")
+		})
+
+		require.Panics(t, func() {
+			api.Group("testName2", "tname")
+		})
+
+		require.Panics(t, func() {
+			api.Group("testname3", "tnamE")
+		})
+	})
+}
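To run just these tests locally with the standard Go tooling (package path as in the file header above):

	go test ./private/apigen -run 'TestAPI_endpointBasePath|TestAPI_Group' -v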
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"os"
 	"reflect"
+	"regexp"
 	"strings"
 	"time"
 
@@ -34,14 +35,39 @@ func (api *API) generateDocumentation() string {
 	wf := func(format string, args ...any) { _, _ = fmt.Fprintf(&doc, format, args...) }
 
 	wf("# API Docs\n\n")
-	wf("**Description:** %s\n\n", api.Description)
-	wf("**Version:** `%s`\n\n", api.Version)
+	if api.Description != "" {
+		wf("**Description:** %s\n\n", api.Description)
+	}
+
+	if api.Version != "" {
+		wf("**Version:** `%s`\n\n", api.Version)
+	}
+
+	wf("<h2 id='list-of-endpoints'>List of Endpoints</h2>\n\n")
+	getEndpointLink := func(group, endpoint string) string {
+		fullName := group + "-" + endpoint
+		fullName = strings.ReplaceAll(fullName, " ", "-")
+		nonAlphanumericRegex := regexp.MustCompile(`[^a-zA-Z0-9-]+`)
+		fullName = nonAlphanumericRegex.ReplaceAllString(fullName, "")
+		return strings.ToLower(fullName)
+	}
+	for _, group := range api.EndpointGroups {
+		wf("* %s\n", group.Name)
+		for _, endpoint := range group.endpoints {
+			wf(" * [%s](#%s)\n", endpoint.Name, getEndpointLink(group.Name, endpoint.Name))
+		}
+	}
+	wf("\n")
 
 	for _, group := range api.EndpointGroups {
 		for _, endpoint := range group.endpoints {
-			wf("## %s\n\n", endpoint.Name)
+			wf(
+				"<h3 id='%s'>%s (<a href='#list-of-endpoints'>go to full list</a>)</h3>\n\n",
+				getEndpointLink(group.Name, endpoint.Name),
+				endpoint.Name,
+			)
 			wf("%s\n\n", endpoint.Description)
-			wf("`%s /%s%s`\n\n", endpoint.Method, group.Prefix, endpoint.Path)
+			wf("`%s %s/%s%s`\n\n", endpoint.Method, api.endpointBasePath(), group.Prefix, endpoint.Path)
 
 			if len(endpoint.QueryParams) > 0 {
 				wf("**Query Params:**\n\n")
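For illustration, a standalone equivalent of the getEndpointLink closure and one hypothetical evaluation:

	// anchorFor mirrors getEndpointLink: join with "-", spaces to dashes,
	// strip everything but letters/digits/dashes, then lowercase.
	func anchorFor(group, endpoint string) string {
		fullName := strings.ReplaceAll(group+"-"+endpoint, " ", "-")
		fullName = regexp.MustCompile(`[^a-zA-Z0-9-]+`).ReplaceAllString(fullName, "")
		return strings.ToLower(fullName)
	}

	// anchorFor("Projects", "Create new Project!") == "projects-create-new-project"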
@@ -66,13 +92,13 @@ func (api *API) generateDocumentation() string {
 			requestType := reflect.TypeOf(endpoint.Request)
 			if requestType != nil {
 				wf("**Request body:**\n\n")
-				wf("```json\n%s\n```\n\n", getTypeNameRecursively(requestType, 0))
+				wf("```typescript\n%s\n```\n\n", getTypeNameRecursively(requestType, 0))
 			}
 
 			responseType := reflect.TypeOf(endpoint.Response)
 			if responseType != nil {
 				wf("**Response body:**\n\n")
-				wf("```json\n%s\n```\n\n", getTypeNameRecursively(responseType, 0))
+				wf("```typescript\n%s\n```\n\n", getTypeNameRecursively(responseType, 0))
 			}
 		}
 	}
@@ -123,7 +149,6 @@ func getTypeNameRecursively(t reflect.Type, level int) string {
 		elemType := t.Elem()
 		if elemType.Kind() == reflect.Uint8 { // treat []byte as string in docs
 			return prefix + "string"
-
 		}
 		return fmt.Sprintf("%s[\n%s\n%s]\n", prefix, getTypeNameRecursively(elemType, level+1), prefix)
 	case reflect.Struct:
@@ -132,7 +157,7 @@ func getTypeNameRecursively(t reflect.Type, level int) string {
 		if typeName != "unknown" {
 			toReturn := typeName
 			if len(elaboration) > 0 {
-				toReturn += " (" + elaboration + ")"
+				toReturn += " // " + elaboration
 			}
 			return toReturn
 		}
@@ -140,9 +165,9 @@ func getTypeNameRecursively(t reflect.Type, level int) string {
 		var fields []string
 		for i := 0; i < t.NumField(); i++ {
 			field := t.Field(i)
-			jsonTag := field.Tag.Get("json")
-			if jsonTag != "" && jsonTag != "-" {
-				fields = append(fields, prefix+"\t"+jsonTag+": "+getTypeNameRecursively(field.Type, level+1))
+			jsonInfo := parseJSONTag(t, field)
+			if !jsonInfo.Skip {
+				fields = append(fields, prefix+"\t"+jsonInfo.FieldName+": "+getTypeNameRecursively(field.Type, level+1))
 			}
 		}
 		return fmt.Sprintf("%s{\n%s\n%s}\n", prefix, strings.Join(fields, "\n"), prefix)
@@ -150,7 +175,7 @@ func getTypeNameRecursively(t reflect.Type, level int) string {
 		typeName, elaboration := getDocType(t)
 		toReturn := typeName
 		if len(elaboration) > 0 {
-			toReturn += " (" + elaboration + ")"
+			toReturn += " // " + elaboration
 		}
 		return toReturn
 	}
@ -4,75 +4,256 @@
|
|||||||
package apigen
|
package apigen
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
"unicode"
|
||||||
|
|
||||||
|
"github.com/zeebo/errs"
|
||||||
|
|
||||||
|
"storj.io/common/uuid"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errsEndpoint = errs.Class("Endpoint")
|
||||||
|
|
||||||
|
goNameRegExp = regexp.MustCompile(`^[A-Z]\w*$`)
|
||||||
|
typeScriptNameRegExp = regexp.MustCompile(`^[a-z][a-zA-Z0-9_$]*$`)
|
||||||
)
|
)
|
||||||
|
|
||||||
// Endpoint represents endpoint's configuration.
|
// Endpoint represents endpoint's configuration.
|
||||||
|
//
|
||||||
|
// Passing an anonymous type to the fields that define the request or response will make the API
|
||||||
|
// generator to panic. Anonymous types aren't allowed such as named structs that have fields with
|
||||||
|
// direct or indirect of anonymous types, slices or arrays whose direct or indirect elements are of
|
||||||
|
// anonymous types.
|
||||||
type Endpoint struct {
|
type Endpoint struct {
|
||||||
Name string
|
// Name is a free text used to name the endpoint for documentation purpose.
|
||||||
Description string
|
// It cannot be empty.
|
||||||
MethodName string
|
Name string
|
||||||
RequestName string
|
// Description is a free text to describe the endpoint for documentation purpose.
|
||||||
NoCookieAuth bool
|
Description string
|
||||||
NoAPIAuth bool
|
// GoName is an identifier used by the Go generator to generate specific server side code for this
|
||||||
Request interface{}
|
// endpoint.
|
||||||
Response interface{}
|
//
|
||||||
QueryParams []Param
|
// It must start with an uppercase letter and fulfill the Go language specification for method
|
||||||
PathParams []Param
|
// names (https://go.dev/ref/spec#MethodName).
|
||||||
|
// It cannot be empty.
|
||||||
|
GoName string
|
||||||
|
// TypeScriptName is an identifier used by the TypeScript generator to generate specific client
|
||||||
|
// code for this endpoint
|
||||||
|
//
|
||||||
|
// It must start with a lowercase letter and can only contains letters, digits, _, and $.
|
||||||
|
// It cannot be empty.
|
||||||
|
TypeScriptName string
|
||||||
|
// Request is the type that defines the format of the request body.
|
||||||
|
Request interface{}
|
||||||
|
// Response is the type that defines the format of the response body.
|
||||||
|
Response interface{}
|
||||||
|
// QueryParams is the list of query parameters that the endpoint accepts.
|
||||||
|
QueryParams []Param
|
||||||
|
// PathParams is the list of path parameters that appear in the path associated with this
|
||||||
|
// endpoint.
|
||||||
|
PathParams []Param
|
||||||
|
// ResponseMock is the data to use as a response for the generated mocks.
|
||||||
|
// It must be of the same type than Response.
|
||||||
|
// If a mock generator is called it must not be nil unless Response is nil.
|
||||||
|
ResponseMock interface{}
|
||||||
|
// Settings is the data to pass to the middleware handlers to adapt the generated
|
||||||
|
// code to this endpoints.
|
||||||
|
//
|
||||||
|
// Not all the middlware handlers need extra data. Some of them use this data to disable it in
|
||||||
|
// some endpoints.
|
||||||
|
Settings map[any]any
|
||||||
}
|
}
|
||||||
 
-// CookieAuth returns endpoint's cookie auth status.
-func (e *Endpoint) CookieAuth() bool {
-	return !e.NoCookieAuth
-}
+// Validate validates that the endpoint field values are correct according to the documented
+// constraints.
+func (e *Endpoint) Validate() error {
+	newErr := func(m string, a ...any) error {
+		e := fmt.Sprintf(". Endpoint: %s", e.Name)
+		m += e
+		return errsEndpoint.New(m, a...)
+	}
+
+	if e.Name == "" {
+		return newErr("Name cannot be empty")
+	}
+
+	if e.Description == "" {
+		return newErr("Description cannot be empty")
+	}
+
+	if !goNameRegExp.MatchString(e.GoName) {
+		return newErr("GoName doesn't match the regular expression %q", goNameRegExp)
+	}
+
+	if !typeScriptNameRegExp.MatchString(e.TypeScriptName) {
+		return newErr("TypeScriptName doesn't match the regular expression %q", typeScriptNameRegExp)
+	}
+
+	if e.Request != nil {
+		switch t := reflect.TypeOf(e.Request); t.Kind() {
+		case reflect.Invalid,
+			reflect.Complex64,
+			reflect.Complex128,
+			reflect.Chan,
+			reflect.Func,
+			reflect.Interface,
+			reflect.Map,
+			reflect.Pointer,
+			reflect.UnsafePointer:
+			return newErr("Request cannot be of a type %q", t.Kind())
+		case reflect.Array, reflect.Slice:
+			if t.Elem().Name() == "" {
+				return newErr("Request cannot be of %q of anonymous struct elements", t.Kind())
+			}
+		case reflect.Struct:
+			if t.Name() == "" {
+				return newErr("Request cannot be of an anonymous struct")
+			}
+		}
+	}
+
+	if e.Response != nil {
+		switch t := reflect.TypeOf(e.Response); t.Kind() {
+		case reflect.Invalid,
+			reflect.Complex64,
+			reflect.Complex128,
+			reflect.Chan,
+			reflect.Func,
+			reflect.Interface,
+			reflect.Map,
+			reflect.Pointer,
+			reflect.UnsafePointer:
+			return newErr("Response cannot be of a type %q", t.Kind())
+		case reflect.Array, reflect.Slice:
+			if t.Elem().Name() == "" {
+				return newErr("Response cannot be of %q of anonymous struct elements", t.Kind())
+			}
+		case reflect.Struct:
+			if t.Name() == "" {
+				return newErr("Response cannot be of an anonymous struct")
+			}
+		}
+
+		if e.ResponseMock != nil {
+			if m, r := reflect.TypeOf(e.ResponseMock), reflect.TypeOf(e.Response); m != r {
+				return newErr(
+					"ResponseMock isn't of the same type than Response. Have=%q Want=%q", m, r,
+				)
+			}
+		}
+	}
+
+	return nil
+}
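Taken together, these constraints mean the smallest endpoint that passes Validate only needs the four naming and documentation fields. A brief sketch, reusing the values from the package's own new tests:

```go
func exampleValidate() {
	ep := &Endpoint{
		Name:           "Test Endpoint",
		Description:    "This is an Endpoint purely for testing purposes",
		GoName:         "GenTest",
		TypeScriptName: "genTest",
	}
	if err := ep.Validate(); err != nil {
		panic(err) // addEndpoint (below) panics the same way on invalid endpoints
	}
}
```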
 
-// APIAuth returns endpoint's API auth status.
-func (e *Endpoint) APIAuth() bool {
-	return !e.NoAPIAuth
-}
-
-// fullEndpoint represents endpoint with path and method.
-type fullEndpoint struct {
+// FullEndpoint represents an endpoint together with its path and method.
+type FullEndpoint struct {
 	Endpoint
 	Path   string
 	Method string
 }
 
 // EndpointGroup represents endpoints group.
+// You should always create a group using API.Group because it validates the field values to
+// guarantee correct code generation.
 type EndpointGroup struct {
-	Name      string
-	Prefix    string
-	endpoints []*fullEndpoint
+	// Name is the group name.
+	//
+	// The Go generator uses it as part of type, function, and interface names, and in code
+	// comments. The casing is adjusted according to where it's used.
+	//
+	// The TypeScript generator uses it as part of the type names for the API functionality of
+	// this group. The casing is adjusted according to where it's used.
+	//
+	// The document generator uses it as is.
+	Name string
+	// Prefix is a prefix used for composing the group's endpoint URLs and generated identifiers.
+	//
+	// The Go generator uses it as part of variable names, error messages, and the URL base path
+	// for the group. The casing is adjusted according to where it's used, but for the URL base
+	// path, lowercase is used.
+	//
+	// The TypeScript generator uses it for composing the URL base path (lowercase).
+	//
+	// The document generator uses it as is.
+	Prefix string
+	// Middleware is a list of additional processing of requests that applies to all the endpoints
+	// of this group.
+	Middleware []Middleware
+	// endpoints is the list of endpoints added to this group through the "HTTP method" methods
+	// (e.g. Get, Patch, etc.).
+	endpoints []*FullEndpoint
 }
 
 // Get adds new GET endpoint to endpoints group.
+// It panics if path doesn't begin with '/'.
 func (eg *EndpointGroup) Get(path string, endpoint *Endpoint) {
 	eg.addEndpoint(path, http.MethodGet, endpoint)
 }
 
 // Patch adds new PATCH endpoint to endpoints group.
+// It panics if path doesn't begin with '/'.
 func (eg *EndpointGroup) Patch(path string, endpoint *Endpoint) {
 	eg.addEndpoint(path, http.MethodPatch, endpoint)
 }
 
 // Post adds new POST endpoint to endpoints group.
+// It panics if path doesn't begin with '/'.
 func (eg *EndpointGroup) Post(path string, endpoint *Endpoint) {
 	eg.addEndpoint(path, http.MethodPost, endpoint)
 }
 
 // Delete adds new DELETE endpoint to endpoints group.
+// It panics if path doesn't begin with '/'.
 func (eg *EndpointGroup) Delete(path string, endpoint *Endpoint) {
 	eg.addEndpoint(path, http.MethodDelete, endpoint)
 }
 
 // addEndpoint adds new endpoint to endpoints list.
+// It panics if:
+//   - path doesn't begin with '/'.
+//   - endpoint.Validate() returns an error.
+//   - an Endpoint with the same path and method already exists.
 func (eg *EndpointGroup) addEndpoint(path, method string, endpoint *Endpoint) {
-	ep := &fullEndpoint{*endpoint, path, method}
-	for i, e := range eg.endpoints {
+	if !strings.HasPrefix(path, "/") {
+		panic(
+			fmt.Sprintf(
+				"invalid path for method %q of EndpointGroup %q. path must start with slash, got %q",
+				method,
+				eg.Name,
+				path,
+			),
+		)
+	}
+
+	if err := endpoint.Validate(); err != nil {
+		panic(err)
+	}
+
+	ep := &FullEndpoint{*endpoint, path, method}
+	for _, e := range eg.endpoints {
 		if e.Path == path && e.Method == method {
-			eg.endpoints[i] = ep
-			return
+			panic(fmt.Sprintf("there is already an endpoint defined with path %q and method %q", path, method))
+		}
+
+		if e.GoName == ep.GoName {
+			panic(
+				fmt.Sprintf("GoName %q is already used by the endpoint with path %q and method %q", e.GoName, e.Path, e.Method),
+			)
+		}
+
+		if e.TypeScriptName == ep.TypeScriptName {
+			panic(
+				fmt.Sprintf(
+					"TypeScriptName %q is already used by the endpoint with path %q and method %q",
+					e.TypeScriptName,
+					e.Path,
+					e.Method,
+				),
+			)
 		}
 	}
 	eg.endpoints = append(eg.endpoints, ep)
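The old replace-on-duplicate behavior is gone: registering the same path and method, GoName, or TypeScriptName twice now panics, as the new tests assert. A small sketch (endpointFn is a hypothetical helper returning a valid *Endpoint, mirroring the helper in the tests; real groups should come from API.Group):

```go
func exampleRegister() {
	eg := EndpointGroup{Name: "Documents", Prefix: "docs"}

	eg.Get("/docs", endpointFn("GenTest", "genTest"))

	// Each of these would now panic instead of replacing the first endpoint:
	// eg.Get("/docs", endpointFn("Other", "other"))    // duplicate path + method
	// eg.Post("/docs", endpointFn("GenTest", "other")) // duplicate GoName
	// eg.Post("/docs", endpointFn("Other", "genTest")) // duplicate TypeScriptName
}
```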
@@ -84,10 +265,176 @@ type Param struct {
 	Type reflect.Type
 }
 
-// NewParam constructor which creates new Param entity by given name and type.
+// NewParam constructor which creates new Param entity by given name and type through instance.
+//
+// instance can only be an unsigned integer (of any size), string, uuid.UUID or time.Time,
+// otherwise it panics.
 func NewParam(name string, instance interface{}) Param {
+	switch t := reflect.TypeOf(instance); t {
+	case reflect.TypeOf(uuid.UUID{}), reflect.TypeOf(time.Time{}):
+	default:
+		switch k := t.Kind(); k {
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.String:
+		default:
+			panic(
+				fmt.Sprintf(
+					`Unsupported parameter, only types: %q, %q, string, and "unsigned numbers" are supported . Found type=%q, Kind=%q`,
+					reflect.TypeOf(uuid.UUID{}),
+					reflect.TypeOf(time.Time{}),
+					t,
+					k,
+				),
+			)
+		}
+	}
+
 	return Param{
 		Name: name,
 		Type: reflect.TypeOf(instance),
 	}
 }
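A short sketch of the accepted parameter kinds; anything else now panics at definition time rather than surfacing as broken generated code:

```go
package apigen

import (
	"time"

	"storj.io/common/uuid"
)

func exampleParams() []Param {
	return []Param{
		NewParam("path", ""),          // string
		NewParam("id", uuid.UUID{}),   // uuid.UUID
		NewParam("date", time.Time{}), // time.Time
		NewParam("limit", uint(0)),    // unsigned integers of any size
		// NewParam("offset", int(0)) would panic: signed integers aren't supported.
	}
}
```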
 
+// Middleware allows generating custom code that's executed at the beginning of the handler.
+//
+// The implementation must declare its dependencies through unexported struct fields which don't
+// begin with an underscore (_), except fields whose name is just an underscore (the blank
+// identifier). The API generator will add the imports for those dependencies and allow passing
+// them through the constructor parameters of the group handler implementation, except the fields
+// named with the blank identifier, which should only be used to import packages that the
+// generated code needs.
+//
+// The limitation of using fields with the blank identifier as their names is that those packages
+// must export at least one type, hence it isn't possible to import packages that only export
+// constants or variables.
+//
+// Middleware implementations with the same struct field name and type will be handled as one
+// parameter, so the dependency will be shared between them. If they have the same struct field
+// name, but a different type, the API generator will panic.
+// NOTE types are compared as [package].[type name], hence package name collisions are not handled
+// and will produce code that doesn't compile.
+type Middleware interface {
+	// Generate generates the code that the API generator adds to a handler endpoint before
+	// calling the service.
+	//
+	// All the dependencies defined as struct fields of the implementation of this interface are
+	// available as fields of the handler struct. The generated code is executed inside the
+	// methods of the handler struct, hence it has access to all its fields. The handler instance
+	// is available through the variable name h. For example:
+	//
+	//	type middlewareImpl struct {
+	//		log  *zap.Logger // Import path: "go.uber.org/zap"
+	//		auth api.Auth    // Import path: "storj.io/storj/private/api"
+	//	}
+	//
+	// The generated code can access log and auth through h.log and h.auth.
+	//
+	// Each handler method where the code is executed has access to the following variable names:
+	// ctx of type context.Context, w of type http.ResponseWriter, and r of type *http.Request.
+	// Make sure not to declare variables with those names in the generated code unless they are
+	// wrapped in a scope.
+	Generate(api *API, group *EndpointGroup, ep *FullEndpoint) string
+}
+
+func middlewareImports(m any) []string {
+	imports := []string{}
+	middlewareWalkFields(m, func(f reflect.StructField) {
+		if p := f.Type.PkgPath(); p != "" {
+			imports = append(imports, p)
+		}
+	})
+
+	return imports
+}
+
+// middlewareFields returns the list of fields of a middleware implementation. It panics if m
+// isn't a struct type, it has embedded fields, or it has unexported fields.
+func middlewareFields(api *API, m any) []middlewareField {
+	fields := []middlewareField{}
+	middlewareWalkFields(m, func(f reflect.StructField) {
+		if f.Name == "_" {
+			return
+		}
+
+		psymbol := ""
+		t := f.Type
+		if t.Kind() == reflect.Pointer {
+			psymbol = "*"
+			t = f.Type.Elem()
+		}
+
+		typeref := psymbol + t.Name()
+		if p := t.PkgPath(); p != "" && p != api.PackagePath {
+			pn, _ := importPath(p).PkgName()
+			typeref = fmt.Sprintf("%s%s.%s", psymbol, pn, t.Name())
+		}
+		fields = append(fields, middlewareField{Name: f.Name, Type: typeref})
+	})
+
+	return fields
+}
+
+func middlewareWalkFields(m any, walk func(f reflect.StructField)) {
+	t := reflect.TypeOf(m)
+	if t.Kind() != reflect.Struct {
+		panic(fmt.Sprintf("middleware %q isn't a struct type", t.Name()))
+	}
+
+	for i := 0; i < t.NumField(); i++ {
+		f := t.FieldByIndex([]int{i})
+		if f.Anonymous {
+			panic(fmt.Sprintf("middleware %q has a embedded field %q", t.Name(), f.Name))
+		}
+
+		if f.Name != "_" {
+			// Disallow fields that begin with underscore.
+			if !unicode.IsLetter([]rune(f.Name)[0]) {
+				panic(
+					fmt.Sprintf(
+						"middleware %q has a field name beginning with no letter %q. Change it to begin with lower case letter",
+						t.Name(),
+						f.Name,
+					),
+				)
+			}
+
+			if unicode.IsUpper([]rune(f.Name)[0]) {
+				panic(
+					fmt.Sprintf(
+						"middleware %q has a field name beginning with upper case %q. Change it to begin with lower case",
+						t.Name(),
+						f.Name,
+					),
+				)
+			}
+		}
+
+		walk(f)
+	}
+}
+
+// middlewareField has the name and type of a field to add to the handler structs that the
+// API generator creates during the generation phase.
+type middlewareField struct {
+	// Name is the name of the field. It must fulfill the Go identifier specification
+	// https://go.dev/ref/spec#Identifiers
+	Name string
+	// Type is the type's name of the field.
+	Type string
+}
+
+// LoadSetting returns from endpoint.Settings the value assigned to key or
+// returns defaultValue if the key doesn't exist.
+//
+// It panics if key doesn't have a value of the type T.
+func LoadSetting[T any](key any, endpoint *FullEndpoint, defaultValue T) T {
+	v, ok := endpoint.Settings[key]
+	if !ok {
+		return defaultValue
+	}
+
+	vt, vtok := v.(T)
+	if !vtok {
+		panic(fmt.Sprintf("expected %T got %T", vt, v))
+	}
+
+	return vt
+}
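How the pieces fit: a Middleware contributes its struct fields to the handler's constructor and returns a code string that is spliced into each handler method. A hedged sketch in the spirit of the example's authMiddleware, whose body is not part of this diff; noAuthKey is a hypothetical Settings key (the example's main.go defines its own NoAPIKey and NoCookie keys):

```go
package apigen

import (
	"go.uber.org/zap"

	"storj.io/storj/private/api"
)

// noAuthKey is a hypothetical Settings key used to opt an endpoint out of auth.
type noAuthKey struct{}

// authSketch's unexported fields become constructor parameters of the
// generated handler and are reachable as h.log and h.auth in the generated
// method bodies.
type authSketch struct {
	log  *zap.Logger // Import path: "go.uber.org/zap"
	auth api.Auth    // Import path: "storj.io/storj/private/api"
}

// Generate returns the code spliced into each handler method, where ctx, w,
// and r are already in scope.
func (authSketch) Generate(_ *API, _ *EndpointGroup, ep *FullEndpoint) string {
	if LoadSetting(noAuthKey{}, ep, false) {
		return "" // this endpoint opted out of authentication
	}
	return `ctx, err = h.auth.IsAuthenticated(ctx, r, true, true)
	if err != nil {
		h.auth.RemoveAuthCookie(w)
		api.ServeError(h.log, w, http.StatusUnauthorized, err)
		return
	}`
}
```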
private/apigen/endpoint_test.go (new file, 290 lines)

// Copyright (C) 2022 Storj Labs, Inc.
// See LICENSE for copying information.

package apigen

import (
	"fmt"
	"math/rand"
	"net/http"
	"reflect"
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestEndpoint_Validate(t *testing.T) {
	validEndpoint := Endpoint{
		Name:           "Test Endpoint",
		Description:    "This is an Endpoint purely for testing purposes",
		GoName:         "GenTest",
		TypeScriptName: "genTest",
	}

	tcases := []struct {
		testName   string
		endpointFn func() *Endpoint
		errMsg     string
	}{
		{
			testName: "valid endpoint",
			endpointFn: func() *Endpoint {
				return &validEndpoint
			},
		},
		{
			testName: "empty name",
			endpointFn: func() *Endpoint {
				e := validEndpoint
				e.Name = ""
				return &e
			},
			errMsg: "Name cannot be empty",
		},
		{
			testName: "empty description",
			endpointFn: func() *Endpoint {
				e := validEndpoint
				e.Description = ""
				return &e
			},
			errMsg: "Description cannot be empty",
		},
		{
			testName: "empty Go name",
			endpointFn: func() *Endpoint {
				e := validEndpoint
				e.GoName = ""
				return &e
			},
			errMsg: "GoName doesn't match the regular expression",
		},
		{
			testName: "no capitalized Go name ",
			endpointFn: func() *Endpoint {
				e := validEndpoint
				e.GoName = "genTest"
				return &e
			},
			errMsg: "GoName doesn't match the regular expression",
		},
		{
			testName: "symbol in Go name",
			endpointFn: func() *Endpoint {
				e := validEndpoint
				e.GoName = "GenTe$t"
				return &e
			},
			errMsg: "GoName doesn't match the regular expression",
		},
		{
			testName: "empty TypeScript name",
			endpointFn: func() *Endpoint {
				e := validEndpoint
				e.TypeScriptName = ""
				return &e
			},
			errMsg: "TypeScriptName doesn't match the regular expression",
		},
		{
			testName: "capitalized TypeScript name ",
			endpointFn: func() *Endpoint {
				e := validEndpoint
				e.TypeScriptName = "GenTest"
				return &e
			},
			errMsg: "TypeScriptName doesn't match the regular expression",
		},
		{
			testName: "dash in TypeScript name",
			endpointFn: func() *Endpoint {
				e := validEndpoint
				e.TypeScriptName = "genTest-2"
				return &e
			},
			errMsg: "TypeScriptName doesn't match the regular expression",
		},
		{
			testName: "invalid Request type",
			endpointFn: func() *Endpoint {
				request := &struct {
					Name string `json:"name"`
				}{}
				e := validEndpoint
				e.Request = request
				return &e
			},
			errMsg: fmt.Sprintf("Request cannot be of a type %q", reflect.Pointer),
		},
		{
			testName: "invalid Response type",
			endpointFn: func() *Endpoint {
				e := validEndpoint
				e.Response = map[string]string{}
				return &e
			},
			errMsg: fmt.Sprintf("Response cannot be of a type %q", reflect.Map),
		},
		{
			testName: "different ResponseMock type",
			endpointFn: func() *Endpoint {
				e := validEndpoint
				e.Response = int(0)
				e.ResponseMock = int8(0)
				return &e
			},
			errMsg: fmt.Sprintf(
				"ResponseMock isn't of the same type than Response. Have=%q Want=%q",
				reflect.TypeOf(int8(0)),
				reflect.TypeOf(int(0)),
			),
		},
	}

	for _, tc := range tcases {
		t.Run(tc.testName, func(t *testing.T) {
			ep := tc.endpointFn()

			err := ep.Validate()

			if tc.errMsg == "" {
				require.NoError(t, err)
				return
			}

			require.Error(t, err)
			require.ErrorContains(t, err, tc.errMsg)
		})
	}
}

func TestEndpointGroup(t *testing.T) {
	t.Run("add endpoints", func(t *testing.T) {
		endpointFn := func(postfix string) *Endpoint {
			return &Endpoint{
				Name:           "Test Endpoint",
				Description:    "This is an Endpoint purely for testing purposes",
				GoName:         "GenTest" + postfix,
				TypeScriptName: "genTest" + postfix,
			}
		}

		path := "/" + strconv.Itoa(rand.Int())
		eg := EndpointGroup{}

		assert.NotPanics(t, func() { eg.Get(path, endpointFn(http.MethodGet)) }, "Get")
		assert.NotPanics(t, func() { eg.Patch(path, endpointFn(http.MethodPatch)) }, "Patch")
		assert.NotPanics(t, func() { eg.Post(path, endpointFn(http.MethodPost)) }, "Post")
		assert.NotPanics(t, func() { eg.Delete(path, endpointFn(http.MethodDelete)) }, "Delete")

		require.Len(t, eg.endpoints, 4, "Group endpoints count")
		for i, m := range []string{http.MethodGet, http.MethodPatch, http.MethodPost, http.MethodDelete} {
			ep := eg.endpoints[i]
			assert.Equal(t, m, ep.Method)
			assert.Equal(t, path, ep.Path)
			assert.EqualValues(t, endpointFn(m), &ep.Endpoint)
		}
	})

	t.Run("path does not begin with slash", func(t *testing.T) {
		endpointFn := func(postfix string) *Endpoint {
			return &Endpoint{
				Name:           "Test Endpoint",
				Description:    "This is an Endpoint purely for testing purposes",
				GoName:         "GenTest" + postfix,
				TypeScriptName: "genTest" + postfix,
			}
		}

		path := strconv.Itoa(rand.Int())
		eg := EndpointGroup{}

		assert.Panics(t, func() { eg.Get(path, endpointFn(http.MethodGet)) }, "Get")
		assert.Panics(t, func() { eg.Patch(path, endpointFn(http.MethodPatch)) }, "Patch")
		assert.Panics(t, func() { eg.Post(path, endpointFn(http.MethodPost)) }, "Post")
		assert.Panics(t, func() { eg.Delete(path, endpointFn(http.MethodDelete)) }, "Delete")
	})

	t.Run("invalid endpoint", func(t *testing.T) {
		endpointFn := func(postfix string) *Endpoint {
			return &Endpoint{
				Name:           "",
				Description:    "This is an Endpoint purely for testing purposes",
				GoName:         "GenTest" + postfix,
				TypeScriptName: "genTest" + postfix,
			}
		}

		path := "/" + strconv.Itoa(rand.Int())
		eg := EndpointGroup{}

		assert.Panics(t, func() { eg.Get(path, endpointFn(http.MethodGet)) }, "Get")
		assert.Panics(t, func() { eg.Patch(path, endpointFn(http.MethodPatch)) }, "Patch")
		assert.Panics(t, func() { eg.Post(path, endpointFn(http.MethodPost)) }, "Post")
		assert.Panics(t, func() { eg.Delete(path, endpointFn(http.MethodDelete)) }, "Delete")
	})

	t.Run("endpoint duplicate path method", func(t *testing.T) {
		endpointFn := func(postfix string) *Endpoint {
			return &Endpoint{
				Name:           "Test Endpoint",
				Description:    "This is an Endpoint purely for testing purposes",
				GoName:         "GenTest" + postfix,
				TypeScriptName: "genTest" + postfix,
			}
		}

		path := "/" + strconv.Itoa(rand.Int())
		eg := EndpointGroup{}

		assert.NotPanics(t, func() { eg.Get(path, endpointFn(http.MethodGet)) }, "Get")
		assert.NotPanics(t, func() { eg.Patch(path, endpointFn(http.MethodPatch)) }, "Patch")
		assert.NotPanics(t, func() { eg.Post(path, endpointFn(http.MethodPost)) }, "Post")
		assert.NotPanics(t, func() { eg.Delete(path, endpointFn(http.MethodDelete)) }, "Delete")

		assert.Panics(t, func() { eg.Get(path, endpointFn(http.MethodGet)) }, "Get")
		assert.Panics(t, func() { eg.Patch(path, endpointFn(http.MethodPatch)) }, "Patch")
		assert.Panics(t, func() { eg.Post(path, endpointFn(http.MethodPost)) }, "Post")
		assert.Panics(t, func() { eg.Delete(path, endpointFn(http.MethodDelete)) }, "Delete")
	})

	t.Run("endpoint duplicate GoName", func(t *testing.T) {
		endpointFn := func(postfix string) *Endpoint {
			return &Endpoint{
				Name:           "Test Endpoint",
				Description:    "This is an Endpoint purely for testing purposes",
				GoName:         "GenTest",
				TypeScriptName: "genTest" + postfix,
			}
		}

		path := "/" + strconv.Itoa(rand.Int())
		eg := EndpointGroup{}

		assert.NotPanics(t, func() { eg.Get(path, endpointFn(http.MethodGet)) }, "Get")
		assert.Panics(t, func() { eg.Patch(path, endpointFn(http.MethodPatch)) }, "Patch")
		assert.Panics(t, func() { eg.Post(path, endpointFn(http.MethodPost)) }, "Post")
		assert.Panics(t, func() { eg.Delete(path, endpointFn(http.MethodDelete)) }, "Delete")
	})

	t.Run("endpoint duplicate TypeScriptName", func(t *testing.T) {
		endpointFn := func(postfix string) *Endpoint {
			return &Endpoint{
				Name:           "Test Endpoint",
				Description:    "This is an Endpoint purely for testing purposes",
				GoName:         "GenTest" + postfix,
				TypeScriptName: "genTest",
			}
		}

		path := "/" + strconv.Itoa(rand.Int())
		eg := EndpointGroup{}

		assert.NotPanics(t, func() { eg.Patch(path, endpointFn(http.MethodPatch)) }, "Patch")
		assert.Panics(t, func() { eg.Get(path, endpointFn(http.MethodGet)) }, "Get")
		assert.Panics(t, func() { eg.Post(path, endpointFn(http.MethodPost)) }, "Post")
		assert.Panics(t, func() { eg.Delete(path, endpointFn(http.MethodDelete)) }, "Delete")
	})
}
@@ -16,44 +16,196 @@ import (
 
 	"storj.io/common/uuid"
 	"storj.io/storj/private/api"
+	"storj.io/storj/private/apigen/example/myapi"
 )
 
 const dateLayout = "2006-01-02T15:04:05.999Z"
 
-var ErrTestapiAPI = errs.Class("example testapi api")
+var ErrDocsAPI = errs.Class("example docs api")
+var ErrUsersAPI = errs.Class("example users api")
 
-type TestAPIService interface {
-	GenTestAPI(ctx context.Context, path string, id uuid.UUID, date time.Time, request struct{ Content string }) (*struct {
-		ID        uuid.UUID
-		Date      time.Time
-		PathParam string
-		Body      string
-	}, api.HTTPError)
+type DocumentsService interface {
+	Get(ctx context.Context) ([]myapi.Document, api.HTTPError)
+	GetOne(ctx context.Context, path string) (*myapi.Document, api.HTTPError)
+	GetTag(ctx context.Context, path, tagName string) (*[2]string, api.HTTPError)
+	GetVersions(ctx context.Context, path string) ([]myapi.Version, api.HTTPError)
+	UpdateContent(ctx context.Context, path string, id uuid.UUID, date time.Time, request myapi.NewDocument) (*myapi.Document, api.HTTPError)
+}
+
+type UsersService interface {
+	Get(ctx context.Context) ([]myapi.User, api.HTTPError)
+	Create(ctx context.Context, request []myapi.User) api.HTTPError
 }
 
-// TestAPIHandler is an api handler that exposes all testapi related functionality.
-type TestAPIHandler struct {
+// DocumentsHandler is an api handler that implements all Documents API endpoints functionality.
+type DocumentsHandler struct {
 	log     *zap.Logger
 	mon     *monkit.Scope
-	service TestAPIService
+	service DocumentsService
 	auth    api.Auth
 }
 
-func NewTestAPI(log *zap.Logger, mon *monkit.Scope, service TestAPIService, router *mux.Router, auth api.Auth) *TestAPIHandler {
-	handler := &TestAPIHandler{
+// UsersHandler is an api handler that implements all Users API endpoints functionality.
+type UsersHandler struct {
+	log     *zap.Logger
+	mon     *monkit.Scope
+	service UsersService
+}
+
+func NewDocuments(log *zap.Logger, mon *monkit.Scope, service DocumentsService, router *mux.Router, auth api.Auth) *DocumentsHandler {
+	handler := &DocumentsHandler{
 		log:     log,
 		mon:     mon,
 		service: service,
 		auth:    auth,
 	}
 
-	testapiRouter := router.PathPrefix("/api/v0/testapi").Subrouter()
-	testapiRouter.HandleFunc("/{path}", handler.handleGenTestAPI).Methods("POST")
+	docsRouter := router.PathPrefix("/api/v0/docs").Subrouter()
+	docsRouter.HandleFunc("/", handler.handleGet).Methods("GET")
+	docsRouter.HandleFunc("/{path}", handler.handleGetOne).Methods("GET")
+	docsRouter.HandleFunc("/{path}/tag/{tagName}", handler.handleGetTag).Methods("GET")
+	docsRouter.HandleFunc("/{path}/versions", handler.handleGetVersions).Methods("GET")
+	docsRouter.HandleFunc("/{path}", handler.handleUpdateContent).Methods("POST")
 
 	return handler
 }
 
-func (h *TestAPIHandler) handleGenTestAPI(w http.ResponseWriter, r *http.Request) {
+func NewUsers(log *zap.Logger, mon *monkit.Scope, service UsersService, router *mux.Router) *UsersHandler {
+	handler := &UsersHandler{
+		log:     log,
+		mon:     mon,
+		service: service,
+	}
+
+	usersRouter := router.PathPrefix("/api/v0/users").Subrouter()
+	usersRouter.HandleFunc("/", handler.handleGet).Methods("GET")
+	usersRouter.HandleFunc("/", handler.handleCreate).Methods("POST")
+
+	return handler
+}
+
+func (h *DocumentsHandler) handleGet(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	var err error
+	defer h.mon.Task()(&ctx)(&err)
+
+	w.Header().Set("Content-Type", "application/json")
+
+	retVal, httpErr := h.service.Get(ctx)
+	if httpErr.Err != nil {
+		api.ServeError(h.log, w, httpErr.Status, httpErr.Err)
+		return
+	}
+
+	err = json.NewEncoder(w).Encode(retVal)
+	if err != nil {
+		h.log.Debug("failed to write json Get response", zap.Error(ErrDocsAPI.Wrap(err)))
+	}
+}
+
+func (h *DocumentsHandler) handleGetOne(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	var err error
+	defer h.mon.Task()(&ctx)(&err)
+
+	w.Header().Set("Content-Type", "application/json")
+
+	path, ok := mux.Vars(r)["path"]
+	if !ok {
+		api.ServeError(h.log, w, http.StatusBadRequest, errs.New("missing path route param"))
+		return
+	}
+
+	ctx, err = h.auth.IsAuthenticated(ctx, r, true, true)
+	if err != nil {
+		h.auth.RemoveAuthCookie(w)
+		api.ServeError(h.log, w, http.StatusUnauthorized, err)
+		return
+	}
+
+	retVal, httpErr := h.service.GetOne(ctx, path)
+	if httpErr.Err != nil {
+		api.ServeError(h.log, w, httpErr.Status, httpErr.Err)
+		return
+	}
+
+	err = json.NewEncoder(w).Encode(retVal)
+	if err != nil {
+		h.log.Debug("failed to write json GetOne response", zap.Error(ErrDocsAPI.Wrap(err)))
+	}
+}
+
+func (h *DocumentsHandler) handleGetTag(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	var err error
+	defer h.mon.Task()(&ctx)(&err)
+
+	w.Header().Set("Content-Type", "application/json")
+
+	path, ok := mux.Vars(r)["path"]
+	if !ok {
+		api.ServeError(h.log, w, http.StatusBadRequest, errs.New("missing path route param"))
+		return
+	}
+
+	tagName, ok := mux.Vars(r)["tagName"]
+	if !ok {
+		api.ServeError(h.log, w, http.StatusBadRequest, errs.New("missing tagName route param"))
+		return
+	}
+
+	ctx, err = h.auth.IsAuthenticated(ctx, r, true, true)
+	if err != nil {
+		h.auth.RemoveAuthCookie(w)
+		api.ServeError(h.log, w, http.StatusUnauthorized, err)
+		return
+	}
+
+	retVal, httpErr := h.service.GetTag(ctx, path, tagName)
+	if httpErr.Err != nil {
+		api.ServeError(h.log, w, httpErr.Status, httpErr.Err)
+		return
+	}
+
+	err = json.NewEncoder(w).Encode(retVal)
+	if err != nil {
+		h.log.Debug("failed to write json GetTag response", zap.Error(ErrDocsAPI.Wrap(err)))
+	}
+}
+
+func (h *DocumentsHandler) handleGetVersions(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	var err error
+	defer h.mon.Task()(&ctx)(&err)
+
+	w.Header().Set("Content-Type", "application/json")
+
+	path, ok := mux.Vars(r)["path"]
+	if !ok {
+		api.ServeError(h.log, w, http.StatusBadRequest, errs.New("missing path route param"))
+		return
+	}
+
+	ctx, err = h.auth.IsAuthenticated(ctx, r, true, true)
+	if err != nil {
+		h.auth.RemoveAuthCookie(w)
+		api.ServeError(h.log, w, http.StatusUnauthorized, err)
+		return
+	}
+
+	retVal, httpErr := h.service.GetVersions(ctx, path)
+	if httpErr.Err != nil {
+		api.ServeError(h.log, w, httpErr.Status, httpErr.Err)
+		return
+	}
+
+	err = json.NewEncoder(w).Encode(retVal)
+	if err != nil {
+		h.log.Debug("failed to write json GetVersions response", zap.Error(ErrDocsAPI.Wrap(err)))
+	}
+}
+
+func (h *DocumentsHandler) handleUpdateContent(w http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()
 	var err error
 	defer h.mon.Task()(&ctx)(&err)
@@ -90,7 +242,7 @@ func (h *TestAPIHandler) handleGenTestAPI(w http.ResponseWriter, r *http.Request
 		return
 	}
 
-	payload := struct{ Content string }{}
+	payload := myapi.NewDocument{}
 	if err = json.NewDecoder(r.Body).Decode(&payload); err != nil {
 		api.ServeError(h.log, w, http.StatusBadRequest, err)
 		return
@@ -103,7 +255,7 @@ func (h *TestAPIHandler) handleGenTestAPI(w http.ResponseWriter, r *http.Request
 		return
 	}
 
-	retVal, httpErr := h.service.GenTestAPI(ctx, path, id, date, payload)
+	retVal, httpErr := h.service.UpdateContent(ctx, path, id, date, payload)
 	if httpErr.Err != nil {
 		api.ServeError(h.log, w, httpErr.Status, httpErr.Err)
 		return
@@ -111,6 +263,44 @@ func (h *TestAPIHandler) handleGenTestAPI(w http.ResponseWriter, r *http.Request
 
 	err = json.NewEncoder(w).Encode(retVal)
 	if err != nil {
-		h.log.Debug("failed to write json GenTestAPI response", zap.Error(ErrTestapiAPI.Wrap(err)))
+		h.log.Debug("failed to write json UpdateContent response", zap.Error(ErrDocsAPI.Wrap(err)))
 	}
 }
+
+func (h *UsersHandler) handleGet(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	var err error
+	defer h.mon.Task()(&ctx)(&err)
+
+	w.Header().Set("Content-Type", "application/json")
+
+	retVal, httpErr := h.service.Get(ctx)
+	if httpErr.Err != nil {
+		api.ServeError(h.log, w, httpErr.Status, httpErr.Err)
+		return
+	}
+
+	err = json.NewEncoder(w).Encode(retVal)
+	if err != nil {
+		h.log.Debug("failed to write json Get response", zap.Error(ErrUsersAPI.Wrap(err)))
+	}
+}
+
+func (h *UsersHandler) handleCreate(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	var err error
+	defer h.mon.Task()(&ctx)(&err)
+
+	w.Header().Set("Content-Type", "application/json")
+
+	payload := []myapi.User{}
+	if err = json.NewDecoder(r.Body).Decode(&payload); err != nil {
+		api.ServeError(h.log, w, http.StatusBadRequest, err)
+		return
+	}
+
+	httpErr := h.service.Create(ctx, payload)
+	if httpErr.Err != nil {
+		api.ServeError(h.log, w, httpErr.Status, httpErr.Err)
+	}
+}
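The generated handlers only route, authenticate, and decode; the business logic lives behind the service interfaces. A hedged sketch of satisfying one method of DocumentsService (the example's real service isn't shown in this diff, and the api.HTTPError literal assumes its exported Status and Err fields, which the generated handlers read):

```go
package example

import (
	"context"
	"net/http"

	"github.com/zeebo/errs"

	"storj.io/storj/private/api"
	"storj.io/storj/private/apigen/example/myapi"
)

// docsService is a hypothetical in-memory DocumentsService implementation;
// only Get is sketched here, the remaining interface methods follow the same shape.
type docsService struct {
	docs []myapi.Document
}

func (s *docsService) Get(ctx context.Context) ([]myapi.Document, api.HTTPError) {
	if len(s.docs) == 0 {
		// api.HTTPError carries the status and error that the generated handler serves.
		return nil, api.HTTPError{Status: http.StatusNotFound, Err: errs.New("no documents")}
	}
	return s.docs, api.HTTPError{} // zero value signals success (Err == nil)
}
```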
private/apigen/example/apidocs.gen.md (new file, 224 lines)

# API Docs

**Version:** `v0`

<h2 id='list-of-endpoints'>List of Endpoints</h2>

* Documents
  * [Get Documents](#documents-get-documents)
  * [Get One](#documents-get-one)
  * [Get a tag](#documents-get-a-tag)
  * [Get Version](#documents-get-version)
  * [Update Content](#documents-update-content)
* Users
  * [Get Users](#users-get-users)
  * [Create User](#users-create-user)

<h3 id='documents-get-documents'>Get Documents (<a href='#list-of-endpoints'>go to full list</a>)</h3>

Get the paths to all the documents under the specified paths

`GET /api/v0/docs/`

**Response body:**

```typescript
[
	{
		id: string // UUID formatted as `00000000-0000-0000-0000-000000000000`
		date: string // Date timestamp formatted as `2006-01-02T15:00:00Z`
		pathParam: string
		body: string
		version: {
			date: string // Date timestamp formatted as `2006-01-02T15:00:00Z`
			number: number
		}
		metadata: {
			owner: string
			tags: [
				unknown
			]
		}
	}
]
```

<h3 id='documents-get-one'>Get One (<a href='#list-of-endpoints'>go to full list</a>)</h3>

Get the document in the specified path

`GET /api/v0/docs/{path}`

**Path Params:**

| name | type | elaboration |
|---|---|---|
| `path` | `string` |  |

**Response body:**

```typescript
{
	id: string // UUID formatted as `00000000-0000-0000-0000-000000000000`
	date: string // Date timestamp formatted as `2006-01-02T15:00:00Z`
	pathParam: string
	body: string
	version: {
		date: string // Date timestamp formatted as `2006-01-02T15:00:00Z`
		number: number
	}
	metadata: {
		owner: string
		tags: [
			unknown
		]
	}
}
```

<h3 id='documents-get-a-tag'>Get a tag (<a href='#list-of-endpoints'>go to full list</a>)</h3>

Get the tag of the document in the specified path and tag label

`GET /api/v0/docs/{path}/tag/{tagName}`

**Path Params:**

| name | type | elaboration |
|---|---|---|
| `path` | `string` |  |
| `tagName` | `string` |  |

**Response body:**

```typescript
unknown
```

<h3 id='documents-get-version'>Get Version (<a href='#list-of-endpoints'>go to full list</a>)</h3>

Get all the version of the document in the specified path

`GET /api/v0/docs/{path}/versions`

**Path Params:**

| name | type | elaboration |
|---|---|---|
| `path` | `string` |  |

**Response body:**

```typescript
[
	{
		date: string // Date timestamp formatted as `2006-01-02T15:00:00Z`
		number: number
	}
]
```

<h3 id='documents-update-content'>Update Content (<a href='#list-of-endpoints'>go to full list</a>)</h3>

Update the content of the document with the specified path and ID if the last update is before the indicated date

`POST /api/v0/docs/{path}`

**Query Params:**

| name | type | elaboration |
|---|---|---|
| `id` | `string` | UUID formatted as `00000000-0000-0000-0000-000000000000` |
| `date` | `string` | Date timestamp formatted as `2006-01-02T15:00:00Z` |

**Path Params:**

| name | type | elaboration |
|---|---|---|
| `path` | `string` |  |

**Request body:**

```typescript
{
	content: string
}
```

**Response body:**

```typescript
{
	id: string // UUID formatted as `00000000-0000-0000-0000-000000000000`
	date: string // Date timestamp formatted as `2006-01-02T15:00:00Z`
	pathParam: string
	body: string
	version: {
		date: string // Date timestamp formatted as `2006-01-02T15:00:00Z`
		number: number
	}
	metadata: {
		owner: string
		tags: [
			unknown
		]
	}
}
```

<h3 id='users-get-users'>Get Users (<a href='#list-of-endpoints'>go to full list</a>)</h3>

Get the list of registered users

`GET /api/v0/users/`

**Response body:**

```typescript
[
	{
		name: string
		surname: string
		email: string
	}
]
```

<h3 id='users-create-user'>Create User (<a href='#list-of-endpoints'>go to full list</a>)</h3>

Create a user

`POST /api/v0/users/`

**Request body:**

```typescript
[
	{
		name: string
		surname: string
		email: string
	}
]
```
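For orientation, the Update Content route combines path, query, and body parameters. A minimal sketch of calling it directly over HTTP (the host, document path, and values here are placeholders):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical host; id and date ride in the query string, the path segment
	// identifies the document, and NewDocument's single field goes in the body.
	url := "http://localhost:8080/api/v0/docs/notes.md" +
		"?id=00000000-0000-0000-0000-000000000000&date=2006-01-02T15:00:00Z"
	body := bytes.NewBufferString(`{"content": "## Notes"}`)

	resp, err := http.Post(url, "application/json", body)
	if err != nil {
		panic(err)
	}
	defer func() { _ = resp.Body.Close() }()
	fmt.Println(resp.Status) // the updated Document is returned as JSON
}
```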
private/apigen/example/client-api-mock.gen.ts (new file, 137 lines)

// AUTOGENERATED BY private/apigen
// DO NOT EDIT.
import { Time, UUID } from '@/types/common';

export class Document {
    id: UUID;
    date: Time;
    pathParam: string;
    body: string;
    version: Version;
    metadata: Metadata;
}

export class Metadata {
    owner?: string;
    tags: string[][] | null;
}

export class NewDocument {
    content: string;
}

export class User {
    name: string;
    surname: string;
    email: string;
}

export class Version {
    date: Time;
    number: number;
}

class APIError extends Error {
    constructor(
        public readonly msg: string,
        public readonly responseStatusCode?: number,
    ) {
        super(msg);
    }
}

export class DocumentsHttpApiV0 {
    public readonly respStatusCode: number;

    // When respStatuscode is passed, the client throws an APIError on each method call
    // with respStatusCode as HTTP status code.
    // respStatuscode must be equal or greater than 400
    constructor(respStatusCode?: number) {
        if (typeof respStatusCode === 'undefined') {
            this.respStatusCode = 0;
            return;
        }

        if (respStatusCode < 400) {
            throw new Error('invalid response status code for API Error, it must be greater or equal than 400');
        }

        this.respStatusCode = respStatusCode;
    }

    public async get(): Promise<Document[]> {
        if (this.respStatusCode !== 0) {
            throw new APIError('mock error message: ' + this.respStatusCode, this.respStatusCode);
        }

        return JSON.parse('[{"id":"00000000-0000-0000-0000-000000000000","date":"0001-01-01T00:00:00Z","pathParam":"/workspace/notes.md","body":"","version":{"date":"0001-01-01T00:00:00Z","number":0},"metadata":{"owner":"Storj","tags":[["category","general"]]}}]') as Document[];
    }

    public async getOne(path: string): Promise<Document> {
        if (this.respStatusCode !== 0) {
            throw new APIError('mock error message: ' + this.respStatusCode, this.respStatusCode);
        }

        return JSON.parse('{"id":"00000000-0000-0000-0000-000000000000","date":"2001-02-02T04:05:06.000000007Z","pathParam":"ID","body":"## Notes","version":{"date":"2001-02-03T03:35:06.000000007Z","number":1},"metadata":{"tags":null}}') as Document;
    }

    public async getTag(path: string, tagName: string): Promise<string[]> {
        if (this.respStatusCode !== 0) {
            throw new APIError('mock error message: ' + this.respStatusCode, this.respStatusCode);
        }

        return JSON.parse('["category","notes"]') as string[];
    }

    public async getVersions(path: string): Promise<Version[]> {
        if (this.respStatusCode !== 0) {
            throw new APIError('mock error message: ' + this.respStatusCode, this.respStatusCode);
        }

        return JSON.parse('[{"date":"2001-01-19T04:05:06.000000007Z","number":1},{"date":"2001-02-02T23:05:06.000000007Z","number":2}]') as Version[];
    }

    public async updateContent(request: NewDocument, path: string, id: UUID, date: Time): Promise<Document> {
        if (this.respStatusCode !== 0) {
            throw new APIError('mock error message: ' + this.respStatusCode, this.respStatusCode);
        }

        return JSON.parse('{"id":"00000000-0000-0000-0000-000000000000","date":"2001-02-03T04:05:06.000000007Z","pathParam":"ID","body":"## Notes\n### General","version":{"date":"0001-01-01T00:00:00Z","number":0},"metadata":{"tags":null}}') as Document;
    }
}

export class UsersHttpApiV0 {
    public readonly respStatusCode: number;

    // When respStatuscode is passed, the client throws an APIError on each method call
    // with respStatusCode as HTTP status code.
    // respStatuscode must be equal or greater than 400
    constructor(respStatusCode?: number) {
        if (typeof respStatusCode === 'undefined') {
            this.respStatusCode = 0;
            return;
        }

        if (respStatusCode < 400) {
            throw new Error('invalid response status code for API Error, it must be greater or equal than 400');
        }

        this.respStatusCode = respStatusCode;
    }

    public async get(): Promise<User[]> {
        if (this.respStatusCode !== 0) {
            throw new APIError('mock error message: ' + this.respStatusCode, this.respStatusCode);
        }

        return JSON.parse('[{"name":"Storj","surname":"Labs","email":"storj@storj.test"},{"name":"Test1","surname":"Testing","email":"test1@example.test"},{"name":"Test2","surname":"Testing","email":"test2@example.test"}]') as User[];
    }

    public async create(request: User[]): Promise<void> {
        if (this.respStatusCode !== 0) {
            throw new APIError('mock error message: ' + this.respStatusCode, this.respStatusCode);
        }

        return;
    }
}
private/apigen/example/client-api.gen.ts (new file, 126 lines)

// AUTOGENERATED BY private/apigen
// DO NOT EDIT.

import { HttpClient } from '@/utils/httpClient';
import { Time, UUID } from '@/types/common';

export class Document {
    id: UUID;
    date: Time;
    pathParam: string;
    body: string;
    version: Version;
    metadata: Metadata;
}

export class Metadata {
    owner?: string;
    tags: string[][] | null;
}

export class NewDocument {
    content: string;
}

export class User {
    name: string;
    surname: string;
    email: string;
}

export class Version {
    date: Time;
    number: number;
}

class APIError extends Error {
    constructor(
        public readonly msg: string,
        public readonly responseStatusCode?: number,
    ) {
        super(msg);
    }
}

export class DocumentsHttpApiV0 {
    private readonly http: HttpClient = new HttpClient();
    private readonly ROOT_PATH: string = '/api/v0/docs';

    public async get(): Promise<Document[]> {
        const fullPath = `${this.ROOT_PATH}/`;
        const response = await this.http.get(fullPath);
        if (response.ok) {
            return response.json().then((body) => body as Document[]);
        }
        const err = await response.json();
        throw new APIError(err.error, response.status);
    }

    public async getOne(path: string): Promise<Document> {
        const fullPath = `${this.ROOT_PATH}/${path}`;
        const response = await this.http.get(fullPath);
        if (response.ok) {
            return response.json().then((body) => body as Document);
        }
        const err = await response.json();
        throw new APIError(err.error, response.status);
    }

    public async getTag(path: string, tagName: string): Promise<string[]> {
        const fullPath = `${this.ROOT_PATH}/${path}/${tagName}`;
        const response = await this.http.get(fullPath);
        if (response.ok) {
            return response.json().then((body) => body as string[]);
        }
        const err = await response.json();
        throw new APIError(err.error, response.status);
    }

    public async getVersions(path: string): Promise<Version[]> {
        const fullPath = `${this.ROOT_PATH}/${path}`;
        const response = await this.http.get(fullPath);
        if (response.ok) {
            return response.json().then((body) => body as Version[]);
        }
        const err = await response.json();
        throw new APIError(err.error, response.status);
    }

    public async updateContent(request: NewDocument, path: string, id: UUID, date: Time): Promise<Document> {
        const u = new URL(`${this.ROOT_PATH}/${path}`, window.location.href);
        u.searchParams.set('id', id);
        u.searchParams.set('date', date);
        const fullPath = u.toString();
        const response = await this.http.post(fullPath, JSON.stringify(request));
        if (response.ok) {
            return response.json().then((body) => body as Document);
        }
        const err = await response.json();
        throw new APIError(err.error, response.status);
    }
}

export class UsersHttpApiV0 {
    private readonly http: HttpClient = new HttpClient();
    private readonly ROOT_PATH: string = '/api/v0/users';

    public async get(): Promise<User[]> {
        const fullPath = `${this.ROOT_PATH}/`;
        const response = await this.http.get(fullPath);
        if (response.ok) {
            return response.json().then((body) => body as User[]);
        }
        const err = await response.json();
        throw new APIError(err.error, response.status);
    }

    public async create(request: User[]): Promise<void> {
        const fullPath = `${this.ROOT_PATH}/`;
        const response = await this.http.post(fullPath, JSON.stringify(request));
        if (response.ok) {
            return;
        }
        const err = await response.json();
        throw new APIError(err.error, response.status);
    }
}
@@ -7,26 +7,109 @@
 package main
 
 import (
+	"fmt"
+	"net/http"
 	"time"
 
+	"go.uber.org/zap"
+
 	"storj.io/common/uuid"
 
+	"storj.io/storj/private/api"
 	"storj.io/storj/private/apigen"
+	"storj.io/storj/private/apigen/example/myapi"
 )
 
 func main() {
-	a := &apigen.API{PackageName: "example"}
+	a := &apigen.API{
+		PackagePath: "storj.io/storj/private/apigen/example",
+		Version:     "v0",
+		BasePath:    "/api",
+	}
 
-	g := a.Group("TestAPI", "testapi")
+	g := a.Group("Documents", "docs")
+	g.Middleware = append(g.Middleware,
+		authMiddleware{},
+	)
+
+	now := time.Date(2001, 02, 03, 04, 05, 06, 07, time.UTC)
+
+	g.Get("/", &apigen.Endpoint{
+		Name:           "Get Documents",
+		Description:    "Get the paths to all the documents under the specified paths",
+		GoName:         "Get",
+		TypeScriptName: "get",
+		Response:       []myapi.Document{},
+		ResponseMock: []myapi.Document{{
+			ID:        uuid.UUID{},
+			PathParam: "/workspace/notes.md",
+			Metadata: myapi.Metadata{
+				Owner: "Storj",
+				Tags:  [][2]string{{"category", "general"}},
+			},
+		}},
+		Settings: map[any]any{
+			NoAPIKey: true,
+			NoCookie: true,
+		},
+	})
+
+	g.Get("/{path}", &apigen.Endpoint{
+		Name:           "Get One",
+		Description:    "Get the document in the specified path",
+		GoName:         "GetOne",
+		TypeScriptName: "getOne",
+		Response:       myapi.Document{},
+		PathParams: []apigen.Param{
+			apigen.NewParam("path", ""),
+		},
+		ResponseMock: myapi.Document{
+			ID:        uuid.UUID{},
+			Date:      now.Add(-24 * time.Hour),
+			PathParam: "ID",
+			Body:      "## Notes",
+			Version: myapi.Version{
+				Date:   now.Add(-30 * time.Minute),
+				Number: 1,
+			},
+		},
+	})
+
+	g.Get("/{path}/tag/{tagName}", &apigen.Endpoint{
+		Name:           "Get a tag",
+		Description:    "Get the tag of the document in the specified path and tag label",
+		GoName:         "GetTag",
+		TypeScriptName: "getTag",
+		Response:       [2]string{},
+		PathParams: []apigen.Param{
+			apigen.NewParam("path", ""),
+			apigen.NewParam("tagName", ""),
+		},
+		ResponseMock: [2]string{"category", "notes"},
+	})
+
+	g.Get("/{path}/versions", &apigen.Endpoint{
+		Name:           "Get Version",
+		Description:    "Get all the version of the document in the specified path",
+		GoName:         "GetVersions",
+		TypeScriptName: "getVersions",
+		Response:       []myapi.Version{},
+		PathParams: []apigen.Param{
+			apigen.NewParam("path", ""),
+		},
+		ResponseMock: []myapi.Version{
+			{Date: now.Add(-360 * time.Hour), Number: 1},
+			{Date: now.Add(-5 * time.Hour), Number: 2},
+		},
+	})
+
 	g.Post("/{path}", &apigen.Endpoint{
-		MethodName: "GenTestAPI",
-		Response: struct {
-			ID        uuid.UUID
-			Date      time.Time
-			PathParam string
-			Body      string
-		}{},
-		Request: struct{ Content string }{},
+		Name:           "Update Content",
+		Description:    "Update the content of the document with the specified path and ID if the last update is before the indicated date",
+		GoName:         "UpdateContent",
+		TypeScriptName: "updateContent",
+		Response:       myapi.Document{},
+		Request:        myapi.NewDocument{},
 		QueryParams: []apigen.Param{
 			apigen.NewParam("id", uuid.UUID{}),
 			apigen.NewParam("date", time.Time{}),
@@ -34,7 +117,79 @@ func main() {
 		PathParams: []apigen.Param{
 			apigen.NewParam("path", ""),
 		},
+		ResponseMock: myapi.Document{
+			ID:        uuid.UUID{},
+			Date:      now,
+			PathParam: "ID",
+			Body:      "## Notes\n### General",
+		},
 	})
+
+	g = a.Group("Users", "users")
+
+	g.Get("/", &apigen.Endpoint{
+		Name:           "Get Users",
+		Description:    "Get the list of registered users",
+		GoName:         "Get",
+		TypeScriptName: "get",
+		Response:       []myapi.User{},
+		ResponseMock: []myapi.User{
+			{Name: "Storj", Surname: "Labs", Email: "storj@storj.test"},
+			{Name: "Test1", Surname: "Testing", Email: "test1@example.test"},
+			{Name: "Test2", Surname: "Testing", Email: "test2@example.test"},
+		},
+	})
+
+	g.Post("/", &apigen.Endpoint{
+		Name:           "Create User",
+		Description:    "Create a user",
+		GoName:         "Create",
+		TypeScriptName: "create",
+		Request:        []myapi.User{},
 	})
 
 	a.MustWriteGo("api.gen.go")
+	a.MustWriteTS("client-api.gen.ts")
+	a.MustWriteTSMock("client-api-mock.gen.ts")
+	a.MustWriteDocs("apidocs.gen.md")
 }
+
+// authMiddleware customize endpoints to authenticate requests by API Key or Cookie.
+type authMiddleware struct {
+	log  *zap.Logger
+	auth api.Auth
+	_    http.ResponseWriter // Import the http package to use its HTTP status constants
+}
+
+// Generate satisfies the apigen.Middleware.
+func (a authMiddleware) Generate(api *apigen.API, group *apigen.EndpointGroup, ep *apigen.FullEndpoint) string {
+	noapikey := apigen.LoadSetting(NoAPIKey, ep, false)
+	nocookie := apigen.LoadSetting(NoCookie, ep, false)
+
+	if noapikey && nocookie {
+		return ""
+	}
+
+	return fmt.Sprintf(`ctx, err = h.auth.IsAuthenticated(ctx, r, %t, %t)
+	if err != nil {
+		h.auth.RemoveAuthCookie(w)
+		api.ServeError(h.log, w, http.StatusUnauthorized, err)
+		return
+	}`, !nocookie, !noapikey)
+}
+
+var _ apigen.Middleware = authMiddleware{}
+
+type (
+	tagNoAPIKey struct{}
+	tagNoCookie struct{}
+)
+
+var (
+	// NoAPIKey is the key for endpoint settings to indicate that it doesn't use API Key
+	// authentication mechanism.
+	NoAPIKey tagNoAPIKey
+	// NoCookie is the key for endpoint settings to indicate that it doesn't use cookie authentication
+	// mechanism.
+	NoCookie tagNoCookie
)
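The Settings map and the tagNoAPIKey/tagNoCookie key types above rely on a generic, typed-key lookup. The exact signature of apigen.LoadSetting is not shown in this diff, so the following is only a minimal standalone sketch of how such a lookup can work; endpoint and loadSetting here are illustrative stand-ins, not the real apigen API.

	package main

	import "fmt"

	type tagNoAPIKey struct{}

	// NoAPIKey is a typed settings key; using an empty struct type avoids
	// collisions between keys defined by different packages.
	var NoAPIKey tagNoAPIKey

	// endpoint is a stand-in for apigen.FullEndpoint with its Settings map.
	type endpoint struct {
		Settings map[any]any
	}

	// loadSetting returns the value stored under key, falling back to def
	// when the key is absent or holds a value of the wrong type.
	func loadSetting[V any](key any, ep endpoint, def V) V {
		if v, ok := ep.Settings[key].(V); ok {
			return v
		}
		return def
	}

	func main() {
		ep := endpoint{Settings: map[any]any{NoAPIKey: true}}
		fmt.Println(loadSetting(NoAPIKey, ep, false))  // true
		fmt.Println(loadSetting("missing", ep, false)) // false
	}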
private/apigen/example/myapi/types.go (new file, 44 lines)
@@ -0,0 +1,44 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package myapi

import (
	"time"

	"storj.io/common/uuid"
)

// Document is a retrieved document.
type Document struct {
	ID        uuid.UUID `json:"id"`
	Date      time.Time `json:"date"`
	PathParam string    `json:"pathParam"`
	Body      string    `json:"body"`
	Version   Version   `json:"version"`
	Metadata  Metadata  `json:"metadata"`
}

// Version is document version.
type Version struct {
	Date   time.Time `json:"date"`
	Number uint      `json:"number"`
}

// Metadata is metadata associated to a document.
type Metadata struct {
	Owner string      `json:"owner,omitempty"`
	Tags  [][2]string `json:"tags"`
}

// NewDocument contains the content the data to create a new document.
type NewDocument struct {
	Content string `json:"content"`
}

// User contains information of a user.
type User struct {
	Name    string `json:"name"`
	Surname string `json:"surname"`
	Email   string `json:"email"`
}
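These struct tags are what tie the Go types to the generated TypeScript classes shown earlier: lowercase JSON keys, and omitempty on Owner. A quick way to see the wire shape is to marshal a value; this sketch substitutes a plain string for uuid.UUID so it stays dependency-free.

	package main

	import (
		"encoding/json"
		"fmt"
		"time"
	)

	// document mirrors myapi.Document's json tags, with uuid.UUID replaced
	// by a string purely for this illustration.
	type document struct {
		ID        string    `json:"id"`
		Date      time.Time `json:"date"`
		PathParam string    `json:"pathParam"`
		Body      string    `json:"body"`
	}

	func main() {
		out, _ := json.Marshal(document{PathParam: "/workspace/notes.md", Body: "## Notes"})
		fmt.Println(string(out))
		// {"id":"","date":"0001-01-01T00:00:00Z","pathParam":"/workspace/notes.md","body":"## Notes"}
	}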
@@ -4,16 +4,16 @@
 package apigen
 
 import (
+	"fmt"
 	"go/format"
 	"os"
+	"path/filepath"
 	"reflect"
-	"sort"
+	"slices"
 	"strings"
 	"time"
 
 	"github.com/zeebo/errs"
-	"golang.org/x/text/cases"
-	"golang.org/x/text/language"
 
 	"storj.io/common/uuid"
 )
@@ -22,10 +22,11 @@ import (
 const DateFormat = "2006-01-02T15:04:05.999Z"
 
 // MustWriteGo writes generated Go code into a file.
+// If an error occurs, it panics.
 func (a *API) MustWriteGo(path string) {
 	generated, err := a.generateGo()
 	if err != nil {
-		panic(errs.Wrap(err))
+		panic(err)
 	}
 
 	err = os.WriteFile(path, generated, 0644)
@@ -39,32 +40,38 @@ func (a *API) generateGo() ([]byte, error) {
 	result := &StringBuilder{}
 	pf := result.Writelnf
 
-	getPackageName := func(path string) string {
-		pathPackages := strings.Split(path, "/")
-		return pathPackages[len(pathPackages)-1]
+	if a.PackagePath == "" {
+		return nil, errs.New("Package path must be defined")
+	}
+
+	packageName := a.PackageName
+	if packageName == "" {
+		parts := strings.Split(a.PackagePath, "/")
+		packageName = parts[len(parts)-1]
 	}
 
 	imports := struct {
-		All      map[string]bool
-		Standard []string
-		External []string
-		Internal []string
+		All      map[importPath]bool
+		Standard []importPath
+		External []importPath
+		Internal []importPath
 	}{
-		All: make(map[string]bool),
+		All: make(map[importPath]bool),
 	}
 
 	i := func(paths ...string) {
 		for _, path := range paths {
-			if path == "" || getPackageName(path) == a.PackageName {
+			if path == "" || path == a.PackagePath {
 				continue
 			}
 
-			if _, ok := imports.All[path]; ok {
+			ipath := importPath(path)
+			if _, ok := imports.All[ipath]; ok {
 				continue
 			}
-			imports.All[path] = true
+			imports.All[ipath] = true
 
-			var slice *[]string
+			var slice *[]importPath
 			switch {
 			case !strings.Contains(path, "."):
 				slice = &imports.Standard
@@ -73,7 +80,7 @@ func (a *API) generateGo() ([]byte, error) {
 			default:
 				slice = &imports.External
 			}
-			*slice = append(*slice, path)
+			*slice = append(*slice, ipath)
 		}
 	}
 
@@ -100,15 +107,25 @@ func (a *API) generateGo() ([]byte, error) {
 
 	for _, group := range a.EndpointGroups {
 		i("github.com/zeebo/errs")
-		pf("var Err%sAPI = errs.Class(\"%s %s api\")", cases.Title(language.Und).String(group.Prefix), a.PackageName, group.Prefix)
+		pf(
+			"var Err%sAPI = errs.Class(\"%s %s api\")",
+			capitalize(group.Prefix),
+			packageName,
+			strings.ToLower(group.Prefix),
+		)
+
+		for _, m := range group.Middleware {
+			i(middlewareImports(m)...)
+		}
 	}
 
 	pf("")
 
-	params := make(map[*fullEndpoint][]Param)
+	params := make(map[*FullEndpoint][]Param)
 
 	for _, group := range a.EndpointGroups {
-		pf("type %sService interface {", group.Name)
+		// Define the service interface
+		pf("type %sService interface {", capitalize(group.Name))
 		for _, e := range group.endpoints {
 			params[e] = append(e.PathParams, e.QueryParams...)
 
@@ -131,9 +148,9 @@ func (a *API) generateGo() ([]byte, error) {
 			if !isNillableType(responseType) {
 				returnParam = "*" + returnParam
 			}
-			pf("%s(ctx context.Context, "+paramStr+") (%s, api.HTTPError)", e.MethodName, returnParam)
+			pf("%s(ctx context.Context, "+paramStr+") (%s, api.HTTPError)", e.GoName, returnParam)
 		} else {
-			pf("%s(ctx context.Context, "+paramStr+") (api.HTTPError)", e.MethodName)
+			pf("%s(ctx context.Context, "+paramStr+") (api.HTTPError)", e.GoName)
 		}
 	}
 	pf("}")
@@ -141,36 +158,104 @@ func (a *API) generateGo() ([]byte, error) {
 	}
 
 	for _, group := range a.EndpointGroups {
+		cname := capitalize(group.Name)
 		i("go.uber.org/zap", "github.com/spacemonkeygo/monkit/v3")
-		pf("// %sHandler is an api handler that exposes all %s related functionality.", group.Name, group.Prefix)
-		pf("type %sHandler struct {", group.Name)
+		pf(
+			"// %sHandler is an api handler that implements all %s API endpoints functionality.",
+			cname,
+			group.Name,
+		)
+		pf("type %sHandler struct {", cname)
 		pf("log *zap.Logger")
 		pf("mon *monkit.Scope")
-		pf("service %sService", group.Name)
-		pf("auth api.Auth")
+		pf("service %sService", cname)
+
+		autodefinedFields := map[string]string{"log": "*zap.Logger", "mon": "*monkit.Scope", "service": cname + "Service"}
+		for _, m := range group.Middleware {
+			for _, f := range middlewareFields(a, m) {
+				if t, ok := autodefinedFields[f.Name]; ok {
+					if t != f.Type {
+						panic(
+							fmt.Sprintf(
+								"middleware %q has a field with name %q and type %q which clashes with another defined field with the same name but with type %q",
+								reflect.TypeOf(m).Name(),
+								f.Name,
+								f.Type,
+								t,
+							),
+						)
+					}
+
+					continue
+				}
+				autodefinedFields[f.Name] = f.Type
+				pf("%s %s", f.Name, f.Type)
+			}
+		}
+
 		pf("}")
 		pf("")
 	}
 
 	for _, group := range a.EndpointGroups {
+		cname := capitalize(group.Name)
 		i("github.com/gorilla/mux")
-		pf(
-			"func New%s(log *zap.Logger, mon *monkit.Scope, service %sService, router *mux.Router, auth api.Auth) *%sHandler {",
-			group.Name,
-			group.Name,
-			group.Name,
-		)
-		pf("handler := &%sHandler{", group.Name)
+
+		autodedefined := map[string]struct{}{"log": {}, "mon": {}, "service": {}}
+		middlewareArgs := make([]string, 0, len(group.Middleware))
+		middlewareFieldsList := make([]string, 0, len(group.Middleware))
+		for _, m := range group.Middleware {
+			for _, f := range middlewareFields(a, m) {
+				if _, ok := autodedefined[f.Name]; !ok {
+					middlewareArgs = append(middlewareArgs, fmt.Sprintf("%s %s", f.Name, f.Type))
+					middlewareFieldsList = append(middlewareFieldsList, fmt.Sprintf("%[1]s: %[1]s", f.Name))
+				}
+			}
+		}
+
+		if len(middlewareArgs) > 0 {
+			pf(
+				"func New%s(log *zap.Logger, mon *monkit.Scope, service %sService, router *mux.Router, %s) *%sHandler {",
+				cname,
+				cname,
+				strings.Join(middlewareArgs, ", "),
+				cname,
+			)
+		} else {
+			pf(
+				"func New%s(log *zap.Logger, mon *monkit.Scope, service %sService, router *mux.Router) *%sHandler {",
+				cname,
+				cname,
+				cname,
+			)
+		}
+
+		pf("handler := &%sHandler{", cname)
 		pf("log: log,")
 		pf("mon: mon,")
 		pf("service: service,")
-		pf("auth: auth,")
+
+		if len(middlewareFieldsList) > 0 {
+			pf(strings.Join(middlewareFieldsList, ",") + ",")
+		}
+
 		pf("}")
 		pf("")
-		pf("%sRouter := router.PathPrefix(\"/api/v0/%s\").Subrouter()", group.Prefix, group.Prefix)
+		pf(
+			"%sRouter := router.PathPrefix(\"%s/%s\").Subrouter()",
+			uncapitalize(group.Prefix),
+			a.endpointBasePath(),
+			strings.ToLower(group.Prefix),
+		)
 		for _, endpoint := range group.endpoints {
-			handlerName := "handle" + endpoint.MethodName
-			pf("%sRouter.HandleFunc(\"%s\", handler.%s).Methods(\"%s\")", group.Prefix, endpoint.Path, handlerName, endpoint.Method)
+			handlerName := "handle" + endpoint.GoName
+			pf(
+				"%sRouter.HandleFunc(\"%s\", handler.%s).Methods(\"%s\")",
+				uncapitalize(group.Prefix),
+				endpoint.Path,
+				handlerName,
+				endpoint.Method,
+			)
 		}
 		pf("")
 		pf("return handler")
@@ -182,13 +267,12 @@ func (a *API) generateGo() ([]byte, error) {
 	for _, endpoint := range group.endpoints {
 		i("net/http")
 		pf("")
-		handlerName := "handle" + endpoint.MethodName
-		pf("func (h *%sHandler) %s(w http.ResponseWriter, r *http.Request) {", group.Name, handlerName)
+		handlerName := "handle" + endpoint.GoName
+		pf("func (h *%sHandler) %s(w http.ResponseWriter, r *http.Request) {", capitalize(group.Name), handlerName)
 		pf("ctx := r.Context()")
 		pf("var err error")
 		pf("defer h.mon.Task()(&ctx)(&err)")
 		pf("")
 
 		pf("w.Header().Set(\"Content-Type\", \"application/json\")")
 		pf("")
 
@@ -200,17 +284,10 @@ func (a *API) generateGo() ([]byte, error) {
 			handleBody(pf, endpoint.Request)
 		}
 
-		if !endpoint.NoCookieAuth || !endpoint.NoAPIAuth {
-			pf("ctx, err = h.auth.IsAuthenticated(ctx, r, %v, %v)", !endpoint.NoCookieAuth, !endpoint.NoAPIAuth)
-			pf("if err != nil {")
-			if !endpoint.NoCookieAuth {
-				pf("h.auth.RemoveAuthCookie(w)")
-			}
-			pf("api.ServeError(h.log, w, http.StatusUnauthorized, err)")
-			pf("return")
-			pf("}")
-			pf("")
+		for _, m := range group.Middleware {
+			pf(m.Generate(a, group, endpoint))
 		}
+		pf("")
 
 		var methodFormat string
 		if endpoint.Response != nil {
@@ -227,7 +304,7 @@ func (a *API) generateGo() ([]byte, error) {
 		}
 
 		methodFormat += ")"
-		pf(methodFormat, endpoint.MethodName)
+		pf(methodFormat, endpoint.GoName)
 		pf("if httpErr.Err != nil {")
 		pf("api.ServeError(h.log, w, httpErr.Status, httpErr.Err)")
 		if endpoint.Response == nil {
@@ -242,7 +319,11 @@ func (a *API) generateGo() ([]byte, error) {
 		pf("")
 		pf("err = json.NewEncoder(w).Encode(retVal)")
 		pf("if err != nil {")
-		pf("h.log.Debug(\"failed to write json %s response\", zap.Error(Err%sAPI.Wrap(err)))", endpoint.MethodName, cases.Title(language.Und).String(group.Prefix))
+		pf(
+			"h.log.Debug(\"failed to write json %s response\", zap.Error(Err%sAPI.Wrap(err)))",
+			endpoint.GoName,
+			capitalize(group.Prefix),
+		)
 		pf("}")
 		pf("}")
 	}
@@ -256,16 +337,21 @@ func (a *API) generateGo() ([]byte, error) {
 	pf("// DO NOT EDIT.")
 	pf("")
 
-	pf("package %s", a.PackageName)
+	pf("package %s", packageName)
 	pf("")
 
 	pf("import (")
-	slices := [][]string{imports.Standard, imports.External, imports.Internal}
-	for sn, slice := range slices {
-		sort.Strings(slice)
+	all := [][]importPath{imports.Standard, imports.External, imports.Internal}
+	for sn, slice := range all {
+		slices.Sort(slice)
 		for pn, path := range slice {
-			pf(`"%s"`, path)
-			if pn == len(slice)-1 && sn < len(slices)-1 {
+			if r, ok := path.PkgName(); ok {
+				pf(`%s "%s"`, r, path)
+			} else {
+				pf(`"%s"`, path)
+			}
+
+			if pn == len(slice)-1 && sn < len(all)-1 {
 				pf("")
 			}
 		}
@@ -282,7 +368,7 @@ func (a *API) generateGo() ([]byte, error) {
 
 	output, err := format.Source([]byte(result.String()))
 	if err != nil {
-		return nil, err
+		return nil, errs.Wrap(err)
 	}
 
 	return output, nil
@@ -292,8 +378,17 @@ func (a *API) generateGo() ([]byte, error) {
 // If type is from the same package then we use only type's name.
 // If type is from external package then we use type along with its appropriate package name.
 func (a *API) handleTypesPackage(t reflect.Type) string {
-	if strings.HasPrefix(t.String(), a.PackageName) {
-		return t.Elem().Name()
+	switch t.Kind() {
+	case reflect.Array:
+		return fmt.Sprintf("[%d]%s", t.Len(), a.handleTypesPackage(t.Elem()))
+	case reflect.Slice:
+		return "[]" + a.handleTypesPackage(t.Elem())
+	case reflect.Pointer:
+		return "*" + a.handleTypesPackage(t.Elem())
+	}
+
+	if t.PkgPath() == a.PackagePath {
+		return t.Name()
 	}
 
 	return t.String()
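The new handleTypesPackage recurses through composite kinds so that, for example, a [][2]*Version response renders with the right shape on both sides of the package boundary. A standalone sketch of the same recursion, with a plain packagePath argument standing in for a.PackagePath and hypothetical types for illustration:

	package main

	import (
		"fmt"
		"reflect"
	)

	// render resolves composite types recursively: the [N]/[]/* shape is
	// preserved while the element type is rendered either bare (same
	// package) or fully qualified (external package).
	func render(t reflect.Type, packagePath string) string {
		switch t.Kind() {
		case reflect.Array:
			return fmt.Sprintf("[%d]%s", t.Len(), render(t.Elem(), packagePath))
		case reflect.Slice:
			return "[]" + render(t.Elem(), packagePath)
		case reflect.Pointer:
			return "*" + render(t.Elem(), packagePath)
		}
		if t.PkgPath() == packagePath {
			return t.Name()
		}
		return t.String()
	}

	type Version struct{ Number uint }

	func main() {
		fmt.Println(render(reflect.TypeOf([][2]*Version{}), "main"))
		// prints "[][2]*Version" when the package path matches; with any
		// other packagePath it prints the qualified "[][2]*main.Version".
	}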
@@ -381,3 +476,20 @@ func handleBody(pf func(format string, a ...interface{}), body interface{}) {
 	pf("}")
 	pf("")
 }
+
+type importPath string
+
+// PkgName returns the name of the package based of the last part of the import
+// path and false if the name isn't a rename, otherwise it returns true.
+//
+// The package name is renamed when the last part of the path contains hyphen
+// (-) or dot (.) and the rename is this part with the hyphens and dots
+// stripped.
+func (i importPath) PkgName() (rename string, ok bool) {
+	b := filepath.Base(string(i))
+	if strings.Contains(b, "-") || strings.Contains(b, ".") {
+		return strings.ReplaceAll(strings.ReplaceAll(b, "-", ""), ".", ""), true
+	}
+
+	return b, false
+}
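PkgName exists because an import path whose last element contains '-' or '.' does not match the Go package identifier, so the generated import statement needs an explicit rename. A standalone copy of the logic, exercised with a couple of illustrative paths (the hyphen/dot examples are hypothetical, not imports used by this package):

	package main

	import (
		"fmt"
		"path/filepath"
		"strings"
	)

	// pkgName duplicates importPath.PkgName as a free function.
	func pkgName(path string) (rename string, ok bool) {
		b := filepath.Base(path)
		if strings.Contains(b, "-") || strings.Contains(b, ".") {
			return strings.ReplaceAll(strings.ReplaceAll(b, "-", ""), ".", ""), true
		}
		return b, false
	}

	func main() {
		fmt.Println(pkgName("storj.io/common/uuid"))  // uuid false  -> "storj.io/common/uuid"
		fmt.Println(pkgName("github.com/foo/go-bar")) // gobar true  -> gobar "github.com/foo/go-bar"
		fmt.Println(pkgName("gopkg.in/yaml.v3"))      // yamlv3 true -> yamlv3 "gopkg.in/yaml.v3"
	}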
@@ -25,17 +25,12 @@ import (
 	"storj.io/storj/private/api"
 	"storj.io/storj/private/apigen"
 	"storj.io/storj/private/apigen/example"
+	"storj.io/storj/private/apigen/example/myapi"
 )
 
 type (
 	auth    struct{}
 	service struct{}
-	response = struct {
-		ID        uuid.UUID
-		Date      time.Time
-		PathParam string
-		Body      string
-	}
 )
 
 func (a auth) IsAuthenticated(ctx context.Context, r *http.Request, isCookieAuth, isKeyAuth bool) (context.Context, error) {
@@ -44,8 +39,42 @@ func (a auth) IsAuthenticated(ctx context.Context, r *http.Request, isCookieAuth
 
 func (a auth) RemoveAuthCookie(w http.ResponseWriter) {}
 
-func (s service) GenTestAPI(ctx context.Context, pathParam string, id uuid.UUID, date time.Time, body struct{ Content string }) (*response, api.HTTPError) {
-	return &response{
+func (s service) Get(
+	ctx context.Context,
+) ([]myapi.Document, api.HTTPError) {
+	return []myapi.Document{}, api.HTTPError{}
+}
+
+func (s service) GetOne(
+	ctx context.Context,
+	pathParam string,
+) (*myapi.Document, api.HTTPError) {
+	return &myapi.Document{}, api.HTTPError{}
+}
+
+func (s service) GetTag(
+	ctx context.Context,
+	pathParam string,
+	tagName string,
+) (*[2]string, api.HTTPError) {
+	return &[2]string{}, api.HTTPError{}
+}
+
+func (s service) GetVersions(
+	ctx context.Context,
+	pathParam string,
+) ([]myapi.Version, api.HTTPError) {
+	return []myapi.Version{}, api.HTTPError{}
+}
+
+func (s service) UpdateContent(
+	ctx context.Context,
+	pathParam string,
+	id uuid.UUID,
+	date time.Time,
+	body myapi.NewDocument,
+) (*myapi.Document, api.HTTPError) {
+	return &myapi.Document{
 		ID:        id,
 		Date:      date,
 		PathParam: pathParam,
@@ -53,7 +82,9 @@ func (s service) UpdateContent(ctx context.Context, pathParam string, id uuid.UUID,
 	}, api.HTTPError{}
 }
 
-func send(ctx context.Context, method string, url string, body interface{}) ([]byte, error) {
+func send(ctx context.Context, t *testing.T, method string, url string, body interface{}) ([]byte, error) {
+	t.Helper()
+
 	var bodyReader io.Reader = http.NoBody
 	if body != nil {
 		bodyJSON, err := json.Marshal(body)
@@ -73,6 +104,10 @@ func send(ctx context.Context, t *testing.T, method string, url string, body interface{}) ([]byte, error) {
 		return nil, err
 	}
 
+	if c := resp.StatusCode; c != http.StatusOK {
+		t.Fatalf("unexpected status code. Want=%d, Got=%d", http.StatusOK, c)
+	}
+
 	respBody, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return nil, err
@@ -90,7 +125,7 @@ func TestAPIServer(t *testing.T) {
 	defer ctx.Cleanup()
 
 	router := mux.NewRouter()
-	example.NewTestAPI(zaptest.NewLogger(t), monkit.Package(), service{}, router, auth{})
+	example.NewDocuments(zaptest.NewLogger(t), monkit.Package(), service{}, router, auth{})
 
 	server := httptest.NewServer(router)
 	defer server.Close()
@@ -98,15 +133,15 @@ func TestAPIServer(t *testing.T) {
 	id, err := uuid.New()
 	require.NoError(t, err)
 
-	expected := response{
+	expected := myapi.Document{
 		ID:        id,
 		Date:      time.Now(),
 		PathParam: "foo",
 		Body:      "bar",
 	}
 
-	resp, err := send(ctx, http.MethodPost,
-		fmt.Sprintf("%s/api/v0/testapi/%s?id=%s&date=%s",
+	resp, err := send(ctx, t, http.MethodPost,
+		fmt.Sprintf("%s/api/v0/docs/%s?id=%s&date=%s",
 			server.URL,
 			expected.PathParam,
 			url.QueryEscape(expected.ID.String()),
@@ -115,13 +150,16 @@ func TestAPIServer(t *testing.T) {
 	)
 	require.NoError(t, err)
 
-	var actual map[string]string
+	fmt.Println(string(resp))
+
+	var actual map[string]any
 	require.NoError(t, json.Unmarshal(resp, &actual))
 
-	for _, key := range []string{"ID", "Date", "PathParam", "Body"} {
+	for _, key := range []string{"id", "date", "pathParam", "body"} {
 		require.Contains(t, actual, key)
 	}
-	require.Equal(t, expected.ID.String(), actual["ID"])
-	require.Equal(t, expected.Date.Format(apigen.DateFormat), actual["Date"])
-	require.Equal(t, expected.Body, actual["Body"])
+	require.Equal(t, expected.ID.String(), actual["id"].(string))
+	require.Equal(t, expected.Date.Format(apigen.DateFormat), actual["date"].(string))
+	require.Equal(t, expected.PathParam, actual["pathParam"].(string))
+	require.Equal(t, expected.Body, actual["body"].(string))
 }
@@ -12,7 +12,10 @@ import (
 	"github.com/zeebo/errs"
 )
 
-// MustWriteTS writes generated TypeScript code into a file.
+// MustWriteTS writes generated TypeScript code into a file indicated by path.
+// The generated code is an API client to run in the browser.
+//
+// If an error occurs, it panics.
 func (a *API) MustWriteTS(path string) {
 	f := newTSGenFile(path, a)
 
@@ -57,8 +60,18 @@ func (f *tsGenFile) generateTS() {
 	f.registerTypes()
 	f.result += f.types.GenerateTypescriptDefinitions()
 
+	f.result += `
+class APIError extends Error {
+	constructor(
+		public readonly msg: string,
+		public readonly responseStatusCode?: number,
+	) {
+		super(msg);
+	}
+}
+`
+
 	for _, group := range f.api.EndpointGroups {
-		// Not sure if this is a good name
 		f.createAPIClient(group)
 	}
 }
@@ -83,45 +96,50 @@ func (f *tsGenFile) registerTypes() {
 }
 
 func (f *tsGenFile) createAPIClient(group *EndpointGroup) {
-	f.pf("\nexport class %sHttpApi%s {", group.Prefix, strings.ToUpper(f.api.Version))
+	f.pf("\nexport class %sHttpApi%s {", capitalize(group.Name), strings.ToUpper(f.api.Version))
 	f.pf("\tprivate readonly http: HttpClient = new HttpClient();")
-	f.pf("\tprivate readonly ROOT_PATH: string = '/api/%s/%s';", f.api.Version, group.Prefix)
+	f.pf("\tprivate readonly ROOT_PATH: string = '%s/%s';", f.api.endpointBasePath(), strings.ToLower(group.Prefix))
 	for _, method := range group.endpoints {
 		f.pf("")
 
-		funcArgs, path := f.getArgsAndPath(method)
+		funcArgs, path := f.getArgsAndPath(method, group)
 
 		returnStmt := "return"
 		returnType := "void"
 		if method.Response != nil {
-			returnType = TypescriptTypeName(getElementaryType(reflect.TypeOf(method.Response)))
-			if v := reflect.ValueOf(method.Response); v.Kind() == reflect.Array || v.Kind() == reflect.Slice {
-				returnType = fmt.Sprintf("Array<%s>", returnType)
-			}
+			returnType = TypescriptTypeName(reflect.TypeOf(method.Response))
 			returnStmt += fmt.Sprintf(" response.json().then((body) => body as %s)", returnType)
 		}
 		returnStmt += ";"
 
-		f.pf("\tpublic async %s(%s): Promise<%s> {", method.RequestName, funcArgs, returnType)
-		f.pf("\t\tconst path = `%s`;", path)
+		f.pf("\tpublic async %s(%s): Promise<%s> {", method.TypeScriptName, funcArgs, returnType)
+		if len(method.QueryParams) > 0 {
+			f.pf("\t\tconst u = new URL(`%s`, window.location.href);", path)
+			for _, p := range method.QueryParams {
+				f.pf("\t\tu.searchParams.set('%s', %s);", p.Name, p.Name)
+			}
+			f.pf("\t\tconst fullPath = u.toString();")
+		} else {
+			f.pf("\t\tconst fullPath = `%s`;", path)
+		}
+
 		if method.Request != nil {
-			f.pf("\t\tconst response = await this.http.%s(path, JSON.stringify(request));", strings.ToLower(method.Method))
+			f.pf("\t\tconst response = await this.http.%s(fullPath, JSON.stringify(request));", strings.ToLower(method.Method))
 		} else {
-			f.pf("\t\tconst response = await this.http.%s(path);", strings.ToLower(method.Method))
+			f.pf("\t\tconst response = await this.http.%s(fullPath);", strings.ToLower(method.Method))
 		}
 
 		f.pf("\t\tif (response.ok) {")
 		f.pf("\t\t\t%s", returnStmt)
 		f.pf("\t\t}")
 		f.pf("\t\tconst err = await response.json();")
-		f.pf("\t\tthrow new Error(err.error);")
+		f.pf("\t\tthrow new APIError(err.error, response.status);")
 		f.pf("\t}")
 	}
 	f.pf("}")
 }
 
-func (f *tsGenFile) getArgsAndPath(method *fullEndpoint) (funcArgs, path string) {
+func (f *tsGenFile) getArgsAndPath(method *FullEndpoint, group *EndpointGroup) (funcArgs, path string) {
 	// remove path parameter placeholders
 	path = method.Path
 	i := strings.Index(path, "{")
@@ -131,8 +149,7 @@ func (f *tsGenFile) getArgsAndPath(method *FullEndpoint, group *EndpointGroup) (funcArgs, path string) {
 	path = "${this.ROOT_PATH}" + path
 
 	if method.Request != nil {
-		t := getElementaryType(reflect.TypeOf(method.Request))
-		funcArgs += fmt.Sprintf("request: %s, ", TypescriptTypeName(t))
+		funcArgs += fmt.Sprintf("request: %s, ", TypescriptTypeName(reflect.TypeOf(method.Request)))
 	}
 
 	for _, p := range method.PathParams {
@@ -140,15 +157,8 @@ func (f *tsGenFile) getArgsAndPath(method *FullEndpoint, group *EndpointGroup) (funcArgs, path string) {
 		path += fmt.Sprintf("/${%s}", p.Name)
 	}
 
-	for i, p := range method.QueryParams {
-		if i == 0 {
-			path += "?"
-		} else {
-			path += "&"
-		}
-
+	for _, p := range method.QueryParams {
 		funcArgs += fmt.Sprintf("%s: %s, ", p.Name, TypescriptTypeName(p.Type))
-		path += fmt.Sprintf("%s=${%s}", p.Name, p.Name)
 	}
 
 	path = strings.ReplaceAll(path, "//", "/")
private/apigen/tsgenmock.go (new file, 129 lines)
@@ -0,0 +1,129 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package apigen

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"

	"github.com/zeebo/errs"
)

// MustWriteTSMock writes generated TypeScript code into a file indicated by path.
// The generated code is an API client mock to run in the browser.
//
// If an error occurs, it panics.
func (a *API) MustWriteTSMock(path string) {
	f := newTSGenMockFile(path, a)

	f.generateTS()

	err := f.write()
	if err != nil {
		panic(errs.Wrap(err))
	}
}

type tsGenMockFile struct {
	*tsGenFile
}

func newTSGenMockFile(filepath string, api *API) *tsGenMockFile {
	return &tsGenMockFile{
		tsGenFile: newTSGenFile(filepath, api),
	}
}

func (f *tsGenMockFile) generateTS() {
	f.pf("// AUTOGENERATED BY private/apigen")
	f.pf("// DO NOT EDIT.")

	f.registerTypes()
	f.result += f.types.GenerateTypescriptDefinitions()

	f.result += `
class APIError extends Error {
	constructor(
		public readonly msg: string,
		public readonly responseStatusCode?: number,
	) {
		super(msg);
	}
}
`

	for _, group := range f.api.EndpointGroups {
		f.createAPIClient(group)
	}
}

func (f *tsGenMockFile) createAPIClient(group *EndpointGroup) {
	f.pf("\nexport class %sHttpApi%s {", capitalize(group.Name), strings.ToUpper(f.api.Version))
	// Properties.
	f.pf("\tpublic readonly respStatusCode: number;")
	f.pf("")

	// Constructor
	f.pf("\t// When respStatuscode is passed, the client throws an APIError on each method call")
	f.pf("\t// with respStatusCode as HTTP status code.")
	f.pf("\t// respStatuscode must be equal or greater than 400")
	f.pf("\tconstructor(respStatusCode?: number) {")
	f.pf("\t\tif (typeof respStatusCode === 'undefined') {")
	f.pf("\t\t\tthis.respStatusCode = 0;")
	f.pf("\t\t\treturn;")
	f.pf("\t\t}")
	f.pf("")
	f.pf("\t\tif (respStatusCode < 400) {")
	f.pf("\t\t\tthrow new Error('invalid response status code for API Error, it must be greater or equal than 400');")
	f.pf("\t\t}")
	f.pf("")
	f.pf("\t\tthis.respStatusCode = respStatusCode;")
	f.pf("\t}")

	// Methods to call API endpoints.
	for _, method := range group.endpoints {
		f.pf("")

		funcArgs, _ := f.getArgsAndPath(method, group)

		returnType := "void"
		if method.Response != nil {
			if method.ResponseMock == nil {
				panic(
					fmt.Sprintf(
						"ResponseMock is nil and Response isn't nil. Endpoint.Method=%q, Endpoint.Path=%q",
						method.Method, method.Path,
					))
			}

			returnType = TypescriptTypeName(reflect.TypeOf(method.Response))
		}

		f.pf("\tpublic async %s(%s): Promise<%s> {", method.TypeScriptName, funcArgs, returnType)
		f.pf("\t\tif (this.respStatusCode !== 0) {")
		f.pf("\t\t\tthrow new APIError('mock error message: ' + this.respStatusCode, this.respStatusCode);")
		f.pf("\t\t}")
		f.pf("")

		if method.ResponseMock != nil {
			res, err := json.Marshal(method.ResponseMock)
			if err != nil {
				panic(
					fmt.Sprintf(
						"error when marshaling ResponseMock: %+v. Endpoint.Method=%q, Endpoint.Path=%q",
						err, method.Method, method.Path,
					))
			}

			f.pf("\t\treturn JSON.parse('%s') as %s;", string(res), returnType)
		} else {
			f.pf("\t\treturn;")
		}

		f.pf("\t}")
	}
	f.pf("}")
}
@@ -36,41 +36,60 @@ type Types struct {
 
 // Register registers a type for generation.
 func (types *Types) Register(t reflect.Type) {
+	if t.Name() == "" {
+		switch t.Kind() {
+		case reflect.Array, reflect.Slice, reflect.Ptr:
+			if t.Elem().Name() == "" {
+				panic(
+					fmt.Sprintf("register an %q of elements of an anonymous type is not supported", t.Name()),
+				)
+			}
+		default:
+			panic("register an anonymous type is not supported. All the types must have a name")
+		}
+	}
 	types.top[t] = struct{}{}
 }
 
-// All returns a slice containing every top-level type and their dependencies.
-func (types *Types) All() []reflect.Type {
-	seen := map[reflect.Type]struct{}{}
-	all := []reflect.Type{}
+// All returns a map containing every top-level and their dependency types with their associated name.
+func (types *Types) All() map[reflect.Type]string {
+	all := map[reflect.Type]string{}
 
 	var walk func(t reflect.Type)
 	walk = func(t reflect.Type) {
-		if _, ok := seen[t]; ok {
-			return
-		}
-		seen[t] = struct{}{}
-		all = append(all, t)
-
-		if _, ok := commonClasses[t]; ok {
+		if _, ok := all[t]; ok {
 			return
 		}
 
-		switch t.Kind() {
-		case reflect.Array, reflect.Ptr, reflect.Slice:
+		if n, ok := commonClasses[t]; ok {
+			all[t] = n
+			return
+		}
+
+		switch k := t.Kind(); k {
+		case reflect.Ptr:
+			walk(t.Elem())
+		case reflect.Array, reflect.Slice:
 			walk(t.Elem())
 		case reflect.Struct:
-			for i := 0; i < t.NumField(); i++ {
-				walk(t.Field(i).Type)
+			if t.Name() == "" {
+				panic(fmt.Sprintf("BUG: found an anonymous 'struct'. Found type=%q", t))
 			}
-		case reflect.Bool:
-		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		case reflect.Float32, reflect.Float64:
-		case reflect.String:
-			break
+
+			all[t] = t.Name()
+
+			for i := 0; i < t.NumField(); i++ {
+				field := t.Field(i)
+				walk(field.Type)
+			}
+		case reflect.Bool,
+			reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+			reflect.Float32, reflect.Float64,
+			reflect.String:
+			all[t] = t.Name()
 		default:
-			panic(fmt.Sprintf("type '%s' is not supported", t.Kind().String()))
+			panic(fmt.Sprintf("type %q is not supported", t.Kind().String()))
 		}
 	}
 
@@ -78,10 +97,6 @@ func (types *Types) All() map[reflect.Type]string {
 		walk(t)
 	}
 
-	sort.Slice(all, func(i, j int) bool {
-		return strings.Compare(all[i].Name(), all[j].Name()) < 0
-	})
-
 	return all
 }
 
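The reworked All is a depth-first walk over reflect types that records names as it goes instead of collecting a slice and sorting afterwards. A trimmed standalone version of the walk, using hypothetical Doc/Version types for illustration:

	package main

	import (
		"fmt"
		"reflect"
	)

	// collect maps every named type reachable from t (through pointers,
	// slices, arrays, and struct fields) to its name.
	func collect(t reflect.Type, all map[reflect.Type]string) {
		if _, ok := all[t]; ok {
			return // already visited; also breaks cycles
		}
		switch t.Kind() {
		case reflect.Ptr, reflect.Slice, reflect.Array:
			collect(t.Elem(), all)
		case reflect.Struct:
			all[t] = t.Name()
			for i := 0; i < t.NumField(); i++ {
				collect(t.Field(i).Type, all)
			}
		default:
			all[t] = t.Name()
		}
	}

	type Version struct{ Number uint }

	type Doc struct {
		Body     string
		Versions []Version
	}

	func main() {
		all := map[reflect.Type]string{}
		collect(reflect.TypeOf(Doc{}), all)
		fmt.Println(len(all)) // 4: Doc, string, Version, uint
	}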
@@ -90,40 +105,44 @@ func (types *Types) GenerateTypescriptDefinitions() string {
 	var out StringBuilder
 	pf := out.Writelnf
 
-	pf(types.getTypescriptImports())
+	{
+		i := types.getTypescriptImports()
+		if i != "" {
+			pf(i)
+		}
+	}
 
-	all := filter(types.All(), func(t reflect.Type) bool {
-		if _, ok := commonClasses[t]; ok {
+	allTypes := types.All()
+	namedTypes := mapToSlice(allTypes)
+	allStructs := filter(namedTypes, func(tn typeAndName) bool {
+		if _, ok := commonClasses[tn.Type]; ok {
 			return false
 		}
-		return t.Kind() == reflect.Struct
+
+		return tn.Type.Kind() == reflect.Struct
 	})
 
-	for _, t := range all {
+	for _, t := range allStructs {
 		func() {
-			pf("\nexport class %s {", t.Name())
+			name := capitalize(t.Name)
+			pf("\nexport class %s {", name)
 			defer pf("}")
 
-			for i := 0; i < t.NumField(); i++ {
-				field := t.Field(i)
-				attributes := strings.Fields(field.Tag.Get("json"))
-				if len(attributes) == 0 || attributes[0] == "" {
-					pathParts := strings.Split(t.PkgPath(), "/")
-					pkg := pathParts[len(pathParts)-1]
-					panic(fmt.Sprintf("(%s.%s).%s missing json declaration", pkg, t.Name(), field.Name))
-				}
-
-				jsonField := attributes[0]
-				if jsonField == "-" {
+			for i := 0; i < t.Type.NumField(); i++ {
+				field := t.Type.Field(i)
+				jsonInfo := parseJSONTag(t.Type, field)
+				if jsonInfo.Skip {
 					continue
 				}
 
-				isOptional := ""
-				if isNillableType(t) {
+				var isOptional, isNullable string
+				if jsonInfo.OmitEmpty {
 					isOptional = "?"
+				} else if isNillableType(field.Type) {
+					isNullable = " | null"
 				}
 
-				pf("\t%s%s: %s;", jsonField, isOptional, TypescriptTypeName(field.Type))
+				pf("\t%s%s: %s%s;", jsonInfo.FieldName, isOptional, TypescriptTypeName(field.Type), isNullable)
 			}
 		}()
 	}
@@ -135,8 +154,7 @@ func (types *Types) GenerateTypescriptDefinitions() string {
 func (types *Types) getTypescriptImports() string {
 	classes := []string{}
 
-	all := types.All()
-	for _, t := range all {
+	for t := range types.All() {
 		if tsClass, ok := commonClasses[t]; ok {
 			classes = append(classes, tsClass)
 		}
@@ -154,6 +172,7 @@ func (types *Types) getTypescriptImports() string {
 }
 
 // TypescriptTypeName gets the corresponding TypeScript type for a provided reflect.Type.
+// If the type is an anonymous struct, it returns an empty string.
 func TypescriptTypeName(t reflect.Type) string {
 	if override, ok := commonClasses[t]; ok {
 		return override
@@ -162,15 +181,18 @@ func TypescriptTypeName(t reflect.Type) string {
 	switch t.Kind() {
 	case reflect.Ptr:
 		return TypescriptTypeName(t.Elem())
-	case reflect.Slice:
+	case reflect.Array, reflect.Slice:
+		if t.Name() != "" {
+			return capitalize(t.Name())
+		}
+
 		// []byte ([]uint8) is marshaled as a base64 string
 		elem := t.Elem()
 		if elem.Kind() == reflect.Uint8 {
 			return "string"
 		}
-		fallthrough
-	case reflect.Array:
-		return TypescriptTypeName(t.Elem()) + "[]"
+
+		return TypescriptTypeName(elem) + "[]"
 	case reflect.String:
 		return "string"
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
@@ -182,8 +204,11 @@ func TypescriptTypeName(t reflect.Type) string {
 	case reflect.Bool:
 		return "boolean"
 	case reflect.Struct:
-		return t.Name()
+		if t.Name() == "" {
+			panic(fmt.Sprintf(`anonymous struct aren't accepted because their type doesn't have a name. Type="%+v"`, t))
+		}
+		return capitalize(t.Name())
 	default:
-		panic("unhandled type: " + t.Name())
+		panic(fmt.Sprintf(`unhandled type. Type="%+v"`, t))
 	}
 }
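The TypescriptTypeName changes fold Array into the Slice case and drop the old Array<T> spelling in favor of a recursive element[] suffix. A reduced sketch of just that mapping (the named-array and capitalize handling from the real function are omitted here):

	package main

	import (
		"fmt"
		"reflect"
	)

	// tsName maps a Go type to a TypeScript type name: []byte becomes a
	// base64 string, other slices/arrays map recursively to element[].
	func tsName(t reflect.Type) string {
		switch t.Kind() {
		case reflect.Ptr:
			return tsName(t.Elem())
		case reflect.Array, reflect.Slice:
			if t.Elem().Kind() == reflect.Uint8 {
				return "string" // []byte is JSON-marshaled as base64
			}
			return tsName(t.Elem()) + "[]"
		case reflect.String:
			return "string"
		case reflect.Bool:
			return "boolean"
		case reflect.Int, reflect.Uint, reflect.Float64:
			return "number"
		case reflect.Struct:
			return t.Name()
		default:
			panic("unhandled kind: " + t.Kind().String())
		}
	}

	func main() {
		fmt.Println(tsName(reflect.TypeOf([][2]string{}))) // string[][]
		fmt.Println(tsName(reflect.TypeOf([]byte{})))      // string
	}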
private/apigen/tstypes_test.go (new file, 81 lines)
@@ -0,0 +1,81 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package apigen

import (
	"reflect"
	"testing"

	"github.com/stretchr/testify/require"
)

type testTypesValoration struct {
	Points uint
}

func TestTypes(t *testing.T) {
	t.Run("Register panics with some anonymous types", func(t *testing.T) {
		types := NewTypes()
		require.Panics(t, func() {
			types.Register(reflect.TypeOf([2]struct{}{}))
		}, "array")

		require.Panics(t, func() {
			types.Register(reflect.TypeOf([]struct{}{}))
		}, "slice")

		require.Panics(t, func() {
			types.Register(reflect.TypeOf(struct{}{}))
		}, "struct")
	})

	t.Run("All returns nested types", func(t *testing.T) {
		typesList := []reflect.Type{
			reflect.TypeOf(true),
			reflect.TypeOf(int64(10)),
			reflect.TypeOf(uint8(9)),
			reflect.TypeOf(float64(99.9)),
			reflect.TypeOf("this is a test"),
			reflect.TypeOf(testTypesValoration{}),
		}

		types := NewTypes()
		for _, li := range typesList {
			types.Register(li)
		}

		allTypes := types.All()

		require.Len(t, allTypes, 7, "total number of types")
		require.Subset(t, allTypes, typesList, "all types contains at least the registered ones")
	})

	t.Run("Anonymous types panics", func(t *testing.T) {
		type Address struct {
			Address string
			PO      string
		}
		type Job struct {
			Company         string
			Position        string
			StartingYear    uint
			ContractClauses []struct { // This is what it makes Types.All to panic
				ClauseID  uint
				CauseDesc string
			}
		}

		type Citizen struct {
			Name      string
			Addresses []Address
			Job       Job
		}

		types := NewTypes()
		types.Register(reflect.TypeOf(Citizen{}))
		require.Panics(t, func() {
			types.All()
		})
	})
}
@@ -27,7 +27,9 @@ message DiskSpaceResponse {
 	int64 allocated = 1;
 	int64 used_pieces = 2;
 	int64 used_trash = 3;
+	// Free is the actual amount of free space on the whole disk, not just allocated disk space, in bytes.
 	int64 free = 4;
+	// Available is the amount of free space on the allocated disk space, in bytes.
 	int64 available = 5;
 	int64 overused = 6;
 }
@@ -55,18 +55,20 @@ func (sender *SMTPSender) communicate(ctx context.Context, client *smtp.Client,
 	// before creating SMTPSender
 	host, _, _ := net.SplitHostPort(sender.ServerAddress)
 
-	// send smtp hello or ehlo msg and establish connection over tls
-	err := client.StartTLS(&tls.Config{ServerName: host})
-	if err != nil {
-		return err
-	}
+	if sender.Auth != nil {
+		// send smtp hello or ehlo msg and establish connection over tls
+		err := client.StartTLS(&tls.Config{ServerName: host})
+		if err != nil {
+			return err
+		}
+
+		err = client.Auth(sender.Auth)
+		if err != nil {
+			return err
+		}
 	}
 
-	err = client.Auth(sender.Auth)
-	if err != nil {
-		return err
-	}
-
-	err = client.Mail(sender.From.Address)
+	err := client.Mail(sender.From.Address)
 	if err != nil {
 		return err
 	}
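The SMTP change scopes StartTLS and AUTH to the case where credentials are configured, so a relay without authentication (for example a local MTA) no longer fails on client.Auth. A minimal sketch of the resulting flow, assuming an already-connected *smtp.Client; function and parameter names here are illustrative.

	package sketch

	import (
		"crypto/tls"
		"net/smtp"
	)

	// communicate only upgrades to TLS and authenticates when credentials
	// are configured; MAIL FROM works either way.
	func communicate(client *smtp.Client, host string, auth smtp.Auth, from string) error {
		if auth != nil {
			// Never send credentials in the clear; upgrade the session first.
			if err := client.StartTLS(&tls.Config{ServerName: host}); err != nil {
				return err
			}
			if err := client.Auth(auth); err != nil {
				return err
			}
		}
		return client.Mail(from)
	}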
private/server/fastopen_freebsd.go (new file, 51 lines)
@@ -0,0 +1,51 @@
+// Copyright (C) 2023 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package server
+
+import (
+	"os/exec"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+
+	"go.uber.org/zap"
+)
+
+const tcpFastOpen = 1025
+
+func setTCPFastOpen(fd uintptr, _queue int) error {
+	return syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, tcpFastOpen, 1)
+}
+
+var tryInitFastOpenOnce sync.Once
+var initFastOpenPossiblyEnabled bool
+
+// tryInitFastOpen returns true if fastopen support is possibly enabled.
+func tryInitFastOpen(log *zap.Logger) bool {
+	tryInitFastOpenOnce.Do(func() {
+		initFastOpenPossiblyEnabled = true
+		output, err := exec.Command("sysctl", "-n", "net.inet.tcp.fastopen.server_enable").Output()
+		if err != nil {
+			log.Sugar().Infof("kernel support for tcp fast open unknown")
+			initFastOpenPossiblyEnabled = true
+			return
+		}
+		enabled, err := strconv.ParseBool(strings.TrimSpace(string(output)))
+		if err != nil {
+			log.Sugar().Infof("kernel support for tcp fast open unparsable")
+			initFastOpenPossiblyEnabled = true
+			return
+		}
+		if enabled {
+			log.Sugar().Infof("kernel support for server-side tcp fast open enabled.")
+		} else {
+			log.Sugar().Infof("kernel support for server-side tcp fast open not enabled.")
+			log.Sugar().Infof("enable with: sysctl net.inet.tcp.fastopen.server_enable=1")
+			log.Sugar().Infof("enable on-boot by setting net.inet.tcp.fastopen.server_enable=1 in /etc/sysctl.conf")
+		}
+		initFastOpenPossiblyEnabled = enabled
+	})
+	return initFastOpenPossiblyEnabled
+}
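Note: setTCPFastOpen only sets the socket option; it still has to be applied to a listening socket before listen(2). A minimal sketch of how that wiring usually looks with net.ListenConfig.Control, assuming it sits next to the helper above in package server (the function name is hypothetical):

package server

import (
	"context"
	"net"
	"syscall"
)

// listenWithFastOpen applies setTCPFastOpen to the raw fd before the
// socket starts listening; Control is invoked by the runtime for that.
func listenWithFastOpen(ctx context.Context, addr string) (net.Listener, error) {
	lc := net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			var sockErr error
			if err := c.Control(func(fd uintptr) {
				sockErr = setTCPFastOpen(fd, 256) // queue size is ignored on FreeBSD
			}); err != nil {
				return err
			}
			return sockErr
		},
	}
	return lc.Listen(ctx, "tcp", addr)
}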
@@ -1,8 +1,8 @@
 // Copyright (C) 2023 Storj Labs, Inc.
 // See LICENSE for copying information.
 
-//go:build !linux && !windows
-// +build !linux,!windows
+//go:build !linux && !windows && !freebsd
+// +build !linux,!windows,!freebsd
 
 package server
 
@@ -4,22 +4,44 @@
 package server
 
 import (
+	"context"
+	"net"
+	"sync"
 	"syscall"
 
 	"go.uber.org/zap"
 )
 
-const tcpFastOpenServer = 15
+const tcpFastOpen = 15 // Corresponds to TCP_FASTOPEN from MS SDK
 
 func setTCPFastOpen(fd uintptr, queue int) error {
-	return syscall.SetsockoptInt(syscall.Handle(fd), syscall.IPPROTO_TCP, tcpFastOpenServer, 1)
+	return syscall.SetsockoptInt(syscall.Handle(fd), syscall.IPPROTO_TCP, tcpFastOpen, 1)
 }
 
+var tryInitFastOpenOnce sync.Once
+var initFastOpenPossiblyEnabled bool
+
 // tryInitFastOpen returns true if fastopen support is possibly enabled.
 func tryInitFastOpen(*zap.Logger) bool {
-	// should we log or check something along the lines of
-	// netsh int tcp set global fastopen=enabled
-	// netsh int tcp set global fastopenfallback=disabled
-	// ?
-	return false
+	tryInitFastOpenOnce.Do(func() {
+		// TCP-FASTOPEN is supported as of Windows 10 build 1607, but is
+		// enabled per socket. If the socket option isn't supported then the
+		// call to opt-in will fail. So as long as we can set up a listening
+		// socket with the right socket option set, we should be good.
+		if listener, err := (&net.ListenConfig{
+			Control: func(network, addr string, c syscall.RawConn) error {
+				var sockOptErr error
+				if controlErr := c.Control(func(fd uintptr) {
+					sockOptErr = setTCPFastOpen(fd, 0) // queue is unused
+				}); controlErr != nil {
+					return controlErr
+				}
+				return sockOptErr
+			},
+		}).Listen(context.Background(), "tcp", "127.0.0.1:0"); err == nil {
+			listener.Close()
+			initFastOpenPossiblyEnabled = true
+		}
+	})
+	return initFastOpenPossiblyEnabled
 }
@@ -91,7 +91,7 @@ func (planet *Planet) newMultinode(ctx context.Context, prefix string, index int
 
 	config := multinode.Config{
 		Debug: debug.Config{
-			Address: "",
+			Addr: "",
 		},
 		Console: server.Config{
 			Address: "127.0.0.1:0",
@@ -66,10 +66,10 @@ type Satellite struct {
 
 	Core       *satellite.Core
 	API        *satellite.API
+	UI         *satellite.UI
 	Repairer   *satellite.Repairer
 	Auditor    *satellite.Auditor
 	Admin      *satellite.Admin
-	GC         *satellite.GarbageCollection
 	GCBF       *satellite.GarbageCollectionBF
 	RangedLoop *satellite.RangedLoop
 
@@ -173,12 +173,17 @@ type Satellite struct {
 		Service *mailservice.Service
 	}
 
-	Console struct {
+	ConsoleBackend struct {
 		Listener net.Listener
 		Service  *console.Service
 		Endpoint *consoleweb.Server
 	}
 
+	ConsoleFrontend struct {
+		Listener net.Listener
+		Endpoint *consoleweb.Server
+	}
+
 	NodeStats struct {
 		Endpoint *nodestats.Endpoint
 	}
@@ -256,7 +261,7 @@ func (system *Satellite) AddProject(ctx context.Context, ownerID uuid.UUID, name
 	if err != nil {
 		return nil, errs.Wrap(err)
 	}
-	project, err := system.API.Console.Service.CreateProject(ctx, console.ProjectInfo{
+	project, err := system.API.Console.Service.CreateProject(ctx, console.UpsertProjectInfo{
 		Name: name,
 	})
 	if err != nil {
@@ -285,7 +290,6 @@ func (system *Satellite) Close() error {
 		system.Repairer.Close(),
 		system.Auditor.Close(),
 		system.Admin.Close(),
-		system.GC.Close(),
 		system.GCBF.Close(),
 	)
 }
@@ -300,6 +304,11 @@ func (system *Satellite) Run(ctx context.Context) (err error) {
 	group.Go(func() error {
 		return errs2.IgnoreCanceled(system.API.Run(ctx))
 	})
+	if system.UI != nil {
+		group.Go(func() error {
+			return errs2.IgnoreCanceled(system.UI.Run(ctx))
+		})
+	}
 	group.Go(func() error {
 		return errs2.IgnoreCanceled(system.Repairer.Run(ctx))
 	})
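Note: the UI peer is optional, so Run only schedules it when it was constructed. A self-contained sketch of the same optional-peer pattern with x/sync/errgroup; ignoreCanceled here only approximates storj's errs2.IgnoreCanceled, and the peer type is illustrative:

package sketch

import (
	"context"
	"errors"

	"golang.org/x/sync/errgroup"
)

type peer struct{ name string }

func (p *peer) Run(ctx context.Context) error {
	<-ctx.Done()
	return ctx.Err()
}

// ignoreCanceled treats a context-canceled shutdown as success, not failure.
func ignoreCanceled(err error) error {
	if errors.Is(err, context.Canceled) {
		return nil
	}
	return err
}

// runAll schedules the UI only when it exists, mirroring the hunk above.
func runAll(ctx context.Context, api, ui *peer) error {
	group, ctx := errgroup.WithContext(ctx)
	group.Go(func() error { return ignoreCanceled(api.Run(ctx)) })
	if ui != nil {
		group.Go(func() error { return ignoreCanceled(ui.Run(ctx)) })
	}
	return group.Wait()
}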
@@ -309,9 +318,6 @@ func (system *Satellite) Run(ctx context.Context) (err error) {
 	group.Go(func() error {
 		return errs2.IgnoreCanceled(system.Admin.Run(ctx))
 	})
-	group.Go(func() error {
-		return errs2.IgnoreCanceled(system.GC.Run(ctx))
-	})
 	group.Go(func() error {
 		return errs2.IgnoreCanceled(system.GCBF.Run(ctx))
 	})
@@ -405,6 +411,7 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
 	// cfgstruct devDefaults. we need to make sure it's safe to remove
 	// these lines and then remove them.
 	config.Debug.Control = false
+	config.Debug.Addr = ""
 	config.Reputation.AuditHistory.OfflineDQEnabled = false
 	config.Server.Config.Extensions.Revocation = false
 	config.Orders.OrdersSemaphoreSize = 0
@@ -458,6 +465,10 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
 	config.Console.StaticDir = filepath.Join(developmentRoot, "web/satellite")
 	config.Payments.Storjscan.DisableLoop = true
 
+	if os.Getenv("STORJ_TEST_DISABLEQUIC") != "" {
+		config.Server.DisableQUIC = true
+	}
+
 	if planet.config.Reconfigure.Satellite != nil {
 		planet.config.Reconfigure.Satellite(log, index, &config)
 	}
@@ -524,6 +535,15 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
 		return nil, errs.Wrap(err)
 	}
 
+	// only run if front-end endpoints on console back-end server are disabled.
+	var ui *satellite.UI
+	if !config.Console.FrontendEnable {
+		ui, err = planet.newUI(ctx, index, identity, config, api.ExternalAddress, api.Console.Listener.Addr().String())
+		if err != nil {
+			return nil, errs.Wrap(err)
+		}
+	}
+
 	adminPeer, err := planet.newAdmin(ctx, index, identity, db, metabaseDB, config, versionInfo)
 	if err != nil {
 		return nil, errs.Wrap(err)
@@ -539,11 +559,6 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
 		return nil, errs.Wrap(err)
 	}
 
-	gcPeer, err := planet.newGarbageCollection(ctx, index, identity, db, metabaseDB, config, versionInfo)
-	if err != nil {
-		return nil, errs.Wrap(err)
-	}
-
 	gcBFPeer, err := planet.newGarbageCollectionBF(ctx, index, db, metabaseDB, config, versionInfo)
 	if err != nil {
 		return nil, errs.Wrap(err)
@@ -558,23 +573,23 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
 		peer.Mail.EmailReminders.TestSetLinkAddress("http://" + api.Console.Listener.Addr().String() + "/")
 	}
 
-	return createNewSystem(prefix, log, config, peer, api, repairerPeer, auditorPeer, adminPeer, gcPeer, gcBFPeer, rangedLoopPeer), nil
+	return createNewSystem(prefix, log, config, peer, api, ui, repairerPeer, auditorPeer, adminPeer, gcBFPeer, rangedLoopPeer), nil
 }
 
 // createNewSystem makes a new Satellite System and exposes the same interface from
 // before we split out the API. In the short term this will help keep all the tests passing
 // without much modification needed. However long term, we probably want to rework this
 // so it represents how the satellite will run when it is made up of many processes.
-func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer *satellite.Core, api *satellite.API, repairerPeer *satellite.Repairer, auditorPeer *satellite.Auditor, adminPeer *satellite.Admin, gcPeer *satellite.GarbageCollection, gcBFPeer *satellite.GarbageCollectionBF, rangedLoopPeer *satellite.RangedLoop) *Satellite {
+func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer *satellite.Core, api *satellite.API, ui *satellite.UI, repairerPeer *satellite.Repairer, auditorPeer *satellite.Auditor, adminPeer *satellite.Admin, gcBFPeer *satellite.GarbageCollectionBF, rangedLoopPeer *satellite.RangedLoop) *Satellite {
 	system := &Satellite{
 		Name:       name,
 		Config:     config,
 		Core:       peer,
 		API:        api,
+		UI:         ui,
 		Repairer:   repairerPeer,
 		Auditor:    auditorPeer,
 		Admin:      adminPeer,
-		GC:         gcPeer,
 		GCBF:       gcBFPeer,
 		RangedLoop: rangedLoopPeer,
 	}
@@ -622,7 +637,7 @@ func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer
 	system.Audit.Reporter = auditorPeer.Audit.Reporter
 	system.Audit.ContainmentSyncChore = peer.Audit.ContainmentSyncChore
 
-	system.GarbageCollection.Sender = gcPeer.GarbageCollection.Sender
+	system.GarbageCollection.Sender = peer.GarbageCollection.Sender
 
 	system.ExpiredDeletion.Chore = peer.ExpiredDeletion.Chore
 	system.ZombieDeletion.Chore = peer.ZombieDeletion.Chore
@@ -666,13 +681,28 @@ func (planet *Planet) newAPI(ctx context.Context, index int, identity *identity.
 	return satellite.NewAPI(log, identity, db, metabaseDB, revocationDB, liveAccounting, rollupsWriteCache, &config, versionInfo, nil)
 }
 
+func (planet *Planet) newUI(ctx context.Context, index int, identity *identity.FullIdentity, config satellite.Config, satelliteAddr, consoleAPIAddr string) (_ *satellite.UI, err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	prefix := "satellite-ui" + strconv.Itoa(index)
+	log := planet.log.Named(prefix)
+
+	return satellite.NewUI(log, identity, &config, nil, satelliteAddr, consoleAPIAddr)
+}
+
 func (planet *Planet) newAdmin(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, metabaseDB *metabase.DB, config satellite.Config, versionInfo version.Info) (_ *satellite.Admin, err error) {
 	defer mon.Task()(&ctx)(&err)
 
 	prefix := "satellite-admin" + strconv.Itoa(index)
 	log := planet.log.Named(prefix)
 
-	return satellite.NewAdmin(log, identity, db, metabaseDB, versionInfo, &config, nil)
+	liveAccounting, err := live.OpenCache(ctx, log.Named("live-accounting"), config.LiveAccounting)
+	if err != nil {
+		return nil, errs.Wrap(err)
+	}
+	planet.databases = append(planet.databases, liveAccounting)
+
+	return satellite.NewAdmin(log, identity, db, metabaseDB, liveAccounting, versionInfo, &config, nil)
 }
 
 func (planet *Planet) newRepairer(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, metabaseDB *metabase.DB, config satellite.Config, versionInfo version.Info) (_ *satellite.Repairer, err error) {
@@ -713,20 +743,6 @@ func (cache rollupsWriteCacheCloser) Close() error {
 	return cache.RollupsWriteCache.CloseAndFlush(context.TODO())
 }
 
-func (planet *Planet) newGarbageCollection(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, metabaseDB *metabase.DB, config satellite.Config, versionInfo version.Info) (_ *satellite.GarbageCollection, err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	prefix := "satellite-gc" + strconv.Itoa(index)
-	log := planet.log.Named(prefix)
-
-	revocationDB, err := revocation.OpenDBFromCfg(ctx, config.Server.Config)
-	if err != nil {
-		return nil, errs.Wrap(err)
-	}
-	planet.databases = append(planet.databases, revocationDB)
-	return satellite.NewGarbageCollection(log, identity, db, metabaseDB, revocationDB, versionInfo, &config, nil)
-}
-
 func (planet *Planet) newGarbageCollectionBF(ctx context.Context, index int, db satellite.DB, metabaseDB *metabase.DB, config satellite.Config, versionInfo version.Info) (_ *satellite.GarbageCollectionBF, err error) {
 	defer mon.Task()(&ctx)(&err)
 
@@ -746,7 +762,6 @@ func (planet *Planet) newRangedLoop(ctx context.Context, index int, db satellite
 
 	prefix := "satellite-ranged-loop" + strconv.Itoa(index)
 	log := planet.log.Named(prefix)
 
 	return satellite.NewRangedLoop(log, db, metabaseDB, &config, nil)
 }
-
@@ -21,6 +21,7 @@ import (
 	"storj.io/common/peertls/tlsopts"
 	"storj.io/common/storj"
 	"storj.io/private/debug"
+	"storj.io/storj/cmd/storagenode/internalcmd"
 	"storj.io/storj/private/revocation"
 	"storj.io/storj/private/server"
 	"storj.io/storj/storagenode"
@@ -133,7 +134,7 @@ func (planet *Planet) newStorageNode(ctx context.Context, prefix string, index,
 			},
 		},
 		Debug: debug.Config{
-			Address: "",
+			Addr: "",
 		},
 		Preflight: preflight.Config{
 			LocalTimeCheck: false,
@@ -215,6 +216,14 @@ func (planet *Planet) newStorageNode(ctx context.Context, prefix string, index,
 			MinDownloadTimeout: 2 * time.Minute,
 		},
 	}
 
+	if os.Getenv("STORJ_TEST_DISABLEQUIC") != "" {
+		config.Server.DisableQUIC = true
+	}
+
+	// enable the lazy filewalker
+	config.Pieces.EnableLazyFilewalker = true
+
 	if planet.config.Reconfigure.StorageNode != nil {
 		planet.config.Reconfigure.StorageNode(index, &config)
 	}
@@ -275,6 +284,21 @@ func (planet *Planet) newStorageNode(ctx context.Context, prefix string, index,
 		return nil, errs.New("error while trying to issue new api key: %v", err)
 	}
 
+	{
+		// set up the used space lazyfilewalker filewalker
+		cmd := internalcmd.NewUsedSpaceFilewalkerCmd()
+		cmd.Logger = log.Named("used-space-filewalker")
+		cmd.Ctx = ctx
+		peer.Storage2.LazyFileWalker.TestingSetUsedSpaceCmd(cmd)
+	}
+	{
+		// set up the GC lazyfilewalker filewalker
+		cmd := internalcmd.NewGCFilewalkerCmd()
+		cmd.Logger = log.Named("gc-filewalker")
+		cmd.Ctx = ctx
+		peer.Storage2.LazyFileWalker.TestingSetGCCmd(cmd)
+	}
+
 	return &StorageNode{
 		Name:   prefix,
 		Config: config,
@@ -27,6 +27,7 @@ import (
 	"storj.io/storj/private/revocation"
 	"storj.io/storj/private/server"
 	"storj.io/storj/private/testplanet"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/uplink"
 	"storj.io/uplink/private/metaclient"
 )
@@ -105,9 +106,15 @@ func TestDownloadWithSomeNodesOffline(t *testing.T) {
 	}
 
 	// confirm that we marked the correct number of storage nodes as offline
-	nodes, err := satellite.Overlay.Service.Reliable(ctx)
+	allNodes, err := satellite.Overlay.Service.GetParticipatingNodes(ctx)
 	require.NoError(t, err)
-	require.Len(t, nodes, len(planet.StorageNodes)-toKill)
+	online := make([]nodeselection.SelectedNode, 0, len(allNodes))
+	for _, node := range allNodes {
+		if node.Online {
+			online = append(online, node)
+		}
+	}
+	require.Len(t, online, len(planet.StorageNodes)-toKill)
 
 	// we should be able to download data without any of the original nodes
 	newData, err := ul.Download(ctx, satellite, "testbucket", "test/path")
@@ -13,7 +13,6 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"github.com/vivint/infectious"
 
 	"storj.io/common/memory"
 	"storj.io/common/pb"
@@ -41,7 +40,7 @@ func TestECClient(t *testing.T) {
 
 	k := storageNodes / 2
 	n := storageNodes
-	fc, err := infectious.NewFEC(k, n)
+	fc, err := eestream.NewFEC(k, n)
 	require.NoError(t, err)
 
 	es := eestream.NewRSScheme(fc, dataSize.Int()/n)
@@ -101,7 +101,14 @@ func newTestPeer(t *testing.T, ctx *testcontext.Context) *versioncontrol.Peer {
 		},
 		Binary: testVersions,
 	}
-	peer, err := versioncontrol.New(zaptest.NewLogger(t), serverConfig)
+
+	return newTestPeerWithConfig(t, ctx, serverConfig)
+}
+
+func newTestPeerWithConfig(t *testing.T, ctx *testcontext.Context, config *versioncontrol.Config) *versioncontrol.Peer {
+	t.Helper()
+
+	peer, err := versioncontrol.New(zaptest.NewLogger(t), config)
 	require.NoError(t, err)
 
 	ctx.Go(func() error {
@@ -98,11 +98,13 @@ func (service *Service) checkVersion(ctx context.Context) (_ version.SemVer, all
 		service.checked.Release()
 	}()
 
-	allowedVersions, err := service.client.All(ctx)
+	process, err := service.client.Process(ctx, service.service)
 	if err != nil {
+		service.log.Error("failed to get process version info", zap.Error(err))
 		return service.acceptedVersion, true
 	}
-	suggestedVersion, err := allowedVersions.Processes.Storagenode.Suggested.SemVer()
+
+	suggestedVersion, err := process.Suggested.SemVer()
 	if err != nil {
 		return service.acceptedVersion, true
 	}
@@ -121,28 +123,40 @@ func (service *Service) checkVersion(ctx context.Context) (_ version.SemVer, all
 		return suggestedVersion, true
 	}
 
-	minimumOld, err := service.client.OldMinimum(ctx, service.service)
+	minimum, err = process.Minimum.SemVer()
 	if err != nil {
-		// Log about the error, but dont crash the Service and allow further operation
-		service.log.Error("Failed to do periodic version check.", zap.Error(err))
 		return suggestedVersion, true
 	}
 
-	minimum, err = version.NewSemVer(minimumOld.String())
-	if err != nil {
-		service.log.Error("Failed to convert old sem version to sem version.")
-		return suggestedVersion, true
+	if minimum.IsZero() {
+		// if the minimum version is not set, we check if the old minimum version is set
+		// TODO: I'm not sure if we should remove this check and stop supporting the old format,
+		// but it seems like it's no longer needed, assuming there are no known community
+		// satellites (or SNOs personally) running an old version control server, which (I think)
+		// is very obviously 100% true currently.
+		minimumOld, err := service.client.OldMinimum(ctx, service.service)
+		if err != nil {
+			return suggestedVersion, true
+		}
+
+		minOld, err := version.NewSemVer(minimumOld.String())
+		if err != nil {
+			service.log.Error("failed to convert old sem version to new sem version", zap.Error(err))
+			return suggestedVersion, true
+		}
+
+		minimum = minOld
 	}
 
 	service.log.Debug("Allowed minimum version from control server.", zap.Stringer("Minimum Version", minimum.Version))
 
-	if isAcceptedVersion(service.Info.Version, minimumOld) {
+	if service.Info.Version.Compare(minimum) >= 0 {
 		service.log.Debug("Running on allowed version.", zap.Stringer("Version", service.Info.Version.Version))
 		return suggestedVersion, true
 	}
 	service.log.Warn("version not allowed/outdated",
 		zap.Stringer("current version", service.Info.Version.Version),
-		zap.Stringer("minimum allowed version", minimumOld),
+		zap.String("minimum allowed version", minimum.String()),
 	)
 	return suggestedVersion, false
 }
@@ -168,8 +182,3 @@ func (service *Service) SetAcceptedVersion(version version.SemVer) {
 func (service *Service) Checked() bool {
 	return service.checked.Released()
 }
-
-// isAcceptedVersion compares and checks if the passed version is greater/equal than the minimum required version.
-func isAcceptedVersion(test version.SemVer, target version.OldSemVer) bool {
-	return test.Major > uint64(target.Major) || (test.Major == uint64(target.Major) && (test.Minor > uint64(target.Minor) || (test.Minor == uint64(target.Minor) && test.Patch >= uint64(target.Patch))))
-}
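Note: the hand-rolled major/minor/patch comparison in isAcceptedVersion is replaced by a SemVer Compare call with the usual -1/0/+1 contract. A sketch of that equivalence with a minimal local type (the real SemVer lives in storj.io/private/version; this one is illustrative):

package main

import "fmt"

type semVer struct{ Major, Minor, Patch uint64 }

// compare returns -1, 0, or +1, so the new guard
// `service.Info.Version.Compare(minimum) >= 0` reads as
// "the running version is at least the minimum".
func (v semVer) compare(o semVer) int {
	cmp := func(a, b uint64) int {
		switch {
		case a < b:
			return -1
		case a > b:
			return 1
		}
		return 0
	}
	if c := cmp(v.Major, o.Major); c != 0 {
		return c
	}
	if c := cmp(v.Minor, o.Minor); c != 0 {
		return c
	}
	return cmp(v.Patch, o.Patch)
}

func main() {
	min := semVer{1, 89, 5}
	fmt.Println(semVer{1, 90, 2}.compare(min) >= 0) // true: allowed
	fmt.Println(semVer{1, 80, 0}.compare(min) >= 0) // false: outdated
}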
private/version/checker/service_test.go (new file, 111 lines)
@@ -0,0 +1,111 @@
+// Copyright (C) 2023 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package checker_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap/zaptest"
+
+	"storj.io/common/testcontext"
+	"storj.io/private/version"
+	"storj.io/storj/private/version/checker"
+	"storj.io/storj/versioncontrol"
+)
+
+func TestVersion(t *testing.T) {
+	ctx := testcontext.New(t)
+	defer ctx.Cleanup()
+
+	minimum := "v1.89.5"
+	suggested := "v1.90.2"
+
+	testVersions := newTestVersions(t)
+	testVersions.Storagenode.Minimum.Version = minimum
+	testVersions.Storagenode.Suggested.Version = suggested
+
+	serverConfig := &versioncontrol.Config{
+		Address: "127.0.0.1:0",
+		Versions: versioncontrol.OldVersionConfig{
+			Satellite:   "v0.0.1",
+			Storagenode: "v0.0.1",
+			Uplink:      "v0.0.1",
+			Gateway:     "v0.0.1",
+			Identity:    "v0.0.1",
+		},
+		Binary: testVersions,
+	}
+
+	peer := newTestPeerWithConfig(t, ctx, serverConfig)
+	defer ctx.Check(peer.Close)
+
+	clientConfig := checker.ClientConfig{
+		ServerAddress:  "http://" + peer.Addr(),
+		RequestTimeout: 0,
+	}
+	config := checker.Config{
+		ClientConfig: clientConfig,
+	}
+
+	t.Run("CheckVersion", func(t *testing.T) {
+		type args struct {
+			name              string
+			version           string
+			errorMsg          string
+			isAcceptedVersion bool
+		}
+
+		tests := []args{
+			{
+				name:              "runs outdated version",
+				version:           "1.80.0",
+				errorMsg:          "outdated software version (v1.80.0), please update",
+				isAcceptedVersion: false,
+			},
+			{
+				name:              "runs minimum version",
+				version:           minimum,
+				isAcceptedVersion: true,
+			},
+			{
+				name:              "runs suggested version",
+				version:           suggested,
+				isAcceptedVersion: true,
+			},
+			{
+				name:              "runs version newer than minimum",
+				version:           "v1.90.2",
+				isAcceptedVersion: true,
+			},
+		}
+
+		for _, test := range tests {
+			t.Run(test.name, func(t *testing.T) {
+				ver, err := version.NewSemVer(test.version)
+				require.NoError(t, err)
+
+				versionInfo := version.Info{
+					Version: ver,
+					Release: true,
+				}
+
+				service := checker.NewService(zaptest.NewLogger(t), config, versionInfo, "storagenode")
+				latest, err := service.CheckVersion(ctx)
+				if test.errorMsg != "" {
+					require.Error(t, err)
+					require.Contains(t, err.Error(), test.errorMsg)
+				} else {
+					require.NoError(t, err)
+				}
+
+				require.Equal(t, suggested, latest.String())
+
+				minVersion, isAllowed := service.IsAllowed(ctx)
+				require.Equal(t, isAllowed, test.isAcceptedVersion)
+				require.Equal(t, minimum, minVersion.String())
+			})
+		}
+	})
+}
@@ -6,16 +6,16 @@ package version
 import _ "unsafe" // needed for go:linkname
 
 //go:linkname buildTimestamp storj.io/private/version.buildTimestamp
-var buildTimestamp string
+var buildTimestamp string = "1702047568"
 
 //go:linkname buildCommitHash storj.io/private/version.buildCommitHash
-var buildCommitHash string
+var buildCommitHash string = "5767191bfc1a5eca25502780d90f8bbf52e7af40"
 
 //go:linkname buildVersion storj.io/private/version.buildVersion
-var buildVersion string
+var buildVersion string = "v1.94.1"
 
 //go:linkname buildRelease storj.io/private/version.buildRelease
-var buildRelease string
+var buildRelease string = "true"
 
 // ensure that linter understands that the variables are being used.
 func init() { use(buildTimestamp, buildCommitHash, buildVersion, buildRelease) }
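Note: this hunk bakes the release metadata directly into the linkname'd variables. The usual alternative, sketched here under the assumption that these are ordinary package-level string variables reachable by the linker, is to inject the same values at build time with the standard -ldflags -X mechanism (values copied from the diff above):

go build -ldflags "-X storj.io/private/version.buildTimestamp=1702047568 -X storj.io/private/version.buildCommitHash=5767191bfc1a5eca25502780d90f8bbf52e7af40 -X storj.io/private/version.buildVersion=v1.94.1 -X storj.io/private/version.buildRelease=true" ./...

Hardcoding, as done here, produces the same effect without requiring the build command to carry the flags.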
@@ -4,24 +4,32 @@
 package web
 
 import (
+	"context"
 	"encoding/json"
 	"net/http"
 
 	"go.uber.org/zap"
+
+	"storj.io/common/http/requestid"
 )
 
 // ServeJSONError writes a JSON error to the response output stream.
-func ServeJSONError(log *zap.Logger, w http.ResponseWriter, status int, err error) {
-	ServeCustomJSONError(log, w, status, err, err.Error())
+func ServeJSONError(ctx context.Context, log *zap.Logger, w http.ResponseWriter, status int, err error) {
+	ServeCustomJSONError(ctx, log, w, status, err, err.Error())
 }
 
 // ServeCustomJSONError writes a JSON error with a custom message to the response output stream.
-func ServeCustomJSONError(log *zap.Logger, w http.ResponseWriter, status int, err error, msg string) {
+func ServeCustomJSONError(ctx context.Context, log *zap.Logger, w http.ResponseWriter, status int, err error, msg string) {
 	fields := []zap.Field{
 		zap.Int("code", status),
 		zap.String("message", msg),
 		zap.Error(err),
 	}
 
+	if requestID := requestid.FromContext(ctx); requestID != "" {
+		fields = append(fields, zap.String("requestID", requestID))
+	}
+
 	switch status {
 	case http.StatusNoContent:
 		return
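Note: threading the context through lets error logs carry the request ID. The handlers above only rely on requestid.FromContext; how the ID gets into the context in the first place is middleware work. A self-contained sketch with a hypothetical middleware (the context key, header name, and generator are assumptions, not the storj.io/common/http/requestid internals):

package middleware

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"net/http"
)

type requestIDKey struct{}

// withRequestID stores an ID in the request context, reusing an inbound
// X-Request-Id header when present and generating one otherwise.
func withRequestID(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		id := r.Header.Get("X-Request-Id")
		if id == "" {
			buf := make([]byte, 8)
			_, _ = rand.Read(buf)
			id = hex.EncodeToString(buf)
		}
		next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), requestIDKey{}, id)))
	})
}

// fromContext mirrors what requestid.FromContext does for the handlers.
func fromContext(ctx context.Context) string {
	id, _ := ctx.Value(requestIDKey{}).(string)
	return id
}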
@@ -87,12 +87,12 @@ func (rl *RateLimiter) Limit(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		key, err := rl.keyFunc(r)
 		if err != nil {
-			ServeCustomJSONError(rl.log, w, http.StatusInternalServerError, err, internalServerErrMsg)
+			ServeCustomJSONError(r.Context(), rl.log, w, http.StatusInternalServerError, err, internalServerErrMsg)
 			return
 		}
 		limit := rl.getUserLimit(key)
 		if !limit.Allow() {
-			ServeJSONError(rl.log, w, http.StatusTooManyRequests, errs.New(rateLimitErrMsg))
+			ServeJSONError(r.Context(), rl.log, w, http.StatusTooManyRequests, errs.New(rateLimitErrMsg))
 			return
 		}
 		next.ServeHTTP(w, r)
@@ -5,6 +5,7 @@ package accounting
 
 import (
 	"context"
+	"fmt"
 	"time"
 
 	"storj.io/common/memory"
|
|||||||
|
|
||||||
// BucketUsage consist of total bucket usage for period.
|
// BucketUsage consist of total bucket usage for period.
|
||||||
type BucketUsage struct {
|
type BucketUsage struct {
|
||||||
ProjectID uuid.UUID
|
ProjectID uuid.UUID `json:"projectID"`
|
||||||
BucketName string
|
BucketName string `json:"bucketName"`
|
||||||
|
|
||||||
Storage float64
|
Storage float64 `json:"storage"`
|
||||||
Egress float64
|
Egress float64 `json:"egress"`
|
||||||
ObjectCount int64
|
ObjectCount int64 `json:"objectCount"`
|
||||||
SegmentCount int64
|
SegmentCount int64 `json:"segmentCount"`
|
||||||
|
|
||||||
Since time.Time
|
Since time.Time `json:"since"`
|
||||||
Before time.Time
|
Before time.Time `json:"before"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// BucketUsageCursor holds info for bucket usage
|
// BucketUsageCursor holds info for bucket usage
|
||||||
@@ -133,15 +134,15 @@ type BucketUsageCursor struct {
 
 // BucketUsagePage represents bucket usage page result.
 type BucketUsagePage struct {
-	BucketUsages []BucketUsage
+	BucketUsages []BucketUsage `json:"bucketUsages"`
 
-	Search string
-	Limit  uint
-	Offset uint64
+	Search string `json:"search"`
+	Limit  uint   `json:"limit"`
+	Offset uint64 `json:"offset"`
 
-	PageCount   uint
-	CurrentPage uint
-	TotalCount  uint64
+	PageCount   uint   `json:"pageCount"`
+	CurrentPage uint   `json:"currentPage"`
+	TotalCount  uint64 `json:"totalCount"`
 }
 
 // BucketUsageRollup is total bucket usage info
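Note: the new tags switch the wire format from Go's default exported field names to camelCase keys. A quick standalone check of what the tags emit, using a trimmed-down copy of the struct (field values are made up):

package main

import (
	"encoding/json"
	"fmt"
)

// bucketUsage is a reduced copy of BucketUsage, just to show the output.
type bucketUsage struct {
	ProjectID  string  `json:"projectID"`
	BucketName string  `json:"bucketName"`
	Storage    float64 `json:"storage"`
}

func main() {
	out, _ := json.Marshal(bucketUsage{ProjectID: "id-1", BucketName: "backups", Storage: 1.5})
	fmt.Println(string(out)) // {"projectID":"id-1","bucketName":"backups","storage":1.5}
}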
@@ -164,6 +165,36 @@ type BucketUsageRollup struct {
 	Before time.Time `json:"before"`
 }
 
+// ProjectReportItem is total bucket usage info with project details for certain period.
+type ProjectReportItem struct {
+	ProjectID   uuid.UUID
+	ProjectName string
+
+	BucketName   string
+	Storage      float64
+	Egress       float64
+	SegmentCount float64
+	ObjectCount  float64
+
+	Since  time.Time `json:"since"`
+	Before time.Time `json:"before"`
+}
+
+// ToStringSlice converts report item values to a slice of strings.
+func (b *ProjectReportItem) ToStringSlice() []string {
+	return []string{
+		b.ProjectName,
+		b.ProjectID.String(),
+		b.BucketName,
+		fmt.Sprintf("%f", b.Storage),
+		fmt.Sprintf("%f", b.Egress),
+		fmt.Sprintf("%f", b.ObjectCount),
+		fmt.Sprintf("%f", b.SegmentCount),
+		b.Since.String(),
+		b.Before.String(),
+	}
+}
+
 // Usage contains project's usage split on segments and storage.
 type Usage struct {
 	Storage int64
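Note: ToStringSlice returning a flat []string strongly suggests CSV output. A sketch of how it would pair with encoding/csv; the header strings and function name are illustrative, the column order follows the method above (name, ID, bucket, storage, egress, objects, segments, since, before):

package report

import (
	"encoding/csv"
	"os"

	"storj.io/storj/satellite/accounting"
)

// emitReport writes one CSV row per report item to stdout.
func emitReport(items []accounting.ProjectReportItem) error {
	w := csv.NewWriter(os.Stdout)
	if err := w.Write([]string{"projectName", "projectID", "bucketName", "storage", "egress", "objectCount", "segmentCount", "since", "before"}); err != nil {
		return err
	}
	for i := range items {
		if err := w.Write(items[i].ToStringSlice()); err != nil {
			return err
		}
	}
	w.Flush()
	return w.Error()
}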
@@ -219,6 +250,8 @@ type ProjectAccounting interface {
 	GetProjectSettledBandwidthTotal(ctx context.Context, projectID uuid.UUID, from time.Time) (_ int64, err error)
 	// GetProjectBandwidth returns project allocated bandwidth for the specified year, month and day.
 	GetProjectBandwidth(ctx context.Context, projectID uuid.UUID, year int, month time.Month, day int, asOfSystemInterval time.Duration) (int64, error)
+	// GetProjectSettledBandwidth returns the used settled bandwidth for the specified year and month.
+	GetProjectSettledBandwidth(ctx context.Context, projectID uuid.UUID, year int, month time.Month, asOfSystemInterval time.Duration) (int64, error)
 	// GetProjectDailyBandwidth returns bandwidth (allocated and settled) for the specified day.
 	GetProjectDailyBandwidth(ctx context.Context, projectID uuid.UUID, year int, month time.Month, day int) (int64, int64, int64, error)
 	// DeleteProjectBandwidthBefore deletes project bandwidth rollups before the given time
@@ -26,6 +26,7 @@ type Config struct {
 	StorageBackend     string        `help:"what to use for storing real-time accounting data"`
 	BandwidthCacheTTL  time.Duration `default:"5m" help:"bandwidth cache key time to live"`
 	AsOfSystemInterval time.Duration `default:"-10s" help:"as of system interval"`
+	BatchSize          int           `default:"5000" help:"how much projects usage should be requested from redis cache at once"`
 }
 
 // OpenCache creates a new accounting.Cache instance using the type specified backend in
@@ -49,7 +50,7 @@ func OpenCache(ctx context.Context, log *zap.Logger, config Config) (accounting.
 	backendType = parts[0]
 	switch backendType {
 	case "redis":
-		return openRedisLiveAccounting(ctx, config.StorageBackend)
+		return openRedisLiveAccounting(ctx, config.StorageBackend, config.BatchSize)
 	default:
 		return nil, Error.New("unrecognized live accounting backend specifier %q. Currently only redis is supported", backendType)
 	}
@@ -6,6 +6,7 @@ package live_test
 import (
 	"context"
 	"math/rand"
+	"strconv"
 	"testing"
 	"time"
 
@@ -136,19 +137,28 @@ func TestGetAllProjectTotals(t *testing.T) {
 			require.NoError(t, err)
 		}
 
-		usage, err := cache.GetAllProjectTotals(ctx)
-		require.NoError(t, err)
-		require.Len(t, usage, len(projectIDs))
+		for _, batchSize := range []int{1, 2, 3, 10, 13, 10000} {
+			t.Run("batch-size-"+strconv.Itoa(batchSize), func(t *testing.T) {
+				config.BatchSize = batchSize
+				testCache, err := live.OpenCache(ctx, zaptest.NewLogger(t).Named("live-accounting"), config)
+				require.NoError(t, err)
+				defer ctx.Check(testCache.Close)
 
-		// make sure each project ID and total was received
-		for _, projID := range projectIDs {
-			totalStorage, err := cache.GetProjectStorageUsage(ctx, projID)
-			require.NoError(t, err)
-			assert.Equal(t, totalStorage, usage[projID].Storage)
+				usage, err := testCache.GetAllProjectTotals(ctx)
+				require.NoError(t, err)
+				require.Len(t, usage, len(projectIDs))
 
-			totalSegments, err := cache.GetProjectSegmentUsage(ctx, projID)
-			require.NoError(t, err)
-			assert.Equal(t, totalSegments, usage[projID].Segments)
+				// make sure each project ID and total was received
+				for _, projID := range projectIDs {
+					totalStorage, err := testCache.GetProjectStorageUsage(ctx, projID)
+					require.NoError(t, err)
+					assert.Equal(t, totalStorage, usage[projID].Storage)
+
+					totalSegments, err := testCache.GetProjectSegmentUsage(ctx, projID)
+					require.NoError(t, err)
+					assert.Equal(t, totalSegments, usage[projID].Segments)
+				}
+			})
 		}
 	})
 }
@@ -11,6 +11,7 @@ import (
 	"time"
 
 	"github.com/redis/go-redis/v9"
+	"github.com/zeebo/errs/v2"
 
 	"storj.io/common/uuid"
 	"storj.io/storj/satellite/accounting"
|
|||||||
|
|
||||||
type redisLiveAccounting struct {
|
type redisLiveAccounting struct {
|
||||||
client *redis.Client
|
client *redis.Client
|
||||||
|
|
||||||
|
batchSize int
|
||||||
}
|
}
|
||||||
|
|
||||||
// openRedisLiveAccounting returns a redisLiveAccounting cache instance.
|
// openRedisLiveAccounting returns a redisLiveAccounting cache instance.
|
||||||
@ -29,14 +32,15 @@ type redisLiveAccounting struct {
|
|||||||
// it fails then it returns an instance and accounting.ErrSystemOrNetError
|
// it fails then it returns an instance and accounting.ErrSystemOrNetError
|
||||||
// because it means that Redis may not be operative at this precise moment but
|
// because it means that Redis may not be operative at this precise moment but
|
||||||
// it may be in future method calls as it handles automatically reconnects.
|
// it may be in future method calls as it handles automatically reconnects.
|
||||||
func openRedisLiveAccounting(ctx context.Context, address string) (*redisLiveAccounting, error) {
|
func openRedisLiveAccounting(ctx context.Context, address string, batchSize int) (*redisLiveAccounting, error) {
|
||||||
opts, err := redis.ParseURL(address)
|
opts, err := redis.ParseURL(address)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, accounting.ErrInvalidArgument.Wrap(err)
|
return nil, accounting.ErrInvalidArgument.Wrap(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cache := &redisLiveAccounting{
|
cache := &redisLiveAccounting{
|
||||||
client: redis.NewClient(opts),
|
client: redis.NewClient(opts),
|
||||||
|
batchSize: batchSize,
|
||||||
}
|
}
|
||||||
|
|
||||||
// ping here to verify we are able to connect to Redis with the initialized client.
|
// ping here to verify we are able to connect to Redis with the initialized client.
|
||||||
@ -52,7 +56,7 @@ func openRedisLiveAccounting(ctx context.Context, address string) (*redisLiveAcc
|
|||||||
func (cache *redisLiveAccounting) GetProjectStorageUsage(ctx context.Context, projectID uuid.UUID) (totalUsed int64, err error) {
|
func (cache *redisLiveAccounting) GetProjectStorageUsage(ctx context.Context, projectID uuid.UUID) (totalUsed int64, err error) {
|
||||||
defer mon.Task()(&ctx, projectID)(&err)
|
defer mon.Task()(&ctx, projectID)(&err)
|
||||||
|
|
||||||
return cache.getInt64(ctx, string(projectID[:]))
|
return cache.getInt64(ctx, createStorageProjectIDKey(projectID))
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetProjectBandwidthUsage returns the current bandwidth usage
|
// GetProjectBandwidthUsage returns the current bandwidth usage
|
||||||
@@ -175,7 +179,7 @@ func (cache *redisLiveAccounting) AddProjectSegmentUsageUpToLimit(ctx context.Co
 func (cache *redisLiveAccounting) AddProjectStorageUsage(ctx context.Context, projectID uuid.UUID, spaceUsed int64) (err error) {
 	defer mon.Task()(&ctx, projectID, spaceUsed)(&err)
 
-	_, err = cache.client.IncrBy(ctx, string(projectID[:]), spaceUsed).Result()
+	_, err = cache.client.IncrBy(ctx, createStorageProjectIDKey(projectID), spaceUsed).Result()
 	if err != nil {
 		return accounting.ErrSystemOrNetError.New("Redis incrby failed: %w", err)
 	}
@@ -216,6 +220,7 @@ func (cache *redisLiveAccounting) GetAllProjectTotals(ctx context.Context) (_ ma
 	defer mon.Task()(&ctx)(&err)
 
 	projects := make(map[uuid.UUID]accounting.Usage)
+
 	it := cache.client.Scan(ctx, 0, "*", 0).Iterator()
 	for it.Next(ctx) {
 		key := it.Val()
@@ -231,58 +236,112 @@ func (cache *redisLiveAccounting) GetAllProjectTotals(ctx context.Context) (_ ma
 				return nil, accounting.ErrUnexpectedValue.New("cannot parse the key as UUID; key=%q", key)
 			}
 
-			usage := accounting.Usage{}
-			if seenUsage, seen := projects[projectID]; seen {
-				if seenUsage.Segments != 0 {
-					continue
-				}
-
-				usage = seenUsage
-			}
-
-			segmentUsage, err := cache.GetProjectSegmentUsage(ctx, projectID)
-			if err != nil {
-				if accounting.ErrKeyNotFound.Has(err) {
-					continue
-				}
-
-				return nil, err
-			}
-
-			usage.Segments = segmentUsage
-			projects[projectID] = usage
+			projects[projectID] = accounting.Usage{}
 		} else {
 			projectID, err := uuid.FromBytes([]byte(key))
 			if err != nil {
 				return nil, accounting.ErrUnexpectedValue.New("cannot parse the key as UUID; key=%q", key)
 			}
 
-			usage := accounting.Usage{}
-			if seenUsage, seen := projects[projectID]; seen {
-				if seenUsage.Storage != 0 {
-					continue
-				}
-
-				usage = seenUsage
-			}
-
-			storageUsage, err := cache.getInt64(ctx, key)
-			if err != nil {
-				if accounting.ErrKeyNotFound.Has(err) {
-					continue
-				}
-
-				return nil, err
-			}
-
-			usage.Storage = storageUsage
-			projects[projectID] = usage
+			projects[projectID] = accounting.Usage{}
 		}
 	}
 
-	return projects, nil
+	return cache.fillUsage(ctx, projects)
+}
+
+func (cache *redisLiveAccounting) fillUsage(ctx context.Context, projects map[uuid.UUID]accounting.Usage) (_ map[uuid.UUID]accounting.Usage, err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	if len(projects) == 0 {
+		return nil, nil
+	}
+
+	projectIDs := make([]uuid.UUID, 0, cache.batchSize)
+	segmentKeys := make([]string, 0, cache.batchSize)
+	storageKeys := make([]string, 0, cache.batchSize)
+
+	fetchProjectsUsage := func() error {
+		if len(projectIDs) == 0 {
+			return nil
+		}
+
+		segmentResult, err := cache.client.MGet(ctx, segmentKeys...).Result()
+		if err != nil {
+			return accounting.ErrGetProjectLimitCache.Wrap(err)
+		}
+
+		storageResult, err := cache.client.MGet(ctx, storageKeys...).Result()
+		if err != nil {
+			return accounting.ErrGetProjectLimitCache.Wrap(err)
+		}
+
+		// Note, because we are using a cache, it might be empty and not contain the
+		// information we are looking for -- or they might be still empty for some reason.
+		for i, projectID := range projectIDs {
+			segmentsUsage, err := parseAnyAsInt64(segmentResult[i])
+			if err != nil {
+				return errs.Wrap(err)
+			}
+
+			storageUsage, err := parseAnyAsInt64(storageResult[i])
+			if err != nil {
+				return errs.Wrap(err)
+			}
+
+			projects[projectID] = accounting.Usage{
+				Segments: segmentsUsage,
+				Storage:  storageUsage,
+			}
+		}
+
+		return nil
+	}
+
+	for projectID := range projects {
+		projectIDs = append(projectIDs, projectID)
+		segmentKeys = append(segmentKeys, createSegmentProjectIDKey(projectID))
+		storageKeys = append(storageKeys, createStorageProjectIDKey(projectID))
+
+		if len(projectIDs) >= cache.batchSize {
+			err := fetchProjectsUsage()
+			if err != nil {
+				return nil, err
+			}
+
+			projectIDs = projectIDs[:0]
+			segmentKeys = segmentKeys[:0]
+			storageKeys = storageKeys[:0]
+		}
+	}
+
+	err = fetchProjectsUsage()
+	if err != nil {
+		return nil, err
+	}
+
+	return projects, nil
+}
+
+func parseAnyAsInt64(v any) (int64, error) {
+	if v == nil {
+		return 0, nil
+	}
+
+	s, ok := v.(string)
+	if !ok {
+		return 0, accounting.ErrUnexpectedValue.New("cannot parse the value as int64; val=%q", v)
+	}
+
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return 0, accounting.ErrUnexpectedValue.New("cannot parse the value as int64; val=%q", v)
+	}
+
+	return i, nil
+}
 
 // Close the DB connection.
 func (cache *redisLiveAccounting) Close() error {
 	err := cache.client.Close()
@@ -325,3 +384,8 @@ func createBandwidthProjectIDKey(projectID uuid.UUID, now time.Time) string {
 func createSegmentProjectIDKey(projectID uuid.UUID) string {
 	return string(projectID[:]) + ":segment"
 }
+
+// createStorageProjectIDKey creates the storage project key.
+func createStorageProjectIDKey(projectID uuid.UUID) string {
+	return string(projectID[:])
+}
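Note: fillUsage replaces a round-trip per project with one MGet per batch of keys, which is the whole point of the new BatchSize knob. The refactor leans on go-redis MGet semantics: present keys come back as strings, missing keys as untyped nil, the two cases parseAnyAsInt64 distinguishes. A small standalone check (assumes a Redis server on localhost):

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})
	if err := rdb.Set(ctx, "a", 42, 0).Err(); err != nil {
		panic(err)
	}

	vals, err := rdb.MGet(ctx, "a", "missing").Result()
	if err != nil {
		panic(err)
	}
	// present key decodes as the string "42", the absent key as nil.
	fmt.Printf("%#v\n", vals)
}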
@@ -40,7 +40,8 @@ type ProjectLimitConfig struct {
 // ProjectLimitCache stores the values for both storage usage limit and bandwidth limit for
 // each project ID if they differ from the default limits.
 type ProjectLimitCache struct {
 	projectLimitDB ProjectLimitDB
+
 	defaultMaxUsage     memory.Size
 	defaultMaxBandwidth memory.Size
 	defaultMaxSegments  int64
@@ -121,10 +122,6 @@ func (c *ProjectLimitCache) getProjectLimits(ctx context.Context, projectID uuid
 		defaultSegments := c.defaultMaxSegments
 		projectLimits.Segments = &defaultSegments
 	}
-	if projectLimits.Segments == nil {
-		defaultSegments := c.defaultMaxSegments
-		projectLimits.Segments = &defaultSegments
-	}
 
 	return projectLimits, nil
 }
Some files were not shown because too many files have changed in this diff.