Compare commits

Comparing main ... gui-prebui: 396 commits.
@@ -35,6 +35,7 @@ satellite-web:
     RUN ./build.sh
     COPY +wasm/wasm static/wasm
     SAVE ARTIFACT dist AS LOCAL web/satellite/dist
+    SAVE ARTIFACT dist_vuetify_poc AS LOCAL web/satellite/dist_vuetify_poc
     SAVE ARTIFACT static AS LOCAL web/satellite/static

 satellite-admin:
@@ -119,6 +120,7 @@ build-tagged-image:
     FROM img.dev.storj.io/storjup/base:20230208-1
     COPY +multinode-web/dist /var/lib/storj/storj/web/multinode/dist
     COPY +satellite-web/dist /var/lib/storj/storj/web/satellite/dist
+    COPY +satellite-web/dist_vuetify_poc /var/lib/storj/storj/web/satellite/dist_vuetify_poc
     COPY +satellite-admin/build /app/satellite-admin/
     COPY +satellite-web/static /var/lib/storj/storj/web/satellite/static
     COPY +storagenode-web/dist /var/lib/storj/storj/web/storagenode/dist
Jenkinsfile (vendored, 4 changes)
@@ -32,7 +32,7 @@ node('node') {
       sh 'docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj'
       // fetch the remote main branch
       sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main'
-      sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/testversions/test-sim-versions.sh -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres storjlabs/golang:1.20.3'
+      sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/testversions/test-sim-versions.sh -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres storjlabs/golang:1.20.6'
     }
     catch(err){
       throw err
@@ -69,7 +69,7 @@ node('node') {
       sh 'docker exec postgres-$BUILD_NUMBER createdb -U postgres teststorj'
       // fetch the remote main branch
       sh 'git fetch --no-tags --progress -- https://github.com/storj/storj.git +refs/heads/main:refs/remotes/origin/main'
-      sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/rollingupgrade/test-sim-rolling-upgrade.sh -e BRANCH_NAME -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS -e STORJ_MIGRATION_DB --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres storjlabs/golang:1.20.3'
+      sh 'docker run -u $(id -u):$(id -g) --rm -i -v $PWD:$PWD -w $PWD --entrypoint $PWD/scripts/tests/rollingupgrade/test-sim-rolling-upgrade.sh -e BRANCH_NAME -e STORJ_SIM_POSTGRES -e STORJ_SIM_REDIS -e STORJ_MIGRATION_DB --link redis-$BUILD_NUMBER:redis --link postgres-$BUILD_NUMBER:postgres storjlabs/golang:1.20.6'
     }
     catch(err){
       throw err
Makefile (20 changes)
@@ -1,8 +1,8 @@
-GO_VERSION ?= 1.20.3
+GO_VERSION ?= 1.20.6
 GOOS ?= linux
 GOARCH ?= amd64
 GOPATH ?= $(shell go env GOPATH)
-NODE_VERSION ?= 16.11.1
+NODE_VERSION ?= 18.17.0
 COMPOSE_PROJECT_NAME := ${TAG}-$(shell git rev-parse --abbrev-ref HEAD)
 BRANCH_NAME ?= $(shell git rev-parse --abbrev-ref HEAD | sed "s!/!-!g")
 GIT_TAG := $(shell git rev-parse --short HEAD)
@@ -73,6 +73,8 @@ build-multinode-npm:
 	cd web/multinode && npm ci

 build-satellite-admin-npm:
 	cd satellite/admin/ui && npm ci
+	# Temporary until the new back-office replaces the current admin API & UI
+	cd satellite/admin/back-office/ui && npm ci

 ##@ Simulator

@@ -126,7 +128,7 @@ lint:
 		-v ${GOPATH}/pkg:/go/pkg \
 		-v ${PWD}:/storj \
 		-w /storj \
-		storjlabs/ci-slim \
+		storjlabs/ci:slim \
 		make .lint LINT_TARGET="$(LINT_TARGET)"

 .PHONY: .lint/testsuite/ui
@@ -286,6 +288,14 @@ satellite-admin-ui:
 		-u $(shell id -u):$(shell id -g) \
 		node:${NODE_VERSION} \
 		/bin/bash -c "npm ci && npm run build"
+	# Temporary until the new back-office replaces the current admin API & UI
+	docker run --rm -i \
+		--mount type=bind,src="${PWD}",dst=/go/src/storj.io/storj \
+		-w /go/src/storj.io/storj/satellite/admin/back-office/ui \
+		-e HOME=/tmp \
+		-u $(shell id -u):$(shell id -g) \
+		node:${NODE_VERSION} \
+		/bin/bash -c "npm ci && npm run build"

 .PHONY: satellite-wasm
 satellite-wasm:
@@ -464,7 +474,9 @@ binaries-upload: ## Upload binaries to Google Storage (jenkins)
 		; zip -r "$${zipname}.zip" "$${filename}" \
 		; fi \
 	; done
-	cd "release/${TAG}"; gsutil -m cp -r *.zip "gs://storj-v3-alpha-builds/${TAG}/"
+	cd "release/${TAG}" \
+		&& sha256sum *.zip > sha256sums \
+		&& gsutil -m cp -r *.zip sha256sums "gs://storj-v3-alpha-builds/${TAG}/"

 .PHONY: draft-release
 draft-release:
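With the binaries-upload change above, every release upload now ships a sha256sums manifest next to the zips. A sketch of verifying downloaded artifacts against it (assumes gsutil access to the storj-v3-alpha-builds bucket and a concrete TAG; not part of this diff):

$ gsutil -m cp "gs://storj-v3-alpha-builds/${TAG}/*.zip" .
$ gsutil cp "gs://storj-v3-alpha-builds/${TAG}/sha256sums" .
$ sha256sum -c sha256sums    # checks every zip named in the manifest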
@@ -1,34 +1,54 @@
 ARG DOCKER_ARCH
 # Satellite UI static asset generation
-FROM node:16.11.1 as ui
+FROM node:18.17.0 as ui
 WORKDIR /app
 COPY web/satellite/ /app
 # Need to clean up (or ignore) local folders like node_modules, etc...
 RUN npm install
 RUN npm run build
+RUN npm run build-vuetify

 # Fetch ca-certificates file for arch independent builds below
 FROM debian:buster-slim as ca-cert
 RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates
 RUN update-ca-certificates

+# Install storj-up helper (for local/dev runs)
+FROM --platform=$TARGETPLATFORM golang:1.19 AS storjup
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    --mount=type=cache,target=/go/pkg/mod \
+    go install storj.io/storj-up@latest
+
+# Install dlv (for local/dev runs)
+FROM --platform=$TARGETPLATFORM golang:1.19 AS dlv
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    --mount=type=cache,target=/go/pkg/mod \
+    go install github.com/go-delve/delve/cmd/dlv@latest
+
 FROM ${DOCKER_ARCH:-amd64}/debian:buster-slim
 ARG TAG
 ARG GOARCH
 ENV GOARCH ${GOARCH}
 ENV CONF_PATH=/root/.local/share/storj/satellite \
     STORJ_CONSOLE_STATIC_DIR=/app \
     STORJ_MAIL_TEMPLATE_PATH=/app/static/emails \
     STORJ_CONSOLE_ADDRESS=0.0.0.0:10100
+ENV PATH=$PATH:/app
 EXPOSE 7777
 EXPOSE 10100
 WORKDIR /app
 COPY --from=ui /app/static /app/static
 COPY --from=ui /app/dist /app/dist
+COPY --from=ui /app/dist_vuetify_poc /app/dist_vuetify_poc
 COPY --from=ca-cert /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
 COPY release/${TAG}/wasm/access.wasm /app/static/wasm/
 COPY release/${TAG}/wasm/wasm_exec.js /app/static/wasm/
 COPY release/${TAG}/wasm/access.wasm.br /app/static/wasm/
 COPY release/${TAG}/wasm/wasm_exec.js.br /app/static/wasm/
 COPY release/${TAG}/satellite_linux_${GOARCH:-amd64} /app/satellite
+COPY --from=storjup /go/bin/storj-up /usr/local/bin/storj-up
+COPY --from=dlv /go/bin/dlv /usr/local/bin/dlv
+# test identities for quick-start
+COPY --from=img.dev.storj.io/storjup/base:20230607-1 /var/lib/storj/identities /var/lib/storj/identities
 COPY cmd/satellite/entrypoint /entrypoint
 ENTRYPOINT ["/entrypoint"]
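The image above now bundles storj-up and dlv next to the satellite binary, and the entrypoint below starts dlv headless on port 2345 when GO_DLV is set (that branch only runs when STORJUP_ROLE is also set). A rough debugging sketch; the image name and port publishing are illustrative, not from this diff:

$ docker run -e STORJUP_ROLE=satellite-api -e GO_DLV=true -p 2345:2345 <satellite-image> satellite run api
$ dlv connect localhost:2345    # attach the Delve client from the host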
@@ -1,6 +1,7 @@
 #!/bin/bash
+set -euo pipefail

 ## production helpers
 SETUP_PARAMS=""

 if [ -n "${IDENTITY_ADDR:-}" ]; then
@@ -21,6 +22,10 @@ if [ "${SATELLITE_API:-}" = "true" ]; then
 	exec ./satellite run api $RUN_PARAMS "$@"
 fi

+if [ "${SATELLITE_UI:-}" = "true" ]; then
+	exec ./satellite run ui $RUN_PARAMS "$@"
+fi
+
 if [ "${SATELLITE_GC:-}" = "true" ]; then
 	exec ./satellite run garbage-collection $RUN_PARAMS "$@"
 fi
@@ -37,4 +42,63 @@ if [ "${SATELLITE_AUDITOR:-}" = "true" ]; then
 	exec ./satellite run auditor $RUN_PARAMS "$@"
 fi

-exec ./satellite run $RUN_PARAMS "$@"
+## storj-up helpers
+if [ "${STORJUP_ROLE:-""}" ]; then
+
+	if [ "${STORJ_IDENTITY_DIR:-""}" ]; then
+		# generate identity if missing
+		if [ ! -f "$STORJ_IDENTITY_DIR/identity.key" ]; then
+			if [ "$STORJ_USE_PREDEFINED_IDENTITY" ]; then
+				# use predictable, pre-generated identity
+				mkdir -p $(dirname $STORJ_IDENTITY_DIR)
+				cp -r /var/lib/storj/identities/$STORJ_USE_PREDEFINED_IDENTITY $STORJ_IDENTITY_DIR
+			else
+				identity --identity-dir $STORJ_IDENTITY_DIR --difficulty 8 create .
+			fi
+		fi
+	fi
+
+	if [ "${STORJ_WAIT_FOR_DB:-""}" ]; then
+		storj-up util wait-for-port cockroach:26257
+		storj-up util wait-for-port redis:6379
+	fi
+
+	if [ "${STORJUP_ROLE:-""}" == "satellite-api" ]; then
+		mkdir -p /var/lib/storj/.local
+
+		# only migrate the first time
+		if [ ! -f "/var/lib/storj/.local/migrated" ]; then
+			satellite run migration --identity-dir $STORJ_IDENTITY_DIR
+			touch /var/lib/storj/.local/migrated
+		fi
+	fi
+
+	# the default config generated without arguments is misleading
+	rm /root/.local/share/storj/satellite/config.yaml
+
+	mkdir -p /var/lib/storj/.local/share/storj/satellite || true
+
+	if [ "${GO_DLV:-""}" ]; then
+		echo "Starting with go dlv"
+
+		# an absolute file path is required
+		CMD=$(which $1)
+		shift
+		/usr/local/bin/dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec --check-go-version=false -- $CMD "$@"
+		exit $?
+	fi
+fi
+
+# For backward-compatibility reasons, we use the argument as a command only if it's an executable (and use it as satellite flags otherwise).
+set +eo nounset
+which "$1" > /dev/null
+VALID_EXECUTABLE=$?
+set -eo nounset
+
+if [ $VALID_EXECUTABLE -eq 0 ]; then
+	# this is a full command (what storj-up uses)
+	exec "$@"
+else
+	# legacy, run-only parameters
+	exec ./satellite run $RUN_PARAMS "$@"
+fi
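The tail of the script dispatches on the first argument: if it resolves to an executable on PATH, the entire argument list is exec'd verbatim (the storj-up style); otherwise everything is passed as flags to ./satellite run. Two illustrative invocations (the image name is a placeholder):

$ docker run <satellite-image> satellite run api         # "satellite" is on PATH, so the command is exec'd as-is
$ docker run <satellite-image> --config-dir /tmp/cfg     # not an executable, becomes ./satellite run --config-dir /tmp/cfg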
@@ -27,7 +27,7 @@ import (
 )

 // generateGracefulExitCSV creates a report with graceful exit data for exiting or exited nodes in a given period.
-func generateGracefulExitCSV(ctx context.Context, completed bool, start time.Time, end time.Time, output io.Writer) error {
+func generateGracefulExitCSV(ctx context.Context, timeBased bool, completed bool, start time.Time, end time.Time, output io.Writer) error {
 	db, err := satellitedb.Open(ctx, zap.L().Named("db"), reportsGracefulExitCfg.Database, satellitedb.Options{ApplicationName: "satellite-gracefulexit"})
 	if err != nil {
 		return errs.New("error connecting to master database on satellite: %+v", err)
@@ -67,12 +67,15 @@ func generateGracefulExitCSV(ctx context.Context, completed bool, start time.Tim
 		if err != nil {
 			return err
 		}
-		exitProgress, err := db.GracefulExit().GetProgress(ctx, id)
-		if gracefulexit.ErrNodeNotFound.Has(err) {
-			exitProgress = &gracefulexit.Progress{}
-		} else if err != nil {
-			return err
+		exitProgress := &gracefulexit.Progress{}
+		if !timeBased {
+			exitProgress, err = db.GracefulExit().GetProgress(ctx, id)
+			if gracefulexit.ErrNodeNotFound.Has(err) {
+				exitProgress = &gracefulexit.Progress{}
+			} else if err != nil {
+				return err
+			}
 		}

 		exitStatus := node.ExitStatus
 		exitFinished := ""
@@ -40,7 +40,7 @@ import (
 	"storj.io/storj/satellite/accounting/live"
 	"storj.io/storj/satellite/compensation"
 	"storj.io/storj/satellite/metabase"
-	"storj.io/storj/satellite/overlay"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/payments/stripe"
 	"storj.io/storj/satellite/satellitedb"
 )
@@ -100,6 +100,11 @@ var (
 		Short: "Run the satellite API",
 		RunE:  cmdAPIRun,
 	}
+	runUICmd = &cobra.Command{
+		Use:   "ui",
+		Short: "Run the satellite UI",
+		RunE:  cmdUIRun,
+	}
 	runRepairerCmd = &cobra.Command{
 		Use:   "repair",
 		Short: "Run the repair service",
@@ -255,12 +260,19 @@ var (
 		Long:  "Finalizes all draft stripe invoices known to satellite's stripe account.",
 		RunE:  cmdFinalizeCustomerInvoices,
 	}
-	payCustomerInvoicesCmd = &cobra.Command{
+	payInvoicesWithTokenCmd = &cobra.Command{
+		Use:   "pay-customer-invoices",
+		Short: "pay open finalized invoices for customer",
+		Long:  "attempts payment on any open finalized invoices for a specific user.",
+		Args:  cobra.ExactArgs(1),
+		RunE:  cmdPayCustomerInvoices,
+	}
+	payAllInvoicesCmd = &cobra.Command{
 		Use:   "pay-invoices",
 		Short: "pay finalized invoices",
 		Long:  "attempts payment on all open finalized invoices according to subscriptions settings.",
 		Args:  cobra.ExactArgs(1),
-		RunE:  cmdPayCustomerInvoices,
+		RunE:  cmdPayAllInvoices,
 	}
 	stripeCustomerCmd = &cobra.Command{
 		Use:   "ensure-stripe-customer",
@@ -342,6 +354,7 @@ var (
 		Database  string `help:"satellite database connection string" releaseDefault:"postgres://" devDefault:"postgres://"`
 		Output    string `help:"destination of report output" default:""`
 		Completed bool   `help:"whether to output (initiated and completed) or (initiated and not completed)" default:"false"`
+		TimeBased bool   `help:"whether the satellite is using time-based graceful exit (and thus, whether to include piece transfer progress in output)" default:"false"`
 	}
 	reportsVerifyGracefulExitReceiptCfg struct {
 	}
@@ -366,6 +379,7 @@ func init() {
 	rootCmd.AddCommand(runCmd)
 	runCmd.AddCommand(runMigrationCmd)
 	runCmd.AddCommand(runAPICmd)
+	runCmd.AddCommand(runUICmd)
 	runCmd.AddCommand(runAdminCmd)
 	runCmd.AddCommand(runRepairerCmd)
 	runCmd.AddCommand(runAuditorCmd)
@@ -398,12 +412,14 @@ func init() {
 	billingCmd.AddCommand(createCustomerInvoicesCmd)
 	billingCmd.AddCommand(generateCustomerInvoicesCmd)
 	billingCmd.AddCommand(finalizeCustomerInvoicesCmd)
-	billingCmd.AddCommand(payCustomerInvoicesCmd)
+	billingCmd.AddCommand(payInvoicesWithTokenCmd)
+	billingCmd.AddCommand(payAllInvoicesCmd)
 	billingCmd.AddCommand(stripeCustomerCmd)
 	consistencyCmd.AddCommand(consistencyGECleanupCmd)
 	process.Bind(runCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runMigrationCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runAPICmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(runUICmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runAdminCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runRepairerCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runAuditorCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
@@ -432,7 +448,8 @@ func init() {
 	process.Bind(createCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(generateCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(finalizeCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
-	process.Bind(payCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(payInvoicesWithTokenCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(payAllInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(stripeCustomerCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(consistencyGECleanupCmd, &consistencyGECleanupCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(fixLastNetsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
@@ -644,7 +661,7 @@ func cmdReportsGracefulExit(cmd *cobra.Command, args []string) (err error) {

 	// send output to stdout
 	if reportsGracefulExitCfg.Output == "" {
-		return generateGracefulExitCSV(ctx, reportsGracefulExitCfg.Completed, start, end, os.Stdout)
+		return generateGracefulExitCSV(ctx, reportsGracefulExitCfg.TimeBased, reportsGracefulExitCfg.Completed, start, end, os.Stdout)
 	}

 	// send output to file
@@ -657,7 +674,7 @@ func cmdReportsGracefulExit(cmd *cobra.Command, args []string) (err error) {
 		err = errs.Combine(err, file.Close())
 	}()

-	return generateGracefulExitCSV(ctx, reportsGracefulExitCfg.Completed, start, end, file)
+	return generateGracefulExitCSV(ctx, reportsGracefulExitCfg.TimeBased, reportsGracefulExitCfg.Completed, start, end, file)
 }

 func cmdNodeUsage(cmd *cobra.Command, args []string) (err error) {
@@ -862,6 +879,18 @@ func cmdFinalizeCustomerInvoices(cmd *cobra.Command, args []string) (err error)
 func cmdPayCustomerInvoices(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)

+	return runBillingCmd(ctx, func(ctx context.Context, payments *stripe.Service, _ satellite.DB) error {
+		err := payments.InvoiceApplyCustomerTokenBalance(ctx, args[0])
+		if err != nil {
+			return errs.New("error applying native token payments to invoice for customer: %v", err)
+		}
+		return payments.PayCustomerInvoices(ctx, args[0])
+	})
+}
+
+func cmdPayAllInvoices(cmd *cobra.Command, args []string) (err error) {
+	ctx, _ := process.Ctx(cmd)
+
 	periodStart, err := parseYearMonth(args[0])
 	if err != nil {
 		return err
@@ -885,6 +914,9 @@ func cmdStripeCustomer(cmd *cobra.Command, args []string) (err error) {
 func cmdConsistencyGECleanup(cmd *cobra.Command, args []string) error {
 	ctx, _ := process.Ctx(cmd)

+	if runCfg.GracefulExit.TimeBased {
+		return errs.New("this command is not supported with time-based graceful exit")
+	}
 	before, err := time.Parse("2006-01-02", consistencyGECleanupCfg.Before)
 	if err != nil {
 		return errs.New("before flag value isn't of the expected format. %+v", err)
@@ -932,7 +964,7 @@ func cmdRestoreTrash(cmd *cobra.Command, args []string) error {
 	successes := new(int64)
 	failures := new(int64)

-	undelete := func(node *overlay.SelectedNode) {
+	undelete := func(node *nodeselection.SelectedNode) {
 		log.Info("starting restore trash", zap.String("Node ID", node.ID.String()))

 		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
@@ -966,9 +998,9 @@ func cmdRestoreTrash(cmd *cobra.Command, args []string) error {
 		log.Info("successful restore trash", zap.String("Node ID", node.ID.String()))
 	}

-	var nodes []*overlay.SelectedNode
+	var nodes []*nodeselection.SelectedNode
 	if len(args) == 0 {
-		err = db.OverlayCache().IterateAllContactedNodes(ctx, func(ctx context.Context, node *overlay.SelectedNode) error {
+		err = db.OverlayCache().IterateAllContactedNodes(ctx, func(ctx context.Context, node *nodeselection.SelectedNode) error {
 			nodes = append(nodes, node)
 			return nil
 		})
@@ -985,7 +1017,7 @@ func cmdRestoreTrash(cmd *cobra.Command, args []string) error {
 		if err != nil {
 			return err
 		}
-		nodes = append(nodes, &overlay.SelectedNode{
+		nodes = append(nodes, &nodeselection.SelectedNode{
 			ID:      dossier.Id,
 			Address: dossier.Address,
 			LastNet: dossier.LastNet,
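The billing split above leaves two distinct subcommands where pay-customer-invoices used to do double duty. A hedged usage sketch; the argument formats are inferred from cobra.ExactArgs(1) and parseYearMonth rather than spelled out in this diff:

$ satellite billing pay-customer-invoices <customer-id>    # applies the customer's token balance, then pays their open finalized invoices
$ satellite billing pay-invoices 2023-07                   # attempts payment on all open finalized invoices for the period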
@@ -94,7 +94,12 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {

 	dialer := rpc.NewDefaultDialer(tlsOptions)

-	overlay, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
+	placement, err := config.Placement.Parse()
+	if err != nil {
+		return err
+	}
+
+	overlayService, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), placement.CreateFilters, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
 	if err != nil {
 		return err
 	}
@@ -102,8 +107,9 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
 	orders, err := orders.NewService(
 		log.Named("orders"),
 		signing.SignerFromFullIdentity(identity),
-		overlay,
+		overlayService,
 		orders.NewNoopDB(),
+		placement.CreateFilters,
 		config.Orders,
 	)
 	if err != nil {
@@ -122,9 +128,10 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
 		log.Named("segment-repair"),
 		metabaseDB,
 		orders,
-		overlay,
+		overlayService,
 		nil, // TODO add noop version
 		ecRepairer,
+		placement.CreateFilters,
 		config.Checker.RepairOverrides,
 		config.Repairer,
 	)
@@ -132,7 +139,7 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
 	// TODO reorganize to avoid using peer.

 	peer := &satellite.Repairer{}
-	peer.Overlay = overlay
+	peer.Overlay = overlayService
 	peer.Orders.Service = orders
 	peer.EcRepairer = ecRepairer
 	peer.SegmentRepairer = segmentRepairer
@@ -274,10 +281,8 @@ func reuploadSegment(ctx context.Context, log *zap.Logger, peer *satellite.Repai
 		return errs.New("not enough new nodes were found for repair: min %v got %v", redundancy.RepairThreshold(), len(newNodes))
 	}

-	optimalThresholdMultiplier := float64(1) // is this value fine?
-	numHealthyInExcludedCountries := 0
 	putLimits, putPrivateKey, err := peer.Orders.Service.CreatePutRepairOrderLimits(ctx, segment, make([]*pb.AddressedOrderLimit, len(newNodes)),
-		make(map[int32]struct{}), newNodes, optimalThresholdMultiplier, numHealthyInExcludedCountries)
+		make(map[uint16]struct{}), newNodes)
 	if err != nil {
 		return errs.New("could not create PUT_REPAIR order limits: %w", err)
 	}
cmd/satellite/ui.go (new file, 47 lines)
@@ -0,0 +1,47 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package main

import (
	"github.com/spf13/cobra"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/private/process"
	"storj.io/storj/satellite"
)

func cmdUIRun(cmd *cobra.Command, args []string) (err error) {
	ctx, _ := process.Ctx(cmd)
	log := zap.L()

	runCfg.Debug.Address = *process.DebugAddrFlag

	identity, err := runCfg.Identity.Load()
	if err != nil {
		log.Error("Failed to load identity.", zap.Error(err))
		return errs.New("Failed to load identity: %+v", err)
	}

	satAddr := runCfg.Config.Contact.ExternalAddress
	if satAddr == "" {
		return errs.New("cannot run satellite ui if contact.external-address is not set")
	}
	apiAddress := runCfg.Config.Console.ExternalAddress
	if apiAddress == "" {
		apiAddress = runCfg.Config.Console.Address
	}
	peer, err := satellite.NewUI(log, identity, &runCfg.Config, process.AtomicLevel(cmd), satAddr, apiAddress)
	if err != nil {
		return err
	}

	if err := process.InitMetricsWithHostname(ctx, log, nil); err != nil {
		log.Warn("Failed to initialize telemetry batcher on satellite api", zap.Error(err))
	}

	runError := peer.Run(ctx)
	closeError := peer.Close()
	return errs.Combine(runError, closeError)
}
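cmdUIRun backs the new run ui subcommand registered in cmd/satellite/main.go, and the Docker entrypoint selects it when SATELLITE_UI=true. A minimal invocation sketch (paths are placeholders; the command exits early unless contact.external-address is configured):

$ satellite run ui --config-dir /path/to/configDir --identity-dir /path/to/identityDir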
cmd/storagenode/cmd_forget_satellite.go (new file, 242 lines)
@@ -0,0 +1,242 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package main

import (
	"context"

	"github.com/spf13/cobra"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/storj"
	"storj.io/private/cfgstruct"
	"storj.io/private/process"
	"storj.io/storj/storagenode"
	"storj.io/storj/storagenode/pieces"
	"storj.io/storj/storagenode/satellites"
	"storj.io/storj/storagenode/storagenodedb"
	"storj.io/storj/storagenode/trust"
)

// forgetSatelliteCfg defines configuration for the forget-satellite command.
type forgetSatelliteCfg struct {
	storagenode.Config

	SatelliteIDs []string `internal:"true"`

	AllUntrusted bool `help:"Clean up all untrusted satellites" default:"false"`
	Force        bool `help:"Force removal of satellite data if not listed in satelliteDB cache or marked as untrusted" default:"false"`
}

func newForgetSatelliteCmd(f *Factory) *cobra.Command {
	var cfg forgetSatelliteCfg
	cmd := &cobra.Command{
		Use:   "forget-satellite [satellite_IDs...]",
		Short: "Remove an untrusted satellite from the trust cache and clean up its data",
		Long: "Forget a satellite.\n" +
			"The command shows the list of the available untrusted satellites, " +
			"removes the selected satellites from the trust cache, and cleans up the available data",
		Example: `
# Specify satellite ID to forget
$ storagenode forget-satellite --identity-dir /path/to/identityDir --config-dir /path/to/configDir satellite_ID

# Specify multiple satellite IDs to forget
$ storagenode forget-satellite satellite_ID1 satellite_ID2 --identity-dir /path/to/identityDir --config-dir /path/to/configDir

# Clean up all untrusted satellites
# This checks for untrusted satellites in both the satelliteDB cache and the excluded satellites list
# specified in the config.yaml file
$ storagenode forget-satellite --all-untrusted --identity-dir /path/to/identityDir --config-dir /path/to/configDir

# To force removal of data for untrusted satellites that are not listed in the satelliteDB cache or marked as untrusted
$ storagenode forget-satellite satellite_ID1 satellite_ID2 --force --identity-dir /path/to/identityDir --config-dir /path/to/configDir
`,
		RunE: func(cmd *cobra.Command, args []string) error {
			cfg.SatelliteIDs = args
			if len(args) > 0 && cfg.AllUntrusted {
				return errs.New("cannot specify both satellite IDs and --all-untrusted")
			}

			if len(args) == 0 && !cfg.AllUntrusted {
				return errs.New("must specify either satellite ID(s) as arguments or --all-untrusted flag")
			}

			if cfg.AllUntrusted && cfg.Force {
				return errs.New("cannot specify both --all-untrusted and --force")
			}

			ctx, _ := process.Ctx(cmd)
			return cmdForgetSatellite(ctx, zap.L(), &cfg)
		},
		Annotations: map[string]string{"type": "helper"},
	}

	process.Bind(cmd, &cfg, f.Defaults, cfgstruct.ConfDir(f.ConfDir), cfgstruct.IdentityDir(f.IdentityDir))

	return cmd
}

func cmdForgetSatellite(ctx context.Context, log *zap.Logger, cfg *forgetSatelliteCfg) (err error) {
	// we don't really need the identity, but we load it as a sanity check
	ident, err := cfg.Identity.Load()
	if err != nil {
		log.Fatal("Failed to load identity.", zap.Error(err))
	} else {
		log.Info("Identity loaded.", zap.Stringer("Node ID", ident.ID))
	}

	db, err := storagenodedb.OpenExisting(ctx, log.Named("db"), cfg.DatabaseConfig())
	if err != nil {
		return errs.New("Error starting master database on storagenode: %+v", err)
	}

	satelliteDB := db.Satellites()

	// get list of excluded satellites
	excludedSatellites := make(map[storj.NodeID]bool)
	for _, rule := range cfg.Storage2.Trust.Exclusions.Rules {
		url, err := trust.ParseSatelliteURL(rule.String())
		if err != nil {
			log.Warn("Failed to parse satellite URL from exclusions list", zap.Error(err), zap.String("rule", rule.String()))
			continue
		}
		excludedSatellites[url.ID] = false // false means the satellite has not been cleaned up yet.
	}

	if len(cfg.SatelliteIDs) > 0 {
		for _, satelliteIDStr := range cfg.SatelliteIDs {
			satelliteID, err := storj.NodeIDFromString(satelliteIDStr)
			if err != nil {
				return err
			}

			satellite := satellites.Satellite{
				SatelliteID: satelliteID,
				Status:      satellites.Untrusted,
			}

			// check if satellite is excluded
			cleanedUp, isExcluded := excludedSatellites[satelliteID]
			if !isExcluded {
				sat, err := satelliteDB.GetSatellite(ctx, satelliteID)
				if err != nil {
					return err
				}
				if !sat.SatelliteID.IsZero() {
					satellite = sat
				}
				if satellite.SatelliteID.IsZero() && !cfg.Force {
					return errs.New("satellite %v not found. Specify --force to force data deletion", satelliteID)
				}
				log.Warn("Satellite not found in satelliteDB cache. Forcing removal of satellite data.", zap.Stringer("satelliteID", satelliteID))
			}

			if cleanedUp {
				log.Warn("Satellite already cleaned up", zap.Stringer("satelliteID", satelliteID))
				continue
			}

			err = cleanupSatellite(ctx, log, cfg, db, satellite)
			if err != nil {
				return err
			}
		}
	} else {
		sats, err := satelliteDB.GetSatellites(ctx)
		if err != nil {
			return err
		}

		hasUntrusted := false
		for _, satellite := range sats {
			if satellite.Status != satellites.Untrusted {
				continue
			}
			hasUntrusted = true
			err = cleanupSatellite(ctx, log, cfg, db, satellite)
			if err != nil {
				return err
			}
			excludedSatellites[satellite.SatelliteID] = true // true means the satellite has been cleaned up.
		}

		// clean up excluded satellites that might not be in the satelliteDB cache.
		for satelliteID, cleanedUp := range excludedSatellites {
			if !cleanedUp {
				satellite := satellites.Satellite{
					SatelliteID: satelliteID,
					Status:      satellites.Untrusted,
				}
				hasUntrusted = true
				err = cleanupSatellite(ctx, log, cfg, db, satellite)
				if err != nil {
					return err
				}
			}
		}

		if !hasUntrusted {
			log.Info("No untrusted satellites found. You can add satellites to the exclusions list in the config.yaml file.")
		}
	}

	return nil
}

func cleanupSatellite(ctx context.Context, log *zap.Logger, cfg *forgetSatelliteCfg, db *storagenodedb.DB, satellite satellites.Satellite) error {
	if satellite.Status != satellites.Untrusted && !cfg.Force {
		log.Error("Satellite is not untrusted. Skipping", zap.Stringer("satelliteID", satellite.SatelliteID))
		return nil
	}

	log.Info("Removing satellite from trust cache.", zap.Stringer("satelliteID", satellite.SatelliteID))
	cache, err := trust.LoadCache(cfg.Storage2.Trust.CachePath)
	if err != nil {
		return err
	}

	deleted := cache.DeleteSatelliteEntry(satellite.SatelliteID)
	if deleted {
		if err := cache.Save(ctx); err != nil {
			return err
		}
		log.Info("Satellite removed from trust cache.", zap.Stringer("satelliteID", satellite.SatelliteID))
	}

	log.Info("Cleaning up satellite data.", zap.Stringer("satelliteID", satellite.SatelliteID))
	blobs := pieces.NewBlobsUsageCache(log.Named("blobscache"), db.Pieces())
	if err := blobs.DeleteNamespace(ctx, satellite.SatelliteID.Bytes()); err != nil {
		return err
	}

	log.Info("Cleaning up the trash.", zap.Stringer("satelliteID", satellite.SatelliteID))
	err = blobs.DeleteTrashNamespace(ctx, satellite.SatelliteID.Bytes())
	if err != nil {
		return err
	}

	log.Info("Removing satellite info from reputation DB.", zap.Stringer("satelliteID", satellite.SatelliteID))
	err = db.Reputation().Delete(ctx, satellite.SatelliteID)
	if err != nil {
		return err
	}

	// delete v0 pieces for the satellite, if any.
	log.Info("Removing satellite v0 pieces if any.", zap.Stringer("satelliteID", satellite.SatelliteID))
	err = db.V0PieceInfo().WalkSatelliteV0Pieces(ctx, db.Pieces(), satellite.SatelliteID, func(access pieces.StoredPieceAccess) error {
		return db.Pieces().Delete(ctx, access.BlobRef())
	})
	if err != nil {
		return err
	}

	log.Info("Removing satellite from satellites DB.", zap.Stringer("satelliteID", satellite.SatelliteID))
	err = db.Satellites().DeleteSatellite(ctx, satellite.SatelliteID)
	if err != nil {
		return err
	}

	return nil
}
cmd/storagenode/cmd_forget_satellite_test.go (new file, 242 lines)
@@ -0,0 +1,242 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package main

import (
	"os"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"github.com/zeebo/errs"
	"go.uber.org/zap/zaptest"

	"storj.io/common/identity"
	"storj.io/common/memory"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/storagenode/blobstore"
	"storj.io/storj/storagenode/blobstore/filestore"
	"storj.io/storj/storagenode/reputation"
	"storj.io/storj/storagenode/satellites"
)

func Test_newForgetSatelliteCmd_Error(t *testing.T) {
	tests := []struct {
		name    string
		args    string
		wantErr string
	}{
		{
			name:    "no args",
			args:    "",
			wantErr: "must specify either satellite ID(s) as arguments or --all-untrusted flag",
		},
		{
			name:    "Both satellite ID and --all-untrusted flag specified",
			args:    "--all-untrusted 1234567890123456789012345678901234567890123456789012345678901234",
			wantErr: "cannot specify both satellite IDs and --all-untrusted",
		},
		{
			name:    "--all-untrusted and --force specified",
			args:    "--all-untrusted --force",
			wantErr: "cannot specify both --all-untrusted and --force",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cmd := newForgetSatelliteCmd(&Factory{})
			cmd.SetArgs(strings.Fields(tt.args))
			err := cmd.ExecuteContext(testcontext.New(t))
			if tt.wantErr == "" {
				require.NoError(t, err)
				return
			}
			require.Equal(t, tt.wantErr, err.Error())
		})
	}
}

func Test_cmdForgetSatellite(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 2, StorageNodeCount: 1, UplinkCount: 0,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		address := planet.StorageNodes[0].Server.PrivateAddr().String()
		db := planet.StorageNodes[0].DB
		log := zaptest.NewLogger(t)

		store, err := filestore.NewAt(log, db.Config().Pieces, filestore.DefaultConfig)
		require.NoError(t, err)

		satelliteID := planet.Satellites[0].ID()

		blobSize := memory.KB
		blobRef := blobstore.BlobRef{
			Namespace: satelliteID.Bytes(),
			Key:       testrand.PieceID().Bytes(),
		}
		w, err := store.Create(ctx, blobRef, -1)
		require.NoError(t, err)
		_, err = w.Write(testrand.Bytes(blobSize))
		require.NoError(t, err)
		require.NoError(t, w.Commit(ctx))

		// create a new satellite reputation
		timestamp := time.Now().UTC()
		reputationDB := db.Reputation()

		stats := reputation.Stats{
			SatelliteID: satelliteID,
			Audit: reputation.Metric{
				TotalCount:   6,
				SuccessCount: 7,
				Alpha:        8,
				Beta:         9,
				Score:        10,
				UnknownAlpha: 11,
				UnknownBeta:  12,
				UnknownScore: 13,
			},
			OnlineScore: 14,
			UpdatedAt:   timestamp,
			JoinedAt:    timestamp,
		}
		err = reputationDB.Store(ctx, stats)
		require.NoError(t, err)
		// test that the reputation was stored correctly
		rstats, err := reputationDB.Get(ctx, satelliteID)
		require.NoError(t, err)
		require.NotNil(t, rstats)
		require.Equal(t, stats, *rstats)

		// insert a new untrusted satellite in the database
		err = db.Satellites().SetAddressAndStatus(ctx, satelliteID, address, satellites.Untrusted)
		require.NoError(t, err)
		// test that the satellite was inserted correctly
		satellite, err := db.Satellites().GetSatellite(ctx, satelliteID)
		require.NoError(t, err)
		require.Equal(t, satellites.Untrusted, satellites.Status(satellite.Status))

		// set up the identity
		ident := planet.StorageNodes[0].Identity
		identConfig := identity.Config{
			CertPath: ctx.File("identity", "identity.cert"),
			KeyPath:  ctx.File("identity", "identity.Key"),
		}
		err = identConfig.Save(ident)
		require.NoError(t, err)
		planet.StorageNodes[0].Config.Identity = identConfig

		// run the forget satellite command with All flag
		err = cmdForgetSatellite(ctx, log, &forgetSatelliteCfg{
			AllUntrusted: true,
			Config:       planet.StorageNodes[0].Config,
		})
		require.NoError(t, err)

		// check that the blob was deleted
		blobInfo, err := store.Stat(ctx, blobRef)
		require.Error(t, err)
		require.True(t, errs.Is(err, os.ErrNotExist))
		require.Nil(t, blobInfo)
		// check that the reputation was deleted
		rstats, err = reputationDB.Get(ctx, satelliteID)
		require.NoError(t, err)
		require.Equal(t, &reputation.Stats{SatelliteID: satelliteID}, rstats)
		// check that the satellite info was deleted from the database
		satellite, err = db.Satellites().GetSatellite(ctx, satelliteID)
		require.NoError(t, err)
		require.True(t, satellite.SatelliteID.IsZero())
	})
}

func Test_cmdForgetSatellite_Exclusions(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 2, StorageNodeCount: 1, UplinkCount: 0,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		address := planet.StorageNodes[0].Server.PrivateAddr().String()
		db := planet.StorageNodes[0].DB
		log := zaptest.NewLogger(t)

		store, err := filestore.NewAt(log, db.Config().Pieces, filestore.DefaultConfig)
		require.NoError(t, err)

		satelliteID := planet.Satellites[0].ID()

		blobSize := memory.KB
		blobRef := blobstore.BlobRef{
			Namespace: satelliteID.Bytes(),
			Key:       testrand.PieceID().Bytes(),
		}
		w, err := store.Create(ctx, blobRef, -1)
		require.NoError(t, err)
		_, err = w.Write(testrand.Bytes(blobSize))
		require.NoError(t, err)
		require.NoError(t, w.Commit(ctx))

		// create a new satellite reputation
		timestamp := time.Now().UTC()
		reputationDB := db.Reputation()

		stats := reputation.Stats{
			SatelliteID: satelliteID,
			Audit: reputation.Metric{
				TotalCount:   6,
				SuccessCount: 7,
				Alpha:        8,
				Beta:         9,
				Score:        10,
				UnknownAlpha: 11,
				UnknownBeta:  12,
				UnknownScore: 13,
			},
			OnlineScore: 14,
			UpdatedAt:   timestamp,
			JoinedAt:    timestamp,
		}
		err = reputationDB.Store(ctx, stats)
		require.NoError(t, err)
		// test that the reputation was stored correctly
		rstats, err := reputationDB.Get(ctx, satelliteID)
		require.NoError(t, err)
		require.NotNil(t, rstats)
		require.Equal(t, stats, *rstats)

		// set up the identity
		ident := planet.StorageNodes[0].Identity
		identConfig := identity.Config{
			CertPath: ctx.File("identity", "identity.cert"),
			KeyPath:  ctx.File("identity", "identity.Key"),
		}
		err = identConfig.Save(ident)
		require.NoError(t, err)
		planet.StorageNodes[0].Config.Identity = identConfig

		// add the satellite to the exclusion list
		err = planet.StorageNodes[0].Config.Storage2.Trust.Exclusions.Set(satelliteID.String() + "@" + address)
		require.NoError(t, err)
		// run the forget satellite command with All flag
		err = cmdForgetSatellite(ctx, log, &forgetSatelliteCfg{
			AllUntrusted: true,
			Config:       planet.StorageNodes[0].Config,
		})
		require.NoError(t, err)

		// check that the blob was deleted
		blobInfo, err := store.Stat(ctx, blobRef)
		require.Error(t, err)
		require.True(t, errs.Is(err, os.ErrNotExist))
		require.Nil(t, blobInfo)
		// check that the reputation was deleted
		rstats, err = reputationDB.Get(ctx, satelliteID)
		require.NoError(t, err)
		require.Equal(t, &reputation.Stats{SatelliteID: satelliteID}, rstats)
		// check that the satellite info was deleted from the database
		satellite, err := db.Satellites().GetSatellite(ctx, satelliteID)
		require.NoError(t, err)
		require.True(t, satellite.SatelliteID.IsZero())
	})
}
@@ -59,6 +59,7 @@ func newRootCmd(setDefaults bool) (*cobra.Command, *Factory) {
 		newIssueAPIKeyCmd(factory),
 		newGracefulExitInitCmd(factory),
 		newGracefulExitStatusCmd(factory),
+		newForgetSatelliteCmd(factory),
 		// internal hidden commands
 		internalcmd.NewUsedSpaceFilewalkerCmd().Command,
 		internalcmd.NewGCFilewalkerCmd().Command,
@@ -65,11 +65,15 @@ func (ce *consoleEndpoints) Token() string {
 	return ce.appendPath("/api/v0/auth/token")
 }

-func (ce *consoleEndpoints) GraphQL() string {
-	return ce.appendPath("/api/v0/graphql")
+func (ce *consoleEndpoints) Projects() string {
+	return ce.appendPath("/api/v0/projects")
 }

-func (ce *consoleEndpoints) graphqlDo(request *http.Request, jsonResponse interface{}) error {
+func (ce *consoleEndpoints) APIKeys() string {
+	return ce.appendPath("/api/v0/api-keys")
+}
+
+func (ce *consoleEndpoints) httpDo(request *http.Request, jsonResponse interface{}) error {
 	resp, err := ce.client.Do(request)
 	if err != nil {
 		return err
@@ -81,24 +85,24 @@ func (ce *consoleEndpoints) graphqlDo(request *http.Request, jsonResponse interf
 		return err
 	}

-	var response struct {
-		Data   json.RawMessage
-		Errors []interface{}
-	}
-
-	if err = json.NewDecoder(bytes.NewReader(b)).Decode(&response); err != nil {
-		return err
-	}
-
-	if response.Errors != nil {
-		return errs.New("inner graphql error: %v", response.Errors)
-	}
-
-	if jsonResponse == nil {
-		return errs.New("empty response: %q", b)
-	}
-
-	return json.NewDecoder(bytes.NewReader(response.Data)).Decode(jsonResponse)
+	if resp.StatusCode >= 200 && resp.StatusCode < 300 {
+		return json.NewDecoder(bytes.NewReader(b)).Decode(jsonResponse)
+	}
+
+	var errResponse struct {
+		Error string `json:"error"`
+	}
+
+	err = json.NewDecoder(bytes.NewReader(b)).Decode(&errResponse)
+	if err != nil {
+		return err
+	}
+
+	return errs.New("request failed with status %d: %s", resp.StatusCode, errResponse.Error)
 }

 func (ce *consoleEndpoints) createOrGetAPIKey(ctx context.Context) (string, error) {
@@ -464,49 +468,41 @@ func (ce *consoleEndpoints) getProject(ctx context.Context, token string) (strin
 	request, err := http.NewRequestWithContext(
 		ctx,
 		http.MethodGet,
-		ce.GraphQL(),
+		ce.Projects(),
 		nil)
 	if err != nil {
 		return "", errs.Wrap(err)
 	}

-	q := request.URL.Query()
-	q.Add("query", `query {myProjects{id}}`)
-	request.URL.RawQuery = q.Encode()
-
 	request.AddCookie(&http.Cookie{
 		Name:  ce.cookieName,
 		Value: token,
 	})

-	request.Header.Add("Content-Type", "application/graphql")
+	request.Header.Add("Content-Type", "application/json")

-	var getProjects struct {
-		MyProjects []struct {
-			ID string
-		}
+	var projects []struct {
+		ID string `json:"id"`
 	}
-	if err := ce.graphqlDo(request, &getProjects); err != nil {
+	if err := ce.httpDo(request, &projects); err != nil {
 		return "", errs.Wrap(err)
 	}
-	if len(getProjects.MyProjects) == 0 {
+	if len(projects) == 0 {
 		return "", errs.New("no projects")
 	}

-	return getProjects.MyProjects[0].ID, nil
+	return projects[0].ID, nil
 }

 func (ce *consoleEndpoints) createProject(ctx context.Context, token string) (string, error) {
 	rng := rand.NewSource(time.Now().UnixNano())
-	createProjectQuery := fmt.Sprintf(
-		`mutation {createProject(input:{name:"TestProject-%d",description:""}){id}}`,
-		rng.Int63())
+	body := fmt.Sprintf(`{"name":"TestProject-%d","description":""}`, rng.Int63())

 	request, err := http.NewRequestWithContext(
 		ctx,
 		http.MethodPost,
-		ce.GraphQL(),
-		bytes.NewReader([]byte(createProjectQuery)))
+		ce.Projects(),
+		bytes.NewReader([]byte(body)))
 	if err != nil {
 		return "", errs.Wrap(err)
 	}
@@ -516,31 +512,27 @@ func (ce *consoleEndpoints) createProject(ctx context.Context, token string) (st
 		Value: token,
 	})

-	request.Header.Add("Content-Type", "application/graphql")
+	request.Header.Add("Content-Type", "application/json")

-	var createProject struct {
-		CreateProject struct {
-			ID string
-		}
+	var createdProject struct {
+		ID string `json:"id"`
 	}
-	if err := ce.graphqlDo(request, &createProject); err != nil {
+	if err := ce.httpDo(request, &createdProject); err != nil {
 		return "", errs.Wrap(err)
 	}

-	return createProject.CreateProject.ID, nil
+	return createdProject.ID, nil
 }

 func (ce *consoleEndpoints) createAPIKey(ctx context.Context, token, projectID string) (string, error) {
 	rng := rand.NewSource(time.Now().UnixNano())
-	createAPIKeyQuery := fmt.Sprintf(
-		`mutation {createAPIKey(projectID:%q,name:"TestKey-%d"){key}}`,
-		projectID, rng.Int63())
+	apiKeyName := fmt.Sprintf("TestKey-%d", rng.Int63())

 	request, err := http.NewRequestWithContext(
 		ctx,
 		http.MethodPost,
-		ce.GraphQL(),
-		bytes.NewReader([]byte(createAPIKeyQuery)))
+		ce.APIKeys()+"/create/"+projectID,
+		bytes.NewReader([]byte(apiKeyName)))
 	if err != nil {
 		return "", errs.Wrap(err)
 	}
@@ -550,18 +542,16 @@ func (ce *consoleEndpoints) createAPIKey(ctx context.Context, token, projectID s
 		Value: token,
 	})

-	request.Header.Add("Content-Type", "application/graphql")
+	request.Header.Add("Content-Type", "application/json")

-	var createAPIKey struct {
-		CreateAPIKey struct {
-			Key string
-		}
+	var createdKey struct {
+		Key string `json:"key"`
 	}
-	if err := ce.graphqlDo(request, &createAPIKey); err != nil {
+	if err := ce.httpDo(request, &createdKey); err != nil {
 		return "", errs.Wrap(err)
 	}

-	return createAPIKey.CreateAPIKey.Key, nil
+	return createdKey.Key, nil
 }

 func generateActivationKey(userID uuid.UUID, email string, createdAt time.Time) (string, error) {
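The test helper now drives the console's REST endpoints instead of GraphQL. The equivalent raw requests look roughly like this; host, cookie name, and token are placeholders, while the paths and payload shapes come from the code above:

$ curl -X POST "http://<host>/api/v0/projects" \
    -H "Content-Type: application/json" \
    --cookie "<cookie-name>=<token>" \
    -d '{"name":"TestProject-1","description":""}'
$ curl -X POST "http://<host>/api/v0/api-keys/create/<project-id>" \
    -H "Content-Type: application/json" \
    --cookie "<cookie-name>=<token>" \
    -d 'TestKey-1'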
@@ -255,7 +255,8 @@ func (process *Process) Exec(ctx context.Context, command string) (err error) {

 	if _, ok := process.Arguments[command]; !ok {
 		fmt.Fprintf(process.processes.Output, "%s running: %s\n", process.Name, command)
-		return
+		//TODO: This doesn't look right, but keeping the same behaviour as before.
+		return nil
 	}

 	cmd := exec.CommandContext(ctx, executable, process.Arguments[command]...)
@@ -203,12 +203,12 @@ func verifySegments(cmd *cobra.Command, args []string) error {
 	dialer := rpc.NewDefaultDialer(tlsOptions)

 	// setup dependencies for verification
-	overlay, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), "", "", satelliteCfg.Overlay)
+	overlayService, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), overlay.NewPlacementDefinitions().CreateFilters, "", "", satelliteCfg.Overlay)
 	if err != nil {
 		return Error.Wrap(err)
 	}

-	ordersService, err := orders.NewService(log.Named("orders"), signing.SignerFromFullIdentity(identity), overlay, orders.NewNoopDB(), satelliteCfg.Orders)
+	ordersService, err := orders.NewService(log.Named("orders"), signing.SignerFromFullIdentity(identity), overlayService, orders.NewNoopDB(), overlay.NewPlacementDefinitions().CreateFilters, satelliteCfg.Orders)
 	if err != nil {
 		return Error.Wrap(err)
 	}
@@ -243,7 +243,7 @@ func verifySegments(cmd *cobra.Command, args []string) error {

 	// setup verifier
 	verifier := NewVerifier(log.Named("verifier"), dialer, ordersService, verifyConfig)
-	service, err := NewService(log.Named("service"), metabaseDB, verifier, overlay, serviceConfig)
+	service, err := NewService(log.Named("service"), metabaseDB, verifier, overlayService, serviceConfig)
 	if err != nil {
 		return Error.Wrap(err)
 	}
@ -15,6 +15,7 @@ import (
	"storj.io/common/uuid"
	"storj.io/private/process"
	"storj.io/storj/satellite/metabase"
+	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/satellitedb"
)
@ -78,7 +79,7 @@ type NodeCheckConfig struct {

// NodeCheckOverlayDB contains dependencies from overlay that are needed for the processing.
type NodeCheckOverlayDB interface {
-	IterateAllContactedNodes(context.Context, func(context.Context, *overlay.SelectedNode) error) error
+	IterateAllContactedNodes(context.Context, func(context.Context, *nodeselection.SelectedNode) error) error
	IterateAllNodeDossiers(context.Context, func(context.Context, *overlay.NodeDossier) error) error
}

@ -21,6 +21,7 @@ import (
	"storj.io/common/uuid"
	"storj.io/storj/satellite/audit"
	"storj.io/storj/satellite/metabase"
+	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/overlay"
)

@ -46,7 +47,7 @@ type Verifier interface {
type Overlay interface {
	// Get looks up the node by nodeID
	Get(ctx context.Context, nodeID storj.NodeID) (*overlay.NodeDossier, error)
-	SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*overlay.SelectedNode, error)
+	SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*nodeselection.SelectedNode, error)
}

// SegmentWriter allows writing segments to some output.

@ -23,6 +23,7 @@ import (
	segmentverify "storj.io/storj/cmd/tools/segment-verify"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/satellite/metabase"
+	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/overlay"
)

@ -344,10 +345,10 @@ func (db *metabaseMock) Get(ctx context.Context, nodeID storj.NodeID) (*overlay.
	}, nil
}

-func (db *metabaseMock) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*overlay.SelectedNode, error) {
-	var xs []*overlay.SelectedNode
+func (db *metabaseMock) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*nodeselection.SelectedNode, error) {
+	var xs []*nodeselection.SelectedNode
	for nodeID := range db.nodeIDToAlias {
-		xs = append(xs, &overlay.SelectedNode{
+		xs = append(xs, &nodeselection.SelectedNode{
			ID: nodeID,
			Address: &pb.NodeAddress{
				Address: fmt.Sprintf("nodeid:%v", nodeID),
201
cmd/tools/tag-signer/main.go
Normal file
@ -0,0 +1,201 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package main

import (
	"context"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"path/filepath"
	"strings"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/identity"
	"storj.io/common/nodetag"
	"storj.io/common/pb"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/private/process"
)

var (
	rootCmd = &cobra.Command{
		Use:   "tag-signer",
		Short: "Sign key=value pairs with identity",
		Long: "Node tags are arbitrary key value pairs signed by an authority. If the public key is configured on " +
			"Satellite side, Satellite will check the signatures and save the tags, which can be used (for example)" +
			" during node selection. Storagenodes can be configured to send encoded node tags to the Satellite. " +
			"This utility helps create/manage the values of this specific configuration value, which is encoded by default.",
	}

	signCmd = &cobra.Command{
		Use:   "sign <key=value> <key2=value> ...",
		Short: "Create signed tagset",
		Args:  cobra.MinimumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx, _ := process.Ctx(cmd)
			encoded, err := signTags(ctx, config, args)
			if err != nil {
				return err
			}
			fmt.Println(encoded)
			return nil
		},
	}

	inspectCmd = &cobra.Command{
		Use:   "inspect <encoded string>",
		Short: "Print out the details from an encoded node set",
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx, _ := process.Ctx(cmd)
			return inspect(ctx, args[0])
		},
	}

	config Config
)

// Config contains configuration required for signing.
type Config struct {
	IdentityDir string `help:"location of the identity files" path:"true"`
	NodeID      string `help:"the ID of the node which will use this tag"`
}

func init() {
	rootCmd.AddCommand(signCmd)
	rootCmd.AddCommand(inspectCmd)
	process.Bind(signCmd, &config)
}

func signTags(ctx context.Context, cfg Config, tagPairs []string) (string, error) {
	if cfg.IdentityDir == "" {
		return "", errs.New("Please specify the identity, used as a signer with --identity-dir")
	}

	if cfg.NodeID == "" {
		return "", errs.New("Please specify the --node-id")
	}

	identityConfig := identity.Config{
		CertPath: filepath.Join(cfg.IdentityDir, "identity.cert"),
		KeyPath:  filepath.Join(cfg.IdentityDir, "identity.key"),
	}

	fullIdentity, err := identityConfig.Load()
	if err != nil {
		return "", err
	}

	signer := signing.SignerFromFullIdentity(fullIdentity)

	nodeID, err := storj.NodeIDFromString(cfg.NodeID)
	if err != nil {
		return "", errs.New("Wrong NodeID format: %v", err)
	}
	tagSet := &pb.NodeTagSet{
		NodeId:   nodeID.Bytes(),
		SignedAt: time.Now().Unix(),
	}

	for _, tag := range tagPairs {
		tag = strings.TrimSpace(tag)
		if len(tag) == 0 {
			continue
		}
		parts := strings.SplitN(tag, "=", 2)
		if len(parts) != 2 {
			return "", errs.New("tags should be in KEY=VALUE format, but it was %s", tag)
		}
		tagSet.Tags = append(tagSet.Tags, &pb.Tag{
			Name:  parts[0],
			Value: []byte(parts[1]),
		})
	}

	signedMessage, err := nodetag.Sign(ctx, tagSet, signer)
	if err != nil {
		return "", err
	}

	all := &pb.SignedNodeTagSets{
		Tags: []*pb.SignedNodeTagSet{
			signedMessage,
		},
	}

	raw, err := proto.Marshal(all)
	if err != nil {
		return "", errs.Wrap(err)
	}
	return base64.StdEncoding.EncodeToString(raw), nil
}

func inspect(ctx context.Context, s string) error {
	raw, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return errs.New("Input is not in base64 format")
	}

	sets := &pb.SignedNodeTagSets{}
	err = proto.Unmarshal(raw, sets)
	if err != nil {
		return errs.New("Input is not a protobuf encoded *pb.SignedNodeTagSets message")
	}

	for _, msg := range sets.Tags {
		signerNodeID, err := storj.NodeIDFromBytes(msg.SignerNodeId)
		if err != nil {
			return err
		}

		fmt.Println("Signer:    ", signerNodeID.String())
		fmt.Println("Signature: ", hex.EncodeToString(msg.Signature))

		tags := &pb.NodeTagSet{}
		err = proto.Unmarshal(msg.SerializedTag, tags)
		if err != nil {
			return err
		}
		nodeID, err := storj.NodeIDFromBytes(tags.NodeId)
		if err != nil {
			return err
		}

		fmt.Println("SignedAt:  ", time.Unix(tags.SignedAt, 0).Format(time.RFC3339))
		fmt.Println("NodeID:    ", nodeID.String())
		fmt.Println("Tags:")
		for _, tag := range tags.Tags {
			fmt.Printf("  %s=%s\n", tag.Name, string(tag.Value))
		}
		fmt.Println()
	}
	return nil
}

func main() {
	process.ExecWithCustomOptions(rootCmd, process.ExecOptions{
		LoadConfig: func(cmd *cobra.Command, vip *viper.Viper) error {
			return nil
		},
		InitTracing: false,
		LoggerFactory: func(logger *zap.Logger) *zap.Logger {
			newLogger, level, err := process.NewLogger("tag-signer")
			if err != nil {
				panic(err)
			}
			level.SetLevel(zap.WarnLevel)
			return newLogger
		},
	})
}
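
For orientation, a typical invocation of the new tool might look like the following (the identity path, node ID, and tag names are placeholders; the flag names follow from the `Config` struct and error messages above):

```
# sign two key=value tags for a node, using the authority identity as signer
tag-signer sign --identity-dir /path/to/identity --node-id <NODE_ID> soc2=true region=eu

# decode and pretty-print an encoded SignedNodeTagSets blob
tag-signer inspect <base64-encoded-output>
```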
@ -29,6 +29,8 @@ type accessPermissions struct {

	notBefore *time.Time
	notAfter  *time.Time
+
+	maxObjectTTL *time.Duration
}

func (ap *accessPermissions) Setup(params clingy.Parameters, prefixFlags bool) {
@ -65,6 +67,12 @@ func (ap *accessPermissions) Setup(params clingy.Parameters, prefixFlags bool) {
		"Disallow access after this time (e.g. '+2h', 'now', '2020-01-02T15:04:05Z0700', 'none')",
		nil, clingy.Transform(parseHumanDateNotAfter), clingy.Type("relative_date"), clingy.Optional).(*time.Time)

+	params.Break()
+
+	ap.maxObjectTTL = params.Flag("max-object-ttl",
+		"The object is automatically deleted after this period. (e.g. '1h30m', '24h', '720h')",
+		nil, clingy.Transform(time.ParseDuration), clingy.Type("period"), clingy.Optional).(*time.Duration)
+
	if !prefixFlags {
		ap.prefixes = params.Arg("prefix", "Key prefix access will be restricted to",
			clingy.Transform(ulloc.Parse),
@ -93,6 +101,7 @@ func (ap *accessPermissions) Apply(access *uplink.Access) (*uplink.Access, error
		AllowUpload: ap.AllowUpload(),
		NotBefore:   ap.NotBefore(),
		NotAfter:    ap.NotAfter(),
+		MaxObjectTTL: ap.MaxObjectTTL(),
	}

	// if we aren't actually restricting anything, then we don't need to Share.
@ -126,3 +135,4 @@ func (ap *accessPermissions) AllowDelete() bool { return !defaulted(ap.disall
func (ap *accessPermissions) AllowList() bool { return !defaulted(ap.disallowLists, ap.writeonly) }
func (ap *accessPermissions) AllowDownload() bool { return !defaulted(ap.disallowReads, ap.writeonly) }
func (ap *accessPermissions) AllowUpload() bool { return !defaulted(ap.disallowWrites, ap.readonly) }
+func (ap *accessPermissions) MaxObjectTTL() *time.Duration { return ap.maxObjectTTL }

@ -8,7 +8,6 @@ import (
	"fmt"

	"github.com/zeebo/clingy"
-	"github.com/zeebo/errs"

	"storj.io/storj/cmd/uplink/ulext"
)
@ -33,7 +32,7 @@ func (c *cmdAccessUse) Execute(ctx context.Context) error {
		return err
	}
	if _, ok := accesses[c.access]; !ok {
-		return errs.New("unknown access: %q", c.access)
+		return fmt.Errorf("ERROR: access %q does not exist. Use 'uplink access list' to see existing accesses", c.access)
	}
	if err := c.ex.SaveAccessInfo(c.access, accesses); err != nil {
		return err

@ -85,8 +85,7 @@ func (c *cmdCp) Setup(params clingy.Parameters) {
	).(bool)
	c.byteRange = params.Flag("range", "Downloads the specified range bytes of an object. For more information about the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35", "").(string)

-	parallelism := params.Flag("parallelism", "Controls how many parallel chunks to upload/download from a file", nil,
-		clingy.Optional,
+	c.parallelism = params.Flag("parallelism", "Controls how many parallel parts to upload/download from a file", 1,
		clingy.Short('p'),
		clingy.Transform(strconv.Atoi),
		clingy.Transform(func(n int) (int, error) {
@ -95,8 +94,8 @@ func (c *cmdCp) Setup(params clingy.Parameters) {
			}
			return n, nil
		}),
-	).(*int)
-	c.parallelismChunkSize = params.Flag("parallelism-chunk-size", "Set the size of the chunks for parallelism, 0 means automatic adjustment", memory.Size(0),
+	).(int)
+	c.parallelismChunkSize = params.Flag("parallelism-chunk-size", "Set the size of the parts for parallelism, 0 means automatic adjustment", memory.Size(0),
		clingy.Transform(memory.ParseString),
		clingy.Transform(func(n int64) (memory.Size, error) {
			if n < 0 {
@ -107,17 +106,16 @@ func (c *cmdCp) Setup(params clingy.Parameters) {
	).(memory.Size)

	c.uploadConfig = testuplink.DefaultConcurrentSegmentUploadsConfig()
-	maxConcurrent := params.Flag(
+	c.uploadConfig.SchedulerOptions.MaximumConcurrent = params.Flag(
		"maximum-concurrent-pieces",
-		"Maximum concurrent pieces to upload at once per transfer",
-		nil,
-		clingy.Optional,
+		"Maximum concurrent pieces to upload at once per part",
+		c.uploadConfig.SchedulerOptions.MaximumConcurrent,
		clingy.Transform(strconv.Atoi),
		clingy.Advanced,
-	).(*int)
+	).(int)
	c.uploadConfig.SchedulerOptions.MaximumConcurrentHandles = params.Flag(
		"maximum-concurrent-segments",
-		"Maximum concurrent segments to upload at once per transfer",
+		"Maximum concurrent segments to upload at once per part",
		c.uploadConfig.SchedulerOptions.MaximumConcurrentHandles,
		clingy.Transform(strconv.Atoi),
		clingy.Advanced,
@ -133,28 +131,6 @@ func (c *cmdCp) Setup(params clingy.Parameters) {
		clingy.Advanced,
	).(string)

-	{ // handle backwards compatibility around parallelism and maximum concurrent pieces
-		addr := func(x int) *int { return &x }
-
-		switch {
-		// if neither are actively set, use defaults
-		case parallelism == nil && maxConcurrent == nil:
-			parallelism = addr(1)
-			maxConcurrent = addr(c.uploadConfig.SchedulerOptions.MaximumConcurrent)
-
-		// if parallelism is not set, use a value based on maxConcurrent
-		case parallelism == nil:
-			parallelism = addr((*maxConcurrent + 99) / 100)
-
-		// if maxConcurrent is not set, use a value based on parallelism
-		case maxConcurrent == nil:
-			maxConcurrent = addr(100 * *parallelism)
-		}
-
-		c.uploadConfig.SchedulerOptions.MaximumConcurrent = *maxConcurrent
-		c.parallelism = *parallelism
-	}
-
	c.inmemoryEC = params.Flag("inmemory-erasure-coding", "Keep erasure-coded pieces in-memory instead of writing them on the disk during upload", false,
		clingy.Transform(strconv.ParseBool),
		clingy.Boolean,
@ -194,9 +170,10 @@ func (c *cmdCp) Execute(ctx context.Context) error {
	fs, err := c.ex.OpenFilesystem(ctx, c.access,
		ulext.ConcurrentSegmentUploadsConfig(c.uploadConfig),
		ulext.ConnectionPoolOptions(rpcpool.Options{
-			// Add a bit more capacity for connections to the satellite
-			Capacity:       c.uploadConfig.SchedulerOptions.MaximumConcurrent + 5,
-			KeyCapacity:    5,
+			// Allow at least as many connections as the maximum concurrent pieces per
+			// parallel part per transfer, plus a few extra for the satellite.
+			Capacity:       c.transfers*c.parallelism*c.uploadConfig.SchedulerOptions.MaximumConcurrent + 5,
+			KeyCapacity:    2,
			IdleExpiration: 2 * time.Minute,
		}))
	if err != nil {
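
To make the new pool-capacity formula concrete with illustrative numbers: 2 transfers with parallelism 4 and 100 maximum concurrent pieces yield 2 × 4 × 100 + 5 = 805 pooled connections, where the old formula would have allowed only 100 + 5 = 105.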
@ -419,17 +396,6 @@ func (c *cmdCp) copyFile(ctx context.Context, fs ulfs.Filesystem, source, dest u
	}
	defer func() { _ = mwh.Abort(ctx) }()

-	// if we're uploading, do a single part of maximum size
-	if dest.Remote() {
-		return errs.Wrap(c.singleCopy(
-			ctx,
-			source, dest,
-			mrh, mwh,
-			offset, length,
-			bar,
-		))
-	}
-
	partSize, err := c.calculatePartSize(mrh.Length(), c.parallelismChunkSize.Int64())
	if err != nil {
		return err
@ -448,13 +414,15 @@ func (c *cmdCp) copyFile(ctx context.Context, fs ulfs.Filesystem, source, dest u
// calculatePartSize returns the needed part size in order to upload the file with size of 'length'.
// It hereby respects if the client requests/prefers a certain size and only increases if needed.
func (c *cmdCp) calculatePartSize(length, preferredSize int64) (requiredSize int64, err error) {
-	segC := (length / maxPartCount / (memory.MiB * 64).Int64()) + 1
-	requiredSize = segC * (memory.MiB * 64).Int64()
+	segC := (length / maxPartCount / memory.GiB.Int64()) + 1
+	requiredSize = segC * memory.GiB.Int64()
	switch {
	case preferredSize == 0:
		return requiredSize, nil
	case requiredSize <= preferredSize:
		return preferredSize, nil
+	case length < 0: // let the user pick their size if we don't have a length to know better
+		return preferredSize, nil
	default:
		return 0, errs.New(fmt.Sprintf("the specified chunk size %s is too small, requires %s or larger",
			memory.FormatBytes(preferredSize), memory.FormatBytes(requiredSize)))
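
As a worked check of the new sizing (assuming `maxPartCount` is 10000, which is consistent with the tests further below): for a 20001 GiB upload, segC = 20001 GiB / 10000 / 1 GiB + 1 = 3, so the required part size is 3 GiB; a 10 GiB upload gives segC = 0 + 1, i.e. the 1 GiB minimum.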
@ -535,8 +503,8 @@ func (c *cmdCp) parallelCopy(
	}

	var readBufs *ulfs.BytesPool
-	if p > 1 && chunkSize > 0 && (source.Std() || dest.Std()) {
-		// Create the read buffer pool only for uploads from stdin and downloads to stdout with parallelism > 1.
+	if p > 1 && chunkSize > 0 && source.Std() {
+		// Create the read buffer pool only for uploads from stdin with parallelism > 1.
		readBufs = ulfs.NewBytesPool(int(chunkSize))
	}

@ -557,6 +525,14 @@ func (c *cmdCp) parallelCopy(
			break
		}

+		if i == 0 && bar != nil {
+			info, err := src.Info(ctx)
+			if err == nil {
+				bar.SetTotal(info.ContentLength, false)
+				bar.EnableTriggerComplete()
+			}
+		}
+
		wh, err := dst.NextPart(ctx, chunk)
		if err != nil {
			_ = rh.Close()
@ -578,12 +554,8 @@ func (c *cmdCp) parallelCopy(

			var w io.Writer = wh
			if bar != nil {
-				bar.SetTotal(rh.Info().ContentLength, false)
-				bar.EnableTriggerComplete()
				pw := bar.ProxyWriter(w)
-				defer func() {
-					_ = pw.Close()
-				}()
+				defer func() { _ = pw.Close() }()
				w = pw
			}

@ -619,59 +591,6 @@ func (c *cmdCp) parallelCopy(
	return errs.Wrap(combineErrs(es))
}

-func (c *cmdCp) singleCopy(
-	ctx context.Context,
-	source, dest ulloc.Location,
-	src ulfs.MultiReadHandle,
-	dst ulfs.MultiWriteHandle,
-	offset, length int64,
-	bar *mpb.Bar) error {
-
-	if offset != 0 {
-		if err := src.SetOffset(offset); err != nil {
-			return err
-		}
-	}
-
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
-
-	rh, err := src.NextPart(ctx, length)
-	if err != nil {
-		return errs.Wrap(err)
-	}
-	defer func() { _ = rh.Close() }()
-
-	wh, err := dst.NextPart(ctx, length)
-	if err != nil {
-		return errs.Wrap(err)
-	}
-	defer func() { _ = wh.Abort() }()
-
-	var w io.Writer = wh
-	if bar != nil {
-		bar.SetTotal(rh.Info().ContentLength, false)
-		bar.EnableTriggerComplete()
-		pw := bar.ProxyWriter(w)
-		defer func() { _ = pw.Close() }()
-		w = pw
-	}
-
-	if _, err := sync2.Copy(ctx, w, rh); err != nil {
-		return errs.Wrap(err)
-	}
-
-	if err := wh.Commit(); err != nil {
-		return errs.Wrap(err)
-	}
-
-	if err := dst.Commit(ctx); err != nil {
-		return errs.Wrap(err)
-	}
-
-	return nil
-}
-
func newProgressBar(progress *mpb.Progress, name string, which, total int) *mpb.Bar {
	const counterFmt = " % .2f / % .2f"
	const percentageFmt = "%.2f "
@ -99,46 +99,51 @@ func TestCpDownload(t *testing.T) {
func TestCpPartSize(t *testing.T) {
	c := newCmdCp(nil)

-	// 1GiB file, should return 64MiB
-	partSize, err := c.calculatePartSize(memory.GiB.Int64(), c.parallelismChunkSize.Int64())
+	// 10 GiB file, should return 1 GiB
+	partSize, err := c.calculatePartSize(10*memory.GiB.Int64(), c.parallelismChunkSize.Int64())
	require.NoError(t, err)
-	require.EqualValues(t, memory.MiB*64, partSize)
+	require.EqualValues(t, 1*memory.GiB, partSize)

-	// 640 GB file, should return 64MiB.
-	partSize, err = c.calculatePartSize(memory.GB.Int64()*640, c.parallelismChunkSize.Int64())
+	// 10000 GB file, should return 1 GiB.
+	partSize, err = c.calculatePartSize(10000*memory.GB.Int64(), c.parallelismChunkSize.Int64())
	require.NoError(t, err)
-	require.EqualValues(t, memory.MiB*64, partSize)
+	require.EqualValues(t, 1*memory.GiB, partSize)

-	// 640GiB file, should return 128MiB.
-	partSize, err = c.calculatePartSize(memory.GiB.Int64()*640, c.parallelismChunkSize.Int64())
+	// 10000 GiB file, should return 2 GiB.
+	partSize, err = c.calculatePartSize(10000*memory.GiB.Int64(), c.parallelismChunkSize.Int64())
	require.NoError(t, err)
-	require.EqualValues(t, memory.MiB*128, partSize)
+	require.EqualValues(t, 2*memory.GiB, partSize)

-	// 1TiB file, should return 128MiB.
-	partSize, err = c.calculatePartSize(memory.TiB.Int64(), c.parallelismChunkSize.Int64())
+	// 10 TiB file, should return 2 GiB.
+	partSize, err = c.calculatePartSize(10*memory.TiB.Int64(), c.parallelismChunkSize.Int64())
	require.NoError(t, err)
-	require.EqualValues(t, memory.MiB*128, partSize)
+	require.EqualValues(t, 2*memory.GiB, partSize)

-	// 1.3TiB file, should return 192MiB.
-	partSize, err = c.calculatePartSize(memory.GiB.Int64()*1300, c.parallelismChunkSize.Int64())
+	// 20001 GiB file, should return 3 GiB.
+	partSize, err = c.calculatePartSize(20001*memory.GiB.Int64(), c.parallelismChunkSize.Int64())
	require.NoError(t, err)
-	require.EqualValues(t, memory.MiB*192, partSize)
+	require.EqualValues(t, 3*memory.GiB, partSize)

	// should return 1GiB as requested.
	partSize, err = c.calculatePartSize(memory.GiB.Int64()*1300, memory.GiB.Int64())
	require.NoError(t, err)
	require.EqualValues(t, memory.GiB, partSize)

-	// should return 192 MiB and error, since preferred is too low.
-	partSize, err = c.calculatePartSize(memory.GiB.Int64()*1300, memory.MiB.Int64())
+	// should return 1 GiB and error, since preferred is too low.
+	partSize, err = c.calculatePartSize(1300*memory.GiB.Int64(), memory.MiB.Int64())
	require.Error(t, err)
-	require.Equal(t, "the specified chunk size 1.0 MiB is too small, requires 192.0 MiB or larger", err.Error())
+	require.Equal(t, "the specified chunk size 1.0 MiB is too small, requires 1.0 GiB or larger", err.Error())
	require.Zero(t, partSize)

-	// negative length should return 64MiB part size
-	partSize, err = c.calculatePartSize(-1, c.parallelismChunkSize.Int64())
+	// negative length should return asked for amount
+	partSize, err = c.calculatePartSize(-1, 1*memory.GiB.Int64())
	require.NoError(t, err)
-	require.EqualValues(t, memory.MiB*64, partSize)
+	require.EqualValues(t, 1*memory.GiB, partSize)
+
+	// negative length should return specified amount
+	partSize, err = c.calculatePartSize(-1, 100)
+	require.NoError(t, err)
+	require.EqualValues(t, 100, partSize)
}

func TestCpUpload(t *testing.T) {
@ -110,6 +110,7 @@ func (c *cmdShare) Execute(ctx context.Context) error {
	fmt.Fprintf(clingy.Stdout(ctx), "Deletes : %s\n", formatPermission(c.ap.AllowDelete()))
	fmt.Fprintf(clingy.Stdout(ctx), "NotBefore : %s\n", formatTimeRestriction(c.ap.NotBefore()))
	fmt.Fprintf(clingy.Stdout(ctx), "NotAfter : %s\n", formatTimeRestriction(c.ap.NotAfter()))
+	fmt.Fprintf(clingy.Stdout(ctx), "MaxObjectTTL : %s\n", formatDuration(c.ap.maxObjectTTL))
	fmt.Fprintf(clingy.Stdout(ctx), "Paths : %s\n", formatPaths(c.ap.prefixes))
	fmt.Fprintf(clingy.Stdout(ctx), "=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========\n")
	fmt.Fprintf(clingy.Stdout(ctx), "Access : %s\n", newAccessData)
@ -182,6 +183,13 @@ func formatTimeRestriction(t time.Time) string {
	return formatTime(true, t)
}

+func formatDuration(d *time.Duration) string {
+	if d == nil {
+		return "Not set"
+	}
+	return d.String()
+}
+
func formatPaths(sharePrefixes []uplink.SharePrefix) string {
	if len(sharePrefixes) == 0 {
		return "WARNING! The entire project is shared!"
@ -39,6 +39,7 @@ func TestShare(t *testing.T) {
	Deletes : Disallowed
	NotBefore : No restriction
	NotAfter : No restriction
+	MaxObjectTTL : Not set
	Paths : sj://some/prefix
	=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
	Access : *
@ -57,6 +58,7 @@ func TestShare(t *testing.T) {
	Deletes : Disallowed
	NotBefore : No restriction
	NotAfter : No restriction
+	MaxObjectTTL : Not set
	Paths : sj://some/prefix
	=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
	Access : *
@ -75,6 +77,7 @@ func TestShare(t *testing.T) {
	Deletes : Disallowed
	NotBefore : No restriction
	NotAfter : No restriction
+	MaxObjectTTL : Not set
	Paths : sj://some/prefix
	=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
	Access : *
@ -93,6 +96,7 @@ func TestShare(t *testing.T) {
	Deletes : Disallowed
	NotBefore : No restriction
	NotAfter : No restriction
+	MaxObjectTTL : Not set
	Paths : sj://some/prefix
	=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
	Access : *
@ -122,13 +126,14 @@ func TestShare(t *testing.T) {
	Deletes : Disallowed
	NotBefore : No restriction
	NotAfter : No restriction
+	MaxObjectTTL : Not set
	Paths : sj://some/prefix
	=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
	Access : *
	`)
	})

-	t.Run("share access with --not-after time restriction parameter", func(t *testing.T) {
+	t.Run("share access with --not-after", func(t *testing.T) {
		state := ultest.Setup(commands)

		state.Succeed(t, "share", "--not-after", "2022-01-01T15:01:01-01:00", "sj://some/prefix").RequireStdoutGlob(t, `
@ -140,6 +145,26 @@ func TestShare(t *testing.T) {
	Deletes : Disallowed
	NotBefore : No restriction
	NotAfter : 2022-01-01 16:01:01
+	MaxObjectTTL : Not set
	Paths : sj://some/prefix
	=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
	Access : *
	`)
	})

+	t.Run("share access with --max-object-ttl", func(t *testing.T) {
+		state := ultest.Setup(commands)
+
+		state.Succeed(t, "share", "--max-object-ttl", "720h", "--readonly=false", "sj://some/prefix").RequireStdoutGlob(t, `
+	Sharing access to satellite *
+	=========== ACCESS RESTRICTIONS ==========================================================
+	Download : Allowed
+	Upload : Allowed
+	Lists : Allowed
+	Deletes : Allowed
+	NotBefore : No restriction
+	NotAfter : No restriction
+	MaxObjectTTL : 720h0m0s
+	Paths : sj://some/prefix
+	=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
+	Access : *
@ -190,6 +215,7 @@ func TestShare(t *testing.T) {
	Deletes : Disallowed
	NotBefore : No restriction
	NotAfter : No restriction
+	MaxObjectTTL : Not set
	Paths : sj://some/prefix
	=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========
	Access : *
@ -43,6 +43,16 @@ func (ex *external) OpenProject(ctx context.Context, accessName string, options
		UserAgent: uplinkCLIUserAgent,
	}

+	userAgents, err := ex.Dynamic("client.user-agent")
+	if err != nil {
+		return nil, err
+	}
+	if len(userAgents) > 0 {
+		if ua := userAgents[len(userAgents)-1]; ua != "" {
+			config.UserAgent = ua
+		}
+	}
+
	if opts.ConnectionPoolOptions != (rpcpool.Options{}) {
		if err := transport.SetConnectionPool(ctx, &config, rpcpool.New(opts.ConnectionPoolOptions)); err != nil {
			return nil, err
273
docs/blueprints/certified-nodes.md
Normal file
@ -0,0 +1,273 @@
# Node and operator certification

## Abstract

This is a proposal for a small feature and service that allows for nodes and
operators to have signed tags of certain kinds for use in project-specific or
Satellite-specific node selection.

## Background/context

We have a couple of ongoing needs:

* 1099 KYC
* Private storage node networks
* SOC2/HIPAA/etc node certification
* Voting and operator signaling

### 1099 KYC

The United States has a rule that if node operators earn more than $600/year,
we need to file a 1099 for each of them. Our current way of dealing with this
is manual and time consuming, and so it would be nice to automate it.

Ultimately, we should be able to automatically:

1) keep track of which nodes are run by operators under or over the $600
   threshold.
2) keep track of if an automated KYC service has signed off that we have the
   necessary information to file a 1099.
3) automatically suspend nodes that have earned more than $600 but have not
   provided legally required information.

### Private storage node networks

We have seen growing interest from customers that want to bring their own
hard drives, or be extremely choosy about the nodes they are willing to work
with. The current way we are solving this is spinning up private Satellites
that are configured to only work with the nodes those customers provide, but
it would be better if we didn't have to start custom Satellites for this.

Instead, it would be nice to have a per-project configuration on an existing
Satellite that allowed that project to specify a specific subset of verified
or validated nodes, e.g., Project A should be able to say only nodes from
node providers B and C should be selected. Symmetrically, nodes from providers
B and C may only want to accept data from certain projects, like Project A.

When nodes from providers B and C are added to the Satellite, they should be
able to provide a provider-specific signature, and requirements about
customer-specific requirements, if any.

### SOC2/HIPAA/etc node certification

This is actually just a slightly different shape of the private storage node
network problem, but instead of being provider-specific, it is property
specific.

Perhaps Project D has a compliance requirement. They can only store data
on nodes that meet specific requirements.

Node operators E and F are willing to conform and attest to these compliance
requirements, but don't know about Project D. It would be nice if node
operators E and F could navigate to a compliance portal and see a list of
potential compliance attestations available. For possible compliance
attestations, node operators could sign agreements for these, and then receive
a verified signature that shows their selected compliance options.

Then, Project D's node selection process would filter by nodes that had been
approved for the necessary compliance requirements.

### Voting and operator signaling

As Satellite operators ourselves, we are currently engaged in a discussion about
pricing changes with storage node operators. Future Satellite operators may find
themselves in similar situations. It would be nice if storage node operators
could indicate votes for values. This would potentially be more representative
of network sentiment than posts on a forum.

Note that this isn't a transparent voting scheme, where other voters can see
the votes made, so this may not be a great voting solution in general.

## Design and implementation

I believe there are two basic building blocks that solve all of the above
issues:

* Signed node tags (with potential values)
* A document signing service

### Signed node tags

The network representation:

```
message Tag {
    // Note that there is a single flat namespace of all names per
    // signer node id. Signers should be careful to make sure that
    // there are no name collisions. For self-signed content-hash
    // based values, the name should have the prefix of the content
    // hash.
    string name = 1;
    bytes value = 2; // optional, representation dependent on name.
}

message TagSet {
    // must always be set. this is the node the signer is signing for.
    bytes node_id = 1;

    repeated Tag tags = 2;

    // must always be set. this makes sure the signature is signing the
    // timestamp inside.
    int64 timestamp = 3;
}

message SignedTagSet {
    // this is the serialized form of TagSet, serialized so that
    // the signature process has something stable to work with.
    bytes serialized_tag = 1;

    // this is who signed (could be self signed, could be well known).
    bytes signer_node_id = 3;
    bytes signature = 4;
}

message SignedTagSets {
    repeated SignedTagSet tags = 1;
}
```

Note that every tag is signing a name/value pair (value optional) against
a specific node id.

Note also that names are only unique within the namespace of a given signer.

The database representation on the Satellite. N.B.: nothing should be entered
into this database without validation:

```
model signed_tags (
    field node_id        blob
    field name           text
    field value          blob
    field timestamp      int64
    field signer_node_id blob
)
```

The "signer_node_id" is worth more explanation. Every signer should have a
stable node id. Satellites and storage nodes already have one, but any other
service that validates node tags would also need one.

In particular, the document signing service (below) would have its own unique
node id for signing tags, whereas for voting-style tags or tags based on a
content-addressed identifier (e.g. a hash of a document), the nodes would
self-sign.

### Document signing service

We would start a small web service, where users can log in and sign and fill
out documents. This web service would then create a unique activation code
that storage node operators could run on their storage nodes for activation and
signing. They could run `storagenode activate <code>` and then the node would
reach out to the signing service and get a `SignedTag` related to that node
given the information the user provided. The node could then present these
to the satellite.

Ultimately, the document signing service will require a separate design doc,
but here are some considerations for it:

Activation codes must expire shortly. Even Netflix has two hours of validity
for their service code - for a significantly less critical use case. What would
be a usable validity time for our use case? 15 minutes? 1 hour? Should we make
it configurable?

We want to still keep usability in mind for a SNO who needs to activate 500
nodes.

It would be even better if the SNO could force invalidating the activation code
when they are done with it.

As activation codes expire, the SNO should be able to generate a new activation
code if they want to associate a new node to an already signed document.

It should be hard to brute-force activation codes. They shouldn't be simple
numbers (4-digit or 6-digit) but something as complex as a UUID.
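
As a minimal sketch of what such code generation could look like (assuming the
service stores the code alongside an expiry; the function name and parameters
are illustrative, not the actual signing-service API):

```go
package main

import (
	"crypto/rand"
	"encoding/base32"
	"fmt"
	"time"
)

// newActivationCode returns a random, URL-safe code roughly as hard to
// guess as a UUID (128 bits of entropy), together with its expiry time.
// Illustrative only; not part of the proposed service's actual API.
func newActivationCode(validity time.Duration) (code string, expires time.Time, err error) {
	var buf [16]byte
	if _, err = rand.Read(buf[:]); err != nil {
		return "", time.Time{}, err
	}
	code = base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(buf[:])
	return code, time.Now().Add(validity), nil
}

func main() {
	code, expires, err := newActivationCode(15 * time.Minute)
	if err != nil {
		panic(err)
	}
	fmt.Println(code, "valid until", expires.Format(time.RFC3339))
}
```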

It's also possible that the SNO uses some signature mechanism during signing
service authentication, and the same signature is used for activation. If the
same signature mechanism is used during activation then no token is necessary.

### Update node selection

Once the above two building blocks exist, many problems become much more easily
solvable.

We would want to extend node selection to be able to do queries,
given project-specific configuration, based on these signed_tag values.

Because node selection mostly happens in memory from cached node table data,
it should be easy to add some denormalized data for certain selected cases,
such as:

* Document hashes nodes have self signed.
* Approval states based on well known third party signer nodes (a KYC service).

Once these fields exist, then node selection can happen as before, filtering
for the appropriate value given project settings.
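
For illustration only (the types and function below are hypothetical, not the
satellite's actual node-selection API), a project-scoped filter over the cached
signed-tag data could look like:

```go
package main

import "fmt"

// SignedTag mirrors one row of the signed_tags model above; illustrative only.
type SignedTag struct {
	SignerNodeID string
	Name         string
	Value        string
}

// filterNodes keeps the node IDs that carry the tag required by the project
// configuration, signed by a trusted signer.
func filterNodes(nodeIDs []string, tags map[string][]SignedTag, signer, name, value string) []string {
	var selected []string
	for _, id := range nodeIDs {
		for _, t := range tags[id] {
			if t.SignerNodeID == signer && t.Name == name && t.Value == value {
				selected = append(selected, id)
				break
			}
		}
	}
	return selected
}

func main() {
	tags := map[string][]SignedTag{
		"node-a": {{SignerNodeID: "kyc-service", Name: "kyc", Value: "approved"}},
		"node-b": {},
	}
	fmt.Println(filterNodes([]string{"node-a", "node-b"}, tags, "kyc-service", "kyc", "approved"))
	// prints: [node-a]
}
```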
## How these building blocks work for the example use cases

### 1099 KYC

The document signing service would have a KYC (Know Your Customer) form. Once
filled out, the document signing service would make a `TagSet` that includes all
of the answers to the KYC questions, for the given node id, signed by the
document signing service's node id.

The node would hang on to this `SignedTagSet` and submit it along with others
in a `SignedTagSets` to Satellites occasionally (maybe once a month during
node CheckIn).

### Private storage node networks

Storage node provisioning would provide nodes with a signed `SignedTagSet`
from a provisioning service that had its own node id. Then a private Satellite
could be configured to require that all nodes present a `SignedTagSet` signed
by the configured provisioning service that has that node's id in it.

Notably - this functionality could also be solved by the older waitlist node
identity signing certificate process, but we are slowly removing what remains
of that feature over time.

This functionality could also be solved by setting the Satellite's minimum
allowable node id difficulty to the maximum possible difficulty, thus preventing
any automatic node registration, and manually inserting node ids into the
database. This is what we are currently doing for private network trials, but
if `SignedTagSet`s existed, that would be easier.

### SOC2/HIPAA/etc node certification

For any type of document that doesn't require any third party service
(such as government id validation, etc), the document and its fields can be
filled out and self signed by the node, along with a content hash of the
document in question.

The node would create a `TagSet`, where one field is the hash of the legal
document that was agreed upon, and the remaining fields (with names prefixed
by the document's content hash) would be form fields
that the node operator filled in and ascribed to the document. Then, the
`TagSet` would be signed by the node itself. The cryptographic nature of the
content hash inside the `TagSet` would validate what the node operator had
agreed to.

### Voting and operator signaling

Node operators could self sign additional `Tag`s inside of a miscellaneous
`TagSet`, including `Tag`s such as

```
"storage-node-vote-20230611-network-change": "yes"
```

Or similar.

## Open problems

* Revocation? - `TagSet`s have a timestamp inside that must be filled out. In
  the future, certain tags could have an expiry or updated values or similar.

## Other options

## Wrapup

## Related work
25
docs/testplan/project-cowbell-testplan.md
Normal file
@ -0,0 +1,25 @@
# Mini Cowbell Testplan

## Background
We want to deploy the entire Storj stack on environments that have kubernetes running on 5 NUCs.

## Pre-condition
Configuration for satellites that only have 5 nodes. The recommended RS scheme is [2,3,4,4], where:
- 2 is the number of required pieces to reconstitute the segment.
- 3 is the repair threshold, i.e. if a segment remains with only 3 healthy pieces, it will be repaired.
- 4 is the success threshold, i.e. the number of pieces required for a successful upload or repair.
- 4 is the number of total erasure-coded pieces that will be generated.
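To make the expansion concrete: with [2,3,4,4] the expansion factor is total/required = 4/2 = 2x, so a 1 GiB object is stored as four 0.5 GiB pieces (2 GiB raw) on four of the five nodes, and any 2 of those pieces suffice to reconstitute it.
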
| Test Scenario | Test Case | Description | Comments |
|---------------|-----------|-------------|----------|
| Upload | Upload with all nodes online | Every file is uploaded to 4 nodes with a 2x expansion factor, so for each file one node holds no piece. | Happy path scenario |
| | Upload with one node offline | If one of five nodes fails and goes offline, 80% of the stored data will lose one erasure-coded piece. The health status of these segments will be reduced from 4 pieces to 3 pieces and will mark these segments for repair. (overlay.node.online-window: 4h0m0s -> for about 4 hours the node will still be selected for uploads) | Uploads will continue uninterrupted if the client uses the new refactored upload path. This improved upload logic will request a new node from the satellite if the satellite selects the offline node for the upload, unaware it is already offline. If the client uses the old upload logic, uploads may fail if the satellite selects the offline node (20% chance). Once the satellite detects the offline node, all uploads will be successful. |
| Download | Download with one node offline | If one of five nodes fails and goes offline, 80% of the stored data will lose one erasure-coded piece. The health status of these segments will be reduced from 4 pieces to 3 pieces and will mark these segments for repair. (overlay.node.online-window: 4h0m0s -> for about 4 hours the node will still be selected for downloads) | |
| Repair | Repair with 2 nodes disqualified | Disqualify 2 nodes so that repair downloads are still possible but no node is available for an upload; repair shouldn't consume download bandwidth and should error out early. Only spend download bandwidth when there is at least one node available for an upload. | If two nodes go offline, in the worst case only 2 pieces remain, which cannot be repaired and is de facto data loss if the offline nodes are damaged. |
| Audit | | Audits can't identify corrupted pieces with just the minimum number of pieces; reputation should not increase. Audits should be able to identify corrupted pieces with minimum + 1 pieces; reputation should decrease. | |
| Upgrades | Nodes restart for upgrades | No more than a single node goes offline for maintenance. Otherwise, normal operation of the network cannot be ensured. | Occasionally, nodes may need to restart due to software updates. This brings the node offline for some period of time. |
58
docs/testplan/storj-private-cloud-testplan.md
Normal file
@ -0,0 +1,58 @@
## Storj Private Cloud - Test Plan

## Test Scenarios

Some test ideas:
- Upload and download some data
- Server side copy and server side move
- Multipart uploads
- Versioning (replace an existing file)
- Audit identifies a bad node and Repair finds new good nodes for the pieces (integration test including audit reservoir sampling, audit job, reverifier, repair checker, repair worker)
- Repair checker and repair worker performance with a million segments in the repair queue (repair queue needs to be ordered by null values first)
- Ranged loop performance (do we get better performance from running 2 range loops vs a single range?)
- Upload, Download, List, Delete performance with a million segments in the DB.
- Garbage collection, especially the bloom filter creation. Needs to be run from a backup DB and can't be run from the live DB.
- Storage node and customer accounting
- Account upload and download limits (redis cache)
- Customer signup with onboarding including creating an access grant
- Token payments
- Graceful exit
- Node selection with geofencing, suspended nodes, disqualified nodes, offline nodes, nodes running outdated versions, nodes out of disk space

Bonus section (technically out of scope but still interesting questions for other tickets):
- Should a private satellite require a stripe account for the billing section? What does the UI look like without a stripe account? How can the customer upgrade to a pro account without having to add a credit card?
- Does the satellite need to be able to send out emails? For signup we have a simulation mode, but for other features like project member invites we can't skip the email currently. (Other features with similar issues: storage node notifications, account freeze, password reset)
- What is the plan for the initial vetting period? A brand new satellite with brand new nodes will not be able to upload any data because there are not enough vetted nodes. -> config change to upload to unvetted nodes -> risk of uploading too much data to unvetted nodes by keeping this setting longer than necessary.

## [Test Plan Table]

| Test Scenario | Test Case | Description | Comments |
|---------------|-----------|-------------|----------|
| Upload | Small file | Do the upload for 1 KiB, 5 KiB, 1 MiB, 64 MiB files. | |
| | Big file | Do the upload of 1024 MB files. | |
| | Multipart upload | Upload a big file to check the multipart upload. | |
| Download | Inline segment | User should download inline segments without any errors. | |
| | Remote segment | User should download remote segments without any errors. | |
| | Copy 10000 or more segments | If a user uploads an object with 10000 segments or more and server side copies it from the source object to the destination object, it should be possible. | |
| | Copy inline segment | User should copy inline segments without any errors. | |
| | Copy remote segment | User should copy remote segments without any errors. | |
| Move | Move object | Move an object from one bucket to another bucket. | |
| Versioning | Replace an existing file | User should be able to update an existing file. | |
| DB - Table segment | Expiration date | If a user uses server-side copy, then the source object and the destination object must have the same expiration date. | Might be a redundant test because of the segment table removal |
| DB - Table `segment_copies` | Ancestor_stream_id negative | If a segment with `stream_id = S` hasn't been copied, then the `segment_copies` table has no row having `ancestor_stream_id = S`. | Might be a redundant test because of the segment table removal |
| | Ancestor_stream_id positive | If a segment with `stream_id = S` has been copied, then the `segment_copies` table has at least one row having `ancestor_stream_id = S`. | Might be a redundant test because of the segment table removal |
| Repair | Data repair | Upload some data, then kill some nodes and disqualify 1 node (there should be enough storage nodes to upload repaired segments). The repaired segment should not contain any piece on the killed and DQ nodes. Download the data from the new nodes and check that it is the same as the uploaded data. | This test should be in the code |
| Token payments | Multiple transactions | If a user has a pending transaction and then performs another transaction with a higher nonce using the same address, the new transaction has to wait until the previous transaction with the lower nonce is confirmed (standard behavior of geth, nothing to test for us). | |
| | Invoice generation | When an invoice is generated and "paid", coupons should be used first, followed by storj balance and then lastly credit card. | |
| Performance | Repair queue index has to be null value first. | https://storj.slack.com/archives/C01427KSZ1P/p1589815803066100 | |
| Garbage Collection | Garbage collection | Needs to be run from a backup DB and can't be run from the live DB. | |
| Accounting | Customer | Generate the full invoice cycle. | |
| | Storage node | Generate the invoice. | |
| Account limits | Upload | Verify that limits are working. | |
| | Download | Verify that limits are working. | |
| Signup | Customer signup | Customer signup with onboarding including creating an access grant. | |
go.mod
@ -1,6 +1,6 @@
|
||||
module storj.io/storj
|
||||
|
||||
go 1.18
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
github.com/VividCortex/ewma v1.2.0
|
||||
@ -22,21 +22,22 @@ require (
|
||||
github.com/jackc/pgx/v5 v5.3.1
|
||||
github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3
|
||||
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d
|
||||
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b
|
||||
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6
|
||||
github.com/loov/hrtime v1.0.3
|
||||
github.com/mattn/go-sqlite3 v1.14.12
|
||||
github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce
|
||||
github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1
|
||||
github.com/oschwald/maxminddb-golang v1.8.0
|
||||
github.com/oschwald/maxminddb-golang v1.12.0
|
||||
github.com/pquerna/otp v1.3.0
|
||||
github.com/redis/go-redis/v9 v9.0.3
|
||||
github.com/shopspring/decimal v1.2.0
|
||||
github.com/spacemonkeygo/monkit/v3 v3.0.20-0.20230419135619-fb89f20752cb
|
||||
github.com/spacemonkeygo/monkit/v3 v3.0.22
|
||||
github.com/spacemonkeygo/tlshowdy v0.0.0-20160207005338-8fa2cec1d7cd
|
||||
github.com/spf13/cobra v1.1.3
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/spf13/viper v1.7.1
|
||||
github.com/stretchr/testify v1.8.2
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/stripe/stripe-go/v72 v72.90.0
|
||||
github.com/vbauerster/mpb/v8 v8.4.0
|
||||
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3
|
||||
@ -46,25 +47,26 @@ require (
|
||||
github.com/zeebo/errs v1.3.0
|
||||
github.com/zeebo/errs/v2 v2.0.3
|
||||
github.com/zeebo/ini v0.0.0-20210514163846-cc8fbd8d9599
|
||||
github.com/zeebo/structs v1.0.3-0.20230601144555-f2db46069602
|
||||
github.com/zyedidia/generic v1.2.1
|
||||
go.etcd.io/bbolt v1.3.5
|
||||
go.uber.org/zap v1.16.0
|
||||
golang.org/x/crypto v0.7.0
|
||||
golang.org/x/crypto v0.12.0
|
||||
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db
|
||||
golang.org/x/net v0.9.0
|
||||
golang.org/x/net v0.10.0
|
||||
golang.org/x/oauth2 v0.7.0
|
||||
golang.org/x/sync v0.1.0
|
||||
golang.org/x/sys v0.7.0
|
||||
golang.org/x/term v0.7.0
|
||||
golang.org/x/text v0.9.0
|
||||
golang.org/x/sync v0.3.0
|
||||
golang.org/x/sys v0.11.0
|
||||
golang.org/x/term v0.11.0
|
||||
golang.org/x/text v0.12.0
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
|
||||
gopkg.in/segmentio/analytics-go.v3 v3.1.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
storj.io/common v0.0.0-20230602145716-d6ea82d58b3d
|
||||
storj.io/common v0.0.0-20230920095429-0ce0a575e6f8
|
||||
storj.io/drpc v0.0.33
|
||||
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41
|
||||
storj.io/private v0.0.0-20230627140631-807a2f00d0e1
|
||||
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2
|
||||
storj.io/private v0.0.0-20230912093002-ca2d4ab44679
|
||||
storj.io/uplink v1.12.1
|
||||
)
|
||||
|
||||
require (
|
||||
@ -83,7 +85,7 @@ require (
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/flynn/noise v1.0.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.4 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||
github.com/golang-jwt/jwt v3.2.1+incompatible // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
@ -107,14 +109,12 @@ require (
|
||||
github.com/mattn/go-isatty v0.0.12 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.1 // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.2.0 // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
|
||||
github.com/pelletier/go-toml v1.9.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/quic-go/qtls-go1-18 v0.2.0 // indirect
|
||||
github.com/quic-go/qtls-go1-19 v0.2.0 // indirect
|
||||
github.com/quic-go/qtls-go1-20 v0.1.0 // indirect
|
||||
github.com/quic-go/quic-go v0.32.0 // indirect
|
||||
github.com/quic-go/qtls-go1-20 v0.3.2 // indirect
|
||||
github.com/quic-go/quic-go v0.38.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/segmentio/backo-go v0.0.0-20200129164019-23eae7c10bd3 // indirect
|
||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
|
||||
@ -128,12 +128,11 @@ require (
|
||||
github.com/zeebo/float16 v0.1.0 // indirect
|
||||
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54 // indirect
|
||||
github.com/zeebo/mwc v0.0.4 // indirect
|
||||
github.com/zeebo/structs v1.0.3-0.20230601144555-f2db46069602 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
golang.org/x/mod v0.8.0 // indirect
|
||||
golang.org/x/tools v0.6.0 // indirect
|
||||
golang.org/x/mod v0.10.0 // indirect
|
||||
golang.org/x/tools v0.9.1 // indirect
|
||||
google.golang.org/api v0.118.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
|
||||
@ -141,5 +140,5 @@ require (
|
||||
google.golang.org/protobuf v1.30.0 // indirect
|
||||
gopkg.in/ini.v1 v1.62.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
storj.io/picobuf v0.0.1 // indirect
|
||||
storj.io/picobuf v0.0.2-0.20230906122608-c4ba17033c6c // indirect
|
||||
)
|
||||
|
go.sum (85 changed lines)
@@ -143,12 +143,14 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-oauth2/oauth2/v4 v4.4.2 h1:tWQlR5I4/qhWiyOME67BAFmo622yi+2mm7DMm8DpMdg=
github.com/go-oauth2/oauth2/v4 v4.4.2/go.mod h1:K4DemYzNwwYnIDOPdHtX/7SlO0AHdtlphsTgE7lA3PA=
github.com/go-session/session v3.1.2+incompatible/go.mod h1:8B3iivBQjrz/JtC68Np2T1yBBLxTan3mn/3OM0CyRt0=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@@ -324,6 +326,8 @@ github.com/jtolds/tracetagger/v2 v2.0.0-rc5 h1:SriMFVtftPsQmG+0xaABotz9HnoKoo1QM
github.com/jtolds/tracetagger/v2 v2.0.0-rc5/go.mod h1:61Fh+XhbBONy+RsqkA+xTtmaFbEVL040m9FAF/hTrjQ=
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d h1:MAGZUXA8MLSA5oJT1Gua3nLSyTYF2uvBgM4Sfs5+jts=
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d/go.mod h1:PXFUrknJu7TkBNyL8t7XWDPtDFFLFrNQQAdsXv9YfJE=
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b h1:HKvXTXZTeUHXRibg2ilZlkGSQP6A3cs0zXrBd4xMi6M=
github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b/go.mod h1:Mrym6OnPMkBKvN8/uXSkyhFSh6ndKKYE+Q4kxCfQ4V0=
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6 h1:iVMQyk78uOpX/UKjEbzyBdptXgEz6jwGwo7kM9IQ+3U=
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6/go.mod h1:MEkhEPFwP3yudWO0lj6vfYpLIB+3eIcuIW+e0AZzUQk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@@ -420,15 +424,15 @@ github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvw
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.2.0 h1:3ZNA3L1c5FYDFTTxbFeVGGD8jYvjYauHD30YgLxVsNI=
github.com/onsi/ginkgo/v2 v2.2.0/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/oschwald/maxminddb-golang v1.8.0 h1:Uh/DSnGoxsyp/KYbY1AuP0tYEwfs0sCph9p/UMXK/Hk=
github.com/oschwald/maxminddb-golang v1.8.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis=
github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs=
github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.0 h1:NOd0BRdOKpPf0SxkL3HxSQOG7rNh+4kl6PHcBPFs7Q0=
@@ -456,14 +460,10 @@ github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/quic-go/qtls-go1-18 v0.2.0 h1:5ViXqBZ90wpUcZS0ge79rf029yx0dYB0McyPJwqqj7U=
github.com/quic-go/qtls-go1-18 v0.2.0/go.mod h1:moGulGHK7o6O8lSPSZNoOwcLvJKJ85vVNc7oJFD65bc=
github.com/quic-go/qtls-go1-19 v0.2.0 h1:Cvn2WdhyViFUHoOqK52i51k4nDX8EwIh5VJiVM4nttk=
github.com/quic-go/qtls-go1-19 v0.2.0/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI=
github.com/quic-go/qtls-go1-20 v0.1.0 h1:d1PK3ErFy9t7zxKsG3NXBJXZjp/kMLoIb3y/kV54oAI=
github.com/quic-go/qtls-go1-20 v0.1.0/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
github.com/quic-go/quic-go v0.32.0 h1:lY02md31s1JgPiiyfqJijpu/UX/Iun304FI3yUqX7tA=
github.com/quic-go/quic-go v0.32.0/go.mod h1:/fCsKANhQIeD5l76c2JFU+07gVE3KaA0FP+0zMWwfwo=
github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI=
github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
github.com/quic-go/quic-go v0.38.0 h1:T45lASr5q/TrVwt+jrVccmqHhPL2XuSyoCLVCpfOSLc=
github.com/quic-go/quic-go v0.38.0/go.mod h1:MPCuRq7KBK2hNcfKj/1iD1BGuN3eAYMeNxp3T42LRUg=
github.com/redis/go-redis/v9 v9.0.3 h1:+7mmR26M0IvyLxGZUHxu4GiBkJkVDid0Un+j4ScYu4k=
github.com/redis/go-redis/v9 v9.0.3/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -526,8 +526,8 @@ github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod
github.com/spacemonkeygo/monkit/v3 v3.0.0-20191108235033-eacca33b3037/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
github.com/spacemonkeygo/monkit/v3 v3.0.18/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
github.com/spacemonkeygo/monkit/v3 v3.0.20-0.20230419135619-fb89f20752cb h1:kWLHxcYDcloMFEJMngxuKh8wcLl9RjjeAN2a9AtTtCg=
github.com/spacemonkeygo/monkit/v3 v3.0.20-0.20230419135619-fb89f20752cb/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
github.com/spacemonkeygo/monkit/v3 v3.0.22 h1:4/g8IVItBDKLdVnqrdHZrCVPpIrwDBzl1jrV0IHQHDU=
github.com/spacemonkeygo/monkit/v3 v3.0.22/go.mod h1:XkZYGzknZwkD0AKUnZaSXhRiVTLCkq7CWVa3IsE72gA=
github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo=
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
@@ -565,8 +565,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stripe/stripe-go/v72 v72.90.0 h1:fvJ/aL1rHHWRj5buuayb/2ufJued1UR1HEVavsoZoFs=
github.com/stripe/stripe-go/v72 v72.90.0/go.mod h1:QwqJQtduHubZht9mek5sds9CtQcKFdsykV9ZepRWwo0=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
@@ -705,8 +705,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -736,8 +736,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -773,8 +773,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220526153639-5463443f8c37/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -794,8 +794,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -821,7 +821,6 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -841,13 +840,13 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -856,8 +855,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -896,8 +895,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1013,16 +1012,16 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
storj.io/common v0.0.0-20220719163320-cd2ef8e1b9b0/go.mod h1:mCYV6Ud5+cdbuaxdPD5Zht/HYaIn0sffnnws9ErkrMQ=
storj.io/common v0.0.0-20230602145716-d6ea82d58b3d h1:AXdJxmg4Jqdz1nmogSrImKOHAU+bn8JCy8lHYnTwP0Y=
storj.io/common v0.0.0-20230602145716-d6ea82d58b3d/go.mod h1:zu2L8WdpvfIBrCbBTgPsz4qhHSArYSiDgRcV1RLlIF8=
storj.io/common v0.0.0-20230920095429-0ce0a575e6f8 h1:i+bWPhVnNL6z/TLW3vDZytB6/0bsvJM0a1GhLCxrlxQ=
storj.io/common v0.0.0-20230920095429-0ce0a575e6f8/go.mod h1:ZmeGPzRb2sm705Nwt/WwuH3e6mliShfvvoUNy1bb9v4=
storj.io/drpc v0.0.32/go.mod h1:6rcOyR/QQkSTX/9L5ZGtlZaE2PtXTTZl8d+ulSeeYEg=
storj.io/drpc v0.0.33 h1:yCGZ26r66ZdMP0IcTYsj7WDAUIIjzXk6DJhbhvt9FHI=
storj.io/drpc v0.0.33/go.mod h1:vR804UNzhBa49NOJ6HeLjd2H3MakC1j5Gv8bsOQT6N4=
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41 h1:SVuEocEhZfFc13J1AmlVLitdGXTVrvmbzN4Z9C9Ms40=
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41/go.mod h1:iK+dmHZZXQlW7ahKdNSOo+raMk5BDL2wbD62FIeXLWs=
storj.io/picobuf v0.0.1 h1:ekEvxSQCbEjTVIi/qxj2za13SJyfRE37yE30IBkZeT0=
storj.io/picobuf v0.0.1/go.mod h1:7ZTAMs6VesgTHbbhFU79oQ9hDaJ+MD4uoFQZ1P4SEz0=
storj.io/private v0.0.0-20230627140631-807a2f00d0e1 h1:O2+Xjq8H4TKad2cnhvjitK3BtwkGtJ2TfRCHOIN8e7w=
storj.io/private v0.0.0-20230627140631-807a2f00d0e1/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2 h1:XnJR9egrqvAqx5oCRu2b13ubK0iu0qTX12EAa6lAPhg=
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=
storj.io/picobuf v0.0.2-0.20230906122608-c4ba17033c6c h1:or/DtG5uaZpzimL61ahlgAA+MTYn/U3txz4fe+XBFUg=
storj.io/picobuf v0.0.2-0.20230906122608-c4ba17033c6c/go.mod h1:JCuc3C0gzCJHQ4J6SOx/Yjg+QTpX0D+Fvs5H46FETCk=
storj.io/private v0.0.0-20230912093002-ca2d4ab44679 h1:58rShZRrm14tDqc71bnyoFZDvdNIcJ7iBwQWEQZl60U=
storj.io/private v0.0.0-20230912093002-ca2d4ab44679/go.mod h1:6+MGr4KUXEBIOsOstFz1efPkA+8wVVfzsO8RpuAhhB4=
storj.io/uplink v1.12.1 h1:bDc2dI6Q7EXcvPJLZuH9jIOTIf2oKxvW3xKEA+Y5EI0=
storj.io/uplink v1.12.1/go.mod h1:1+czctHG25pMzcUp4Mds6QnoJ7LvbgYA5d1qlpFFexg=
@@ -128,7 +128,6 @@ storj.io/storj/satellite/repair/repairer."repair_too_many_nodes_failed" Meter
storj.io/storj/satellite/repair/repairer."repair_unnecessary" Meter
storj.io/storj/satellite/repair/repairer."repairer_segments_below_min_req" Counter
storj.io/storj/satellite/repair/repairer."segment_deleted_before_repair" Meter
storj.io/storj/satellite/repair/repairer."segment_repair_count" IntVal
storj.io/storj/satellite/repair/repairer."segment_time_until_repair" IntVal
storj.io/storj/satellite/repair/repairer."time_for_repair" FloatVal
storj.io/storj/satellite/repair/repairer."time_since_checker_queue" FloatVal
@@ -202,10 +202,6 @@ func (obj *DB) Open(ctx context.Context) (*Tx, error) {
	}, nil
}

func (obj *DB) NewRx() *Rx {
	return &Rx{db: obj}
}

func DeleteAll(ctx context.Context, db *DB) (int64, error) {
	tx, err := db.Open(ctx)
	if err != nil {
@@ -1365,132 +1361,6 @@ func (obj *sqlite3Impl) deleteAll(ctx context.Context) (count int64, err error)

}

type Rx struct {
	db *DB
	tx *Tx
}

func (rx *Rx) UnsafeTx(ctx context.Context) (unsafe_tx tagsql.Tx, err error) {
	tx, err := rx.getTx(ctx)
	if err != nil {
		return nil, err
	}
	return tx.Tx, nil
}

func (rx *Rx) getTx(ctx context.Context) (tx *Tx, err error) {
	if rx.tx == nil {
		if rx.tx, err = rx.db.Open(ctx); err != nil {
			return nil, err
		}
	}
	return rx.tx, nil
}

func (rx *Rx) Rebind(s string) string {
	return rx.db.Rebind(s)
}

func (rx *Rx) Commit() (err error) {
	if rx.tx != nil {
		err = rx.tx.Commit()
		rx.tx = nil
	}
	return err
}

func (rx *Rx) Rollback() (err error) {
	if rx.tx != nil {
		err = rx.tx.Rollback()
		rx.tx = nil
	}
	return err
}

func (rx *Rx) All_Node(ctx context.Context) (
	rows []*Node, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.All_Node(ctx)
}

func (rx *Rx) Count_Node(ctx context.Context) (
	count int64, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Count_Node(ctx)
}

func (rx *Rx) Create_Node(ctx context.Context,
	node_id Node_Id_Field,
	node_name Node_Name_Field,
	node_public_address Node_PublicAddress_Field,
	node_api_secret Node_ApiSecret_Field) (
	node *Node, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Create_Node(ctx, node_id, node_name, node_public_address, node_api_secret)

}

func (rx *Rx) Delete_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field) (
	deleted bool, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Delete_Node_By_Id(ctx, node_id)
}

func (rx *Rx) Get_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field) (
	node *Node, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Get_Node_By_Id(ctx, node_id)
}

func (rx *Rx) Limited_Node(ctx context.Context,
	limit int, offset int64) (
	rows []*Node, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Limited_Node(ctx, limit, offset)
}

func (rx *Rx) UpdateNoReturn_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field,
	update Node_Update_Fields) (
	err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.UpdateNoReturn_Node_By_Id(ctx, node_id, update)
}

func (rx *Rx) Update_Node_By_Id(ctx context.Context,
	node_id Node_Id_Field,
	update Node_Update_Fields) (
	node *Node, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Update_Node_By_Id(ctx, node_id, update)
}

type Methods interface {
	All_Node(ctx context.Context) (
		rows []*Node, err error)
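The dbx-style `Rx` wrapper above opens its transaction lazily: `getTx` only calls `db.Open` on the first query, and `Commit`/`Rollback` reset the cached transaction. A minimal usage sketch (not part of the generated file; it assumes the surrounding package imports `github.com/zeebo/errs`, which generated dbx code conventionally does):

```go
// createAndCount runs both operations in one lazily opened transaction that
// is committed on success or rolled back on error.
func createAndCount(ctx context.Context, db *DB,
	id Node_Id_Field, name Node_Name_Field,
	addr Node_PublicAddress_Field, secret Node_ApiSecret_Field) (count int64, err error) {
	rx := db.NewRx()
	defer func() {
		if err != nil {
			err = errs.Combine(err, rx.Rollback())
			return
		}
		err = rx.Commit()
	}()

	if _, err = rx.Create_Node(ctx, id, name, addr, secret); err != nil {
		return 0, err
	}
	// Count_Node reuses the transaction Create_Node opened.
	return rx.Count_Node(ctx)
}
```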
@@ -69,7 +69,9 @@ type DiskSpace struct {
	Allocated int64 `json:"allocated"`
	Used      int64 `json:"usedPieces"`
	Trash     int64 `json:"usedTrash"`
	// Free is the actual amount of free space on the whole disk, not just allocated disk space, in bytes.
	Free int64 `json:"free"`
	// Available is the amount of free space on the allocated disk space, in bytes.
	Available int64 `json:"available"`
	Overused  int64 `json:"overused"`
}
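The two new comments draw a distinction worth a worked example: `Free` is measured against the physical disk, `Available` against the operator's allocation. The numbers and the derivation of `Available` below are illustrative assumptions, not taken from this diff:

```go
package main

import "fmt"

func main() {
	// A 2 TB disk with 1 TB allocated to the node.
	const (
		allocated = int64(1_000_000_000_000) // operator-configured allocation
		used      = int64(300_000_000_000)   // pieces
		trash     = int64(50_000_000_000)    // trash
		free      = int64(900_000_000_000)   // free space on the whole disk
	)
	available := allocated - used - trash // assumed derivation for illustration
	fmt.Println(available)                // 650000000000
	// Free (900 GB) exceeds Available (650 GB) here because the physical disk
	// is larger than the allocation; that is exactly the distinction the two
	// field comments make.
}
```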
@@ -5,23 +5,62 @@ package apigen

import (
	"fmt"
	"path"
	"reflect"
	"regexp"
	"strings"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"

	"storj.io/storj/private/api"
)

var (
	groupNameRegExp   = regexp.MustCompile(`^([A-Z0-9]\w*)?$`)
	groupPrefixRegExp = regexp.MustCompile(`^\w*$`)
)

// API represents a specific API's configuration.
type API struct {
	// Version is the corresponding version of the API.
	// It's concatenated to the BasePath, so assuming the base path is "/api" and the version is "v1"
	// the API paths will begin with `/api/v1`.
	// When empty, the version doesn't appear in the API paths. If it starts or ends with one or more
	// "/", they are stripped from the API endpoint paths.
	Version     string
	Description string
	// The package name to use for the Go generated code.
	PackageName string
	// BasePath is the base path for the API endpoints. E.g. "/api".
	// It doesn't have to begin with "/". When empty, "/" is used.
	BasePath       string
	Auth           api.Auth
	EndpointGroups []*EndpointGroup
}

// Group adds a new endpoint group to the API.
// name must match `^([A-Z0-9]\w*)?$`.
// prefix must match `^\w*$`.
func (a *API) Group(name, prefix string) *EndpointGroup {
	if !groupNameRegExp.MatchString(name) {
		panic(
			fmt.Sprintf(
				"invalid name for API Endpoint Group. name must fulfill the regular expression `^([A-Z0-9]\\w*)?$`, got %q",
				name,
			),
		)
	}
	if !groupPrefixRegExp.MatchString(prefix) {
		panic(
			fmt.Sprintf(
				"invalid prefix for API Endpoint Group %q. prefix must fulfill the regular expression `^\\w*$`, got %q",
				name,
				prefix,
			),
		)
	}

	group := &EndpointGroup{
		Name:   name,
		Prefix: prefix,
@@ -32,6 +71,14 @@ func (a *API) Group(name, prefix string) *EndpointGroup {
	return group
}

func (a *API) endpointBasePath() string {
	if strings.HasPrefix(a.BasePath, "/") {
		return path.Join(a.BasePath, a.Version)
	}

	return "/" + path.Join(a.BasePath, a.Version)
}

// StringBuilder is an extension of strings.Builder that allows for writing formatted lines.
type StringBuilder struct{ strings.Builder }

@@ -41,6 +88,17 @@ func (s *StringBuilder) Writelnf(format string, a ...interface{}) {
	s.WriteString(fmt.Sprintf(format+"\n", a...))
}

// typeCustomName is a reflect.Type with a customized type name.
type typeCustomName struct {
	reflect.Type

	name string
}

func (t typeCustomName) Name() string {
	return t.name
}

// getElementaryType simplifies a Go type.
func getElementaryType(t reflect.Type) reflect.Type {
	switch t.Kind() {
@@ -70,3 +128,15 @@ func isNillableType(t reflect.Type) bool {
	}
	return false
}

// compoundTypeName creates a name composed of base and parts, joining base as is and
// capitalizing each part.
func compoundTypeName(base string, parts ...string) string {
	caser := cases.Title(language.Und)
	titled := make([]string, len(parts))
	for i := 0; i < len(parts); i++ {
		titled[i] = caser.String(parts[i])
	}

	return base + strings.Join(titled, "")
}
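To make the naming scheme concrete, here is a self-contained sketch that mirrors `compoundTypeName` (the helper body is copied from the diff so the outputs can be checked in isolation):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

// compoundTypeName is reproduced verbatim from the diff above.
func compoundTypeName(base string, parts ...string) string {
	caser := cases.Title(language.Und)
	titled := make([]string, len(parts))
	for i := 0; i < len(parts); i++ {
		titled[i] = caser.String(parts[i])
	}
	return base + strings.Join(titled, "")
}

func main() {
	fmt.Println(compoundTypeName("UpdateContent", "Request"))              // UpdateContentRequest
	fmt.Println(compoundTypeName("GetOne", "Response"))                    // GetOneResponse
	fmt.Println(compoundTypeName("UpdateContent", "query", "param", "id")) // UpdateContentQueryParamId
}
```

These are exactly the names that `typeCustomName` later attaches to anonymous request/response structs, so the generators can emit stable type names.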
private/apigen/common_test.go (new file, 47 lines)
@@ -0,0 +1,47 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package apigen

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAPI_endpointBasePath(t *testing.T) {
	cases := []struct {
		version  string
		basePath string
		expected string
	}{
		{version: "", basePath: "", expected: "/"},
		{version: "v1", basePath: "", expected: "/v1"},
		{version: "v0", basePath: "/", expected: "/v0"},
		{version: "", basePath: "api", expected: "/api"},
		{version: "v2", basePath: "api", expected: "/api/v2"},
		{version: "v2", basePath: "/api", expected: "/api/v2"},
		{version: "v2", basePath: "api/", expected: "/api/v2"},
		{version: "v2", basePath: "/api/", expected: "/api/v2"},
		{version: "/v3", basePath: "api", expected: "/api/v3"},
		{version: "/v3/", basePath: "api", expected: "/api/v3"},
		{version: "v3/", basePath: "api", expected: "/api/v3"},
		{version: "//v3/", basePath: "api", expected: "/api/v3"},
		{version: "v3///", basePath: "api", expected: "/api/v3"},
		{version: "/v3///", basePath: "/api/test/", expected: "/api/test/v3"},
		{version: "/v4.2", basePath: "api/test", expected: "/api/test/v4.2"},
		{version: "/v4/2", basePath: "/api/test", expected: "/api/test/v4/2"},
	}

	for _, c := range cases {
		t.Run(fmt.Sprintf("version:%s basePath: %s", c.version, c.basePath), func(t *testing.T) {
			a := API{
				Version:  c.version,
				BasePath: c.basePath,
			}

			assert.Equal(t, c.expected, a.endpointBasePath())
		})
	}
}
@@ -7,6 +7,7 @@ import (
	"fmt"
	"os"
	"reflect"
	"regexp"
	"strings"
	"time"

@@ -37,11 +38,27 @@ func (api *API) generateDocumentation() string {
	wf("**Description:** %s\n\n", api.Description)
	wf("**Version:** `%s`\n\n", api.Version)

	wf("<h2 id='list-of-endpoints'>List of Endpoints</h2>\n\n")
	getEndpointLink := func(group, endpoint string) string {
		fullName := group + "-" + endpoint
		fullName = strings.ReplaceAll(fullName, " ", "-")
		var nonAlphanumericRegex = regexp.MustCompile(`[^a-zA-Z0-9-]+`)
		fullName = nonAlphanumericRegex.ReplaceAllString(fullName, "")
		return strings.ToLower(fullName)
	}
	for _, group := range api.EndpointGroups {
		wf("* %s\n", group.Name)
		for _, endpoint := range group.endpoints {
			wf(" * [%s](#%s)\n", endpoint.Name, getEndpointLink(group.Name, endpoint.Name))
		}
	}
	wf("\n")

	for _, group := range api.EndpointGroups {
		for _, endpoint := range group.endpoints {
			wf("## %s\n\n", endpoint.Name)
			wf("<h3 id='%s'>%s (<a href='#list-of-endpoints'>go to full list</a>)</h3>\n\n", getEndpointLink(group.Name, endpoint.Name), endpoint.Name)
			wf("%s\n\n", endpoint.Description)
			wf("`%s /%s%s`\n\n", endpoint.Method, group.Prefix, endpoint.Path)
			wf("`%s %s/%s%s`\n\n", endpoint.Method, api.endpointBasePath(), group.Prefix, endpoint.Path)

			if len(endpoint.QueryParams) > 0 {
				wf("**Query Params:**\n\n")
@@ -66,13 +83,13 @@ func (api *API) generateDocumentation() string {
			requestType := reflect.TypeOf(endpoint.Request)
			if requestType != nil {
				wf("**Request body:**\n\n")
				wf("```json\n%s\n```\n\n", getTypeNameRecursively(requestType, 0))
				wf("```typescript\n%s\n```\n\n", getTypeNameRecursively(requestType, 0))
			}

			responseType := reflect.TypeOf(endpoint.Response)
			if responseType != nil {
				wf("**Response body:**\n\n")
				wf("```json\n%s\n```\n\n", getTypeNameRecursively(responseType, 0))
				wf("```typescript\n%s\n```\n\n", getTypeNameRecursively(responseType, 0))
			}
		}
	}
@@ -123,7 +140,6 @@ func getTypeNameRecursively(t reflect.Type, level int) string {
	elemType := t.Elem()
	if elemType.Kind() == reflect.Uint8 { // treat []byte as string in docs
		return prefix + "string"

	}
	return fmt.Sprintf("%s[\n%s\n%s]\n", prefix, getTypeNameRecursively(elemType, level+1), prefix)
case reflect.Struct:
@@ -132,7 +148,7 @@ func getTypeNameRecursively(t reflect.Type, level int) string {
	if typeName != "unknown" {
		toReturn := typeName
		if len(elaboration) > 0 {
			toReturn += " (" + elaboration + ")"
			toReturn += " // " + elaboration
		}
		return toReturn
	}
@@ -150,7 +166,7 @@ func getTypeNameRecursively(t reflect.Type, level int) string {
	typeName, elaboration := getDocType(t)
	toReturn := typeName
	if len(elaboration) > 0 {
		toReturn += " (" + elaboration + ")"
		toReturn += " // " + elaboration
	}
	return toReturn
}
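The new `getEndpointLink` closure is what turns group and endpoint names into the markdown anchors used by the table of contents. A stand-alone copy of that logic (reproduced from the diff so it can be run in isolation) shows the mapping:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// endpointLink mirrors the getEndpointLink closure from generateDocumentation:
// join with "-", replace spaces with "-", strip everything that is not
// alphanumeric or "-", and lowercase the result.
func endpointLink(group, endpoint string) string {
	fullName := group + "-" + endpoint
	fullName = strings.ReplaceAll(fullName, " ", "-")
	nonAlphanumeric := regexp.MustCompile(`[^a-zA-Z0-9-]+`)
	return strings.ToLower(nonAlphanumeric.ReplaceAllString(fullName, ""))
}

func main() {
	fmt.Println(endpointLink("Documents", "Get One"))        // documents-get-one
	fmt.Println(endpointLink("Documents", "Update Content")) // documents-update-content
}
```

These are exactly the `#documents-get-one` and `#documents-update-content` anchors visible in the generated apidocs.gen.md further down.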
@@ -4,21 +4,39 @@
package apigen

import (
	"fmt"
	"net/http"
	"reflect"
	"strings"
)

// Endpoint represents an endpoint's configuration.
type Endpoint struct {
	// Name is free text used to name the endpoint for documentation purposes.
	// It cannot be empty.
	Name string
	// Description is free text that describes the endpoint for documentation purposes.
	Description string
	// MethodName is the name of the method of the service interface which handles the business logic of
	// this endpoint.
	// It must fulfill the Go language specification for method names
	// (https://go.dev/ref/spec#MethodName)
	// TODO: Should we rename this field to be something like ServiceMethodName?
	MethodName string
	// RequestName is the name used for the generated method in the client-side code. When not
	// set, MethodName is used.
	// TODO: Should we delete this field in favor of always using MethodName?
	RequestName  string
	NoCookieAuth bool
	NoAPIAuth    bool
	// Request is the type that defines the format of the request body.
	Request interface{}
	// Response is the type that defines the format of the response body.
	Response interface{}
	// QueryParams is the list of query parameters that the endpoint accepts.
	QueryParams []Param
	// PathParams is the list of path parameters that appear in the path associated with this
	// endpoint.
	PathParams []Param
}

@@ -39,7 +57,34 @@ type fullEndpoint struct {
	Method string
}

// requestType guarantees to return a named Go type associated to the Endpoint.Request field.
func (fe fullEndpoint) requestType() reflect.Type {
	t := reflect.TypeOf(fe.Request)
	if t.Name() == "" {
		name := fe.RequestName
		if name == "" {
			name = fe.MethodName
		}

		t = typeCustomName{Type: t, name: compoundTypeName(name, "Request")}
	}

	return t
}

// responseType guarantees to return a named Go type associated to the Endpoint.Response field.
func (fe fullEndpoint) responseType() reflect.Type {
	t := reflect.TypeOf(fe.Response)
	if t.Name() == "" {
		t = typeCustomName{Type: t, name: compoundTypeName(fe.MethodName, "Response")}
	}

	return t
}

// EndpointGroup represents a group of endpoints.
// You should always create a group using API.Group because it validates the field values to
// guarantee correct code generation.
type EndpointGroup struct {
	Name   string
	Prefix string
@@ -47,27 +92,43 @@ type EndpointGroup struct {
}

// Get adds a new GET endpoint to the endpoints group.
// It panics if path doesn't begin with '/'.
func (eg *EndpointGroup) Get(path string, endpoint *Endpoint) {
	eg.addEndpoint(path, http.MethodGet, endpoint)
}

// Patch adds a new PATCH endpoint to the endpoints group.
// It panics if path doesn't begin with '/'.
func (eg *EndpointGroup) Patch(path string, endpoint *Endpoint) {
	eg.addEndpoint(path, http.MethodPatch, endpoint)
}

// Post adds a new POST endpoint to the endpoints group.
// It panics if path doesn't begin with '/'.
func (eg *EndpointGroup) Post(path string, endpoint *Endpoint) {
	eg.addEndpoint(path, http.MethodPost, endpoint)
}

// Delete adds a new DELETE endpoint to the endpoints group.
// It panics if path doesn't begin with '/'.
func (eg *EndpointGroup) Delete(path string, endpoint *Endpoint) {
	eg.addEndpoint(path, http.MethodDelete, endpoint)
}

// addEndpoint adds a new endpoint to the endpoints list.
// It panics if path doesn't begin with '/'.
func (eg *EndpointGroup) addEndpoint(path, method string, endpoint *Endpoint) {
	if !strings.HasPrefix(path, "/") {
		panic(
			fmt.Sprintf(
				"invalid path for method %q of EndpointGroup %q. path must start with slash, got %q",
				method,
				eg.Name,
				path,
			),
		)
	}

	ep := &fullEndpoint{*endpoint, path, method}
	for i, e := range eg.endpoints {
		if e.Path == path && e.Method == method {
@@ -91,3 +152,16 @@ func NewParam(name string, instance interface{}) Param {
		Type: reflect.TypeOf(instance),
	}
}

// namedType guarantees to return a named Go type. where defines where the param is defined (e.g.
// path, query, etc.).
func (p Param) namedType(ep Endpoint, where string) reflect.Type {
	if p.Type.Name() == "" {
		return typeCustomName{
			Type: p.Type,
			name: compoundTypeName(ep.MethodName, where, "param", p.Name),
		}
	}

	return p.Type
}
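`requestType` and `responseType` matter because anonymous structs have an empty `reflect.Type.Name()`, which would leave the generators with nothing to call the emitted types. A hypothetical in-package test (not part of this diff) pins down the behaviour:

```go
package apigen

import "testing"

// TestAnonymousTypeNaming shows how anonymous request/response structs get
// stable names derived from the endpoint's method name.
func TestAnonymousTypeNaming(t *testing.T) {
	fe := fullEndpoint{Endpoint: Endpoint{
		MethodName: "UpdateContent",
		Request:    struct{ Content string }{},
		Response:   struct{ ID string }{},
	}}

	if got := fe.requestType().Name(); got != "UpdateContentRequest" {
		t.Fatalf("request type name: %q", got)
	}
	if got := fe.responseType().Name(); got != "UpdateContentResponse" {
		t.Fatalf("response type name: %q", got)
	}
}
```

Those two names, `UpdateContentRequest` and `UpdateContentResponse`, are the class names that appear in the generated client-api.gen.ts further down, which is the payoff of this change.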
@@ -16,44 +16,81 @@ import (

	"storj.io/common/uuid"
	"storj.io/storj/private/api"
	"storj.io/storj/private/apigen/example/myapi"
)

const dateLayout = "2006-01-02T15:04:05.999Z"

var ErrTestapiAPI = errs.Class("example testapi api")
var ErrDocsAPI = errs.Class("example docs api")

type TestAPIService interface {
	GenTestAPI(ctx context.Context, path string, id uuid.UUID, date time.Time, request struct{ Content string }) (*struct {
		ID uuid.UUID
		Date time.Time
		PathParam string
		Body string
type DocumentsService interface {
	GetOne(ctx context.Context, path string) (*myapi.Document, api.HTTPError)
	UpdateContent(ctx context.Context, path string, id uuid.UUID, date time.Time, request struct {
		Content string "json:\"content\""
	}) (*struct {
		ID uuid.UUID "json:\"id\""
		Date time.Time "json:\"date\""
		PathParam string "json:\"pathParam\""
		Body string "json:\"body\""
	}, api.HTTPError)
}

// TestAPIHandler is an api handler that exposes all testapi related functionality.
type TestAPIHandler struct {
// DocumentsHandler is an api handler that exposes all docs related functionality.
type DocumentsHandler struct {
	log *zap.Logger
	mon *monkit.Scope
	service TestAPIService
	service DocumentsService
	auth api.Auth
}

func NewTestAPI(log *zap.Logger, mon *monkit.Scope, service TestAPIService, router *mux.Router, auth api.Auth) *TestAPIHandler {
	handler := &TestAPIHandler{
func NewDocuments(log *zap.Logger, mon *monkit.Scope, service DocumentsService, router *mux.Router, auth api.Auth) *DocumentsHandler {
	handler := &DocumentsHandler{
		log: log,
		mon: mon,
		service: service,
		auth: auth,
	}

	testapiRouter := router.PathPrefix("/api/v0/testapi").Subrouter()
	testapiRouter.HandleFunc("/{path}", handler.handleGenTestAPI).Methods("POST")
	docsRouter := router.PathPrefix("/api/v0/docs").Subrouter()
	docsRouter.HandleFunc("/{path}", handler.handleGetOne).Methods("GET")
	docsRouter.HandleFunc("/{path}", handler.handleUpdateContent).Methods("POST")

	return handler
}

func (h *TestAPIHandler) handleGenTestAPI(w http.ResponseWriter, r *http.Request) {
func (h *DocumentsHandler) handleGetOne(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	var err error
	defer h.mon.Task()(&ctx)(&err)

	w.Header().Set("Content-Type", "application/json")

	path, ok := mux.Vars(r)["path"]
	if !ok {
		api.ServeError(h.log, w, http.StatusBadRequest, errs.New("missing path route param"))
		return
	}

	ctx, err = h.auth.IsAuthenticated(ctx, r, true, true)
	if err != nil {
		h.auth.RemoveAuthCookie(w)
		api.ServeError(h.log, w, http.StatusUnauthorized, err)
		return
	}

	retVal, httpErr := h.service.GetOne(ctx, path)
	if httpErr.Err != nil {
		api.ServeError(h.log, w, httpErr.Status, httpErr.Err)
		return
	}

	err = json.NewEncoder(w).Encode(retVal)
	if err != nil {
		h.log.Debug("failed to write json GetOne response", zap.Error(ErrDocsAPI.Wrap(err)))
	}
}

func (h *DocumentsHandler) handleUpdateContent(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	var err error
	defer h.mon.Task()(&ctx)(&err)
@@ -90,7 +127,9 @@ func (h *TestAPIHandler) handleGenTestAPI(w http.ResponseWriter, r *http.Request
		return
	}

	payload := struct{ Content string }{}
	payload := struct {
		Content string "json:\"content\""
	}{}
	if err = json.NewDecoder(r.Body).Decode(&payload); err != nil {
		api.ServeError(h.log, w, http.StatusBadRequest, err)
		return
@@ -103,7 +142,7 @@ func (h *TestAPIHandler) handleGenTestAPI(w http.ResponseWriter, r *http.Request
		return
	}

	retVal, httpErr := h.service.GenTestAPI(ctx, path, id, date, payload)
	retVal, httpErr := h.service.UpdateContent(ctx, path, id, date, payload)
	if httpErr.Err != nil {
		api.ServeError(h.log, w, httpErr.Status, httpErr.Err)
		return
@@ -111,6 +150,6 @@ func (h *TestAPIHandler) handleGenTestAPI(w http.ResponseWriter, r *http.Request

	err = json.NewEncoder(w).Encode(retVal)
	if err != nil {
		h.log.Debug("failed to write json GenTestAPI response", zap.Error(ErrTestapiAPI.Wrap(err)))
		h.log.Debug("failed to write json UpdateContent response", zap.Error(ErrDocsAPI.Wrap(err)))
	}
}
private/apigen/example/apidocs.gen.md (new file, 77 lines)
@@ -0,0 +1,77 @@
# API Docs

**Description:**

**Version:** `v0`

<h2 id='list-of-endpoints'>List of Endpoints</h2>

* Documents
  * [Get One](#documents-get-one)
  * [Update Content](#documents-update-content)

<h3 id='documents-get-one'>Get One (<a href='#list-of-endpoints'>go to full list</a>)</h3>

Get one document with the specified version

`GET /api/v0/docs/{path}`

**Path Params:**

| name | type | elaboration |
|---|---|---|
| `path` | `string` | |

**Response body:**

```typescript
{
	id: string // UUID formatted as `00000000-0000-0000-0000-000000000000`
	date: string // Date timestamp formatted as `2006-01-02T15:00:00Z`
	pathParam: string
	body: string
	version: number
}

```

<h3 id='documents-update-content'>Update Content (<a href='#list-of-endpoints'>go to full list</a>)</h3>

Update the content of the document with the specified path and ID if the last update is before the indicated date

`POST /api/v0/docs/{path}`

**Query Params:**

| name | type | elaboration |
|---|---|---|
| `id` | `string` | UUID formatted as `00000000-0000-0000-0000-000000000000` |
| `date` | `string` | Date timestamp formatted as `2006-01-02T15:00:00Z` |

**Path Params:**

| name | type | elaboration |
|---|---|---|
| `path` | `string` | |

**Request body:**

```typescript
{
	content: string
}

```

**Response body:**

```typescript
{
	id: string // UUID formatted as `00000000-0000-0000-0000-000000000000`
	date: string // Date timestamp formatted as `2006-01-02T15:00:00Z`
	pathParam: string
	body: string
}

```
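Against a server running the generated handlers, the documented Update Content endpoint can be exercised directly. A sketch in Go (the host and port are hypothetical; only the path, query parameters, and body shape come from the docs above):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Hypothetical server address; the generated router mounts under /api/v0/docs.
	u, err := url.Parse("http://localhost:10005/api/v0/docs/my-doc")
	if err != nil {
		panic(err)
	}
	q := u.Query()
	q.Set("id", "00000000-0000-0000-0000-000000000000") // documented UUID format
	q.Set("date", "2006-01-02T15:00:00Z")               // documented date format
	u.RawQuery = q.Encode()

	// Request body matches the documented `{ content: string }` schema.
	resp, err := http.Post(u.String(), "application/json", strings.NewReader(`{"content":"hello"}`))
	if err != nil {
		panic(err)
	}
	defer func() { _ = resp.Body.Close() }()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```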
private/apigen/example/client-api.gen.ts (new file, 52 lines)
@@ -0,0 +1,52 @@
// AUTOGENERATED BY private/apigen
// DO NOT EDIT.

import { HttpClient } from '@/utils/httpClient';
import { Time, UUID } from '@/types/common';

export class Document {
	id: UUID;
	date: Time;
	pathParam: string;
	body: string;
	version: number;
}

export class UpdateContentRequest {
	content: string;
}

export class UpdateContentResponse {
	id: UUID;
	date: Time;
	pathParam: string;
	body: string;
}

export class docsHttpApiV0 {
	private readonly http: HttpClient = new HttpClient();
	private readonly ROOT_PATH: string = '/api/v0/docs';

	public async GetOne(path: string): Promise<Document> {
		const fullPath = `${this.ROOT_PATH}/${path}`;
		const response = await this.http.get(fullPath);
		if (response.ok) {
			return response.json().then((body) => body as Document);
		}
		const err = await response.json();
		throw new Error(err.error);
	}

	public async UpdateContent(request: UpdateContentRequest, path: string, id: UUID, date: Time): Promise<UpdateContentResponse> {
		const u = new URL(`${this.ROOT_PATH}/${path}`);
		u.searchParams.set('id', id);
		u.searchParams.set('date', date);
		const fullPath = u.toString();
		const response = await this.http.post(fullPath, JSON.stringify(request));
		if (response.ok) {
			return response.json().then((body) => body as UpdateContentResponse);
		}
		const err = await response.json();
		throw new Error(err.error);
	}
}
@@ -11,22 +11,37 @@ import (

	"storj.io/common/uuid"
	"storj.io/storj/private/apigen"
	"storj.io/storj/private/apigen/example/myapi"
)

func main() {
	a := &apigen.API{PackageName: "example"}
	a := &apigen.API{PackageName: "example", Version: "v0", BasePath: "/api"}

	g := a.Group("TestAPI", "testapi")
	g := a.Group("Documents", "docs")

	g.Get("/{path}", &apigen.Endpoint{
		Name:        "Get One",
		Description: "Get one document with the specified version",
		MethodName:  "GetOne",
		Response:    myapi.Document{},
		PathParams: []apigen.Param{
			apigen.NewParam("path", ""),
		},
	})

	g.Post("/{path}", &apigen.Endpoint{
		MethodName: "GenTestAPI",
		Name:        "Update Content",
		Description: "Update the content of the document with the specified path and ID if the last update is before the indicated date",
		MethodName:  "UpdateContent",
		Response: struct {
			ID uuid.UUID
			Date time.Time
			PathParam string
			Body string
			ID        uuid.UUID `json:"id"`
			Date      time.Time `json:"date"`
			PathParam string    `json:"pathParam"`
			Body      string    `json:"body"`
		}{},
		Request: struct {
			Content string `json:"content"`
		}{},
		Request: struct{ Content string }{},
		QueryParams: []apigen.Param{
			apigen.NewParam("id", uuid.UUID{}),
			apigen.NewParam("date", time.Time{}),
@@ -37,4 +52,6 @@ func main() {
	})

	a.MustWriteGo("api.gen.go")
	a.MustWriteTS("client-api.gen.ts")
	a.MustWriteDocs("apidocs.gen.md")
}
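This generator program is what produces the three checked-in artifacts shown above (api.gen.go, client-api.gen.ts, apidocs.gen.md). How generation is triggered is not shown in this diff; one plausible wiring, purely as an assumption, is a `go:generate` directive near the generated files:

```go
// Hypothetical directive (not part of this diff). Placed in the package that
// owns the generated files, `go generate ./...` would rerun the example
// generator and rewrite the three outputs in place.
//
//go:generate go run ./gen
```

The directory layout (`./gen` here) is also an assumption; the point is only that the outputs are regenerated, never edited by hand, as their "DO NOT EDIT" headers say.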
private/apigen/example/myapi/types.go (new file, 19 lines)
@@ -0,0 +1,19 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package myapi

import (
	"time"

	"storj.io/common/uuid"
)

// Document is a retrieved document.
type Document struct {
	ID        uuid.UUID `json:"id"`
	Date      time.Time `json:"date"`
	PathParam string    `json:"pathParam"`
	Body      string    `json:"body"`
	Version   uint      `json:"version"`
}
@@ -22,10 +22,11 @@ import (
const DateFormat = "2006-01-02T15:04:05.999Z"

// MustWriteGo writes generated Go code into a file.
// If an error occurs, it panics.
func (a *API) MustWriteGo(path string) {
	generated, err := a.generateGo()
	if err != nil {
		panic(errs.Wrap(err))
		panic(err)
	}

	err = os.WriteFile(path, generated, 0644)
@@ -41,7 +42,12 @@ func (a *API) generateGo() ([]byte, error) {

	getPackageName := func(path string) string {
		pathPackages := strings.Split(path, "/")
		return pathPackages[len(pathPackages)-1]
		name := pathPackages[len(pathPackages)-1]
		if name == "main" {
			panic(errs.New(`invalid package name. Your types cannot be defined in a package named "main"`))
		}

		return name
	}

	imports := struct {
@@ -100,7 +106,12 @@ func (a *API) generateGo() ([]byte, error) {

	for _, group := range a.EndpointGroups {
		i("github.com/zeebo/errs")
		pf("var Err%sAPI = errs.Class(\"%s %s api\")", cases.Title(language.Und).String(group.Prefix), a.PackageName, group.Prefix)
		pf(
			"var Err%sAPI = errs.Class(\"%s %s api\")",
			cases.Title(language.Und).String(group.Prefix),
			a.PackageName,
			group.Prefix,
		)
	}

	pf("")
@@ -167,10 +178,16 @@ func (a *API) generateGo() ([]byte, error) {
	pf("auth: auth,")
	pf("}")
	pf("")
	pf("%sRouter := router.PathPrefix(\"/api/v0/%s\").Subrouter()", group.Prefix, group.Prefix)
	pf("%sRouter := router.PathPrefix(\"%s/%s\").Subrouter()", group.Prefix, a.endpointBasePath(), group.Prefix)
	for _, endpoint := range group.endpoints {
		handlerName := "handle" + endpoint.MethodName
		pf("%sRouter.HandleFunc(\"%s\", handler.%s).Methods(\"%s\")", group.Prefix, endpoint.Path, handlerName, endpoint.Method)
		pf(
			"%sRouter.HandleFunc(\"%s\", handler.%s).Methods(\"%s\")",
			group.Prefix,
			endpoint.Path,
			handlerName,
			endpoint.Method,
		)
	}
	pf("")
	pf("return handler")
@@ -242,7 +259,11 @@ func (a *API) generateGo() ([]byte, error) {
	pf("")
	pf("err = json.NewEncoder(w).Encode(retVal)")
	pf("if err != nil {")
	pf("h.log.Debug(\"failed to write json %s response\", zap.Error(Err%sAPI.Wrap(err)))", endpoint.MethodName, cases.Title(language.Und).String(group.Prefix))
	pf(
		"h.log.Debug(\"failed to write json %s response\", zap.Error(Err%sAPI.Wrap(err)))",
		endpoint.MethodName,
		cases.Title(language.Und).String(group.Prefix),
	)
	pf("}")
	pf("}")
}
@@ -282,7 +303,7 @@ func (a *API) generateGo() ([]byte, error) {

	output, err := format.Source([]byte(result.String()))
	if err != nil {
		return nil, err
		return nil, errs.Wrap(err)
	}

	return output, nil
@@ -25,16 +25,17 @@ import (
	"storj.io/storj/private/api"
	"storj.io/storj/private/apigen"
	"storj.io/storj/private/apigen/example"
	"storj.io/storj/private/apigen/example/myapi"
)

type (
	auth    struct{}
	service struct{}
	response = struct {
		ID uuid.UUID
		Date time.Time
		PathParam string
		Body string
		ID        uuid.UUID `json:"id"`
		Date      time.Time `json:"date"`
		PathParam string    `json:"pathParam"`
		Body      string    `json:"body"`
	}
)

@@ -44,7 +45,22 @@ func (a auth) IsAuthenticated(ctx context.Context, r *http.Request, isCookieAuth

func (a auth) RemoveAuthCookie(w http.ResponseWriter) {}

func (s service) GenTestAPI(ctx context.Context, pathParam string, id uuid.UUID, date time.Time, body struct{ Content string }) (*response, api.HTTPError) {
func (s service) GetOne(
	ctx context.Context,
	pathParam string,
) (*myapi.Document, api.HTTPError) {
	return &myapi.Document{}, api.HTTPError{}
}

func (s service) UpdateContent(
	ctx context.Context,
	pathParam string,
	id uuid.UUID,
	date time.Time,
	body struct {
		Content string `json:"content"`
	},
) (*response, api.HTTPError) {
	return &response{
		ID:   id,
		Date: date,
@@ -90,7 +106,7 @@ func TestAPIServer(t *testing.T) {
	defer ctx.Cleanup()

	router := mux.NewRouter()
	example.NewTestAPI(zaptest.NewLogger(t), monkit.Package(), service{}, router, auth{})
	example.NewDocuments(zaptest.NewLogger(t), monkit.Package(), service{}, router, auth{})

	server := httptest.NewServer(router)
	defer server.Close()
@@ -106,7 +122,7 @@ func TestAPIServer(t *testing.T) {
	}

	resp, err := send(ctx, http.MethodPost,
		fmt.Sprintf("%s/api/v0/testapi/%s?id=%s&date=%s",
		fmt.Sprintf("%s/api/v0/docs/%s?id=%s&date=%s",
			server.URL,
			expected.PathParam,
			url.QueryEscape(expected.ID.String()),
@@ -118,10 +134,11 @@ func TestAPIServer(t *testing.T) {
	var actual map[string]string
	require.NoError(t, json.Unmarshal(resp, &actual))

	for _, key := range []string{"ID", "Date", "PathParam", "Body"} {
	for _, key := range []string{"id", "date", "pathParam", "body"} {
		require.Contains(t, actual, key)
	}
	require.Equal(t, expected.ID.String(), actual["ID"])
	require.Equal(t, expected.Date.Format(apigen.DateFormat), actual["Date"])
	require.Equal(t, expected.Body, actual["Body"])
	require.Equal(t, expected.ID.String(), actual["id"])
	require.Equal(t, expected.Date.Format(apigen.DateFormat), actual["date"])
	require.Equal(t, expected.PathParam, actual["pathParam"])
	require.Equal(t, expected.Body, actual["body"])
}
@ -13,6 +13,7 @@ import (
|
||||
)
|
||||
|
||||
// MustWriteTS writes generated TypeScript code into a file.
|
||||
// If an error occurs, it panics.
|
||||
func (a *API) MustWriteTS(path string) {
|
||||
f := newTSGenFile(path, a)
|
||||
|
||||
@ -64,17 +65,19 @@ func (f *tsGenFile) generateTS() {
|
||||
}
|
||||
|
||||
func (f *tsGenFile) registerTypes() {
|
||||
// TODO: what happens with path parameters?
|
||||
for _, group := range f.api.EndpointGroups {
|
||||
for _, method := range group.endpoints {
|
||||
if method.Request != nil {
|
||||
f.types.Register(reflect.TypeOf(method.Request))
|
||||
f.types.Register(method.requestType())
|
||||
}
|
||||
if method.Response != nil {
|
||||
f.types.Register(reflect.TypeOf(method.Response))
|
||||
f.types.Register(method.responseType())
|
||||
}
|
||||
if len(method.QueryParams) > 0 {
|
||||
for _, p := range method.QueryParams {
|
||||
t := getElementaryType(p.Type)
|
||||
// TODO: Is this call needed? This breaks the named type for slices, arrays, and pointers.
|
||||
t := getElementaryType(p.namedType(method.Endpoint, "query"))
|
||||
f.types.Register(t)
|
||||
}
|
||||
}
|
||||
@ -85,7 +88,7 @@ func (f *tsGenFile) registerTypes() {
|
||||
func (f *tsGenFile) createAPIClient(group *EndpointGroup) {
|
||||
f.pf("\nexport class %sHttpApi%s {", group.Prefix, strings.ToUpper(f.api.Version))
|
||||
f.pf("\tprivate readonly http: HttpClient = new HttpClient();")
|
||||
f.pf("\tprivate readonly ROOT_PATH: string = '/api/%s/%s';", f.api.Version, group.Prefix)
|
||||
f.pf("\tprivate readonly ROOT_PATH: string = '%s/%s';", f.api.endpointBasePath(), group.Prefix)
|
||||
for _, method := range group.endpoints {
|
||||
f.pf("")
|
||||
|
||||
@ -94,21 +97,36 @@ func (f *tsGenFile) createAPIClient(group *EndpointGroup) {
|
||||
returnStmt := "return"
|
||||
returnType := "void"
|
||||
if method.Response != nil {
|
||||
returnType = TypescriptTypeName(getElementaryType(reflect.TypeOf(method.Response)))
|
||||
if v := reflect.ValueOf(method.Response); v.Kind() == reflect.Array || v.Kind() == reflect.Slice {
|
||||
respType := method.responseType()
|
||||
returnType = TypescriptTypeName(getElementaryType(respType))
|
||||
// TODO: see if this is needed now that we are creating types for arrays and slices
|
||||
if respType.Kind() == reflect.Array || respType.Kind() == reflect.Slice {
|
||||
returnType = fmt.Sprintf("Array<%s>", returnType)
|
||||
}
|
||||
returnStmt += fmt.Sprintf(" response.json().then((body) => body as %s)", returnType)
|
||||
}
|
||||
returnStmt += ";"
|
||||
|
||||
f.pf("\tpublic async %s(%s): Promise<%s> {", method.RequestName, funcArgs, returnType)
|
||||
f.pf("\t\tconst path = `%s`;", path)
|
||||
methodName := method.RequestName
|
||||
if methodName == "" {
|
||||
methodName = method.MethodName
|
||||
}
|
||||
|
||||
f.pf("\tpublic async %s(%s): Promise<%s> {", methodName, funcArgs, returnType)
|
||||
if len(method.QueryParams) > 0 {
|
||||
f.pf("\t\tconst u = new URL(`%s`);", path)
|
||||
for _, p := range method.QueryParams {
|
||||
f.pf("\t\tu.searchParams.set('%s', %s);", p.Name, p.Name)
|
||||
}
|
||||
f.pf("\t\tconst fullPath = u.toString();")
|
||||
} else {
|
||||
f.pf("\t\tconst fullPath = `%s`;", path)
|
||||
}
|
||||
|
||||
if method.Request != nil {
|
||||
f.pf("\t\tconst response = await this.http.%s(path, JSON.stringify(request));", strings.ToLower(method.Method))
|
||||
f.pf("\t\tconst response = await this.http.%s(fullPath, JSON.stringify(request));", strings.ToLower(method.Method))
|
||||
} else {
|
||||
f.pf("\t\tconst response = await this.http.%s(path);", strings.ToLower(method.Method))
|
||||
f.pf("\t\tconst response = await this.http.%s(fullPath);", strings.ToLower(method.Method))
|
||||
}
|
||||
|
||||
f.pf("\t\tif (response.ok) {")
|
||||
@ -131,24 +149,18 @@ func (f *tsGenFile) getArgsAndPath(method *fullEndpoint) (funcArgs, path string)
|
||||
path = "${this.ROOT_PATH}" + path
|
||||
|
||||
if method.Request != nil {
|
||||
t := getElementaryType(reflect.TypeOf(method.Request))
|
||||
// TODO: This should map slices and arrays because a request could be one of them.
|
||||
t := getElementaryType(method.requestType())
|
||||
funcArgs += fmt.Sprintf("request: %s, ", TypescriptTypeName(t))
|
||||
}
|
||||
|
||||
for _, p := range method.PathParams {
|
||||
funcArgs += fmt.Sprintf("%s: %s, ", p.Name, TypescriptTypeName(p.Type))
|
||||
funcArgs += fmt.Sprintf("%s: %s, ", p.Name, TypescriptTypeName(p.namedType(method.Endpoint, "path")))
|
||||
path += fmt.Sprintf("/${%s}", p.Name)
|
||||
}
|
||||
|
||||
for i, p := range method.QueryParams {
|
||||
if i == 0 {
|
||||
path += "?"
|
||||
} else {
|
||||
path += "&"
|
||||
}
|
||||
|
||||
funcArgs += fmt.Sprintf("%s: %s, ", p.Name, TypescriptTypeName(p.Type))
|
||||
path += fmt.Sprintf("%s=${%s}", p.Name, p.Name)
|
||||
for _, p := range method.QueryParams {
|
||||
funcArgs += fmt.Sprintf("%s: %s, ", p.Name, TypescriptTypeName(p.namedType(method.Endpoint, "query")))
|
||||
}
|
||||
|
||||
path = strings.ReplaceAll(path, "//", "/")
|
||||
|
@ -36,46 +36,76 @@ type Types struct {
|
||||
|
||||
// Register registers a type for generation.
|
||||
func (types *Types) Register(t reflect.Type) {
|
||||
if t.Name() == "" {
|
||||
panic("register an anonymous type is not supported. All the types must have a name")
|
||||
}
|
||||
types.top[t] = struct{}{}
|
||||
}
|
||||
|
||||
// All returns a slice containing every top-level type and their dependencies.
|
||||
//
|
||||
// TODO: see how to have a better implementation for adding to seen, uniqueNames, and all.
|
||||
func (types *Types) All() []reflect.Type {
|
||||
seen := map[reflect.Type]struct{}{}
|
||||
uniqueNames := map[string]struct{}{}
|
||||
all := []reflect.Type{}
|
||||
|
||||
var walk func(t reflect.Type)
|
||||
walk = func(t reflect.Type) {
|
||||
var walk func(t reflect.Type, alternateTypeName string)
|
||||
walk = func(t reflect.Type, altTypeName string) {
|
||||
if _, ok := seen[t]; ok {
|
||||
return
|
||||
}
|
||||
seen[t] = struct{}{}
|
||||
all = append(all, t)
|
||||
|
||||
// The type hasn't been seen yet, but it has the same name as one already seen.
|
||||
// This cannot be allowed, because we would generate more than one TypeScript type with the same name.
|
||||
if _, ok := uniqueNames[t.Name()]; ok {
|
||||
panic(fmt.Sprintf("Found different types with the same name (%s)", t.Name()))
|
||||
}
|
||||
|
||||
if _, ok := commonClasses[t]; ok {
|
||||
seen[t] = struct{}{}
|
||||
uniqueNames[t.Name()] = struct{}{}
|
||||
all = append(all, t)
|
||||
return
|
||||
}
|
||||
|
||||
switch t.Kind() {
|
||||
switch k := t.Kind(); k {
|
||||
// TODO: Does reflect.Ptr need to be registered? I believe we could skip it and only register
|
||||
// the type it points to.
|
||||
case reflect.Array, reflect.Ptr, reflect.Slice:
|
||||
walk(t.Elem())
|
||||
t = typeCustomName{Type: t, name: compoundTypeName(altTypeName, k.String())}
|
||||
seen[t] = struct{}{}
|
||||
uniqueNames[t.Name()] = struct{}{}
|
||||
all = append(all, t)
|
||||
walk(t.Elem(), altTypeName)
|
||||
case reflect.Struct:
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
walk(t.Field(i).Type)
|
||||
if t.Name() == "" {
|
||||
t = typeCustomName{Type: t, name: altTypeName}
|
||||
}
|
||||
case reflect.Bool:
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
case reflect.Float32, reflect.Float64:
|
||||
case reflect.String:
|
||||
break
|
||||
|
||||
seen[t] = struct{}{}
|
||||
uniqueNames[t.Name()] = struct{}{}
|
||||
all = append(all, t)
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
walk(field.Type, compoundTypeName(altTypeName, field.Name))
|
||||
}
|
||||
case reflect.Bool,
|
||||
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
|
||||
reflect.Float32, reflect.Float64,
|
||||
reflect.String:
|
||||
seen[t] = struct{}{}
|
||||
uniqueNames[t.Name()] = struct{}{}
|
||||
all = append(all, t)
|
||||
default:
|
||||
panic(fmt.Sprintf("type '%s' is not supported", t.Kind().String()))
|
||||
}
|
||||
}
|
||||
|
||||
for t := range types.top {
|
||||
walk(t)
|
||||
walk(t, t.Name())
|
||||
}
|
||||
|
||||
sort.Slice(all, func(i, j int) bool {
|
||||
@ -96,6 +126,8 @@ func (types *Types) GenerateTypescriptDefinitions() string {
|
||||
if _, ok := commonClasses[t]; ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// TODO: we should be able to handle arrays and slices as defined types now.
|
||||
return t.Kind() == reflect.Struct
|
||||
})
|
||||
|
||||
@ -154,6 +186,7 @@ func (types *Types) getTypescriptImports() string {
|
||||
}
|
||||
|
||||
// TypescriptTypeName gets the corresponding TypeScript type for a provided reflect.Type.
|
||||
// If the type is an anonymous struct, it returns an empty string.
|
||||
func TypescriptTypeName(t reflect.Type) string {
|
||||
if override, ok := commonClasses[t]; ok {
|
||||
return override
|
||||
|
129
private/apigen/tstypes_test.go
Normal file
@ -0,0 +1,129 @@
|
||||
// Copyright (C) 2023 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package apigen
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type testTypesValoration struct {
|
||||
Points uint
|
||||
}
|
||||
|
||||
func TestTypes(t *testing.T) {
|
||||
t.Run("Register panics with anonymous types", func(t *testing.T) {
|
||||
types := NewTypes()
|
||||
require.Panics(t, func() {
|
||||
types.Register(reflect.TypeOf([2]int{}))
|
||||
}, "array")
|
||||
|
||||
require.Panics(t, func() {
|
||||
types.Register(reflect.TypeOf([]float64{}))
|
||||
}, "slice")
|
||||
|
||||
require.Panics(t, func() {
|
||||
types.Register(reflect.TypeOf(struct{}{}))
|
||||
}, "struct")
|
||||
})
|
||||
|
||||
t.Run("All returns nested types", func(t *testing.T) {
|
||||
typesList := []reflect.Type{
|
||||
reflect.TypeOf(true),
|
||||
reflect.TypeOf(int64(10)),
|
||||
reflect.TypeOf(uint8(9)),
|
||||
reflect.TypeOf(float64(99.9)),
|
||||
reflect.TypeOf("this is a test"),
|
||||
reflect.TypeOf(testTypesValoration{}),
|
||||
}
|
||||
|
||||
types := NewTypes()
|
||||
for _, li := range typesList {
|
||||
types.Register(li)
|
||||
}
|
||||
|
||||
allTypes := types.All()
|
||||
|
||||
require.Len(t, allTypes, 7, "total number of types")
|
||||
require.Subset(t, allTypes, typesList, "all types contains at least the registered ones")
|
||||
})
|
||||
|
||||
t.Run("All nested structs and slices", func(t *testing.T) {
|
||||
types := NewTypes()
|
||||
types.Register(
|
||||
typeCustomName{
|
||||
Type: reflect.TypeOf(struct {
|
||||
Name string
|
||||
Addresses []struct {
|
||||
Address string
|
||||
PO string
|
||||
}
|
||||
Job struct {
|
||||
Company string
|
||||
Position string
|
||||
StartingYear uint
|
||||
}
|
||||
Documents []struct {
|
||||
Path string
|
||||
Content string
|
||||
Valoration testTypesValoration
|
||||
}
|
||||
}{}),
|
||||
name: "Response",
|
||||
})
|
||||
|
||||
allTypes := types.All()
|
||||
require.Len(t, allTypes, 9, "total number of types")
|
||||
|
||||
typesNames := []string{}
|
||||
for _, tp := range allTypes {
|
||||
typesNames = append(typesNames, tp.Name())
|
||||
}
|
||||
|
||||
require.ElementsMatch(t, []string{
|
||||
"string", "uint",
|
||||
"Response",
|
||||
"ResponseAddressesSlice", "ResponseAddresses",
|
||||
"ResponseJob",
|
||||
"ResponseDocumentsSlice", "ResponseDocuments", "testTypesValoration",
|
||||
}, typesNames)
|
||||
})
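The expected names in the assertion above imply how compound type names are built during the walk: the registered top-level name seeds the traversal, each nested field appends its (already capitalized) name, and collection kinds append a capitalized kind such as "Slice". The following is a minimal sketch of that naming rule under those assumptions; it is illustrative and is not apigen's actual `compoundTypeName` helper.

```go
package main

import (
	"fmt"
	"strings"
)

// compoundTypeNameSketch mimics the naming behavior the test above expects:
// join the base name with each part, upper-casing the first letter of every
// part (field names already are; kinds like "slice" are not).
func compoundTypeNameSketch(base string, parts ...string) string {
	name := base
	for _, p := range parts {
		if p == "" {
			continue
		}
		name += strings.ToUpper(p[:1]) + p[1:]
	}
	return name
}

func main() {
	fmt.Println(compoundTypeNameSketch("Response", "Addresses"))      // ResponseAddresses
	fmt.Println(compoundTypeNameSketch("ResponseAddresses", "slice")) // ResponseAddressesSlice
	fmt.Println(compoundTypeNameSketch("Response", "Job"))            // ResponseJob
}
```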
|
||||
|
||||
t.Run("All panic types without unique names", func(t *testing.T) {
|
||||
types := NewTypes()
|
||||
types.Register(typeCustomName{
|
||||
Type: reflect.TypeOf(struct {
|
||||
Name string
|
||||
Addresses []struct {
|
||||
Address string
|
||||
PO string
|
||||
}
|
||||
Job struct {
|
||||
Company string
|
||||
Position string
|
||||
StartingYear uint
|
||||
}
|
||||
Documents []struct {
|
||||
Path string
|
||||
Content string
|
||||
Valoration testTypesValoration
|
||||
}
|
||||
}{}),
|
||||
name: "Response",
|
||||
})
|
||||
|
||||
types.Register(typeCustomName{
|
||||
Type: reflect.TypeOf(struct {
|
||||
Reference string
|
||||
}{}),
|
||||
name: "Response",
|
||||
})
|
||||
|
||||
require.Panics(t, func() {
|
||||
types.All()
|
||||
})
|
||||
})
|
||||
}
|
@ -27,7 +27,9 @@ message DiskSpaceResponse {
|
||||
int64 allocated = 1;
|
||||
int64 used_pieces = 2;
|
||||
int64 used_trash = 3;
|
||||
// Free is the actual amount of free space on the whole disk, not just allocated disk space, in bytes.
|
||||
int64 free = 4;
|
||||
// Available is the amount of free space on the allocated disk space, in bytes.
|
||||
int64 available = 5;
|
||||
int64 overused = 6;
|
||||
}
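A worked example with illustrative numbers: on a 2 TB disk where 0.5 TB is occupied by other data, a node allocated 1 TB that currently stores 0.3 TB of pieces and trash would report `free` of roughly 1.2 TB (whole-disk free space) but `available` of 0.7 TB (the unused remainder of the 1 TB allocation).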
|
||||
|
@ -55,6 +55,7 @@ func (sender *SMTPSender) communicate(ctx context.Context, client *smtp.Client,
|
||||
// before creating SMTPSender
|
||||
host, _, _ := net.SplitHostPort(sender.ServerAddress)
|
||||
|
||||
if sender.Auth != nil {
|
||||
// send smtp hello or ehlo msg and establish connection over tls
|
||||
err := client.StartTLS(&tls.Config{ServerName: host})
|
||||
if err != nil {
|
||||
@ -65,8 +66,9 @@ func (sender *SMTPSender) communicate(ctx context.Context, client *smtp.Client,
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = client.Mail(sender.From.Address)
|
||||
err := client.Mail(sender.From.Address)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
51
private/server/fastopen_freebsd.go
Normal file
@ -0,0 +1,51 @@
|
||||
// Copyright (C) 2023 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const tcpFastOpen = 1025
|
||||
|
||||
func setTCPFastOpen(fd uintptr, _queue int) error {
|
||||
return syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, tcpFastOpen, 1)
|
||||
}
|
||||
|
||||
var tryInitFastOpenOnce sync.Once
|
||||
var initFastOpenPossiblyEnabled bool
|
||||
|
||||
// tryInitFastOpen returns true if fastopen support is possibly enabled.
|
||||
func tryInitFastOpen(log *zap.Logger) bool {
|
||||
tryInitFastOpenOnce.Do(func() {
|
||||
initFastOpenPossiblyEnabled = true
|
||||
output, err := exec.Command("sysctl", "-n", "net.inet.tcp.fastopen.server_enable").Output()
|
||||
if err != nil {
|
||||
log.Sugar().Infof("kernel support for tcp fast open unknown")
|
||||
initFastOpenPossiblyEnabled = true
|
||||
return
|
||||
}
|
||||
enabled, err := strconv.ParseBool(strings.TrimSpace(string(output)))
|
||||
if err != nil {
|
||||
log.Sugar().Infof("kernel support for tcp fast open unparsable")
|
||||
initFastOpenPossiblyEnabled = true
|
||||
return
|
||||
}
|
||||
if enabled {
|
||||
log.Sugar().Infof("kernel support for server-side tcp fast open enabled.")
|
||||
} else {
|
||||
log.Sugar().Infof("kernel support for server-side tcp fast open not enabled.")
|
||||
log.Sugar().Infof("enable with: sysctl net.inet.tcp.fastopen.server_enable=1")
|
||||
log.Sugar().Infof("enable on-boot by setting net.inet.tcp.fastopen.server_enable=1 in /etc/sysctl.conf")
|
||||
}
|
||||
initFastOpenPossiblyEnabled = enabled
|
||||
})
|
||||
return initFastOpenPossiblyEnabled
|
||||
}
|
@ -1,8 +1,8 @@
|
||||
// Copyright (C) 2023 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
//go:build !linux && !windows
|
||||
// +build !linux,!windows
|
||||
//go:build !linux && !windows && !freebsd
|
||||
// +build !linux,!windows,!freebsd
|
||||
|
||||
package server
|
||||
|
||||
|
@ -4,22 +4,44 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const tcpFastOpenServer = 15
|
||||
const tcpFastOpen = 15 // Corresponds to TCP_FASTOPEN from MS SDK
|
||||
|
||||
func setTCPFastOpen(fd uintptr, queue int) error {
|
||||
return syscall.SetsockoptInt(syscall.Handle(fd), syscall.IPPROTO_TCP, tcpFastOpenServer, 1)
|
||||
return syscall.SetsockoptInt(syscall.Handle(fd), syscall.IPPROTO_TCP, tcpFastOpen, 1)
|
||||
}
|
||||
|
||||
var tryInitFastOpenOnce sync.Once
|
||||
var initFastOpenPossiblyEnabled bool
|
||||
|
||||
// tryInitFastOpen returns true if fastopen support is possibly enabled.
|
||||
func tryInitFastOpen(*zap.Logger) bool {
|
||||
// should we log or check something along the lines of
|
||||
// netsh int tcp set global fastopen=enabled
|
||||
// netsh int tcp set global fastopenfallback=disabled
|
||||
// ?
|
||||
return false
|
||||
tryInitFastOpenOnce.Do(func() {
|
||||
// TCP-FASTOPEN is supported as of Windows 10 build 1607, but is
|
||||
// enabled per socket. If the socket option isn't supported then the
|
||||
// call to opt-in will fail. So as long as we can set up a listening
|
||||
// socket with the right socket option set, we should be good.
|
||||
if listener, err := (&net.ListenConfig{
|
||||
Control: func(network, addr string, c syscall.RawConn) error {
|
||||
var sockOptErr error
|
||||
if controlErr := c.Control(func(fd uintptr) {
|
||||
sockOptErr = setTCPFastOpen(fd, 0) // queue is unused
|
||||
}); controlErr != nil {
|
||||
return controlErr
|
||||
}
|
||||
return sockOptErr
|
||||
},
|
||||
}).Listen(context.Background(), "tcp", "127.0.0.1:0"); err == nil {
|
||||
listener.Close()
|
||||
initFastOpenPossiblyEnabled = true
|
||||
}
|
||||
})
|
||||
return initFastOpenPossiblyEnabled
|
||||
}
|
||||
|
@ -95,6 +95,8 @@ func TestHybridConnector_Basic(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHybridConnector_QUICOnly(t *testing.T) {
|
||||
t.Skip("QUIC is currently broken")
|
||||
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1,
|
||||
StorageNodeCount: 0,
|
||||
|
@ -66,10 +66,10 @@ type Satellite struct {
|
||||
|
||||
Core *satellite.Core
|
||||
API *satellite.API
|
||||
UI *satellite.UI
|
||||
Repairer *satellite.Repairer
|
||||
Auditor *satellite.Auditor
|
||||
Admin *satellite.Admin
|
||||
GC *satellite.GarbageCollection
|
||||
GCBF *satellite.GarbageCollectionBF
|
||||
RangedLoop *satellite.RangedLoop
|
||||
|
||||
@ -173,12 +173,17 @@ type Satellite struct {
|
||||
Service *mailservice.Service
|
||||
}
|
||||
|
||||
Console struct {
|
||||
ConsoleBackend struct {
|
||||
Listener net.Listener
|
||||
Service *console.Service
|
||||
Endpoint *consoleweb.Server
|
||||
}
|
||||
|
||||
ConsoleFrontend struct {
|
||||
Listener net.Listener
|
||||
Endpoint *consoleweb.Server
|
||||
}
|
||||
|
||||
NodeStats struct {
|
||||
Endpoint *nodestats.Endpoint
|
||||
}
|
||||
@ -256,7 +261,7 @@ func (system *Satellite) AddProject(ctx context.Context, ownerID uuid.UUID, name
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
project, err := system.API.Console.Service.CreateProject(ctx, console.ProjectInfo{
|
||||
project, err := system.API.Console.Service.CreateProject(ctx, console.UpsertProjectInfo{
|
||||
Name: name,
|
||||
})
|
||||
if err != nil {
|
||||
@ -285,7 +290,6 @@ func (system *Satellite) Close() error {
|
||||
system.Repairer.Close(),
|
||||
system.Auditor.Close(),
|
||||
system.Admin.Close(),
|
||||
system.GC.Close(),
|
||||
system.GCBF.Close(),
|
||||
)
|
||||
}
|
||||
@ -300,6 +304,11 @@ func (system *Satellite) Run(ctx context.Context) (err error) {
|
||||
group.Go(func() error {
|
||||
return errs2.IgnoreCanceled(system.API.Run(ctx))
|
||||
})
|
||||
if system.UI != nil {
|
||||
group.Go(func() error {
|
||||
return errs2.IgnoreCanceled(system.UI.Run(ctx))
|
||||
})
|
||||
}
|
||||
group.Go(func() error {
|
||||
return errs2.IgnoreCanceled(system.Repairer.Run(ctx))
|
||||
})
|
||||
@ -309,9 +318,6 @@ func (system *Satellite) Run(ctx context.Context) (err error) {
|
||||
group.Go(func() error {
|
||||
return errs2.IgnoreCanceled(system.Admin.Run(ctx))
|
||||
})
|
||||
group.Go(func() error {
|
||||
return errs2.IgnoreCanceled(system.GC.Run(ctx))
|
||||
})
|
||||
group.Go(func() error {
|
||||
return errs2.IgnoreCanceled(system.GCBF.Run(ctx))
|
||||
})
|
||||
@ -524,6 +530,15 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
|
||||
// Only run if the front-end endpoints on the console back-end server are disabled.
|
||||
var ui *satellite.UI
|
||||
if !config.Console.FrontendEnable {
|
||||
ui, err = planet.newUI(ctx, index, identity, config, api.ExternalAddress, api.Console.Listener.Addr().String())
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
|
||||
adminPeer, err := planet.newAdmin(ctx, index, identity, db, metabaseDB, config, versionInfo)
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
@ -539,11 +554,6 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
|
||||
gcPeer, err := planet.newGarbageCollection(ctx, index, identity, db, metabaseDB, config, versionInfo)
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
|
||||
gcBFPeer, err := planet.newGarbageCollectionBF(ctx, index, db, metabaseDB, config, versionInfo)
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
@ -558,23 +568,23 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
|
||||
peer.Mail.EmailReminders.TestSetLinkAddress("http://" + api.Console.Listener.Addr().String() + "/")
|
||||
}
|
||||
|
||||
return createNewSystem(prefix, log, config, peer, api, repairerPeer, auditorPeer, adminPeer, gcPeer, gcBFPeer, rangedLoopPeer), nil
|
||||
return createNewSystem(prefix, log, config, peer, api, ui, repairerPeer, auditorPeer, adminPeer, gcBFPeer, rangedLoopPeer), nil
|
||||
}
|
||||
|
||||
// createNewSystem makes a new Satellite System and exposes the same interface from
|
||||
// before we split out the API. In the short term this will help keep all the tests passing
|
||||
// without much modification needed. However, long term, we probably want to rework this
|
||||
// so it represents how the satellite will run when it is made up of many processes.
|
||||
func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer *satellite.Core, api *satellite.API, repairerPeer *satellite.Repairer, auditorPeer *satellite.Auditor, adminPeer *satellite.Admin, gcPeer *satellite.GarbageCollection, gcBFPeer *satellite.GarbageCollectionBF, rangedLoopPeer *satellite.RangedLoop) *Satellite {
|
||||
func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer *satellite.Core, api *satellite.API, ui *satellite.UI, repairerPeer *satellite.Repairer, auditorPeer *satellite.Auditor, adminPeer *satellite.Admin, gcBFPeer *satellite.GarbageCollectionBF, rangedLoopPeer *satellite.RangedLoop) *Satellite {
|
||||
system := &Satellite{
|
||||
Name: name,
|
||||
Config: config,
|
||||
Core: peer,
|
||||
API: api,
|
||||
UI: ui,
|
||||
Repairer: repairerPeer,
|
||||
Auditor: auditorPeer,
|
||||
Admin: adminPeer,
|
||||
GC: gcPeer,
|
||||
GCBF: gcBFPeer,
|
||||
RangedLoop: rangedLoopPeer,
|
||||
}
|
||||
@ -622,7 +632,7 @@ func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer
|
||||
system.Audit.Reporter = auditorPeer.Audit.Reporter
|
||||
system.Audit.ContainmentSyncChore = peer.Audit.ContainmentSyncChore
|
||||
|
||||
system.GarbageCollection.Sender = gcPeer.GarbageCollection.Sender
|
||||
system.GarbageCollection.Sender = peer.GarbageCollection.Sender
|
||||
|
||||
system.ExpiredDeletion.Chore = peer.ExpiredDeletion.Chore
|
||||
system.ZombieDeletion.Chore = peer.ZombieDeletion.Chore
|
||||
@ -666,6 +676,15 @@ func (planet *Planet) newAPI(ctx context.Context, index int, identity *identity.
|
||||
return satellite.NewAPI(log, identity, db, metabaseDB, revocationDB, liveAccounting, rollupsWriteCache, &config, versionInfo, nil)
|
||||
}
|
||||
|
||||
func (planet *Planet) newUI(ctx context.Context, index int, identity *identity.FullIdentity, config satellite.Config, satelliteAddr, consoleAPIAddr string) (_ *satellite.UI, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
prefix := "satellite-ui" + strconv.Itoa(index)
|
||||
log := planet.log.Named(prefix)
|
||||
|
||||
return satellite.NewUI(log, identity, &config, nil, satelliteAddr, consoleAPIAddr)
|
||||
}
|
||||
|
||||
func (planet *Planet) newAdmin(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, metabaseDB *metabase.DB, config satellite.Config, versionInfo version.Info) (_ *satellite.Admin, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
@ -713,20 +732,6 @@ func (cache rollupsWriteCacheCloser) Close() error {
|
||||
return cache.RollupsWriteCache.CloseAndFlush(context.TODO())
|
||||
}
|
||||
|
||||
func (planet *Planet) newGarbageCollection(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, metabaseDB *metabase.DB, config satellite.Config, versionInfo version.Info) (_ *satellite.GarbageCollection, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
prefix := "satellite-gc" + strconv.Itoa(index)
|
||||
log := planet.log.Named(prefix)
|
||||
|
||||
revocationDB, err := revocation.OpenDBFromCfg(ctx, config.Server.Config)
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
planet.databases = append(planet.databases, revocationDB)
|
||||
return satellite.NewGarbageCollection(log, identity, db, metabaseDB, revocationDB, versionInfo, &config, nil)
|
||||
}
|
||||
|
||||
func (planet *Planet) newGarbageCollectionBF(ctx context.Context, index int, db satellite.DB, metabaseDB *metabase.DB, config satellite.Config, versionInfo version.Info) (_ *satellite.GarbageCollectionBF, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
@ -746,7 +751,6 @@ func (planet *Planet) newRangedLoop(ctx context.Context, index int, db satellite
|
||||
|
||||
prefix := "satellite-ranged-loop" + strconv.Itoa(index)
|
||||
log := planet.log.Named(prefix)
|
||||
|
||||
return satellite.NewRangedLoop(log, db, metabaseDB, &config, nil)
|
||||
}
|
||||
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"storj.io/common/peertls/tlsopts"
|
||||
"storj.io/common/storj"
|
||||
"storj.io/private/debug"
|
||||
"storj.io/storj/cmd/storagenode/internalcmd"
|
||||
"storj.io/storj/private/revocation"
|
||||
"storj.io/storj/private/server"
|
||||
"storj.io/storj/storagenode"
|
||||
@ -215,6 +216,10 @@ func (planet *Planet) newStorageNode(ctx context.Context, prefix string, index,
|
||||
MinDownloadTimeout: 2 * time.Minute,
|
||||
},
|
||||
}
|
||||
|
||||
// enable the lazy filewalker
|
||||
config.Pieces.EnableLazyFilewalker = true
|
||||
|
||||
if planet.config.Reconfigure.StorageNode != nil {
|
||||
planet.config.Reconfigure.StorageNode(index, &config)
|
||||
}
|
||||
@ -275,6 +280,21 @@ func (planet *Planet) newStorageNode(ctx context.Context, prefix string, index,
|
||||
return nil, errs.New("error while trying to issue new api key: %v", err)
|
||||
}
|
||||
|
||||
{
|
||||
// set up the used-space lazy filewalker
|
||||
cmd := internalcmd.NewUsedSpaceFilewalkerCmd()
|
||||
cmd.Logger = log.Named("used-space-filewalker")
|
||||
cmd.Ctx = ctx
|
||||
peer.Storage2.LazyFileWalker.TestingSetUsedSpaceCmd(cmd)
|
||||
}
|
||||
{
|
||||
// set up the GC lazy filewalker
|
||||
cmd := internalcmd.NewGCFilewalkerCmd()
|
||||
cmd.Logger = log.Named("gc-filewalker")
|
||||
cmd.Ctx = ctx
|
||||
peer.Storage2.LazyFileWalker.TestingSetGCCmd(cmd)
|
||||
}
|
||||
|
||||
return &StorageNode{
|
||||
Name: prefix,
|
||||
Config: config,
|
||||
|
@ -27,6 +27,7 @@ import (
|
||||
"storj.io/storj/private/revocation"
|
||||
"storj.io/storj/private/server"
|
||||
"storj.io/storj/private/testplanet"
|
||||
"storj.io/storj/satellite/nodeselection"
|
||||
"storj.io/uplink"
|
||||
"storj.io/uplink/private/metaclient"
|
||||
)
|
||||
@ -105,9 +106,15 @@ func TestDownloadWithSomeNodesOffline(t *testing.T) {
|
||||
}
|
||||
|
||||
// confirm that we marked the correct number of storage nodes as offline
|
||||
nodes, err := satellite.Overlay.Service.Reliable(ctx)
|
||||
allNodes, err := satellite.Overlay.Service.GetParticipatingNodes(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, nodes, len(planet.StorageNodes)-toKill)
|
||||
online := make([]nodeselection.SelectedNode, 0, len(allNodes))
|
||||
for _, node := range allNodes {
|
||||
if node.Online {
|
||||
online = append(online, node)
|
||||
}
|
||||
}
|
||||
require.Len(t, online, len(planet.StorageNodes)-toKill)
|
||||
|
||||
// we should be able to download data without any of the original nodes
|
||||
newData, err := ul.Download(ctx, satellite, "testbucket", "test/path")
|
||||
|
@ -6,16 +6,16 @@ package version
|
||||
import _ "unsafe" // needed for go:linkname
|
||||
|
||||
//go:linkname buildTimestamp storj.io/private/version.buildTimestamp
|
||||
var buildTimestamp string
|
||||
var buildTimestamp string = "1696509940"
|
||||
|
||||
//go:linkname buildCommitHash storj.io/private/version.buildCommitHash
|
||||
var buildCommitHash string
|
||||
var buildCommitHash string = "98e778e26c859329ff016e7ef0ed2aaed40e52d0"
|
||||
|
||||
//go:linkname buildVersion storj.io/private/version.buildVersion
|
||||
var buildVersion string
|
||||
var buildVersion string = "v1.89.5"
|
||||
|
||||
//go:linkname buildRelease storj.io/private/version.buildRelease
|
||||
var buildRelease string
|
||||
var buildRelease string = "true"
|
||||
|
||||
// ensure that linter understands that the variables are being used.
|
||||
func init() { use(buildTimestamp, buildCommitHash, buildVersion, buildRelease) }
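Presumably these hard-coded values pin the release metadata for binaries built from this branch; ordinarily the `go:linkname`d variables in `storj.io/private/version` are left empty and injected at build time, for example via `go build -ldflags "-X storj.io/private/version.buildVersion=v1.89.5"`.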
|
||||
|
@ -4,24 +4,32 @@
|
||||
package web
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"storj.io/common/http/requestid"
|
||||
)
|
||||
|
||||
// ServeJSONError writes a JSON error to the response output stream.
|
||||
func ServeJSONError(log *zap.Logger, w http.ResponseWriter, status int, err error) {
|
||||
ServeCustomJSONError(log, w, status, err, err.Error())
|
||||
func ServeJSONError(ctx context.Context, log *zap.Logger, w http.ResponseWriter, status int, err error) {
|
||||
ServeCustomJSONError(ctx, log, w, status, err, err.Error())
|
||||
}
|
||||
|
||||
// ServeCustomJSONError writes a JSON error with a custom message to the response output stream.
|
||||
func ServeCustomJSONError(log *zap.Logger, w http.ResponseWriter, status int, err error, msg string) {
|
||||
func ServeCustomJSONError(ctx context.Context, log *zap.Logger, w http.ResponseWriter, status int, err error, msg string) {
|
||||
fields := []zap.Field{
|
||||
zap.Int("code", status),
|
||||
zap.String("message", msg),
|
||||
zap.Error(err),
|
||||
}
|
||||
|
||||
if requestID := requestid.FromContext(ctx); requestID != "" {
|
||||
fields = append(fields, zap.String("requestID", requestID))
|
||||
}
|
||||
|
||||
switch status {
|
||||
case http.StatusNoContent:
|
||||
return
|
||||
|
@ -87,12 +87,12 @@ func (rl *RateLimiter) Limit(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
key, err := rl.keyFunc(r)
|
||||
if err != nil {
|
||||
ServeCustomJSONError(rl.log, w, http.StatusInternalServerError, err, internalServerErrMsg)
|
||||
ServeCustomJSONError(r.Context(), rl.log, w, http.StatusInternalServerError, err, internalServerErrMsg)
|
||||
return
|
||||
}
|
||||
limit := rl.getUserLimit(key)
|
||||
if !limit.Allow() {
|
||||
ServeJSONError(rl.log, w, http.StatusTooManyRequests, errs.New(rateLimitErrMsg))
|
||||
ServeJSONError(r.Context(), rl.log, w, http.StatusTooManyRequests, errs.New(rateLimitErrMsg))
|
||||
return
|
||||
}
|
||||
next.ServeHTTP(w, r)
|
||||
|
@ -111,16 +111,16 @@ type ProjectUsageByDay struct {
|
||||
|
||||
// BucketUsage consists of the total bucket usage for a period.
|
||||
type BucketUsage struct {
|
||||
ProjectID uuid.UUID
|
||||
BucketName string
|
||||
ProjectID uuid.UUID `json:"projectID"`
|
||||
BucketName string `json:"bucketName"`
|
||||
|
||||
Storage float64
|
||||
Egress float64
|
||||
ObjectCount int64
|
||||
SegmentCount int64
|
||||
Storage float64 `json:"storage"`
|
||||
Egress float64 `json:"egress"`
|
||||
ObjectCount int64 `json:"objectCount"`
|
||||
SegmentCount int64 `json:"segmentCount"`
|
||||
|
||||
Since time.Time
|
||||
Before time.Time
|
||||
Since time.Time `json:"since"`
|
||||
Before time.Time `json:"before"`
|
||||
}
|
||||
|
||||
// BucketUsageCursor holds info for bucket usage
|
||||
@ -133,15 +133,15 @@ type BucketUsageCursor struct {
|
||||
|
||||
// BucketUsagePage represents bucket usage page result.
|
||||
type BucketUsagePage struct {
|
||||
BucketUsages []BucketUsage
|
||||
BucketUsages []BucketUsage `json:"bucketUsages"`
|
||||
|
||||
Search string
|
||||
Limit uint
|
||||
Offset uint64
|
||||
Search string `json:"search"`
|
||||
Limit uint `json:"limit"`
|
||||
Offset uint64 `json:"offset"`
|
||||
|
||||
PageCount uint
|
||||
CurrentPage uint
|
||||
TotalCount uint64
|
||||
PageCount uint `json:"pageCount"`
|
||||
CurrentPage uint `json:"currentPage"`
|
||||
TotalCount uint64 `json:"totalCount"`
|
||||
}
|
||||
|
||||
// BucketUsageRollup is total bucket usage info
|
||||
@ -219,6 +219,8 @@ type ProjectAccounting interface {
|
||||
GetProjectSettledBandwidthTotal(ctx context.Context, projectID uuid.UUID, from time.Time) (_ int64, err error)
|
||||
// GetProjectBandwidth returns project allocated bandwidth for the specified year, month and day.
|
||||
GetProjectBandwidth(ctx context.Context, projectID uuid.UUID, year int, month time.Month, day int, asOfSystemInterval time.Duration) (int64, error)
|
||||
// GetProjectSettledBandwidth returns the used settled bandwidth for the specified year and month.
|
||||
GetProjectSettledBandwidth(ctx context.Context, projectID uuid.UUID, year int, month time.Month, asOfSystemInterval time.Duration) (int64, error)
|
||||
// GetProjectDailyBandwidth returns bandwidth (allocated and settled) for the specified day.
|
||||
GetProjectDailyBandwidth(ctx context.Context, projectID uuid.UUID, year int, month time.Month, day int) (int64, int64, int64, error)
|
||||
// DeleteProjectBandwidthBefore deletes project bandwidth rollups before the given time
|
||||
|
@ -26,6 +26,7 @@ type Config struct {
|
||||
StorageBackend string `help:"what to use for storing real-time accounting data"`
|
||||
BandwidthCacheTTL time.Duration `default:"5m" help:"bandwidth cache key time to live"`
|
||||
AsOfSystemInterval time.Duration `default:"-10s" help:"as of system interval"`
|
||||
BatchSize int `default:"5000" help:"how many projects' usage should be requested from the redis cache at once"`
|
||||
}
|
||||
|
||||
// OpenCache creates a new accounting.Cache instance using the type specified backend in
|
||||
@ -49,7 +50,7 @@ func OpenCache(ctx context.Context, log *zap.Logger, config Config) (accounting.
|
||||
backendType = parts[0]
|
||||
switch backendType {
|
||||
case "redis":
|
||||
return openRedisLiveAccounting(ctx, config.StorageBackend)
|
||||
return openRedisLiveAccounting(ctx, config.StorageBackend, config.BatchSize)
|
||||
default:
|
||||
return nil, Error.New("unrecognized live accounting backend specifier %q. Currently only redis is supported", backendType)
|
||||
}
|
||||
|
@ -6,6 +6,7 @@ package live_test
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -136,22 +137,31 @@ func TestGetAllProjectTotals(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
usage, err := cache.GetAllProjectTotals(ctx)
|
||||
for _, batchSize := range []int{1, 2, 3, 10, 13, 10000} {
|
||||
t.Run("batch-size-"+strconv.Itoa(batchSize), func(t *testing.T) {
|
||||
config.BatchSize = batchSize
|
||||
testCache, err := live.OpenCache(ctx, zaptest.NewLogger(t).Named("live-accounting"), config)
|
||||
require.NoError(t, err)
|
||||
defer ctx.Check(testCache.Close)
|
||||
|
||||
usage, err := testCache.GetAllProjectTotals(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, usage, len(projectIDs))
|
||||
|
||||
// make sure each project ID and total was received
|
||||
for _, projID := range projectIDs {
|
||||
totalStorage, err := cache.GetProjectStorageUsage(ctx, projID)
|
||||
totalStorage, err := testCache.GetProjectStorageUsage(ctx, projID)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, totalStorage, usage[projID].Storage)
|
||||
|
||||
totalSegments, err := cache.GetProjectSegmentUsage(ctx, projID)
|
||||
totalSegments, err := testCache.GetProjectSegmentUsage(ctx, projID)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, totalSegments, usage[projID].Segments)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLiveAccountingCache_ProjectBandwidthUsage_expiration(t *testing.T) {
|
||||
|
@ -11,6 +11,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/zeebo/errs/v2"
|
||||
|
||||
"storj.io/common/uuid"
|
||||
"storj.io/storj/satellite/accounting"
|
||||
@ -18,6 +19,8 @@ import (
|
||||
|
||||
type redisLiveAccounting struct {
|
||||
client *redis.Client
|
||||
|
||||
batchSize int
|
||||
}
|
||||
|
||||
// openRedisLiveAccounting returns a redisLiveAccounting cache instance.
|
||||
@ -29,7 +32,7 @@ type redisLiveAccounting struct {
|
||||
// it fails then it returns an instance and accounting.ErrSystemOrNetError
|
||||
// because it means that Redis may not be operative at this precise moment but
|
||||
// it may be in future method calls as it automatically handles reconnects.
|
||||
func openRedisLiveAccounting(ctx context.Context, address string) (*redisLiveAccounting, error) {
|
||||
func openRedisLiveAccounting(ctx context.Context, address string, batchSize int) (*redisLiveAccounting, error) {
|
||||
opts, err := redis.ParseURL(address)
|
||||
if err != nil {
|
||||
return nil, accounting.ErrInvalidArgument.Wrap(err)
|
||||
@ -37,6 +40,7 @@ func openRedisLiveAccounting(ctx context.Context, address string) (*redisLiveAcc
|
||||
|
||||
cache := &redisLiveAccounting{
|
||||
client: redis.NewClient(opts),
|
||||
batchSize: batchSize,
|
||||
}
|
||||
|
||||
// ping here to verify we are able to connect to Redis with the initialized client.
|
||||
@ -52,7 +56,7 @@ func openRedisLiveAccounting(ctx context.Context, address string) (*redisLiveAcc
|
||||
func (cache *redisLiveAccounting) GetProjectStorageUsage(ctx context.Context, projectID uuid.UUID) (totalUsed int64, err error) {
|
||||
defer mon.Task()(&ctx, projectID)(&err)
|
||||
|
||||
return cache.getInt64(ctx, string(projectID[:]))
|
||||
return cache.getInt64(ctx, createStorageProjectIDKey(projectID))
|
||||
}
|
||||
|
||||
// GetProjectBandwidthUsage returns the current bandwidth usage
|
||||
@ -175,7 +179,7 @@ func (cache *redisLiveAccounting) AddProjectSegmentUsageUpToLimit(ctx context.Co
|
||||
func (cache *redisLiveAccounting) AddProjectStorageUsage(ctx context.Context, projectID uuid.UUID, spaceUsed int64) (err error) {
|
||||
defer mon.Task()(&ctx, projectID, spaceUsed)(&err)
|
||||
|
||||
_, err = cache.client.IncrBy(ctx, string(projectID[:]), spaceUsed).Result()
|
||||
_, err = cache.client.IncrBy(ctx, createStorageProjectIDKey(projectID), spaceUsed).Result()
|
||||
if err != nil {
|
||||
return accounting.ErrSystemOrNetError.New("Redis incrby failed: %w", err)
|
||||
}
|
||||
@ -216,6 +220,7 @@ func (cache *redisLiveAccounting) GetAllProjectTotals(ctx context.Context) (_ ma
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
projects := make(map[uuid.UUID]accounting.Usage)
|
||||
|
||||
it := cache.client.Scan(ctx, 0, "*", 0).Iterator()
|
||||
for it.Next(ctx) {
|
||||
key := it.Val()
|
||||
@ -231,58 +236,112 @@ func (cache *redisLiveAccounting) GetAllProjectTotals(ctx context.Context) (_ ma
|
||||
return nil, accounting.ErrUnexpectedValue.New("cannot parse the key as UUID; key=%q", key)
|
||||
}
|
||||
|
||||
usage := accounting.Usage{}
|
||||
if seenUsage, seen := projects[projectID]; seen {
|
||||
if seenUsage.Segments != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
usage = seenUsage
|
||||
}
|
||||
|
||||
segmentUsage, err := cache.GetProjectSegmentUsage(ctx, projectID)
|
||||
if err != nil {
|
||||
if accounting.ErrKeyNotFound.Has(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
usage.Segments = segmentUsage
|
||||
projects[projectID] = usage
|
||||
projects[projectID] = accounting.Usage{}
|
||||
} else {
|
||||
projectID, err := uuid.FromBytes([]byte(key))
|
||||
if err != nil {
|
||||
return nil, accounting.ErrUnexpectedValue.New("cannot parse the key as UUID; key=%q", key)
|
||||
}
|
||||
|
||||
usage := accounting.Usage{}
|
||||
if seenUsage, seen := projects[projectID]; seen {
|
||||
if seenUsage.Storage != 0 {
|
||||
continue
|
||||
projects[projectID] = accounting.Usage{}
|
||||
}
|
||||
}
|
||||
|
||||
usage = seenUsage
|
||||
return cache.fillUsage(ctx, projects)
|
||||
}
|
||||
|
||||
func (cache *redisLiveAccounting) fillUsage(ctx context.Context, projects map[uuid.UUID]accounting.Usage) (_ map[uuid.UUID]accounting.Usage, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
if len(projects) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
storageUsage, err := cache.getInt64(ctx, key)
|
||||
projectIDs := make([]uuid.UUID, 0, cache.batchSize)
|
||||
segmentKeys := make([]string, 0, cache.batchSize)
|
||||
storageKeys := make([]string, 0, cache.batchSize)
|
||||
|
||||
fetchProjectsUsage := func() error {
|
||||
if len(projectIDs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
segmentResult, err := cache.client.MGet(ctx, segmentKeys...).Result()
|
||||
if err != nil {
|
||||
if accounting.ErrKeyNotFound.Has(err) {
|
||||
continue
|
||||
return accounting.ErrGetProjectLimitCache.Wrap(err)
|
||||
}
|
||||
|
||||
storageResult, err := cache.client.MGet(ctx, storageKeys...).Result()
|
||||
if err != nil {
|
||||
return accounting.ErrGetProjectLimitCache.Wrap(err)
|
||||
}
|
||||
|
||||
// Note, because we are using a cache, it might be empty and not contain the
|
||||
// information we are looking for, or the values might still be empty for some reason.
|
||||
|
||||
for i, projectID := range projectIDs {
|
||||
segmentsUsage, err := parseAnyAsInt64(segmentResult[i])
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
|
||||
storageUsage, err := parseAnyAsInt64(storageResult[i])
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
|
||||
projects[projectID] = accounting.Usage{
|
||||
Segments: segmentsUsage,
|
||||
Storage: storageUsage,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
for projectID := range projects {
|
||||
projectIDs = append(projectIDs, projectID)
|
||||
segmentKeys = append(segmentKeys, createSegmentProjectIDKey(projectID))
|
||||
storageKeys = append(storageKeys, createStorageProjectIDKey(projectID))
|
||||
|
||||
if len(projectIDs) >= cache.batchSize {
|
||||
err := fetchProjectsUsage()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
usage.Storage = storageUsage
|
||||
projects[projectID] = usage
|
||||
projectIDs = projectIDs[:0]
|
||||
segmentKeys = segmentKeys[:0]
|
||||
storageKeys = storageKeys[:0]
|
||||
}
|
||||
}
|
||||
|
||||
err = fetchProjectsUsage()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return projects, nil
|
||||
}
|
||||
|
||||
func parseAnyAsInt64(v any) (int64, error) {
|
||||
if v == nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
s, ok := v.(string)
|
||||
if !ok {
|
||||
return 0, accounting.ErrUnexpectedValue.New("cannot parse the value as int64; val=%q", v)
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil {
|
||||
return 0, accounting.ErrUnexpectedValue.New("cannot parse the value as int64; val=%q", v)
|
||||
}
|
||||
|
||||
return i, nil
|
||||
}
|
||||
|
||||
// Close the DB connection.
|
||||
func (cache *redisLiveAccounting) Close() error {
|
||||
err := cache.client.Close()
|
||||
@ -325,3 +384,8 @@ func createBandwidthProjectIDKey(projectID uuid.UUID, now time.Time) string {
|
||||
func createSegmentProjectIDKey(projectID uuid.UUID) string {
|
||||
return string(projectID[:]) + ":segment"
|
||||
}
|
||||
|
||||
// createStorageProjectIDKey creates the storage project key.
|
||||
func createStorageProjectIDKey(projectID uuid.UUID) string {
|
||||
return string(projectID[:])
|
||||
}
|
||||
|
@ -41,6 +41,7 @@ type ProjectLimitConfig struct {
|
||||
// each project ID if they differ from the default limits.
|
||||
type ProjectLimitCache struct {
|
||||
projectLimitDB ProjectLimitDB
|
||||
|
||||
defaultMaxUsage memory.Size
|
||||
defaultMaxBandwidth memory.Size
|
||||
defaultMaxSegments int64
|
||||
@ -121,10 +122,6 @@ func (c *ProjectLimitCache) getProjectLimits(ctx context.Context, projectID uuid
|
||||
defaultSegments := c.defaultMaxSegments
|
||||
projectLimits.Segments = &defaultSegments
|
||||
}
|
||||
if projectLimits.Segments == nil {
|
||||
defaultSegments := c.defaultMaxSegments
|
||||
projectLimits.Segments = &defaultSegments
|
||||
}
|
||||
|
||||
return projectLimits, nil
|
||||
}
|
||||
|
@ -218,6 +218,17 @@ func (usage *Service) GetProjectBandwidthTotals(ctx context.Context, projectID u
|
||||
return total, ErrProjectUsage.Wrap(err)
|
||||
}
|
||||
|
||||
// GetProjectSettledBandwidth returns total amount of settled bandwidth used for past 30 days.
|
||||
func (usage *Service) GetProjectSettledBandwidth(ctx context.Context, projectID uuid.UUID) (_ int64, err error) {
|
||||
defer mon.Task()(&ctx, projectID)(&err)
|
||||
|
||||
// from the beginning of the current month
|
||||
year, month, _ := usage.nowFn().Date()
|
||||
|
||||
total, err := usage.projectAccountingDB.GetProjectSettledBandwidth(ctx, projectID, year, month, usage.asOfSystemInterval)
|
||||
return total, ErrProjectUsage.Wrap(err)
|
||||
}
|
||||
|
||||
// GetProjectSegmentTotals returns total amount of allocated segments used for past 30 days.
|
||||
func (usage *Service) GetProjectSegmentTotals(ctx context.Context, projectID uuid.UUID) (total int64, err error) {
|
||||
defer mon.Task()(&ctx, projectID)(&err)
|
||||
@ -319,3 +330,8 @@ func (usage *Service) AddProjectStorageUsage(ctx context.Context, projectID uuid
|
||||
func (usage *Service) SetNow(now func() time.Time) {
|
||||
usage.nowFn = now
|
||||
}
|
||||
|
||||
// TestSetAsOfSystemInterval allows tests to set Service asOfSystemInterval value.
|
||||
func (usage *Service) TestSetAsOfSystemInterval(asOfSystemInterval time.Duration) {
|
||||
usage.asOfSystemInterval = asOfSystemInterval
|
||||
}
|
||||
|
@ -182,7 +182,8 @@ func TestProjectSegmentLimit(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
data := testrand.Bytes(160 * memory.KiB)
|
||||
// Tally self-corrects live accounting; however, it may cause things to be temporarily off by a few segments.
|
||||
planet.Satellites[0].Accounting.Tally.Loop.Pause()
|
||||
|
||||
// set limit manually to 10 segments
|
||||
accountingDB := planet.Satellites[0].DB.ProjectAccounting()
|
||||
@ -190,6 +191,7 @@ func TestProjectSegmentLimit(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// successful upload
|
||||
data := testrand.Bytes(160 * memory.KiB)
|
||||
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path/0", data)
|
||||
require.NoError(t, err)
|
||||
|
||||
@ -203,14 +205,17 @@ func TestProjectSegmentLimit(t *testing.T) {
|
||||
|
||||
func TestProjectSegmentLimitInline(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, UplinkCount: 1}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
data := testrand.Bytes(1 * memory.KiB)
|
||||
SatelliteCount: 1, UplinkCount: 1,
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
// Tally self-corrects live accounting; however, it may cause things to be temporarily off by a few segments.
|
||||
planet.Satellites[0].Accounting.Tally.Loop.Pause()
|
||||
|
||||
// set limit manually to 10 segments
|
||||
accountingDB := planet.Satellites[0].DB.ProjectAccounting()
|
||||
err := accountingDB.UpdateProjectSegmentLimit(ctx, planet.Uplinks[0].Projects[0].ID, 10)
|
||||
require.NoError(t, err)
|
||||
|
||||
data := testrand.Bytes(1 * memory.KiB)
|
||||
for i := 0; i < 10; i++ {
|
||||
// successful upload
|
||||
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path/"+strconv.Itoa(i), data)
|
||||
@ -260,14 +265,17 @@ func TestProjectBandwidthLimitWithoutCache(t *testing.T) {
|
||||
|
||||
func TestProjectSegmentLimitMultipartUpload(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, UplinkCount: 1}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
data := testrand.Bytes(1 * memory.KiB)
|
||||
SatelliteCount: 1, UplinkCount: 1,
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
// Tally self-corrects live accounting; however, it may cause things to be temporarily off by a few segments.
|
||||
planet.Satellites[0].Accounting.Tally.Loop.Pause()
|
||||
|
||||
// set limit manually to 10 segments
|
||||
accountingDB := planet.Satellites[0].DB.ProjectAccounting()
|
||||
err := accountingDB.UpdateProjectSegmentLimit(ctx, planet.Uplinks[0].Projects[0].ID, 4)
|
||||
require.NoError(t, err)
|
||||
|
||||
data := testrand.Bytes(1 * memory.KiB)
|
||||
for i := 0; i < 4; i++ {
|
||||
// successful upload
|
||||
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path/"+strconv.Itoa(i), data)
|
||||
|
@ -174,11 +174,32 @@ Disables the user's mfa.
|
||||
#### PUT /api/users/{user-email}/freeze
|
||||
|
||||
Freezes a user account so no uploads or downloads may occur.
|
||||
This is a billing freeze, which the user can exit automatically by paying their invoice.
|
||||
|
||||
#### DELETE /api/users/{user-email}/freeze
|
||||
|
||||
Unfreezes a user account so uploads and downloads may resume.
|
||||
|
||||
#### DELETE /api/users/{user-email}/warning
|
||||
|
||||
Removes the warning status from a user's account.
|
||||
|
||||
#### PATCH /api/users/{user-email}/geofence
|
||||
|
||||
Sets the account level geofence for the user.
|
||||
|
||||
Example request:
|
||||
|
||||
```json
|
||||
{
|
||||
"region": "US"
|
||||
}
|
||||
```
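A minimal sketch of issuing this request with Go's standard library; the host, port, and `Authorization` header value are assumptions for illustration, so consult the admin server's configuration for the real address and auth scheme. The freeze endpoints above follow the same pattern with `http.MethodPut`/`http.MethodDelete` and no request body.

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	body := strings.NewReader(`{"region": "US"}`)
	// host and port are hypothetical
	req, err := http.NewRequest(http.MethodPatch,
		"http://localhost:8080/api/users/alice@example.com/geofence", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "<admin-token>") // assumed auth header

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```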
|
||||
|
||||
#### DELETE /api/users/{user-email}/geofence
|
||||
|
||||
Removes the account level geofence for the user.
|
||||
|
||||
### OAuth Client Management
|
||||
|
||||
Manages OAuth clients known to the Satellite.
|
||||
|
@ -29,10 +29,15 @@ func (server *Server) addAPIKey(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
projectUUID, err := uuid.FromString(projectUUIDString)
|
||||
project, err := server.getProjectByAnyID(ctx, projectUUIDString)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
sendJSONError(w, "project with specified uuid does not exist",
|
||||
"", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
sendJSONError(w, "invalid project-uuid",
|
||||
err.Error(), http.StatusBadRequest)
|
||||
sendJSONError(w, "error getting project",
|
||||
err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
@ -60,7 +65,7 @@ func (server *Server) addAPIKey(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = server.db.Console().APIKeys().GetByNameAndProjectID(ctx, input.Name, projectUUID)
|
||||
_, err = server.db.Console().APIKeys().GetByNameAndProjectID(ctx, input.Name, project.ID)
|
||||
if err == nil {
|
||||
sendJSONError(w, "api-key with given name already exists",
|
||||
"", http.StatusConflict)
|
||||
@ -83,7 +88,7 @@ func (server *Server) addAPIKey(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
apikey := console.APIKeyInfo{
|
||||
Name: input.Name,
|
||||
ProjectID: projectUUID,
|
||||
ProjectID: project.ID,
|
||||
Secret: secret,
|
||||
}
|
||||
|
||||
@ -248,10 +253,15 @@ func (server *Server) deleteAPIKeyByName(w http.ResponseWriter, r *http.Request)
|
||||
return
|
||||
}
|
||||
|
||||
projectUUID, err := uuid.FromString(projectUUIDString)
|
||||
project, err := server.getProjectByAnyID(ctx, projectUUIDString)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
sendJSONError(w, "project with specified uuid does not exist",
|
||||
"", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
sendJSONError(w, "invalid project-uuid",
|
||||
err.Error(), http.StatusBadRequest)
|
||||
sendJSONError(w, "error getting project",
|
||||
err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
@ -262,7 +272,7 @@ func (server *Server) deleteAPIKeyByName(w http.ResponseWriter, r *http.Request)
|
||||
return
|
||||
}
|
||||
|
||||
info, err := server.db.Console().APIKeys().GetByNameAndProjectID(ctx, apikeyName, projectUUID)
|
||||
info, err := server.db.Console().APIKeys().GetByNameAndProjectID(ctx, apikeyName, project.ID)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
sendJSONError(w, "API key with specified name does not exist",
|
||||
"", http.StatusNotFound)
|
||||
@ -293,10 +303,15 @@ func (server *Server) listAPIKeys(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
projectUUID, err := uuid.FromString(projectUUIDString)
|
||||
project, err := server.getProjectByAnyID(ctx, projectUUIDString)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
sendJSONError(w, "project with specified uuid does not exist",
|
||||
"", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
sendJSONError(w, "invalid project-uuid",
|
||||
err.Error(), http.StatusBadRequest)
|
||||
sendJSONError(w, "error getting project",
|
||||
err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
@ -304,7 +319,7 @@ func (server *Server) listAPIKeys(w http.ResponseWriter, r *http.Request) {
|
||||
var apiKeys []console.APIKeyInfo
|
||||
for i := uint(1); true; i++ {
|
||||
page, err := server.db.Console().APIKeys().GetPagedByProjectID(
|
||||
ctx, projectUUID, console.APIKeyCursor{
|
||||
ctx, project.ID, console.APIKeyCursor{
|
||||
Limit: apiKeysPerPage,
|
||||
Page: i,
|
||||
Order: console.KeyName,
|
||||
|
5
satellite/admin/back-office/ui/.gitignore
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
node_modules
|
||||
.DS_Store
|
||||
.idea
|
||||
/build/*
|
||||
!/build/.keep
|
57
satellite/admin/back-office/ui/README.md
Normal file
@ -0,0 +1,57 @@
|
||||
# essentials
|
||||
|
||||
## Project setup
|
||||
|
||||
```
|
||||
# yarn
|
||||
yarn
|
||||
|
||||
# npm
|
||||
npm install
|
||||
|
||||
# pnpm
|
||||
pnpm install
|
||||
```
|
||||
|
||||
### Compiles and hot-reloads for development
|
||||
|
||||
```
|
||||
# yarn
|
||||
yarn dev
|
||||
|
||||
# npm
|
||||
npm run dev
|
||||
|
||||
# pnpm
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
### Compiles and minifies for production
|
||||
|
||||
```
|
||||
# yarn
|
||||
yarn build
|
||||
|
||||
# npm
|
||||
npm run build
|
||||
|
||||
# pnpm
|
||||
pnpm build
|
||||
```
|
||||
|
||||
### Lints and fixes files
|
||||
|
||||
```
|
||||
# yarn
|
||||
yarn lint
|
||||
|
||||
# npm
|
||||
npm run lint
|
||||
|
||||
# pnpm
|
||||
pnpm lint
|
||||
```
|
||||
|
||||
### Customize configuration
|
||||
|
||||
See [Configuration Reference](https://vitejs.dev/config/).
|
satellite/admin/back-office/ui/assets.go (new file, 25 lines)
@@ -0,0 +1,25 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

//go:build !noembed
// +build !noembed

package backofficeui

import (
	"embed"
	"fmt"
	"io/fs"
)

//go:embed all:build/*
var assets embed.FS

// Assets contains either the built admin/back-office/ui or it is empty.
var Assets = func() fs.FS {
	build, err := fs.Sub(assets, "build")
	if err != nil {
		panic(fmt.Errorf("invalid embedding: %w", err))
	}
	return build
}()
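
Because `fs.Sub` re-roots the embedded tree at `build/`, consumers can serve `Assets` directly at the web root. A minimal, self-contained sketch of mounting it with `net/http` (the import path follows the repo layout; the satellite wires this up elsewhere, and the port is arbitrary):

package main

import (
	"log"
	"net/http"

	backofficeui "storj.io/storj/satellite/admin/back-office/ui"
)

func main() {
	// index.html, favicon.ico, assets/... resolve directly under "/",
	// since Assets is rooted at the contents of build/.
	http.Handle("/", http.FileServer(http.FS(backofficeui.Assets)))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
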
satellite/admin/back-office/ui/assets_noembed.go (new file, 18 lines)
@@ -0,0 +1,18 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

//go:build noembed
// +build noembed

package backofficeui

import "io/fs"

// Assets contains either the built admin/back-office/ui or it is empty.
var Assets fs.FS = emptyFS{}

// emptyFS implements an empty filesystem
type emptyFS struct{}

// Open implements fs.FS method.
func (emptyFS) Open(name string) (fs.File, error) { return nil, fs.ErrNotExist }
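
Selecting this variant is a build-time choice: compiling with `go build -tags noembed` swaps in the empty filesystem and keeps the binary small when no UI build is present. A small sketch of the observable difference:

package main

import (
	"errors"
	"fmt"
	"io/fs"

	backofficeui "storj.io/storj/satellite/admin/back-office/ui"
)

func main() {
	// Under -tags noembed (or when build/ holds no compiled UI),
	// every lookup fails with fs.ErrNotExist.
	if _, err := backofficeui.Assets.Open("index.html"); errors.Is(err, fs.ErrNotExist) {
		fmt.Println("this binary carries no embedded back-office UI")
	}
}
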
satellite/admin/back-office/ui/build/.keep (new empty file)
satellite/admin/back-office/ui/index.html (new file, 16 lines)
@@ -0,0 +1,16 @@
<!DOCTYPE html>
<html lang="en">

<head>
  <meta charset="UTF-8" />
  <link rel="icon" href="/favicon.ico" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>Storj - Admin</title>
</head>

<body>
  <div id="app"></div>
  <script type="module" src="/src/main.js"></script>
</body>

</html>
satellite/admin/back-office/ui/jsconfig.json (new file, 19 lines)
@@ -0,0 +1,19 @@
{
  "compilerOptions": {
    "target": "es5",
    "module": "esnext",
    "baseUrl": "./",
    "moduleResolution": "node",
    "paths": {
      "@/*": [
        "src/*"
      ]
    },
    "lib": [
      "esnext",
      "dom",
      "dom.iterable",
      "scripthost"
    ]
  }
}
satellite/admin/back-office/ui/package-lock.json (new file, 2475 lines; contents not shown)
satellite/admin/back-office/ui/package.json (new file, 30 lines)
@@ -0,0 +1,30 @@
{
  "name": "admin-ui",
  "version": "0.0.0",
  "private": true,
  "scripts": {
    "dev": "vite",
    "build": "vite build",
    "preview": "vite preview",
    "lint": "eslint . --fix --ignore-path .gitignore"
  },
  "dependencies": {
    "@fontsource-variable/inter": "^5.0.8",
    "@mdi/font": "7.0.96",
    "core-js": "^3.8.3",
    "pinia": "^2.0.23",
    "roboto-fontface": "*",
    "vue": "^3.2.13",
    "vue-router": "^4.1.6",
    "vuetify": "^3.3.5",
    "webfontloader": "^1.0.0"
  },
  "devDependencies": {
    "@vitejs/plugin-vue": "^3.0.3",
    "eslint": "^8.22.0",
    "eslint-plugin-vue": "^9.3.0",
    "sass": "^1.63.6",
    "vite": "^3.1.9",
    "vite-plugin-vuetify": "^1.0.0-alpha.12"
  }
}
satellite/admin/back-office/ui/public/.keep (new empty file)

satellite/admin/back-office/ui/public/favicon.ico (new binary file, 1.2 KiB)
satellite/admin/back-office/ui/public/logo.svg (new file, 6 lines, 39 KiB; rendered as image, contents not shown)
(modified file; path not shown)
@@ -2,9 +2,9 @@
 // See LICENSE for copying information.
 
 <template>
 	<v-main>
 		<router-view />
 	</v-main>
 </template>
 
-<script setup lang="ts" />
+<script setup>
+//
+</script>

(new SVG asset, 1.2 KiB; path not shown)
@@ -0,0 +1,3 @@
<svg width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M4.83987 16.8886L1.47448 17.099C1.17636 17.1176 0.919588 16.891 0.900956 16.5929C0.899551 16.5704 0.899551 16.5479 0.900956 16.5254L1.11129 13.16C1.11951 13.0285 1.17546 12.9045 1.26864 12.8114L5.58927 8.49062L5.57296 8.43619C4.98999 6.44548 5.49345 4.26201 6.96116 2.72323L7.00936 2.67328L7.05933 2.62271C9.35625 0.325796 13.0803 0.325796 15.3772 2.62271C17.6741 4.91963 17.6741 8.64366 15.3772 10.9406C13.8503 12.4674 11.6456 13.0112 9.62856 12.4455L9.56357 12.4269L9.50918 12.4107L5.18856 16.7313C5.09538 16.8244 4.97139 16.8804 4.83987 16.8886ZM2.45229 15.5477L4.38997 15.4266L9.13372 10.6827L9.58862 10.864C11.2073 11.5091 13.072 11.1424 14.3255 9.88889C16.0416 8.17281 16.0416 5.39048 14.3255 3.6744C12.6094 1.95831 9.8271 1.95831 8.11101 3.6744C6.87177 4.91364 6.49924 6.7502 7.11424 8.3559L7.13584 8.41118L7.31711 8.86605L2.57342 13.61L2.45229 15.5477ZM10.7858 7.21411C11.3666 7.79494 12.3083 7.79494 12.8892 7.21411C13.47 6.63328 13.47 5.69157 12.8892 5.11074C12.3083 4.52991 11.3666 4.52991 10.7858 5.11074C10.205 5.69157 10.205 6.63328 10.7858 7.21411Z" fill="#56606D"/>
</svg>

(new SVG asset, 1.1 KiB; path not shown)
@@ -0,0 +1,3 @@
<svg width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M9.06942 1.34998C11.7077 1.34998 13.8465 3.48875 13.8465 6.12706C13.8465 7.71017 13.0764 9.11342 11.8904 9.98265C14.2697 10.8565 16.1436 12.7789 16.9535 15.1905C16.9786 15.265 17.0026 15.34 17.0255 15.4154L17.0592 15.5289C17.2168 16.0738 16.9028 16.6434 16.3578 16.801C16.2651 16.8278 16.169 16.8414 16.0724 16.8414H1.91857C1.35598 16.8414 0.899902 16.3853 0.899902 15.8227C0.899902 15.7434 0.909173 15.6644 0.927488 15.5873L0.93956 15.5412C0.972672 15.4261 1.00818 15.3119 1.04602 15.1988C1.86483 12.7523 3.77818 10.8081 6.2038 9.94935C5.04316 9.0781 4.29233 7.69026 4.29233 6.12706C4.29233 3.48875 6.4311 1.34998 9.06942 1.34998ZM9.00117 10.9724C6.16785 10.9724 3.66499 12.7017 2.62214 15.264L2.59168 15.34H15.4107L15.4101 15.3388C14.3965 12.7624 11.9142 11.0092 9.09046 10.973L9.00117 10.9724ZM9.06942 2.85135C7.26029 2.85135 5.7937 4.31793 5.7937 6.12706C5.7937 7.93619 7.26029 9.40278 9.06942 9.40278C10.8785 9.40278 12.3451 7.93619 12.3451 6.12706C12.3451 4.31793 10.8785 2.85135 9.06942 2.85135Z" fill="#56606D"/>
</svg>

(new SVG asset, 493 B; path not shown)
@@ -0,0 +1,3 @@
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M10.5335 14.5335C10.2461 14.821 9.78432 14.828 9.48835 14.5546L9.46647 14.5335L1.221 6.28806C0.926335 5.9934 0.926335 5.51566 1.221 5.221C1.50847 4.93352 1.9702 4.92651 2.26617 5.19996L2.28806 5.221L9.99991 12.933L17.7119 5.221C17.9994 4.93352 18.4611 4.92651 18.7571 5.19996L18.779 5.221C19.0665 5.50847 19.0735 5.9702 18.8 6.26617L18.779 6.28806L10.5335 14.5335Z" fill="black"/>
</svg>

(new SVG asset, 660 B; path not shown)
@@ -0,0 +1,3 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M10.9809 3.39991L11.0028 3.42094L14.9791 7.39723C15.2665 7.68463 15.2735 8.14625 15.0001 8.44216L14.9791 8.46404L11.0028 12.4403C10.7082 12.7349 10.2306 12.7349 9.93596 12.4403C9.64856 12.1529 9.64155 11.6913 9.91493 11.3954L9.93596 11.3735L12.6243 8.68493L1.55435 8.68498C1.13774 8.68498 0.800003 8.34725 0.800003 7.93063C0.800003 7.52418 1.12146 7.19281 1.52401 7.17688L1.55435 7.17628L12.6243 7.17623L9.93596 4.48775C9.64856 4.20034 9.64155 3.73872 9.91493 3.44282L9.93596 3.42094C10.2234 3.13353 10.685 3.12652 10.9809 3.39991Z" fill="black"/>
</svg>

(new SVG asset, 2.2 KiB; path not shown)
@@ -0,0 +1,4 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect x="0.5" y="0.5" width="31" height="31" rx="7.5" fill="white" stroke="#EBEEF1"/>
<path d="M23.0565 6.96903C23.1425 7.03306 23.2123 7.11631 23.2603 7.21212C23.3084 7.30794 23.3334 7.41367 23.3333 7.52086V19.8959C23.3334 19.92 23.3321 19.9442 23.3297 19.9683C23.3649 20.692 23.1542 21.4064 22.7317 21.995C22.3093 22.5837 21.7 23.0121 21.0031 23.2103C20.3061 23.4086 19.5626 23.3651 18.8935 23.087C18.2245 22.8089 17.6693 22.3124 17.3183 21.6785C16.9673 21.0446 16.8413 20.3106 16.9606 19.5959C17.08 18.8812 17.4378 18.228 17.9757 17.7425C18.5137 17.2571 19.2001 16.9681 19.9232 16.9225C20.6464 16.8769 21.3637 17.0774 21.9583 17.4914V12.1115L14.1667 14.449V21.7292C14.1667 21.7534 14.1655 21.7776 14.163 21.8016C14.1983 22.5253 13.9875 23.2397 13.5651 23.8284C13.1426 24.4171 12.5333 24.8454 11.8364 25.0437C11.1395 25.242 10.3959 25.1985 9.72688 24.9203C9.05781 24.6422 8.50259 24.1458 8.15162 23.5118C7.80065 22.8779 7.67459 22.1439 7.79397 21.4292C7.91335 20.7145 8.27113 20.0613 8.80907 19.5759C9.34701 19.0905 10.0334 18.8014 10.7566 18.7558C11.4797 18.7102 12.197 18.9108 12.7917 19.3248V10.2709C12.7917 10.1231 12.8393 9.97933 12.9275 9.86078C13.0157 9.74224 13.1397 9.65524 13.2812 9.6127L22.4478 6.8627C22.5505 6.8317 22.6591 6.82517 22.7648 6.84363C22.8705 6.8621 22.9704 6.90504 23.0565 6.96903ZM14.1667 13.0135L21.9583 10.676V8.44486L14.1667 10.7824V13.0135ZM10.9583 20.125C10.4721 20.125 10.0058 20.3182 9.66196 20.662C9.31814 21.0058 9.12499 21.4721 9.12499 21.9584C9.12499 22.4446 9.31814 22.9109 9.66196 23.2547C10.0058 23.5985 10.4721 23.7917 10.9583 23.7917C11.4446 23.7917 11.9109 23.5985 12.2547 23.2547C12.5985 22.9109 12.7917 22.4446 12.7917 21.9584C12.7917 21.4721 12.5985 21.0058 12.2547 20.662C11.9109 20.3182 11.4446 20.125 10.9583 20.125ZM18.2917 20.125C18.2917 20.6113 18.4848 21.0776 18.8286 21.4214C19.1724 21.7652 19.6388 21.9584 20.125 21.9584C20.6112 21.9584 21.0775 21.7652 21.4214 21.4214C21.7652 21.0776 21.9583 20.6113 21.9583 20.125C21.9583 19.6388 21.7652 19.1725 21.4214 18.8287C21.0775 18.4848 20.6112 18.2917 20.125 18.2917C19.6388 18.2917 19.1724 18.4848 18.8286 18.8287C18.4848 19.1725 18.2917 19.6388 18.2917 20.125Z" fill="#FF8A00"/>
</svg>

(new SVG asset, 2.3 KiB; path not shown)
@@ -0,0 +1,5 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect width="32" height="32" rx="8" fill="#FFA800" fill-opacity="0.1"/>
<rect x="0.5" y="0.5" width="31" height="31" rx="7.5" stroke="#FFA800" stroke-opacity="0.2"/>
<path d="M23.0565 6.96903C23.1425 7.03306 23.2123 7.11631 23.2603 7.21212C23.3084 7.30794 23.3334 7.41367 23.3333 7.52086V19.8959C23.3334 19.92 23.3321 19.9442 23.3297 19.9683C23.3649 20.692 23.1542 21.4064 22.7317 21.995C22.3093 22.5837 21.7 23.0121 21.0031 23.2103C20.3061 23.4086 19.5626 23.3651 18.8936 23.087C18.2245 22.8089 17.6693 22.3124 17.3183 21.6785C16.9673 21.0446 16.8413 20.3106 16.9606 19.5959C17.08 18.8812 17.4378 18.228 17.9757 17.7425C18.5137 17.2571 19.2001 16.9681 19.9232 16.9225C20.6464 16.8769 21.3637 17.0774 21.9583 17.4914V12.1115L14.1667 14.449V21.7292C14.1667 21.7534 14.1655 21.7776 14.163 21.8016C14.1983 22.5253 13.9875 23.2397 13.5651 23.8284C13.1426 24.4171 12.5333 24.8454 11.8364 25.0437C11.1395 25.242 10.396 25.1985 9.72688 24.9203C9.05782 24.6422 8.5026 24.1458 8.15163 23.5118C7.80066 22.8779 7.6746 22.1439 7.79398 21.4292C7.91336 20.7145 8.27114 20.0613 8.80908 19.5759C9.34702 19.0905 10.0334 18.8014 10.7566 18.7558C11.4797 18.7102 12.197 18.9108 12.7917 19.3248V10.2709C12.7917 10.1231 12.8393 9.97933 12.9275 9.86078C13.0157 9.74224 13.1397 9.65524 13.2812 9.6127L22.4478 6.8627C22.5505 6.8317 22.6591 6.82517 22.7648 6.84363C22.8705 6.8621 22.9704 6.90504 23.0565 6.96903V6.96903ZM14.1667 13.0135L21.9583 10.676V8.44486L14.1667 10.7824V13.0135ZM10.9583 20.125C10.4721 20.125 10.0058 20.3182 9.66197 20.662C9.31815 21.0058 9.125 21.4721 9.125 21.9584C9.125 22.4446 9.31815 22.9109 9.66197 23.2547C10.0058 23.5985 10.4721 23.7917 10.9583 23.7917C11.4446 23.7917 11.9109 23.5985 12.2547 23.2547C12.5985 22.9109 12.7917 22.4446 12.7917 21.9584C12.7917 21.4721 12.5985 21.0058 12.2547 20.662C11.9109 20.3182 11.4446 20.125 10.9583 20.125ZM18.2917 20.125C18.2917 20.6113 18.4848 21.0776 18.8286 21.4214C19.1725 21.7652 19.6388 21.9584 20.125 21.9584C20.6112 21.9584 21.0775 21.7652 21.4214 21.4214C21.7652 21.0776 21.9583 20.6113 21.9583 20.125C21.9583 19.6388 21.7652 19.1725 21.4214 18.8287C21.0775 18.4848 20.6112 18.2917 20.125 18.2917C19.6388 18.2917 19.1725 18.4848 18.8286 18.8287C18.4848 19.1725 18.2917 19.6388 18.2917 20.125Z" fill="#FFA800"/>
</svg>

(new SVG asset, 678 B; path not shown)
@@ -0,0 +1,5 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect width="32" height="32" rx="8" fill="#537CFF" fill-opacity="0.1"/>
<rect x="0.5" y="0.5" width="31" height="31" rx="7.5" stroke="#537CFF" stroke-opacity="0.2"/>
<path d="M11.6989 16.4802C11.4402 16.2215 11.4339 15.8059 11.68 15.5395L11.6989 15.5198L19.1198 8.09892C19.385 7.83373 19.815 7.83373 20.0802 8.09892C20.3389 8.35765 20.3452 8.77321 20.0991 9.03958L20.0802 9.05928L13.1393 15.9999L20.0802 22.9408C20.3389 23.1995 20.3452 23.6151 20.0991 23.8814L20.0802 23.9011C19.8215 24.1599 19.4059 24.1662 19.1395 23.9201L19.1198 23.9011L11.6989 16.4802Z" fill="#537CFF"/>
</svg>

(new SVG asset, 904 B; path not shown)
@@ -0,0 +1,3 @@
<svg width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M13.9519 0.900024C14.9905 0.900024 15.8325 1.74202 15.8325 2.78067V16.5214C15.8325 16.8409 15.5734 17.1 15.2539 17.1C15.0952 17.1 14.9436 17.0349 14.8343 16.9199L9.44551 11.2481C9.11532 10.9006 8.56592 10.8865 8.21839 11.2167C8.20766 11.2269 8.19719 11.2374 8.18699 11.2481L2.79816 16.9199C2.57803 17.1516 2.21176 17.161 1.98007 16.9409C1.86509 16.8316 1.79999 16.68 1.79999 16.5214V2.78067C1.79999 1.74202 2.64198 0.900024 3.68064 0.900024H13.9519ZM13.9519 2.49134H3.68064C3.52811 2.49134 3.40314 2.60937 3.3921 2.75908L3.39131 2.78067V13.9854L7.03335 10.152C7.04779 10.1368 7.06243 10.1218 7.07726 10.107L7.1223 10.0631C8.09402 9.13985 9.62275 9.16649 10.5619 10.1137L10.5992 10.152L14.2412 13.9853V2.78067C14.2412 2.62814 14.1232 2.50318 13.9735 2.49214L13.9519 2.49134Z" fill="#56606D"/>
</svg>

(new SVG asset, 2.3 KiB; path not shown)
@@ -0,0 +1,6 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect x="0.5" y="0.5" width="31" height="31" rx="7.5" fill="white" stroke="#EBEEF1"/>
<path d="M10 12L16 13.2L22 12L20 22.5L18.5 23L16 23.4L13.5 23L12 22.5L10 12Z" fill="#D7E8FF"/>
<ellipse cx="16" cy="11" rx="6" ry="2" fill="white"/>
<path d="M23.3549 14.5467C24.762 15.9538 24.7748 18.227 23.3778 19.624C22.8117 20.1901 22.1018 20.5247 21.3639 20.6289L21.1106 22.0638C21.092 23.1897 18.7878 24.1 15.9481 24.1C13.1254 24.1 10.8318 23.2006 10.7863 22.0841L10.7858 22.0638L8.84465 11.066C8.8228 10.9882 8.80882 10.9095 8.80303 10.8299L8.79999 10.8122L8.80189 10.8123C8.80062 10.7903 8.79999 10.7682 8.79999 10.746C8.79999 9.17422 12.0003 7.90002 15.9481 7.90002C19.8959 7.90002 23.0962 9.17422 23.0962 10.746C23.0962 10.7682 23.0955 10.7903 23.0943 10.8123L23.0962 10.8122L23.093 10.8311C23.0872 10.9098 23.0734 10.9876 23.0519 11.0645L22.5749 13.7666L23.3549 14.5467ZM21.2961 12.6344C19.9867 13.2218 18.076 13.592 15.9481 13.592C13.8203 13.592 11.9096 13.2219 10.6001 12.6344L12.0072 20.6077L12.2373 21.8286L12.2586 21.8452C12.3789 21.9354 12.5651 22.0371 12.807 22.1351L12.8561 22.1546C13.6355 22.4594 14.7462 22.6439 15.9481 22.6439C17.1569 22.6439 18.2733 22.4573 19.0527 22.1497C19.3337 22.0388 19.5431 21.9223 19.6661 21.8231L19.6761 21.8148L19.9019 20.5348C19.3338 20.3787 18.7955 20.0812 18.3429 19.6429L18.3004 19.6011L15.3749 16.6756C15.0906 16.3913 15.0906 15.9303 15.3749 15.646C15.6523 15.3686 16.0978 15.3618 16.3834 15.6257L16.4045 15.646L19.33 18.5715C19.5717 18.8132 19.8555 18.9861 20.1569 19.0901L21.2961 12.6344ZM22.2661 15.517L21.6408 19.0597C21.8989 18.9575 22.1402 18.8024 22.3482 18.5944C23.1641 17.7784 23.1664 16.4494 22.3549 15.6065L22.3253 15.5763L22.2661 15.517ZM15.9481 9.35612C14.2013 9.35612 12.5813 9.62893 11.4322 10.0864C10.9385 10.283 10.5712 10.4995 10.3598 10.6985C10.3463 10.7112 10.334 10.7232 10.3228 10.7347L10.3122 10.7459L10.3314 10.7661L10.3598 10.7936C10.5712 10.9926 10.9385 11.2091 11.4322 11.4056C12.5813 11.8631 14.2013 12.1359 15.9481 12.1359C17.6949 12.1359 19.3149 11.8631 20.4639 11.4056C20.9576 11.2091 21.325 10.9926 21.5364 10.7936C21.5499 10.7809 21.5622 10.7688 21.5733 10.7574L21.5841 10.7459L21.5647 10.726L21.5364 10.6985C21.325 10.4995 20.9576 10.283 20.4639 10.0864C19.3149 9.62893 17.6949 9.35612 15.9481 9.35612Z" fill="#0149FF"/>
</svg>

(new SVG asset, 2.4 KiB; path not shown)
@@ -0,0 +1,6 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect width="32" height="32" rx="8" fill="#0149FF" fill-opacity="0.05"/>
<rect x="0.5" y="0.5" width="31" height="31" rx="7.5" stroke="#0149FF" stroke-opacity="0.2"/>
<path d="M10 12L16 13.2L22 12L20 22.5L18.5 23L16 23.4L13.5 23L12 22.5L10 12Z" fill="#0149FF" fill-opacity="0.2"/>
<path d="M23.3549 14.5467C24.762 15.9538 24.7748 18.227 23.3778 19.624C22.8117 20.1901 22.1018 20.5247 21.3639 20.6289L21.1106 22.0638C21.092 23.1897 18.7878 24.1 15.9481 24.1C13.1254 24.1 10.8319 23.2006 10.7863 22.0841L10.7858 22.0638L8.84466 11.066C8.82281 10.9882 8.80883 10.9095 8.80304 10.8299L8.8 10.8122L8.8019 10.8123C8.80063 10.7903 8.8 10.7682 8.8 10.746C8.8 9.17422 12.0003 7.90002 15.9481 7.90002C19.8959 7.90002 23.0962 9.17422 23.0962 10.746C23.0962 10.7682 23.0955 10.7903 23.0943 10.8123L23.0962 10.8122L23.093 10.8311C23.0872 10.9098 23.0734 10.9876 23.0519 11.0645L22.5749 13.7666L23.3549 14.5467ZM21.2962 12.6344C19.9867 13.2218 18.076 13.592 15.9481 13.592C13.8203 13.592 11.9096 13.2219 10.6001 12.6344L12.0072 20.6077L12.2373 21.8286L12.2586 21.8452C12.3789 21.9354 12.5652 22.0371 12.807 22.1351L12.8561 22.1546C13.6355 22.4594 14.7462 22.6439 15.9481 22.6439C17.1569 22.6439 18.2733 22.4573 19.0528 22.1497C19.3337 22.0388 19.5431 21.9223 19.6661 21.8231L19.6761 21.8148L19.9019 20.5348C19.3338 20.3787 18.7955 20.0812 18.3429 19.6429L18.3004 19.6011L15.3749 16.6756C15.0906 16.3913 15.0906 15.9303 15.3749 15.646C15.6523 15.3686 16.0978 15.3618 16.3834 15.6257L16.4045 15.646L19.33 18.5715C19.5717 18.8132 19.8555 18.9861 20.1569 19.0901L21.2962 12.6344ZM22.2661 15.517L21.6408 19.0597C21.8989 18.9575 22.1402 18.8024 22.3482 18.5944C23.1641 17.7784 23.1664 16.4494 22.355 15.6065L22.3253 15.5763L22.2661 15.517ZM15.9481 9.35612C14.2013 9.35612 12.5813 9.62893 11.4322 10.0864C10.9385 10.283 10.5712 10.4995 10.3598 10.6985C10.3463 10.7112 10.334 10.7232 10.3228 10.7347L10.3122 10.7459L10.3314 10.7661L10.3598 10.7936C10.5712 10.9926 10.9385 11.2091 11.4322 11.4056C12.5813 11.8631 14.2013 12.1359 15.9481 12.1359C17.6949 12.1359 19.3149 11.8631 20.4639 11.4056C20.9577 11.2091 21.325 10.9926 21.5364 10.7936C21.5499 10.7809 21.5622 10.7688 21.5733 10.7574L21.5841 10.7459L21.5647 10.726L21.5364 10.6985C21.325 10.4995 20.9577 10.283 20.4639 10.0864C19.3149 9.62893 17.6949 9.35612 15.9481 9.35612Z" fill="#0149FF"/>
</svg>

(new SVG asset, 2.1 KiB; path not shown)
@@ -0,0 +1,3 @@
<svg width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M16.3549 7.54666C17.762 8.95376 17.7749 11.227 16.3778 12.624C15.8118 13.1901 15.1018 13.5247 14.3639 13.6289L14.1106 15.0638C14.092 16.1897 11.7878 17.1 8.94814 17.1C6.12547 17.1 3.83191 16.2006 3.78632 15.0841L3.78589 15.0638L1.84471 4.06597C1.82286 3.98819 1.80888 3.90946 1.8031 3.82992L1.80005 3.81221L1.80195 3.81231C1.80068 3.79028 1.80005 3.76818 1.80005 3.74602C1.80005 2.17422 5.00036 0.900024 8.94814 0.900024C12.8959 0.900024 16.0962 2.17422 16.0962 3.74602C16.0962 3.76818 16.0956 3.79028 16.0943 3.81231L16.0962 3.81221L16.0931 3.83111C16.0873 3.90975 16.0735 3.98759 16.052 4.06451L15.5749 6.76662L16.3549 7.54666ZM14.2962 5.63437C12.9868 6.22183 11.076 6.59202 8.94814 6.59202C6.82032 6.59202 4.90965 6.22185 3.6002 5.63443L5.00729 13.6077L5.23735 14.8286L5.25867 14.8452C5.37899 14.9354 5.56521 15.0371 5.80702 15.1351L5.85612 15.1546C6.63558 15.4594 7.74625 15.6439 8.94814 15.6439C10.157 15.6439 11.2733 15.4573 12.0528 15.1497C12.3338 15.0388 12.5432 14.9223 12.6661 14.8231L12.6761 14.8148L12.902 13.5348C12.3339 13.3787 11.7956 13.0812 11.3429 12.6429L11.3005 12.6011L8.37494 9.67559C8.09062 9.39127 8.09062 8.93029 8.37494 8.64597C8.65232 8.36859 9.09785 8.36182 9.38344 8.62568L9.40455 8.64597L12.3301 11.5715C12.5718 11.8132 12.8556 11.9861 13.157 12.0901L14.2962 5.63437ZM15.2661 8.51698L14.6409 12.0597C14.899 11.9575 15.1403 11.8024 15.3482 11.5944C16.1642 10.7784 16.1664 9.44942 15.355 8.60652L15.3253 8.57627L15.2661 8.51698ZM8.94814 2.35612C7.20131 2.35612 5.58131 2.62893 4.43229 3.08641C3.93857 3.28298 3.57123 3.49947 3.35982 3.69848C3.34635 3.71116 3.33405 3.72325 3.32289 3.73469L3.31227 3.74589L3.33148 3.76606L3.35982 3.79357C3.57123 3.99258 3.93857 4.20906 4.43229 4.40564C5.58131 4.86312 7.20131 5.13593 8.94814 5.13593C10.695 5.13593 12.315 4.86312 13.464 4.40564C13.9577 4.20906 14.325 3.99258 14.5365 3.79357C14.5499 3.78089 14.5622 3.7688 14.5734 3.75735L14.5841 3.74589L14.5648 3.72599L14.5365 3.69848C14.325 3.49947 13.9577 3.28298 13.464 3.08641C12.315 2.62893 10.695 2.35612 8.94814 2.35612Z" fill="#56606D"/>
</svg>

satellite/admin/back-office/ui/src/assets/icon-card.svg (new file, 3 lines, 1.6 KiB)
@@ -0,0 +1,3 @@
<svg width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M14.6971 2.69995L14.8103 2.70051C15.5609 2.70821 15.8499 2.79454 16.1411 2.95031C16.4466 3.11366 16.6863 3.35339 16.8496 3.65884L16.8719 3.7015C17.0209 3.99363 17.1 4.30614 17.1 5.1028V12.2128C17.1 13.0483 17.013 13.3513 16.8496 13.6568C16.6863 13.9622 16.4466 14.2019 16.1411 14.3653L16.0984 14.3876C15.8063 14.5366 15.4938 14.6157 14.6971 14.6157H3.30284L3.18966 14.6151C2.43913 14.6074 2.15013 14.5211 1.85888 14.3653C1.55343 14.2019 1.31371 13.9622 1.15035 13.6568L1.12805 13.6141C0.979085 13.322 0.899994 13.0095 0.899994 12.2128V5.1028L0.900557 4.98962C0.908252 4.23909 0.994587 3.95009 1.15035 3.65884C1.31371 3.35339 1.55343 3.11366 1.85888 2.95031L1.90154 2.92801C2.19367 2.77904 2.50618 2.69995 3.30284 2.69995H14.6971ZM15.6273 8.72474H2.37286L2.37291 12.2988C2.37509 12.7639 2.39618 12.8634 2.44902 12.9622C2.47512 13.011 2.50462 13.0405 2.55342 13.0666L2.57984 13.0799C2.66664 13.1203 2.77736 13.1384 3.13851 13.1422L3.30284 13.1429L14.7831 13.1427C15.2483 13.1406 15.3478 13.1195 15.4466 13.0666C15.4954 13.0405 15.5249 13.011 15.551 12.9622L15.5642 12.9358C15.6102 12.837 15.6273 12.7073 15.6273 12.2128V8.72474ZM3.25888 4.17273C2.75755 4.17384 2.65513 4.19458 2.55342 4.24898C2.50462 4.27508 2.47512 4.30457 2.44902 4.35338L2.43576 4.3798C2.39539 4.4666 2.37729 4.57732 2.3735 4.93847L2.37272 5.1028V6.31483H15.6273V5.05884C15.6261 4.55751 15.6054 4.45509 15.551 4.35338C15.5249 4.30457 15.4954 4.27508 15.4466 4.24898L15.4201 4.23572C15.3274 4.19256 15.2072 4.17486 14.7831 4.17287L3.25888 4.17273Z" fill="#56606D"/>
</svg>

(new SVG asset, 544 B; path not shown)
@@ -0,0 +1,3 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M14.0566 3.47049C14.4084 3.82234 14.417 4.38746 14.0823 4.74971L14.0566 4.77649L7.13827 11.6948C7.00266 11.8304 6.8361 11.9169 6.66103 11.9539C6.35998 12.0545 6.01478 11.991 5.76673 11.7589L5.75596 11.7487L1.87048 7.86338C1.50984 7.50274 1.50984 6.91803 1.87048 6.55739C2.22232 6.20555 2.78744 6.19696 3.14969 6.53165L3.17647 6.55739L6.42021 9.80089L12.7506 3.47049C13.1112 3.10985 13.696 3.10985 14.0566 3.47049Z" fill="#00D36E"/>
</svg>

satellite/admin/back-office/ui/src/assets/icon-close.svg (new file, 3 lines, 700 B)
@@ -0,0 +1,3 @@
<svg width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M16.8429 1.15699C17.1857 1.49977 17.1857 2.05553 16.8429 2.39831L10.2412 8.99979L16.7865 15.5451C17.1293 15.8878 17.1293 16.4436 16.7865 16.7864C16.4437 17.1292 15.888 17.1292 15.5452 16.7864L8.99991 10.2411L2.39844 16.8428C2.05565 17.1856 1.49989 17.1856 1.15711 16.8428C0.814329 16.5 0.814329 15.9443 1.15711 15.6015L7.7587 8.99968L1.21353 2.45474C0.870753 2.11195 0.870753 1.5562 1.21353 1.21341C1.55632 0.870631 2.11208 0.870631 2.45486 1.21341L9.00002 7.75835L15.6016 1.15699C15.9444 0.814207 16.5002 0.814207 16.8429 1.15699Z" fill="white"/>
</svg>

(new SVG asset, 875 B; path not shown)
@@ -0,0 +1,3 @@
<svg width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M8.9999 2.69995C13.4734 2.69995 17.0999 6.29647 17.0999 10.733C17.0999 12.3941 16.5915 13.9374 15.7207 15.2183H2.27915C1.40828 13.9374 0.899902 12.3941 0.899902 10.733C0.899902 6.29647 4.5264 2.69995 8.9999 2.69995ZM8.9999 4.17268C5.33704 4.17268 2.37263 7.11259 2.37263 10.733C2.37263 11.765 2.61287 12.7593 3.06475 13.656L3.11087 13.7455H14.8889L14.8921 13.7398C15.3587 12.8462 15.6132 11.8525 15.6266 10.8192L15.6272 10.733C15.6272 7.11259 12.6628 4.17268 8.9999 4.17268ZM11.7821 8.02523C12.0678 8.29853 12.0855 8.74671 11.8279 9.04146L11.8052 9.06634L9.53203 11.4428C9.25092 11.7367 8.7848 11.7471 8.49091 11.466C8.20519 11.1927 8.18746 10.7445 8.44502 10.4497L8.46778 10.4248L10.7409 8.04836C11.022 7.75448 11.4882 7.74412 11.7821 8.02523Z" fill="#56606D"/>
</svg>

satellite/admin/back-office/ui/src/assets/icon-docs.svg (new file, 3 lines, 4.7 KiB)
@@ -0,0 +1,3 @@
<svg width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M14.128 3.60013C14.9368 3.60427 15.3608 3.6899 15.8016 3.92564C16.2192 4.14899 16.551 4.48078 16.7743 4.89841C17.0161 5.35051 17.1 5.78485 17.1 6.63492V11.9189C17.0957 12.7277 17.0101 13.1517 16.7743 13.5924C16.551 14.0101 16.2192 14.3419 15.8016 14.5652C15.3721 14.7949 14.9586 14.8821 14.1895 14.8902L14.0651 14.8909H3.93493C3.08487 14.8909 2.65053 14.807 2.19843 14.5652C1.7808 14.3419 1.44901 14.0101 1.22565 13.5924C0.995959 13.163 0.908776 12.7495 0.900632 11.9804L0.899994 11.8559V6.63492C0.899994 5.78485 0.983869 5.35051 1.22565 4.89841C1.44901 4.48078 1.7808 4.14899 2.19843 3.92564C2.65053 3.68385 3.08487 3.59998 3.93493 3.59998L14.128 3.60013ZM8.26363 4.94998L3.88478 4.95007C3.31066 4.95218 3.08426 4.99165 2.87239 5.09683L2.83509 5.11608C2.65272 5.21361 2.51363 5.3527 2.4161 5.53507C2.29321 5.76486 2.24999 5.98866 2.24999 6.63492V11.9061C2.2522 12.4811 2.2918 12.7073 2.39735 12.9195L2.4161 12.9558C2.51363 13.1382 2.65272 13.2772 2.83509 13.3748C3.06487 13.4977 3.28867 13.5409 3.93493 13.5409H8.26363V4.94998ZM14.1152 4.95007L9.61363 4.94998V13.5408L14.1152 13.5408C14.6728 13.5387 14.9024 13.5014 15.1093 13.4029L15.1286 13.3935L15.1649 13.3748C15.3473 13.2772 15.4864 13.1382 15.5839 12.9558C15.7068 12.726 15.75 12.5022 15.75 11.8559L15.7499 6.58476C15.7478 6.01064 15.7083 5.78424 15.6031 5.57238L15.5839 5.53507C15.4864 5.3527 15.3473 5.21361 15.1649 5.11608L15.1451 5.10567L15.1286 5.09734C15.1111 5.08865 15.0936 5.08041 15.0758 5.0726C15.074 5.07185 15.0722 5.07107 15.0704 5.07029L15.0758 5.0726C15.0709 5.07049 15.0661 5.0684 15.0612 5.06635L15.0704 5.07029C15.0625 5.06688 15.0546 5.06355 15.0465 5.0603L15.0612 5.06635C15.0542 5.0634 15.0471 5.06052 15.04 5.0577L15.0465 5.0603C15.0423 5.05859 15.038 5.0569 15.0338 5.05524L15.04 5.0577C15.0267 5.0524 15.0131 5.04733 14.9992 5.04247C14.9969 5.04166 14.9945 5.04084 14.9921 5.04004L14.9992 5.04247C14.9909 5.03956 14.9824 5.03672 14.9739 5.03396C14.9476 5.02548 14.9201 5.01774 14.8911 5.01068C14.8871 5.00971 14.8831 5.00876 14.8791 5.00782L14.8911 5.01068C14.8824 5.00857 14.8736 5.00651 14.8646 5.00453L14.8791 5.00782C14.8713 5.006 14.8634 5.00423 14.8554 5.00251L14.8646 5.00453C14.8565 5.00272 14.8482 5.00096 14.8397 4.99926L14.8554 5.00251C14.848 5.00094 14.8406 4.99941 14.833 4.99791L14.8397 4.99926C14.8154 4.99433 14.79 4.98982 14.7632 4.9857C14.7582 4.98493 14.7531 4.98418 14.7481 4.98344L14.7632 4.9857C14.7548 4.98441 14.7462 4.98316 14.7376 4.98194L14.7481 4.98344C14.7246 4.98004 14.7002 4.97692 14.6746 4.97408C14.6659 4.97311 14.657 4.97217 14.648 4.97127C14.6355 4.97001 14.6228 4.96882 14.6098 4.96769C14.5937 4.96629 14.5772 4.96498 14.5603 4.96375C14.5552 4.96339 14.5501 4.96303 14.5449 4.96268L14.5603 4.96375C14.5479 4.96286 14.5354 4.96202 14.5225 4.96122L14.5449 4.96268C14.5333 4.96189 14.5215 4.96114 14.5095 4.96044L14.5225 4.96122C14.5116 4.96055 14.5006 4.9599 14.4893 4.95929L14.5095 4.96044C14.4818 4.9588 14.4528 4.95736 14.4225 4.95612C14.4214 4.95608 14.4202 4.95603 14.4189 4.95598L14.4225 4.95612C14.4109 4.95565 14.399 4.9552 14.387 4.95478L14.4189 4.95598C14.3749 4.9542 14.3281 4.95284 14.2784 4.95187C14.2563 4.95144 14.2337 4.95108 14.2104 4.9508L14.1152 4.95007ZM6.29999 10.7795C6.67279 10.7795 6.97499 11.0817 6.97499 11.4545C6.97499 11.8182 6.68735 12.1147 6.32714 12.129L6.29999 12.1295H4.0909C3.71811 12.1295 3.4159 11.8273 3.4159 11.4545C3.4159 11.0908 3.70355 10.7943 4.06375 10.7801L4.0909 10.7795H6.29999ZM13.1114 10.7795C13.4841 10.7795 13.7864 11.0817 13.7864 11.4545C13.7864 11.8182 13.4987 12.1147 13.1385 
12.129L13.1114 12.1295H11.5159C11.1431 12.1295 10.8409 11.8273 10.8409 11.4545C10.8409 11.0908 11.1285 10.7943 11.4888 10.7801L11.5159 10.7795H13.1114ZM6.29999 8.57043C6.67279 8.57043 6.97499 8.87264 6.97499 9.24543C6.97499 9.60913 6.68735 9.90565 6.32714 9.91989L6.29999 9.92043H4.0909C3.71811 9.92043 3.4159 9.61822 3.4159 9.24543C3.4159 8.88173 3.70355 8.58521 4.06375 8.57097L4.0909 8.57043H6.29999ZM13.725 8.57043C14.0978 8.57043 14.4 8.87264 14.4 9.24543C14.4 9.60913 14.1123 9.90565 13.7521 9.91989L13.725 9.92043H11.5159C11.1431 9.92043 10.8409 9.61822 10.8409 9.24543C10.8409 8.88173 11.1285 8.58521 11.4888 8.57097L11.5159 8.57043H13.725ZM6.29999 6.36134C6.67279 6.36134 6.97499 6.66355 6.97499 7.03634C6.97499 7.40004 6.68735 7.69656 6.32714 7.7108L6.29999 7.71134H4.0909C3.71811 7.71134 3.4159 7.40913 3.4159 7.03634C3.4159 6.67264 3.70355 6.37612 4.06375 6.36188L4.0909 6.36134H6.29999ZM13.725 6.36134C14.0978 6.36134 14.4 6.66355 14.4 7.03634C14.4 7.40004 14.1123 7.69656 13.7521 7.7108L13.725 7.71134H11.5159C11.1431 7.71134 10.8409 7.40913 10.8409 7.03634C10.8409 6.67264 11.1285 6.37612 11.4888 6.36188L11.5159 6.36134H13.725Z" fill="#56606D"/>
</svg>

(new SVG asset, 1.3 KiB; path not shown)
@@ -0,0 +1,3 @@
<svg width="25" height="24" viewBox="0 0 25 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M11.6275 16.4385L11.5989 16.4109L6.51469 11.3268C6.1286 10.9407 6.1286 10.3147 6.51469 9.92861C6.89136 9.55194 7.49635 9.54276 7.88416 9.90105L7.91283 9.92861L11.3423 13.3582V2.18835C11.3423 1.64234 11.785 1.19971 12.331 1.19971C12.8637 1.19971 13.298 1.62101 13.3188 2.14858L13.3196 2.18835V13.2924L16.683 9.92861C17.0597 9.55194 17.6647 9.54276 18.0525 9.90105L18.0812 9.92861C18.4579 10.3053 18.467 10.9103 18.1087 11.2981L18.0812 11.3268L12.997 16.4109C12.6203 16.7876 12.0153 16.7968 11.6275 16.4385ZM23.3012 20.8529C23.3012 21.399 22.8585 21.8416 22.3125 21.8416H2.71949C2.17348 21.8416 1.73085 21.399 1.73085 20.8529C1.73085 20.3069 2.17348 19.8643 2.71949 19.8643H22.3125C22.8585 19.8643 23.3012 20.3069 23.3012 20.8529ZM2.68981 21.7665C2.1438 21.7665 1.70117 21.3239 1.70117 20.7779V16.8233C1.70117 16.2773 2.1438 15.8347 2.68981 15.8347C3.23582 15.8347 3.67845 16.2773 3.67845 16.8233V20.7779C3.67845 21.3239 3.23582 21.7665 2.68981 21.7665ZM22.2829 21.7665C21.7368 21.7665 21.2942 21.3239 21.2942 20.7779V16.8233C21.2942 16.2773 21.7368 15.8347 22.2829 15.8347C22.8289 15.8347 23.2715 16.2773 23.2715 16.8233V20.7779C23.2715 21.3239 22.8289 21.7665 22.2829 21.7665Z" fill="white"/>
</svg>

(new SVG asset, 513 B; path not shown)
@@ -0,0 +1,3 @@
<svg width="10" height="10" viewBox="0 0 10 10" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M5.26677 7.26676C5.12303 7.4105 4.89216 7.41401 4.74418 7.27728L4.73323 7.26676L0.610498 3.14403C0.463168 2.9967 0.463168 2.75783 0.610498 2.6105C0.754235 2.46676 0.985101 2.46325 1.13309 2.59998L1.14403 2.6105L4.99996 6.46651L8.85597 2.6105C8.99971 2.46676 9.23057 2.46326 9.37856 2.59998L9.3895 2.6105C9.53324 2.75423 9.53674 2.9851 9.40002 3.13309L9.3895 3.14403L5.26677 7.26676Z" fill="#56606D"/>
</svg>