Compare commits


77 Commits

Author SHA1 Message Date
977a27dde2 HACK: prebuild storagenode gui 2023-08-23 10:53:55 +01:00
igor gaidaienko
703fd437fe
release v1.84.2 2023-08-01 20:24:10 +03:00
Márton Elek
bf0f3b829f satellite/overlay: NR placement should exclude nodes without geofencing information
https://github.com/storj/storj-private/issues/378

Change-Id: If2af02083496e5a8eefe27beabb406388ee50644
2023-08-01 15:18:58 +03:00
Cameron
7f499e44a6 web/satellite: add indeterminate progress bar
Display indeterminate progress bars in upload modal if progress is less
than 1.

Change-Id: Icdad8e59914985f3ed8fd25dd01dba7e9ff88cf0
2023-07-24 12:58:35 +03:00
Wilfred Asomani
af93d2090b satellite/admin: add endpoint to unwarn user
This change enables the admin UI to remove the warning status of users.

resolves: storj-private/issues/342

Change-Id: Ib960ffb33fdabc045884ce7fa2c55c3553db0fb0
2023-07-24 12:58:35 +03:00
Vitalii
7059e10dfc web/satellite: update wording for upgrade account token flow
Update wording to reflect the latest auto-upgrade account flow.

new wording:
Send more than $10 in STORJ Tokens to the following deposit address to upgrade to a Pro account.
Your account will be upgraded after your transaction receives 15 confirmations.
If your account is not automatically upgraded, please fill out this limit increase request form.

Issue:
https://github.com/storj/storj-private/issues/367

Change-Id: I46fcb9243722313b98e5c54e8194d6152f7e1631
2023-07-24 12:58:35 +03:00
Clement Sam
4cb85186b2 storagenode/pieces: enable lazyfilewalker by default
Resolves https://github.com/storj/storj/issues/5861

Change-Id: I20e0a5b8a15ca966cbccd71369322a517a2c2130
2023-07-19 15:25:20 +00:00
paul cannon
9a871bf3bc go.mod: bump storj.io/common
In particular, to get commit 9db74ed9 in for the next storagenode build.

Change-Id: If07283bc72d66dfbd0ee91bc1cc8b2ce015871cd
2023-07-19 09:50:06 -05:00
Jeremy Wharton
afae5b578e web/satellite/vuetify-poc: allow navigation drawer to be toggled
This change allows the navigation drawer to be toggled by clicking the
hamburger menu in the navigation bar. The hamburger menu has been
hidden on pages that do not contain a navigation drawer.

Resolves #6034

Change-Id: I48cfee1f48964c500c07f09f188c7077442261ab
2023-07-19 12:09:05 +00:00
Vitalii
5317135416 satellite/payments: fix config value for auto upgrade user tier flow
Fixed the config value which indicates how many base units of US micro dollars are needed to auto-upgrade a user to the paid tier.

Change-Id: I22821ac22fc3eaeeea21c6dec4e6912025df63aa
2023-07-19 10:43:07 +00:00
Wilfred Asomani
7cc873a62a satellite/payments: prevent removing other users' cards
This change patches a loophole that allowed accounts to remove cards
that belong to other users.

Closes #storj/storj-private#326

Change-Id: I33e9efe5c9cdb03aa48ad4c6b1d3283c396a7890
2023-07-19 09:44:06 +00:00
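
As an illustration of the kind of ownership check this patch implies, here is a minimal, self-contained Go sketch; the types and method names are hypothetical stand-ins, not the satellite's actual payments API:

package main

import (
	"context"
	"errors"
	"fmt"
)

// Card is a minimal stand-in for a stored payment card.
type Card struct {
	ID     string
	UserID string
}

// CardStore is a hypothetical card backend.
type CardStore struct{ cards []Card }

// RemoveCard deletes a card only if it belongs to the requesting user;
// a card ID coming from the client is never trusted on its own.
func (s *CardStore) RemoveCard(ctx context.Context, userID, cardID string) error {
	for i, c := range s.cards {
		if c.ID == cardID {
			if c.UserID != userID {
				return errors.New("card does not belong to this user")
			}
			s.cards = append(s.cards[:i], s.cards[i+1:]...)
			return nil
		}
	}
	return errors.New("card not found")
}

func main() {
	store := &CardStore{cards: []Card{{ID: "card_1", UserID: "alice"}}}
	// bob cannot remove alice's card:
	fmt.Println(store.RemoveCard(context.Background(), "bob", "card_1"))
}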
Michal Niewrzal
31bb6d54c7 cmd/tools: add tool to migrate segment copies metadata
We need to migrate all existing segment copies so that they contain the same
metadata as the original segment. So far we have not been duplicating stored
pieces, but we are changing this behavior now. We will use this
tool after enabling the new way of doing server-side copies.

Fixes https://github.com/storj/storj/issues/5890

Change-Id: Ia9ca12486f3c527abd28949eb438d1c4c7138d55
2023-07-18 15:12:51 +00:00
Egon Elbre
23631dc8bb satellite/accounting: fix TestProjectSegmentLimit*
Tally ensures that live accounting has the latest information;
however, when there are concurrent updates to live accounting,
it may be off by a few segments. Disable tally for those tests.

Change-Id: I6fa8a1794334bba093e18f29cb76e7b8d1244979
2023-07-18 14:26:05 +00:00
Egon Elbre
b1e7d70a86 satellite/payments/billing: fix test
`time.Now()` can return the same value when called sequentially.

Change-Id: I800c7696b919ad073e4558fb51c8d2eb4a04f05e
2023-07-18 16:25:42 +03:00
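
For context on the flake: two sequential time.Now() calls can land on the same instant on a coarse clock, so tests that assume strictly increasing timestamps can fail intermittently. A small Go illustration of the failure mode and one way tests commonly sidestep it:

package main

import (
	"fmt"
	"time"
)

func main() {
	a := time.Now()
	b := time.Now()
	// On a coarse clock these can be equal, breaking tests that
	// assume strictly increasing timestamps.
	fmt.Println("equal:", a.Equal(b))

	// One fix: derive the second timestamp explicitly instead of
	// calling time.Now() again.
	c := a.Add(time.Nanosecond)
	fmt.Println("ordered:", c.After(a)) // always true
}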
Vitalii
583ad54d86 satellite/{payments, console}: added functionality to get wallet's transactions (including pending)
Added new functionality to query storjscan for all wallet transactions (including pending).
Added new endpoint to query all wallet transactions.

Issue:
https://github.com/storj/storj/issues/5978

Change-Id: Id15fddfc9c95efcaa32aa21403cb177f9297e1ab
2023-07-18 11:09:29 +00:00
Vitalii
2ee0195eba satellite/payments: extend billing chore functionality to upgrade user
Added a new observer for the billing chore that checks a user's balance and upgrades their account if the balance meets or exceeds the amount needed for the upgrade.
Added a new config value specifying that amount in base units of US micro dollars.

Issue:
https://github.com/storj/storj/issues/5978

Change-Id: Ic3992cd3114397bfdd9e231ca090ff21ca66648b
2023-07-18 13:15:02 +03:00
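
A rough sketch of the observer logic described above, assuming an illustrative threshold constant; the real chore wiring and config name differ from this simplified version:

package main

import "fmt"

// upgradeThresholdMicroDollars mirrors the new config value: the balance,
// in base units of US micro dollars, required to auto-upgrade a user.
const upgradeThresholdMicroDollars = 10_000_000 // $10.00

type user struct {
	name     string
	paidTier bool
	balance  int64 // US micro dollars
}

// observeBilling upgrades any free-tier user whose token balance
// meets or exceeds the threshold.
func observeBilling(users []*user) {
	for _, u := range users {
		if !u.paidTier && u.balance >= upgradeThresholdMicroDollars {
			u.paidTier = true
			fmt.Printf("upgraded %s (balance %d)\n", u.name, u.balance)
		}
	}
}

func main() {
	users := []*user{
		{name: "alice", balance: 12_000_000}, // upgraded
		{name: "bob", balance: 3_000_000},    // stays in free tier
	}
	observeBilling(users)
}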
Michal Niewrzal
0303920da7 satellite/metainfo: remove unused method
Change-Id: I08e307e6909cdc46951c5f3112d77a685e67fe2e
2023-07-18 08:45:29 +00:00
Jeremy Wharton
df9a6e968e web/satellite: lint Vuetify files
This change enables linting for the Vuetify proof of concept code and
fixes the linting errors that were detected. Additionally, it migrates
the Vuetify components to the composition API.

Change-Id: Id8cc083954e3f4cb66a00ad2715a96c8747b592c
2023-07-17 20:32:59 +00:00
dlamarmorgan
abe1463a73 payments/stripe/invoices: add token payment to overdue invoice payment
Add an attempt to pay overdue invoices via storj token if the user has
a token balance.

Change-Id: I819b89e7cf9cdb7deb9a51eab5ca684b54418218
2023-07-17 12:59:33 -07:00
dlamarmorgan
c96c83e805 satellite/payments/stripe/service: add manual payment with token command
Add the ability to pay an individual user's open invoices using their
storj token balance.

Change-Id: I6115f2b033fd77f109ded6f55b1f35fc77c71ff1
2023-07-17 19:24:36 +00:00
dlamarmorgan
0f4371e84c scripts/tests/{backwardcompatibility,integrations}: add test scripts
Change-Id: Ib83cd0f083bab7f560a200fd95e62e5b21e60c27
2023-07-17 18:39:18 +00:00
Wilfred Asomani
0a8115b149 satellite/{console,payments}: fix handling for autofreeze flow
This change adds an extra step to the auto freeze chore to attempt
payment before freezing/warning a user.
It also attempts payment after modifying a user's cards, whether the user
is frozen/warned or not.

Issue: https://github.com/storj/storj-private/issues/341

Change-Id: Ia9c0c5a2d37837bca5153fe720fef61f1385cb15
2023-07-17 17:37:11 +00:00
Michal Niewrzal
47a4d4986d satellite/repair: enable declumping by default
This feature flag was disabled by default so we could test it slowly. It has been
enabled for some time on one production satellite and on test satellites without
any issue. We can now enable it by default in code.

Change-Id: If9c36895bbbea12bd4aefa30cb4df912e1729e4c
2023-07-17 15:02:35 +00:00
Michal Niewrzal
5272fd8497 satellite/metainfo: do full bucket validation only on create
We do full bucket name validation for many requests, but we should
do this only while creating a bucket. Other requests are covered
only by basic name length validation. Less strict validation for
other requests keeps buckets usable in case of invalid bucket names
in the DB (we have such cases from the past).

https://github.com/storj/storj/issues/6044

Change-Id: I3a41050e3637787f788705ef15b5dc4df4d01fc6
2023-07-17 16:15:33 +02:00
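
A condensed sketch of the described split, assuming S3-style naming rules for the full check (the exact rules in metainfo are not reproduced here):

package main

import (
	"errors"
	"fmt"
	"regexp"
)

var bucketNameRE = regexp.MustCompile(`^[a-z0-9][a-z0-9.-]+[a-z0-9]$`)

// validateLength is the cheap check applied to every request, so
// buckets with historically invalid names stay usable.
func validateLength(name string) error {
	if len(name) < 3 || len(name) > 63 {
		return errors.New("bucket name must be 3-63 characters")
	}
	return nil
}

// validateOnCreate applies the full rules, but only when the bucket
// is being created.
func validateOnCreate(name string) error {
	if err := validateLength(name); err != nil {
		return err
	}
	if !bucketNameRE.MatchString(name) {
		return errors.New("bucket name contains invalid characters")
	}
	return nil
}

func main() {
	fmt.Println(validateOnCreate("My_Bucket")) // rejected on create
	fmt.Println(validateLength("My_Bucket"))   // tolerated elsewhere
}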
Jeremy Wharton
95761908b5 web/satellite: update Vuetify proof of concept
The changes as of storj/vuetify-storj@c801fe6 have been pulled into the
Vuetify proof of concept.

Change-Id: I3db208836cff21287052615d36258fcf2d4c6169
2023-07-14 12:46:50 +00:00
Michal Niewrzal
5234727886 satellite/repair/repairer: fix flaky TestSegmentRepairPlacement
Sometimes DownloadSelectionCache doesn't keep up with all node
placement changes we are doing during this test.

Change-Id: Idbda6511e3324b560cee3be85f980bf8d5b9b7ef
2023-07-14 10:10:40 +00:00
Tomasz Melcer
5a1c3f7f19
storage/reputation: logging changes to node scores (#5877)
Useful for monitoring storage nodes using log-parsing tools, like swatchdog.

Co-authored-by: Clement Sam <clementsam75@gmail.com>
2023-07-13 17:03:18 +02:00
Wilfred Asomani
4ee647a951 satellite: add request id to requests
This change adds request IDs to requests, logs them as part of audit
logs, and sends them to the client on error. This is to improve debugging
of customer issues.

Issue: https://github.com/storj/storj/issues/5898

Change-Id: I801514b547d28d810552d91aa7c8502051e552bf
2023-07-13 09:22:43 +00:00
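
The general pattern looks like standard Go HTTP middleware; the header name and ID format below are assumptions for illustration, not the satellite's actual choices:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"log"
	"net/http"
)

const requestIDHeader = "X-Request-Id" // assumed header name

// withRequestID tags each request with an ID, logs it, and echoes it
// back to the client so errors can be correlated with server logs.
func withRequestID(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		buf := make([]byte, 8)
		_, _ = rand.Read(buf)
		id := hex.EncodeToString(buf)

		w.Header().Set(requestIDHeader, id)
		log.Printf("request %s: %s %s", id, r.Method, r.URL.Path)
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})
	log.Fatal(http.ListenAndServe(":8080", withRequestID(mux)))
}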
Cameron
e8fcdc10a4 satellite/metainfo: set user_agent in bucket_metainfos on bucket recreation
Before this change, if a user creates a bucket with a user_agent
attributed to it, then deletes and recreates it, the row in bucket_metainfos
will not have the user_agent. This is because we skip setting the field
in bucket_metainfos if the bucket already exists in value_attributions.
This can be problematic, as we return the bucket's user agent during the
ListBuckets operation, and the client may be expecting this value to be
populated.

This change ensures the bucket table user_agent is set when (re)creating a bucket. To avoid decreasing BeginObject performance, which also
updates attribution, a flag has been added to determine whether to
make sure the buckets table is updated: `forceBucketUpdate`.

Change-Id: Iada2f233b327b292ad9f98c73ea76a1b0113c926
2023-07-12 21:48:05 +00:00
Michal Niewrzal
99128ab551 satellite/metabase: reuse Pieces while looping segments
The segments loop implementation uses lots of memory to convert
alias pieces to pieces for each segment during iteration. To improve
the situation, this change reuses Pieces between batch pages. This
should significantly reduce memory usage for ranged loop executions.

Change-Id: I469188779908facb19ad85c6bb7bc3657111cc9a
2023-07-12 09:29:34 +00:00
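
The underlying optimization is plain slice reuse: truncating to zero length between batch pages keeps the backing array, so per-page allocations disappear. A generic sketch:

package main

import "fmt"

type piece struct{ num int }

func main() {
	batches := [][]int{{1, 2, 3}, {4, 5}, {6, 7, 8, 9}}

	// Reused between pages: len is reset each iteration, but the
	// capacity (and backing array) is kept.
	pieces := make([]piece, 0, 16)

	for _, batch := range batches {
		pieces = pieces[:0] // reuse, don't reallocate
		for _, n := range batch {
			pieces = append(pieces, piece{num: n})
		}
		fmt.Printf("processed %d pieces (cap %d)\n", len(pieces), cap(pieces))
	}
}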
Jeremy Wharton
062ca285a0 web/satellite: add sharing option to dropdown in buckets page
This change adds a sharing option to the dropdown menu for bucket rows
in the Buckets page.

Resolves #5964

Change-Id: Ife0eb8f6cabbe85eaedae1d94d97694f3c677a3e
2023-07-11 22:03:42 +00:00
Egon Elbre
465941b345 satellite/{nodeselection,overlay}: use location.Set
location.Set is faster for comparisons.

Updates #6028

Change-Id: I764eb5cafc507f908e4168b16a7994cc7721ce4d
2023-07-11 17:16:30 +00:00
Cameron
7e03ccfa46 satellite/console: optional separate web app server
This change creates the ability to run a server separate from the
console web server to serve the front end app. You can run it with
`satellite run ui`. Since there are now potentially two servers instead
of one, the UI server has the option to act as a reverse proxy to the
api server for local development by setting `--console.address` to the
console backend address and `--console.backend-reverse-proxy` to the
console backend's http url. Also, a feature flag has been implemented
on the api server to retain the ability to serve the front end app. It
is toggled with `--console.frontend-enable`.

github issue: https://github.com/storj/storj/issues/5843

Change-Id: I0d30451a20636e3184110dbe28c8a2a8a9505804
2023-07-11 12:17:35 -04:00
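
The reverse-proxy arrangement can be sketched with Go's standard library; the flag names come from the commit message, while the ports and paths below are purely illustrative:

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// --console.backend-reverse-proxy: the console backend's http url.
	backend, err := url.Parse("http://localhost:10100")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(backend)

	mux := http.NewServeMux()
	// API traffic is forwarded to the console backend...
	mux.Handle("/api/", proxy)
	// ...while the UI server serves the static front-end app itself.
	mux.Handle("/", http.FileServer(http.Dir("./web/satellite/dist")))

	log.Fatal(http.ListenAndServe(":10200", mux))
}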
Egon Elbre
9370bc4580 satellite/{nodeselection,overlay}: bump common and fix some potential issues
* Handle failed country code conversion.
* Avoid potential issues with a data-race due to shared slice.

Updates #6028

Change-Id: If7beef2619abd084e1f4109de2d323f834a6090a
2023-07-11 11:13:41 +00:00
Michal Niewrzal
1f92e7acda satellite: move GC sender to Core peer
Having a separate process/pod only for sending bloom filters once a week
is a bit wasteful. After reconfiguring production settings to use the sender
in the core, we can also remove the GC sender peer code.

https://github.com/storj/storj/issues/5979

Change-Id: I6efe3ec073f96545e1f70ad13843f8ccdf923ee8
2023-07-11 10:31:35 +00:00
Vitalii
a9d979e4d7 web/satellite: update default multipart upload part size
Updated the multipart upload part size to be 64MB or higher, depending on file size.
Increased the queue size from 4 to 5 (5 parts being uploaded at a time) because it can theoretically decrease upload time.

Issue:
https://github.com/storj/storj/issues/5851

Change-Id: Ida5661fa0ed6bc5a0651afc05b5feb7e77791efe
2023-07-10 20:07:51 +03:00
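
The sizing rule reads as: use 64MB parts unless the file is so large that the upload would exceed the part-count ceiling, in which case grow the part size. A sketch, assuming the common 10,000-part multipart limit (the limit the web app actually uses isn't stated here):

package main

import "fmt"

const (
	minPartSize = 64 << 20 // 64 MiB
	maxParts    = 10_000   // assumed part-count limit
)

// partSize returns 64MiB, or more when the file wouldn't otherwise
// fit in maxParts parts.
func partSize(fileSize int64) int64 {
	size := int64(minPartSize)
	if need := (fileSize + maxParts - 1) / maxParts; need > size {
		size = need
	}
	return size
}

func main() {
	fmt.Println(partSize(1 << 30)) // 1 GiB file: 64 MiB parts
	fmt.Println(partSize(1 << 40)) // 1 TiB file: ~110 MiB parts
}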
Moby von Briesen
4108aa72ba satellite/console,web/satellite: Fix project limit checking
* Fixes backend to use only a user's owned projects to determine if the
  user has hit the project limit
* Makes frontend logic consistent (and simpler) for checking whether to
  send user to the "Create Project" modal or the "upgrade account or
  request limit increase" modal

Before this change, projects that a user is a member of would be
included in determining whether the user could create a project. Also,
the "create project" button in the projects menu in the navbar of the UI
did not enable a free tier user to create a new project, even if they
had not hit their limits.

Change-Id: Ia776eb627ca37b83f5bc63bed83ee83c9f7cc789
2023-07-10 15:51:00 +00:00
Jeremy Wharton
4e876fbdba web/satellite: update upload modal
The upload modal has been updated to more closely match our designs.
- A button has been added to cancel all in-progress uploads.
- The status text has been fixed to display the proper file count.
- Clicking completed items in the upload modal previews them.

Resolves #5973

Change-Id: Iaee5fe05be14b3a6f2de1a9c807eca5137c7d643
2023-07-10 15:12:31 +00:00
Moby von Briesen
bd4d57c604 satellite/payments: Exclude users who pay via storjscan from autofreeze
Add a configuration (default true) to exclude users who have made
storjscan payments from being auto-warned/frozen for an unpaid invoice.
This will allow us to reach out to these users and handle warning/freezing
manually. Auto account freeze still handles CC-only users.

Fixes https://github.com/storj/storj/issues/6027

Change-Id: I0c862785dad1c8febfa11100c0d30e621ce3ae9b
2023-07-10 13:39:01 +00:00
Jeremy Wharton
c79d1b0d2f {satellite/console,web/satellite}: show error for project invite dupes
This change fixes an issue where a new project member invitation would
silently replace an older one that has the same project ID and email if
the email did not belong to a registered user. Additionally, the
satellite frontend has been updated to display more descriptive error
messages for project member invitations.

Change-Id: I32b582c40c0028b8eedf2aed4b5bfb43501594b4
2023-07-10 12:56:02 +00:00
Jeremy Wharton
fbda13c752 {satellite/console,web/satellite}: trim emails when inviting members
This change trims whitespace from email addresses in project member
invitation requests.

Change-Id: Idd9116820897bf29f3eeba8cf95770b1aa14690c
2023-07-10 12:22:07 +00:00
Jeremy Wharton
0f9a0ba9cd web/satellite: fix project switch when removing project members
This change fixes an issue where the currently selected project would
be switched when removing project members.

Change-Id: I8138b4229eb7933d25a2fe84e5aa0b5846fc79b8
2023-07-10 11:46:49 +00:00
JT Olio
73d65fce9a cmd/satellite/billing: don't fail the overall process if an individual invoice fails
Change-Id: I36591a717ef97bdb417cc6d9218e22b2f91f249b
2023-07-10 11:13:23 +00:00
Michal Niewrzal
1d62dc63f5 satellite/repair/repairer: fix NumHealthyInExcludedCountries calculation
Currently, we have an issue where, while counting unhealthy pieces, we
count a piece twice if it is in an excluded country and outside the
segment placement. This can cause unnecessary repair.

This change also takes another step toward moving RepairExcludedCountryCodes
from the overlay config into the repair package.

Change-Id: I3692f6e0ddb9982af925db42be23d644aec1963f
2023-07-10 12:01:19 +02:00
Igor
05f30740f5
docs/testplan: add project cowbell testplan (#6001) 2023-07-10 11:23:55 +02:00
Márton Elek
97a89c3476 satellite: switch to use nodefilters instead of old placement.AllowedCountry
placement.AllowedCountry is the old way to specify placement; with the new approach we can use a more generic (dynamic) method, which can check full node information instead of just the country code.

90% of this patch is just search and replace:

 * we need to use NodeFilters instead of placement.AllowedCountry
 * which means, we need an initialized PlacementRules available everywhere
 * which means we need to configure the placement rules

The remaining 10% is placement.go, where we introduced a new type of configuration (a lightweight expression language) to define any kind of placement without code changes.

Change-Id: Ie644b0b1840871b0e6bbcf80c6b50a947503d7df
2023-07-07 16:55:45 +00:00
Wilfred Asomani
e0b5476e78 web/satellite: fix long table content overflow
This change fixes an issue where long text content in the common table
would overflow and break the layout.

Change-Id: I30f6e08488410359e0a97995f8d769b272b56c72
2023-07-07 12:24:37 +00:00
Jeremy Wharton
074457fa4e web/satellite: add folder sharing
This change allows users to generate links for sharing folders.
Previously, users were only able to do this with files and buckets.

Resolves #5644

Change-Id: I16dd8270337e3561b6bda895b46d3cc9be5f8041
2023-07-07 10:03:06 +00:00
Vitalii
5fc6eaab17 satellite/{console, web}: display accurate legacy free tier information in upgrade modal
Updated upgrade account modal to show user account free tier limits instead of hardcoded values.

Issue:
https://github.com/storj/storj/issues/5939

Change-Id: I26ffbe2571c5ca4b37f02bec5211bac986bedc6a
2023-07-07 09:23:36 +00:00
Márton Elek
70cdca5d3c
satellite: move satellite/nodeselection/uploadselection => satellite/nodeselection
All the files in uploadselection are (in fact) related to generic node selection, and are used not only for upload
but also for download, repair, etc.

Change-Id: Ie4098318a6f8f0bbf672d432761e87047d3762ab
2023-07-07 10:32:03 +02:00
Márton Elek
8b4387a498 satellite/satellitedb: add tag information to nodes selected for upload/downloads
Change-Id: I0fa7daebcf83f7949726e5fffe68e0bdc6fd1d7a
2023-07-07 07:54:16 +00:00
Vitalii
ced8657caa web/satellite: removed unused images
Change-Id: Ifd9c691c69f50a8f346ac4ac2fa1433b00ea81b9
2023-07-06 20:31:24 +00:00
Vitalii
ece0cc5785 web/satellite: fix bottom spacing for all pages
Fixed bottom spacing for all pages; basically, removed the bottom padding override in the dashboard wrapper.
Removed a couple of unused components and icons.
Made the pagination size-changer dropdown appear above the selector so it doesn't extend the page height.

This change fixes pagination issue on Team page:
https://github.com/storj/storj/issues/6012

Change-Id: I707dd1bf9d61b05742b7f97a757a2a8f5f9f93fd
2023-07-06 19:57:43 +00:00
JT Olio
a85c080509 docs/blueprints: certified nodes
Change-Id: I7c670d740e4c3d1035dee145ed65aaed4cbaba0c
2023-07-06 14:07:46 -04:00
paul cannon
a4d68b9b7e satellite/metabase: server-side copy copies metadata
..instead of using segment_copies and ancestor_stream_id, etc.

This bypasses reference counting entirely, depending on our mark+sweep+
bloomfilter garbage collection strategy to get rid of pieces once they
are no longer part of a segment.

This is only safe to do after we have stopped passing delete requests on
to storage nodes.

Refs: https://github.com/storj/storj/issues/5889
Change-Id: I37bdcffaa752f84fd85045235d6875b3526b5ecc
2023-07-06 14:40:59 +00:00
Márton Elek
ddf1f1c340 satellite/{nodeselection,overlay}: NodeFilters for dynamic placement implementations
Change-Id: Ica3a7b535fa6736cd8fb12066e615b70e1fa65d6
2023-07-06 12:08:01 +00:00
Vitalii
e3d2f09988 web/satellite: add support link to upgrade account STORJ token flow
Added a support link to the upgrade account STORJ token flow to tell users that they have to fill in another form to upgrade with tokens only.

Issue:
https://github.com/storj/storj/issues/5985

Change-Id: Ifb852b883c6bf092d5eec588e823925a8ea661c9
2023-07-06 08:38:18 +00:00
Ethan Adams
f819b6a210 satellite/entrypoint: Ignore unset variable errors while checking for VALID_EXECUTABLE
Otherwise core and ranged loop fail at startup with "/entrypoint: line 94: $1: unbound variable"

Change-Id: I45a318038cd937c11f6a00d506c339ba69ea07bf
2023-07-05 20:18:38 +00:00
Márton Elek
1525324384 satellite/uploadselection: avoid String conversion of location during node selection
Converting a location to a String is not free; better to avoid it.

81cb588c23/storj/location/countrycode.go (L32)

Thanks to Egon, who reported this issue.

See also: https://review.dev.storj.io/c/storj/common/+/10732

Change-Id: Ife348cffa59c020b46914a68be231c6eb75f06c9
2023-07-05 19:22:12 +00:00
Michal Niewrzal
2c3464081f satellite/metainfo: fix bucket name validation
Change-Id: Ifa400ec855ee978ff001fa3736a8a4c1c53fd18c
2023-07-05 14:42:31 +00:00
Márton Elek
6a3802de4f satellite,storagenode: propagate node tags with NodeCheckin
Change-Id: Ib1a602a8cf81204efa001b5d338914ea4218c39b
2023-07-05 13:45:42 +00:00
Clement Sam
a740f96f75 storagenode/pieces/lazyfilewalker: test zapwrapper
This adds tests to the zapwrapper package and also adds a test
to verify the issue in https://github.com/storj/storj/issues/6006

Change-Id: Iec3f568e72683af71e1718017109a1ed52794b0b
2023-07-05 12:33:00 +00:00
Clement Sam
7ac2031cac web/multinode: fix wrong free disk space in allocation on dashboard
There are many cases where the keywords `free` and `available`
are confused in their usage.

In most cases, `free` space is the amount of free space left
on the whole disk, and not just in the allocation, while
`available` space is the amount of free space left in the
allocated disk space.

What the user/SNO wants to see is not the free space but the
available space: to the SNO, "free space" means the space left
in the allocated disk space.

Because of this confusion, the multinode dashboard displays
the `free` disk space instead of the free space in the
allocated disk space (https://github.com/storj/storj/issues/5248),
while the storagenode dashboard shows the correct free space
in the allocation.

This change fixes the wrong free disk space. I also added a
few comments to distinguish the `free` and `available` fields
in the `DiskSpace*` structs.

Change-Id: I11b372ca53a5ac05dc3f79834c18f85ebec11855
2023-07-05 11:24:24 +00:00
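
The distinction the commit settles on can be summarized in a commented struct; field names are illustrative rather than the actual `DiskSpace*` definitions:

package main

import "fmt"

// DiskSpace spells out the two easily confused quantities.
type DiskSpace struct {
	Allocated int64 // space the SNO dedicated to the node
	Used      int64 // space already occupied inside the allocation
	Free      int64 // free space on the WHOLE disk, not just the allocation
}

// Available is what the dashboard should show: free space left
// inside the allocation.
func (d DiskSpace) Available() int64 {
	return d.Allocated - d.Used
}

func main() {
	d := DiskSpace{Allocated: 1000, Used: 400, Free: 5000}
	fmt.Println("available in allocation:", d.Available()) // 600
	fmt.Println("free on disk:", d.Free)                   // 5000
}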
Michal Niewrzal
21c1e66a85 satellite/overlay: refactor ReliabilityCache to keep more data
ReliabilityCache now uses the refactored overlay Reliable method.
This method provides more info about nodes (e.g. country code), and
with it we are able to add two dedicated methods to classify pieces:
* OutOfPlacementPieces
* PiecesNodesLastNetsInOrder

With those new methods we will fix the issue where an offline but reliable
node wasn't checked for clumped and out-of-placement pieces.

https://github.com/storj/storj/issues/5998

Change-Id: I9ffbed9f07f4881c9db3bd0e5f0412f1a418dd82
2023-07-05 11:19:10 +02:00
Michal Niewrzal
f2cd7b0928 satellite/overlay: refactor Reliable to be used with repair checker
Currently we use Reliable to get missing pieces for the repair
checker. The issue is that the checker now looks at more things than
just missing pieces (clumped and out-of-placement pieces), and using
only the node ID is not enough. We had an issue where offline nodes
were skipped in the clumped and out-of-placement pieces checks.

Reliable was refactored to return data (e.g. country, lastNet) about all
reliable nodes. The list is split into online and offline nodes. This data
will be cached for quick use by the repair checker. It will also be
possible to check node metadata like country code or lastNet.

We are also slowly moving the `RepairExcludedCountryCodes` config from
overlay to repair, where it makes more sense.

This is the first part of the changes.

https://github.com/storj/storj/issues/5998

Change-Id: If534342488c0e440affc2894a8fbda6507b8959d
2023-07-05 10:56:31 +02:00
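
A sketch of the online/offline split described above, with an assumed staleness window (the real liveness criteria live in the overlay config):

package main

import (
	"fmt"
	"time"
)

type node struct {
	id          string
	countryCode string
	lastNet     string
	lastContact time.Time
}

// splitReliable partitions reliable nodes by liveness so the repair
// checker can still inspect offline nodes for clumped or
// out-of-placement pieces.
func splitReliable(nodes []node, window time.Duration) (online, offline []node) {
	cutoff := time.Now().Add(-window)
	for _, n := range nodes {
		if n.lastContact.After(cutoff) {
			online = append(online, n)
		} else {
			offline = append(offline, n)
		}
	}
	return online, offline
}

func main() {
	nodes := []node{
		{id: "a", countryCode: "DE", lastContact: time.Now()},
		{id: "b", countryCode: "US", lastContact: time.Now().Add(-2 * time.Hour)},
	}
	on, off := splitReliable(nodes, time.Hour)
	fmt.Println(len(on), "online,", len(off), "offline")
}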
Márton Elek
500b6244f8
satellite/satellitedb: create table for node tags
Change-Id: I884bb740974e6b8241aa6b85faf266b85fe892d4
2023-07-05 09:38:53 +02:00
Clement Sam
1851d103f9 go.mod: bump storj.io/private
Updates https://github.com/storj/storj/issues/6006

Change-Id: I1f549d5642213e420f3e5d0df4ef972c77add713
2023-07-03 23:59:15 +00:00
paul cannon
032546219c satellite/admin: fix spelling of list-apikeys endpoint
Currently, any attempt to list the api keys associated with a project
from the admin UI results in a 404 NOT FOUND error.

This appears to be because there is no /api/projects/{project}/apiKeys
endpoint registered; it should have a lowercase k.

Change-Id: Ifbe4cd0f9ba12a6e37a0d9f64df91c264ced5558
2023-07-03 21:03:53 +00:00
Jeremy Wharton
1173877167 {satellite/console,web/satellite}: encode email in project invite URLs
This change properly encodes email addresses that are used as query
parameters in project invitation-related URLs.

Change-Id: Iaaf7b62b5ac3db3f0b0e000cc06fef8e315400a8
2023-07-03 18:07:19 +00:00
Vitalii
cb41c51692 web/satellite: abort request to get object count in 10 seconds
When entering a passphrase to open a bucket, we make a ListObjectsV2Command request to figure out whether there are objects encrypted with a different passphrase.
If there are a lot of objects, this request takes forever to process.
With this change I added a 10-second timeout for that request so it doesn't block the user.

Issue:
https://github.com/storj/storj/issues/5954

Change-Id: I5243fba68d0b56dff2fb2b3a608a5e71860724c2
2023-07-03 17:33:11 +00:00
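
The web UI does this in TypeScript with an AbortController; the equivalent pattern in Go, as a self-contained sketch:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// listObjects stands in for the ListObjectsV2 call used to detect
// objects encrypted with a different passphrase.
func listObjects(ctx context.Context) error {
	select {
	case <-time.After(30 * time.Second): // pathological bucket
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// Abort the check after 10 seconds so the user isn't blocked.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	if err := listObjects(ctx); errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("object-count check timed out; continuing anyway")
	}
}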
Márton Elek
d38b8fa2c4 satellite/nodeselection: use the same Node object from overlay and nodeselection
We use two different Node types in `overlay` and `uploadnodeselection` and convert back and forth between them.

Using the same object allows us to use a unified node selection interface everywhere.

Change-Id: Ie71e29d60184ee0e5b4547eb54325f09c418f73c
2023-07-03 16:59:33 +00:00
Márton Elek
20a47034a5
cmd/tools: tag-signer utility to create signed node tags
Change-Id: I2983d688a109325a02fcd060ca1a2d4eb8e9e931
2023-07-03 18:10:08 +02:00
Márton Elek
01e33e7753 cmd/satellite: make satellite docker image compatible with storj-up
This patch makes satellite container images compatible with storj-up,
which means that any official release can be easily tested locally.

It means that we need some binaries (like storj-up and dlv) and shell fragments as part of the production image, but I think the risk is very low and the benefit is very high.

This is the first step toward unifying all the images and making it possible to test/run the same components everywhere (storj-up/nightly/prod).

https://github.com/storj/storj/issues/5946

Change-Id: Ib53b6213d94f93a793a841dedfe32cc59ef69b72
2023-07-03 15:02:27 +00:00
Márton Elek
8482b37c14
go.mod: bump to use latest common
Change-Id: Ie31e7779e86d13ca3c8acaacfe6ed1e23f2c9740
2023-07-03 13:32:35 +02:00
Wilfred Asomani
f131047f1a web/satellite: match projects table with the designs
This change updates the projects table on all projects dashboard to
more closely match the designs.

Change-Id: I547a83352fba8c3ad7958802db7b38b342b383e8
2023-06-30 12:05:47 +00:00
Vitalii
8d8f6734de satellite/{db, accounting}: added functionality to query settled bandwidth for given project
Added functionality to return only settled traffic from the project_bandwidth_daily_rollups table for a given month.
Updated the {projectID}/usage-limits endpoint to return only the settled bandwidth used.

This is a possible fix for this issue
https://github.com/storj/storj-private/issues/293

Change-Id: I12516dc898f449c2122e7442b8fbb88309a48ebe
2023-06-30 13:24:16 +03:00
Jeremy Wharton
c006126d54 web/satellite: add Back button to Bucket Details page
A button has been added to the Bucket Details page that returns the
user to the file browser.

Change-Id: Ic2868b1fc9e3b2b0e9785728dc7a116c828eced8
2023-06-30 08:53:36 +00:00
410 changed files with 15647 additions and 6254 deletions

@@ -12,13 +12,27 @@ FROM debian:buster-slim as ca-cert
 RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates
 RUN update-ca-certificates
 
+# Install storj-up helper (for local/dev runs)
+FROM --platform=$TARGETPLATFORM golang:1.19 AS storjup
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    --mount=type=cache,target=/go/pkg/mod \
+    go install storj.io/storj-up@latest
+
+# Install dlv (for local/dev runs)
+FROM --platform=$TARGETPLATFORM golang:1.19 AS dlv
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    --mount=type=cache,target=/go/pkg/mod \
+    go install github.com/go-delve/delve/cmd/dlv@latest
+
 FROM ${DOCKER_ARCH:-amd64}/debian:buster-slim
 ARG TAG
 ARG GOARCH
 ENV GOARCH ${GOARCH}
 ENV CONF_PATH=/root/.local/share/storj/satellite \
     STORJ_CONSOLE_STATIC_DIR=/app \
+    STORJ_MAIL_TEMPLATE_PATH=/app/static/emails \
     STORJ_CONSOLE_ADDRESS=0.0.0.0:10100
+ENV PATH=$PATH:/app
 EXPOSE 7777
 EXPOSE 10100
 WORKDIR /app
@@ -30,5 +44,9 @@ COPY release/${TAG}/wasm/wasm_exec.js /app/static/wasm/
 COPY release/${TAG}/wasm/access.wasm.br /app/static/wasm/
 COPY release/${TAG}/wasm/wasm_exec.js.br /app/static/wasm/
 COPY release/${TAG}/satellite_linux_${GOARCH:-amd64} /app/satellite
+COPY --from=storjup /go/bin/storj-up /usr/local/bin/storj-up
+COPY --from=dlv /go/bin/dlv /usr/local/bin/dlv
+# test identities for quick-start
+COPY --from=img.dev.storj.io/storjup/base:20230607-1 /var/lib/storj/identities /var/lib/storj/identities
 COPY cmd/satellite/entrypoint /entrypoint
 ENTRYPOINT ["/entrypoint"]

@@ -1,6 +1,7 @@
 #!/bin/bash
 set -euo pipefail
 
+## production helpers
 SETUP_PARAMS=""
 
 if [ -n "${IDENTITY_ADDR:-}" ]; then
@@ -21,6 +22,10 @@ if [ "${SATELLITE_API:-}" = "true" ]; then
 	exec ./satellite run api $RUN_PARAMS "$@"
 fi
 
+if [ "${SATELLITE_UI:-}" = "true" ]; then
+	exec ./satellite run ui $RUN_PARAMS "$@"
+fi
+
 if [ "${SATELLITE_GC:-}" = "true" ]; then
 	exec ./satellite run garbage-collection $RUN_PARAMS "$@"
 fi
@@ -37,4 +42,63 @@ if [ "${SATELLITE_AUDITOR:-}" = "true" ]; then
 	exec ./satellite run auditor $RUN_PARAMS "$@"
 fi
 
-exec ./satellite run $RUN_PARAMS "$@"
+## storj-up helpers
+if [ "${STORJUP_ROLE:-""}" ]; then
+	if [ "${STORJ_IDENTITY_DIR:-""}" ]; then
+		# generate identity if missing
+		if [ ! -f "$STORJ_IDENTITY_DIR/identity.key" ]; then
+			if [ "$STORJ_USE_PREDEFINED_IDENTITY" ]; then
+				# use predictable, pre-generated identity
+				mkdir -p $(dirname $STORJ_IDENTITY_DIR)
+				cp -r /var/lib/storj/identities/$STORJ_USE_PREDEFINED_IDENTITY $STORJ_IDENTITY_DIR
+			else
+				identity --identity-dir $STORJ_IDENTITY_DIR --difficulty 8 create .
+			fi
+		fi
+	fi
+
+	if [ "${STORJ_WAIT_FOR_DB:-""}" ]; then
+		storj-up util wait-for-port cockroach:26257
+		storj-up util wait-for-port redis:6379
+	fi
+
+	if [ "${STORJUP_ROLE:-""}" == "satellite-api" ]; then
+		mkdir -p /var/lib/storj/.local
+
+		# only migrate the first time
+		if [ ! -f "/var/lib/storj/.local/migrated" ]; then
+			satellite run migration --identity-dir $STORJ_IDENTITY_DIR
+			touch /var/lib/storj/.local/migrated
+		fi
+	fi
+
+	# default config generated without arguments is misleading
+	rm /root/.local/share/storj/satellite/config.yaml
+	mkdir -p /var/lib/storj/.local/share/storj/satellite || true
+
+	if [ "${GO_DLV:-""}" ]; then
+		echo "Starting with go dlv"
+
+		# an absolute file path is required
+		CMD=$(which $1)
+		shift
+		/usr/local/bin/dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec --check-go-version=false -- $CMD "$@"
+		exit $?
+	fi
+fi
+
+# for backward compatibility reasons, we use the argument as a command only if it's an executable (and use it as satellite flags otherwise)
+set +eo nounset
+which "$1" > /dev/null
+VALID_EXECUTABLE=$?
+set -eo nounset
+
+if [ $VALID_EXECUTABLE -eq 0 ]; then
+	# this is a full command (what storj-up uses)
+	exec "$@"
+else
+	# legacy, run-only parameters
+	exec ./satellite run $RUN_PARAMS "$@"
+fi

@@ -40,7 +40,7 @@ import (
 	"storj.io/storj/satellite/accounting/live"
 	"storj.io/storj/satellite/compensation"
 	"storj.io/storj/satellite/metabase"
-	"storj.io/storj/satellite/overlay"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/payments/stripe"
 	"storj.io/storj/satellite/satellitedb"
 )
@@ -100,6 +100,11 @@ var (
 		Short: "Run the satellite API",
 		RunE:  cmdAPIRun,
 	}
+	runUICmd = &cobra.Command{
+		Use:   "ui",
+		Short: "Run the satellite UI",
+		RunE:  cmdUIRun,
+	}
 	runRepairerCmd = &cobra.Command{
 		Use:   "repair",
 		Short: "Run the repair service",
@@ -255,12 +260,19 @@ var (
 		Long: "Finalizes all draft stripe invoices known to satellite's stripe account.",
 		RunE: cmdFinalizeCustomerInvoices,
 	}
-	payCustomerInvoicesCmd = &cobra.Command{
+	payInvoicesWithTokenCmd = &cobra.Command{
+		Use:   "pay-customer-invoices",
+		Short: "pay open finalized invoices for customer",
+		Long:  "attempts payment on any open finalized invoices for a specific user.",
+		Args:  cobra.ExactArgs(1),
+		RunE:  cmdPayCustomerInvoices,
+	}
+	payAllInvoicesCmd = &cobra.Command{
 		Use:   "pay-invoices",
 		Short: "pay finalized invoices",
 		Long:  "attempts payment on all open finalized invoices according to subscriptions settings.",
 		Args:  cobra.ExactArgs(1),
-		RunE:  cmdPayCustomerInvoices,
+		RunE:  cmdPayAllInvoices,
 	}
 	stripeCustomerCmd = &cobra.Command{
 		Use:   "ensure-stripe-customer",
@@ -366,6 +378,7 @@ func init() {
 	rootCmd.AddCommand(runCmd)
 	runCmd.AddCommand(runMigrationCmd)
 	runCmd.AddCommand(runAPICmd)
+	runCmd.AddCommand(runUICmd)
 	runCmd.AddCommand(runAdminCmd)
 	runCmd.AddCommand(runRepairerCmd)
 	runCmd.AddCommand(runAuditorCmd)
@@ -398,12 +411,14 @@ func init() {
 	billingCmd.AddCommand(createCustomerInvoicesCmd)
 	billingCmd.AddCommand(generateCustomerInvoicesCmd)
 	billingCmd.AddCommand(finalizeCustomerInvoicesCmd)
-	billingCmd.AddCommand(payCustomerInvoicesCmd)
+	billingCmd.AddCommand(payInvoicesWithTokenCmd)
+	billingCmd.AddCommand(payAllInvoicesCmd)
 	billingCmd.AddCommand(stripeCustomerCmd)
 	consistencyCmd.AddCommand(consistencyGECleanupCmd)
 	process.Bind(runCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runMigrationCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runAPICmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(runUICmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runAdminCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runRepairerCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(runAuditorCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
@@ -432,7 +447,8 @@ func init() {
 	process.Bind(createCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(generateCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(finalizeCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
-	process.Bind(payCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(payInvoicesWithTokenCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+	process.Bind(payAllInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(stripeCustomerCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(consistencyGECleanupCmd, &consistencyGECleanupCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
 	process.Bind(fixLastNetsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
@@ -862,6 +878,18 @@ func cmdFinalizeCustomerInvoices(cmd *cobra.Command, args []string) (err error)
 func cmdPayCustomerInvoices(cmd *cobra.Command, args []string) (err error) {
 	ctx, _ := process.Ctx(cmd)
 
+	return runBillingCmd(ctx, func(ctx context.Context, payments *stripe.Service, _ satellite.DB) error {
+		err := payments.InvoiceApplyCustomerTokenBalance(ctx, args[0])
+		if err != nil {
+			return errs.New("error applying native token payments to invoice for customer: %v", err)
+		}
+		return payments.PayCustomerInvoices(ctx, args[0])
+	})
+}
+
+func cmdPayAllInvoices(cmd *cobra.Command, args []string) (err error) {
+	ctx, _ := process.Ctx(cmd)
+
 	periodStart, err := parseYearMonth(args[0])
 	if err != nil {
 		return err
@@ -932,7 +960,7 @@ func cmdRestoreTrash(cmd *cobra.Command, args []string) error {
 	successes := new(int64)
 	failures := new(int64)
 
-	undelete := func(node *overlay.SelectedNode) {
+	undelete := func(node *nodeselection.SelectedNode) {
 		log.Info("starting restore trash", zap.String("Node ID", node.ID.String()))
 
 		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
@@ -966,9 +994,9 @@ func cmdRestoreTrash(cmd *cobra.Command, args []string) error {
 		log.Info("successful restore trash", zap.String("Node ID", node.ID.String()))
 	}
 
-	var nodes []*overlay.SelectedNode
+	var nodes []*nodeselection.SelectedNode
 	if len(args) == 0 {
-		err = db.OverlayCache().IterateAllContactedNodes(ctx, func(ctx context.Context, node *overlay.SelectedNode) error {
+		err = db.OverlayCache().IterateAllContactedNodes(ctx, func(ctx context.Context, node *nodeselection.SelectedNode) error {
 			nodes = append(nodes, node)
 			return nil
 		})
@@ -985,7 +1013,7 @@ func cmdRestoreTrash(cmd *cobra.Command, args []string) error {
 		if err != nil {
 			return err
 		}
-		nodes = append(nodes, &overlay.SelectedNode{
+		nodes = append(nodes, &nodeselection.SelectedNode{
 			ID:      dossier.Id,
 			Address: dossier.Address,
 			LastNet: dossier.LastNet,

@@ -94,7 +94,7 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
 	dialer := rpc.NewDefaultDialer(tlsOptions)
 
-	overlay, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
+	overlayService, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), config.Placement.CreateFilters, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
 	if err != nil {
 		return err
 	}
@@ -102,8 +102,9 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
 	orders, err := orders.NewService(
 		log.Named("orders"),
 		signing.SignerFromFullIdentity(identity),
-		overlay,
+		overlayService,
 		orders.NewNoopDB(),
+		config.Placement.CreateFilters,
 		config.Orders,
 	)
 	if err != nil {
@@ -122,9 +123,10 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
 		log.Named("segment-repair"),
 		metabaseDB,
 		orders,
-		overlay,
+		overlayService,
 		nil, // TODO add noop version
 		ecRepairer,
+		config.Placement.CreateFilters,
 		config.Checker.RepairOverrides,
 		config.Repairer,
 	)
@@ -132,7 +134,7 @@ func cmdRepairSegment(cmd *cobra.Command, args []string) (err error) {
 	// TODO reorganize to avoid using peer.
 	peer := &satellite.Repairer{}
-	peer.Overlay = overlay
+	peer.Overlay = overlayService
 	peer.Orders.Service = orders
 	peer.EcRepairer = ecRepairer
 	peer.SegmentRepairer = segmentRepairer

cmd/satellite/ui.go (new file, 47 lines)

@@ -0,0 +1,47 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package main

import (
	"github.com/spf13/cobra"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/private/process"
	"storj.io/storj/satellite"
)

func cmdUIRun(cmd *cobra.Command, args []string) (err error) {
	ctx, _ := process.Ctx(cmd)
	log := zap.L()

	runCfg.Debug.Address = *process.DebugAddrFlag

	identity, err := runCfg.Identity.Load()
	if err != nil {
		log.Error("Failed to load identity.", zap.Error(err))
		return errs.New("Failed to load identity: %+v", err)
	}

	satAddr := runCfg.Config.Contact.ExternalAddress
	if satAddr == "" {
		return errs.New("cannot run satellite ui if contact.external-address is not set")
	}
	apiAddress := runCfg.Config.Console.ExternalAddress
	if apiAddress == "" {
		apiAddress = runCfg.Config.Console.Address
	}

	peer, err := satellite.NewUI(log, identity, &runCfg.Config, process.AtomicLevel(cmd), satAddr, apiAddress)
	if err != nil {
		return err
	}

	if err := process.InitMetricsWithHostname(ctx, log, nil); err != nil {
		log.Warn("Failed to initialize telemetry batcher on satellite api", zap.Error(err))
	}

	runError := peer.Run(ctx)
	closeError := peer.Close()
	return errs.Combine(runError, closeError)
}

@@ -0,0 +1,248 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package main

import (
	"context"
	"encoding/csv"
	"errors"
	"os"
	"time"

	"github.com/spacemonkeygo/monkit/v3"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/uuid"
	"storj.io/private/cfgstruct"
	"storj.io/private/dbutil/pgutil"
	"storj.io/private/process"
	"storj.io/private/tagsql"
	"storj.io/storj/satellite/metabase"
)

var mon = monkit.Package()

var (
	rootCmd = &cobra.Command{
		Use:   "migrate-segment-copies",
		Short: "migrate-segment-copies",
	}

	runCmd = &cobra.Command{
		Use:   "run",
		Short: "run migrate-segment-copies",
		RunE:  run,
	}

	config Config
)

func init() {
	rootCmd.AddCommand(runCmd)

	cfgstruct.Bind(pflag.CommandLine, &config)
}

// Config defines configuration for migration.
type Config struct {
	MetabaseDB          string `help:"connection URL for metabaseDB"`
	BatchSize           int    `help:"number of entries from segment_copies processed at once" default:"2000"`
	SegmentCopiesBackup string `help:"csv file where segment copies entries will be backed up"`
}

// VerifyFlags verifies whether the values provided are valid.
func (config *Config) VerifyFlags() error {
	var errlist errs.Group
	if config.MetabaseDB == "" {
		errlist.Add(errors.New("flag '--metabasedb' is not set"))
	}
	return errlist.Err()
}

func run(cmd *cobra.Command, args []string) error {
	if err := config.VerifyFlags(); err != nil {
		return err
	}

	ctx, _ := process.Ctx(cmd)
	log := zap.L()
	return Migrate(ctx, log, config)
}

func main() {
	process.Exec(rootCmd)
}

// Migrate starts segment copies migration.
func Migrate(ctx context.Context, log *zap.Logger, config Config) (err error) {
	defer mon.Task()(&ctx)(&err)

	db, err := metabase.Open(ctx, log, config.MetabaseDB, metabase.Config{})
	if err != nil {
		return errs.New("unable to connect %q: %w", config.MetabaseDB, err)
	}
	defer func() {
		err = errs.Combine(err, db.Close())
	}()

	return MigrateSegments(ctx, log, db, config)
}

// MigrateSegments updates segment copies with proper metadata (pieces and placement).
func MigrateSegments(ctx context.Context, log *zap.Logger, metabaseDB *metabase.DB, config Config) (err error) {
	defer mon.Task()(&ctx)(&err)

	var backupCSV *csv.Writer
	if config.SegmentCopiesBackup != "" {
		f, err := os.Create(config.SegmentCopiesBackup)
		if err != nil {
			return err
		}
		defer func() {
			err = errs.Combine(err, f.Close())
		}()

		backupCSV = csv.NewWriter(f)
		defer backupCSV.Flush()

		if err := backupCSV.Write([]string{"stream_id", "ancestor_stream_id"}); err != nil {
			return err
		}
	}

	db := metabaseDB.UnderlyingTagSQL()

	var streamIDCursor uuid.UUID
	ancestorStreamIDs := []uuid.UUID{}
	streamIDs := []uuid.UUID{}
	processed := 0

	// what we are doing here:
	// * read a batch of entries from the segment_copies table
	// * read the ancestor (original) segments' metadata from the segments table
	// * update segment copies with the missing metadata, one by one
	// * delete entries from the segment_copies table
	for {
		log.Info("Processed entries", zap.Int("processed", processed))

		ancestorStreamIDs = ancestorStreamIDs[:0]
		streamIDs = streamIDs[:0]
		idsMap := map[uuid.UUID][]uuid.UUID{}
		err := withRows(db.QueryContext(ctx, `
			SELECT stream_id, ancestor_stream_id FROM segment_copies WHERE stream_id > $1 ORDER BY stream_id LIMIT $2
		`, streamIDCursor, config.BatchSize))(func(rows tagsql.Rows) error {
			for rows.Next() {
				var streamID, ancestorStreamID uuid.UUID
				err := rows.Scan(&streamID, &ancestorStreamID)
				if err != nil {
					return err
				}

				streamIDCursor = streamID

				ancestorStreamIDs = append(ancestorStreamIDs, ancestorStreamID)
				streamIDs = append(streamIDs, streamID)

				idsMap[ancestorStreamID] = append(idsMap[ancestorStreamID], streamID)
			}
			return nil
		})
		if err != nil {
			return err
		}

		type Update struct {
			StreamID          uuid.UUID
			AncestorStreamID  uuid.UUID
			Position          int64
			RemoteAliasPieces []byte
			RootPieceID       []byte
			RepairedAt        *time.Time
			Placement         int64
		}

		updates := []Update{}
		err = withRows(db.QueryContext(ctx, `
			SELECT stream_id, position, remote_alias_pieces, root_piece_id, repaired_at, placement FROM segments WHERE stream_id = ANY($1::BYTEA[])
		`, pgutil.UUIDArray(ancestorStreamIDs)))(func(rows tagsql.Rows) error {
			for rows.Next() {
				var ancestorStreamID uuid.UUID
				var position int64
				var remoteAliasPieces, rootPieceID []byte
				var repairedAt *time.Time
				var placement int64
				err := rows.Scan(&ancestorStreamID, &position, &remoteAliasPieces, &rootPieceID, &repairedAt, &placement)
				if err != nil {
					return err
				}

				streamIDs, ok := idsMap[ancestorStreamID]
				if !ok {
					return errs.New("unable to map ancestor stream id: %s", ancestorStreamID)
				}

				for _, streamID := range streamIDs {
					updates = append(updates, Update{
						StreamID:          streamID,
						AncestorStreamID:  ancestorStreamID,
						Position:          position,
						RemoteAliasPieces: remoteAliasPieces,
						RootPieceID:       rootPieceID,
						RepairedAt:        repairedAt,
						Placement:         placement,
					})
				}
			}
			return nil
		})
		if err != nil {
			return err
		}

		for _, update := range updates {
			_, err := db.ExecContext(ctx, `
				UPDATE segments SET
					remote_alias_pieces = $3,
					root_piece_id = $4,
					repaired_at = $5,
					placement = $6
				WHERE (stream_id, position) = ($1, $2)
			`, update.StreamID, update.Position, update.RemoteAliasPieces, update.RootPieceID, update.RepairedAt, update.Placement)
			if err != nil {
				return err
			}

			if backupCSV != nil {
				if err := backupCSV.Write([]string{update.StreamID.String(), update.AncestorStreamID.String()}); err != nil {
					return err
				}
			}
		}

		if backupCSV != nil {
			backupCSV.Flush()
		}

		processed += len(streamIDs)

		if len(updates) == 0 {
			return nil
		}
	}
}

func withRows(rows tagsql.Rows, err error) func(func(tagsql.Rows) error) error {
	return func(callback func(tagsql.Rows) error) error {
		if err != nil {
			return err
		}
		err := callback(rows)
		return errs.Combine(rows.Err(), rows.Close(), err)
	}
}

@ -0,0 +1,324 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package main_test
import (
"os"
"testing"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"storj.io/common/memory"
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/common/uuid"
cmd "storj.io/storj/cmd/tools/migrate-segment-copies"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/metabase/metabasetest"
)
func TestMigrateSingleCopy(t *testing.T) {
metabasetest.Run(t, func(ctx *testcontext.Context, t *testing.T, metabaseDB *metabase.DB) {
obj := metabasetest.RandObjectStream()
expectedPieces := metabase.Pieces{
{Number: 1, StorageNode: testrand.NodeID()},
{Number: 3, StorageNode: testrand.NodeID()},
}
object, _ := metabasetest.CreateTestObject{
CreateSegment: func(object metabase.Object, index int) metabase.Segment {
metabasetest.CommitSegment{
Opts: metabase.CommitSegment{
ObjectStream: obj,
Position: metabase.SegmentPosition{Part: 0, Index: uint32(index)},
RootPieceID: testrand.PieceID(),
Pieces: expectedPieces,
EncryptedKey: []byte{3},
EncryptedKeyNonce: []byte{4},
EncryptedETag: []byte{5},
EncryptedSize: 1024,
PlainSize: 512,
PlainOffset: 0,
Redundancy: metabasetest.DefaultRedundancy,
Placement: storj.EEA,
},
}.Check(ctx, t, metabaseDB)
return metabase.Segment{}
},
}.Run(ctx, t, metabaseDB, obj, 50)
copyObject, _, _ := metabasetest.CreateObjectCopy{
OriginalObject: object,
}.Run(ctx, t, metabaseDB, false)
segments, err := metabaseDB.TestingAllSegments(ctx)
require.NoError(t, err)
for _, segment := range segments {
if segment.StreamID == copyObject.StreamID {
require.Len(t, segment.Pieces, 0)
require.Equal(t, storj.EveryCountry, segment.Placement)
}
}
require.NotZero(t, numberOfSegmentCopies(t, ctx, metabaseDB))
err = cmd.MigrateSegments(ctx, zaptest.NewLogger(t), metabaseDB, cmd.Config{
BatchSize: 3,
})
require.NoError(t, err)
segments, err = metabaseDB.TestingAllSegments(ctx)
require.NoError(t, err)
for _, segment := range segments {
require.Equal(t, expectedPieces, segment.Pieces)
require.Equal(t, storj.EEA, segment.Placement)
}
})
}
func TestMigrateManyCopies(t *testing.T) {
metabasetest.Run(t, func(ctx *testcontext.Context, t *testing.T, metabaseDB *metabase.DB) {
obj := metabasetest.RandObjectStream()
expectedPieces := metabase.Pieces{
{Number: 1, StorageNode: testrand.NodeID()},
{Number: 3, StorageNode: testrand.NodeID()},
}
object, _ := metabasetest.CreateTestObject{
CreateSegment: func(object metabase.Object, index int) metabase.Segment {
metabasetest.CommitSegment{
Opts: metabase.CommitSegment{
ObjectStream: obj,
Position: metabase.SegmentPosition{Part: 0, Index: uint32(index)},
RootPieceID: testrand.PieceID(),
Pieces: expectedPieces,
EncryptedKey: []byte{3},
EncryptedKeyNonce: []byte{4},
EncryptedETag: []byte{5},
EncryptedSize: 1024,
PlainSize: 512,
PlainOffset: 0,
Redundancy: metabasetest.DefaultRedundancy,
Placement: storj.EEA,
},
}.Check(ctx, t, metabaseDB)
return metabase.Segment{}
},
}.Run(ctx, t, metabaseDB, obj, 20)
for i := 0; i < 10; i++ {
copyObject, _, _ := metabasetest.CreateObjectCopy{
OriginalObject: object,
}.Run(ctx, t, metabaseDB, false)
segments, err := metabaseDB.TestingAllSegments(ctx)
require.NoError(t, err)
for _, segment := range segments {
if segment.StreamID == copyObject.StreamID {
require.Len(t, segment.Pieces, 0)
require.Equal(t, storj.EveryCountry, segment.Placement)
}
}
}
require.NotZero(t, numberOfSegmentCopies(t, ctx, metabaseDB))
err := cmd.MigrateSegments(ctx, zaptest.NewLogger(t), metabaseDB, cmd.Config{
BatchSize: 7,
})
require.NoError(t, err)
segments, err := metabaseDB.TestingAllSegments(ctx)
require.NoError(t, err)
for _, segment := range segments {
require.Equal(t, expectedPieces, segment.Pieces)
require.Equal(t, storj.EEA, segment.Placement)
}
})
}
func TestMigrateDifferentSegment(t *testing.T) {
metabasetest.Run(t, func(ctx *testcontext.Context, t *testing.T, metabaseDB *metabase.DB) {
type Segment struct {
StreamID uuid.UUID
Position int64
}
expectedResults := map[Segment]metabase.Pieces{}
createData := func(numberOfObjecsts int, pieces metabase.Pieces) {
for i := 0; i < numberOfObjecsts; i++ {
numberOfSegments := 3
obj := metabasetest.RandObjectStream()
object, _ := metabasetest.CreateTestObject{
CreateSegment: func(object metabase.Object, index int) metabase.Segment {
metabasetest.CommitSegment{
Opts: metabase.CommitSegment{
ObjectStream: obj,
Position: metabase.SegmentPosition{Part: 0, Index: uint32(index)},
RootPieceID: testrand.PieceID(),
Pieces: pieces,
EncryptedKey: []byte{3},
EncryptedKeyNonce: []byte{4},
EncryptedETag: []byte{5},
EncryptedSize: 1024,
PlainSize: 512,
PlainOffset: 0,
Redundancy: metabasetest.DefaultRedundancy,
Placement: storj.EEA,
},
}.Check(ctx, t, metabaseDB)
return metabase.Segment{}
},
}.Run(ctx, t, metabaseDB, obj, 3)
for n := 0; n < numberOfSegments; n++ {
expectedResults[Segment{
StreamID: object.StreamID,
Position: int64(n),
}] = pieces
}
copyObject, _, _ := metabasetest.CreateObjectCopy{
OriginalObject: object,
}.Run(ctx, t, metabaseDB, false)
for n := 0; n < numberOfSegments; n++ {
expectedResults[Segment{
StreamID: copyObject.StreamID,
Position: int64(n),
}] = pieces
segments, err := metabaseDB.TestingAllSegments(ctx)
require.NoError(t, err)
for _, segment := range segments {
if segment.StreamID == copyObject.StreamID {
require.Len(t, segment.Pieces, 0)
require.Equal(t, storj.EveryCountry, segment.Placement)
}
}
}
}
}
expectedPieces := metabase.Pieces{
{Number: 1, StorageNode: testrand.NodeID()},
{Number: 3, StorageNode: testrand.NodeID()},
}
createData(5, expectedPieces)
expectedPieces = metabase.Pieces{
{Number: 2, StorageNode: testrand.NodeID()},
{Number: 4, StorageNode: testrand.NodeID()},
}
createData(5, expectedPieces)
require.NotZero(t, numberOfSegmentCopies(t, ctx, metabaseDB))
err := cmd.MigrateSegments(ctx, zaptest.NewLogger(t), metabaseDB, cmd.Config{
BatchSize: 7,
})
require.NoError(t, err)
segments, err := metabaseDB.TestingAllSegments(ctx)
require.NoError(t, err)
require.Equal(t, len(expectedResults), len(segments))
for _, segment := range segments {
pieces := expectedResults[Segment{
StreamID: segment.StreamID,
Position: int64(segment.Position.Encode()),
}]
require.Equal(t, pieces, segment.Pieces)
require.Equal(t, storj.EEA, segment.Placement)
}
})
}
func numberOfSegmentCopies(t *testing.T, ctx *testcontext.Context, metabaseDB *metabase.DB) int {
var count int
err := metabaseDB.UnderlyingTagSQL().QueryRow(ctx, "SELECT count(1) FROM segment_copies").Scan(&count)
require.NoError(t, err)
return count
}
func TestMigrateEndToEnd(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
expectedData := testrand.Bytes(10 * memory.KiB)
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "test", "object", expectedData)
require.NoError(t, err)
project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
require.NoError(t, err)
defer ctx.Check(project.Close)
_, err = project.CopyObject(ctx, "test", "object", "test", "object-copy", nil)
require.NoError(t, err)
data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "test", "object-copy")
require.NoError(t, err)
require.Equal(t, expectedData, data)
err = cmd.MigrateSegments(ctx, zaptest.NewLogger(t), planet.Satellites[0].Metabase.DB, cmd.Config{
BatchSize: 1,
})
require.NoError(t, err)
data, err = planet.Uplinks[0].Download(ctx, planet.Satellites[0], "test", "object-copy")
require.NoError(t, err)
require.Equal(t, expectedData, data)
})
}
func TestMigrateBackupCSV(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
expectedData := testrand.Bytes(10 * memory.KiB)
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "test", "object", expectedData)
require.NoError(t, err)
project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
require.NoError(t, err)
defer ctx.Check(project.Close)
_, err = project.CopyObject(ctx, "test", "object", "test", "object-copy", nil)
require.NoError(t, err)
data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "test", "object-copy")
require.NoError(t, err)
require.Equal(t, expectedData, data)
backupFile := ctx.File("backupcsv")
err = cmd.MigrateSegments(ctx, zaptest.NewLogger(t), planet.Satellites[0].Metabase.DB, cmd.Config{
BatchSize: 1,
SegmentCopiesBackup: backupFile,
})
require.NoError(t, err)
data, err = planet.Uplinks[0].Download(ctx, planet.Satellites[0], "test", "object-copy")
require.NoError(t, err)
require.Equal(t, expectedData, data)
fileBytes, err := os.ReadFile(backupFile)
require.NoError(t, err)
require.NotEmpty(t, fileBytes)
})
}

View File

@@ -203,12 +203,12 @@ func verifySegments(cmd *cobra.Command, args []string) error {
	dialer := rpc.NewDefaultDialer(tlsOptions)
	// setup dependencies for verification
-	overlay, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), "", "", satelliteCfg.Overlay)
+	overlayService, err := overlay.NewService(log.Named("overlay"), db.OverlayCache(), db.NodeEvents(), overlay.NewPlacementRules().CreateFilters, "", "", satelliteCfg.Overlay)
	if err != nil {
		return Error.Wrap(err)
	}
-	ordersService, err := orders.NewService(log.Named("orders"), signing.SignerFromFullIdentity(identity), overlay, orders.NewNoopDB(), satelliteCfg.Orders)
+	ordersService, err := orders.NewService(log.Named("orders"), signing.SignerFromFullIdentity(identity), overlayService, orders.NewNoopDB(), overlay.NewPlacementRules().CreateFilters, satelliteCfg.Orders)
	if err != nil {
		return Error.Wrap(err)
	}
@@ -243,7 +243,7 @@ func verifySegments(cmd *cobra.Command, args []string) error {
	// setup verifier
	verifier := NewVerifier(log.Named("verifier"), dialer, ordersService, verifyConfig)
-	service, err := NewService(log.Named("service"), metabaseDB, verifier, overlay, serviceConfig)
+	service, err := NewService(log.Named("service"), metabaseDB, verifier, overlayService, serviceConfig)
	if err != nil {
		return Error.Wrap(err)
	}

View File

@@ -15,6 +15,7 @@ import (
	"storj.io/common/uuid"
	"storj.io/private/process"
	"storj.io/storj/satellite/metabase"
+	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/satellitedb"
)
@@ -78,7 +79,7 @@ type NodeCheckConfig struct {
// NodeCheckOverlayDB contains dependencies from overlay that are needed for the processing.
type NodeCheckOverlayDB interface {
-	IterateAllContactedNodes(context.Context, func(context.Context, *overlay.SelectedNode) error) error
+	IterateAllContactedNodes(context.Context, func(context.Context, *nodeselection.SelectedNode) error) error
	IterateAllNodeDossiers(context.Context, func(context.Context, *overlay.NodeDossier) error) error
}

View File

@@ -21,6 +21,7 @@ import (
	"storj.io/common/uuid"
	"storj.io/storj/satellite/audit"
	"storj.io/storj/satellite/metabase"
+	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/overlay"
)
@@ -46,7 +47,7 @@ type Verifier interface {
type Overlay interface {
	// Get looks up the node by nodeID
	Get(ctx context.Context, nodeID storj.NodeID) (*overlay.NodeDossier, error)
-	SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*overlay.SelectedNode, error)
+	SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*nodeselection.SelectedNode, error)
}
// SegmentWriter allows writing segments to some output.

View File

@@ -23,6 +23,7 @@ import (
	segmentverify "storj.io/storj/cmd/tools/segment-verify"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/satellite/metabase"
+	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/overlay"
)
@@ -344,10 +345,10 @@ func (db *metabaseMock) Get(ctx context.Context, nodeID storj.NodeID) (*overlay.
	}, nil
}
-func (db *metabaseMock) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*overlay.SelectedNode, error) {
-	var xs []*overlay.SelectedNode
+func (db *metabaseMock) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) ([]*nodeselection.SelectedNode, error) {
+	var xs []*nodeselection.SelectedNode
	for nodeID := range db.nodeIDToAlias {
-		xs = append(xs, &overlay.SelectedNode{
+		xs = append(xs, &nodeselection.SelectedNode{
			ID: nodeID,
			Address: &pb.NodeAddress{
				Address: fmt.Sprintf("nodeid:%v", nodeID),

View File

@@ -0,0 +1,186 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package main
import (
"context"
"encoding/base64"
"encoding/hex"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/gogo/protobuf/proto"
"github.com/spf13/cobra"
"github.com/zeebo/errs"
"storj.io/common/identity"
"storj.io/common/nodetag"
"storj.io/common/pb"
"storj.io/common/signing"
"storj.io/common/storj"
"storj.io/private/process"
)
var (
rootCmd = &cobra.Command{
Use: "tag-signer",
Short: "Sign key=value pairs with identity",
Long: "Node tags are arbitrary key value pairs signed by an authority. If the public key is configured on " +
"Satellite side, Satellite will check the signatures and save the tags, which can be used (for example)" +
" during node selection. Storagenodes can be configured to send encoded node tags to the Satellite. " +
"This utility helps creating/managing the values of this specific configuration value, which is encoded by default.",
}
signCmd = &cobra.Command{
Use: "sign <key=value> <key2=value> ...",
Short: "Create signed tagset",
Args: cobra.MinimumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
ctx, _ := process.Ctx(cmd)
encoded, err := signTags(ctx, config, args)
if err != nil {
return err
}
fmt.Println(encoded)
return nil
},
}
inspectCmd = &cobra.Command{
Use: "inspect <encoded string>",
Short: "Print out the details from an encoded node set",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
ctx, _ := process.Ctx(cmd)
return inspect(ctx, args[0])
},
}
config Config
)
// Config contains configuration required for signing.
type Config struct {
IdentityDir string `help:"location of the identity files" path:"true"`
NodeID      string `help:"the ID of the node that will use this tag"`
}
func init() {
rootCmd.AddCommand(signCmd)
rootCmd.AddCommand(inspectCmd)
process.Bind(signCmd, &config)
}
func signTags(ctx context.Context, cfg Config, tagPairs []string) (string, error) {
if cfg.IdentityDir == "" {
return "", errs.New("Please specify the identity, used as a signer with --identity-dir")
}
if cfg.NodeID == "" {
return "", errs.New("Please specify the --node-id")
}
identityConfig := identity.Config{
CertPath: filepath.Join(cfg.IdentityDir, "identity.cert"),
KeyPath: filepath.Join(cfg.IdentityDir, "identity.key"),
}
fullIdentity, err := identityConfig.Load()
if err != nil {
return "", err
}
signer := signing.SignerFromFullIdentity(fullIdentity)
nodeID, err := storj.NodeIDFromString(cfg.NodeID)
if err != nil {
return "", errs.New("Wrong NodeID format: %v", err)
}
tagSet := &pb.NodeTagSet{
NodeId: nodeID.Bytes(),
Timestamp: time.Now().Unix(),
}
for _, tag := range tagPairs {
tag = strings.TrimSpace(tag)
if len(tag) == 0 {
continue
}
parts := strings.SplitN(tag, "=", 2)
if len(parts) != 2 {
return "", errs.New("tags should be in KEY=VALUE format, but it was %s", tag)
}
tagSet.Tags = append(tagSet.Tags, &pb.Tag{
Name: parts[0],
Value: []byte(parts[1]),
})
}
signedMessage, err := nodetag.Sign(ctx, tagSet, signer)
if err != nil {
return "", err
}
all := &pb.SignedNodeTagSets{
Tags: []*pb.SignedNodeTagSet{
signedMessage,
},
}
raw, err := proto.Marshal(all)
if err != nil {
return "", errs.Wrap(err)
}
return base64.StdEncoding.EncodeToString(raw), nil
}
func inspect(ctx context.Context, s string) error {
raw, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return errs.New("Input is not in base64 format")
}
sets := &pb.SignedNodeTagSets{}
err = proto.Unmarshal(raw, sets)
if err != nil {
return errs.New("Input is not a protobuf encoded *pb.SignedNodeTagSets message")
}
for _, msg := range sets.Tags {
signerNodeID, err := storj.NodeIDFromBytes(msg.SignerNodeId)
if err != nil {
return err
}
fmt.Println("Signer: ", signerNodeID.String())
fmt.Println("Signature: ", hex.EncodeToString(msg.Signature))
tags := &pb.NodeTagSet{}
err = proto.Unmarshal(msg.SerializedTag, tags)
if err != nil {
return err
}
nodeID, err := storj.NodeIDFromBytes(tags.NodeId)
if err != nil {
return err
}
fmt.Println("Timestamp: ", time.Unix(tags.Timestamp, 0).Format(time.RFC3339))
fmt.Println("NodeID: ", nodeID.String())
fmt.Println("Tags:")
for _, tag := range tags.Tags {
fmt.Printf(" %s=%s\n", tag.Name, string(tag.Value))
}
fmt.Println()
}
return nil
}
func main() {
process.Exec(rootCmd)
}
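
For context, a typical invocation of this utility looks something like the following; the command names come from the cobra definitions above, the flag names are assumed from the `Config` fields bound via `process.Bind`, and the identity path and node ID are placeholders:

```
tag-signer sign soc2=true region=eu --identity-dir /path/to/identity --node-id <node-id>
tag-signer inspect <base64-output-of-sign>
```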

View File

@@ -0,0 +1,273 @@
# Node and operator certification
## Abstract
This is a proposal for a small feature and service that allows for nodes and
operators to have signed tags of certain kinds for use in project-specific or
Satellite-specific node selection.
## Background/context
We have a couple of ongoing needs:
* 1099 KYC
* Private storage node networks
* SOC2/HIPAA/etc node certification
* Voting and operator signaling
### 1099 KYC
The United States has a rule that if node operators earn more than $600/year,
we need to file a 1099 for each of them. Our current way of dealing with this
is manual and time consuming, and so it would be nice to automate it.
Ultimately, we should be able to automatically:
1) keep track of which nodes are run by operators under or over the $600
threshold.
2) keep track of if an automated KYC service has signed off that we have the
necessary information to file a 1099.
3) automatically suspend nodes that have earned more than $600 but have not
provided legally required information.
### Private storage node networks
We have seen growing interest from customers that want to bring their own
hard drives, or be extremely choosy about the nodes they are willing to work
with. The current way we are solving this is spinning up private Satellites
that are configured to only work with the nodes those customers provide, but
it would be better if we didn't have to start custom Satellites for this.
Instead, it would be nice to have a per-project configuration on an existing
Satellite that allowed that project to specify a specific subset of verified
or validated nodes, e.g., Project A should be able to say only nodes from
node providers B and C should be selected. Symmetrically, Nodes from providers
B and C may only want to accept data from certain projects, like Project A.
When nodes from providers B and C are added to the Satellite, they should be
able to provide a provider-specific signature, and requirements about
customer-specific requirements, if any.
### SOC2/HIPAA/etc node certification
This is actually just a slightly different shape of the private storage node
network problem, but instead of being provider-specific, it is
property-specific.
Perhaps Project D has a compliance requirement. They can only store data
on nodes that meet specific requirements.
Node operators E and F are willing to conform and attest to these compliance
requirements, but don't know about project D. It would be nice if Node
operators E and F could navigate to a compliance portal and see a list of
potential compliance attestations available. For possible compliance
attestations, node operators could sign agreements for these, and then receive
a verified signature that shows their selected compliance options.
Then, Project D's node selection process would filter by nodes that had been
approved for the necessary compliance requirements.
### Voting and operator signaling
As Satellite operators ourselves, we are currently engaged in a discussion about
pricing changes with storage node operators. Future Satellite operators may find
themselves in similar situations. It would be nice if storage node operators
could indicate votes for values. This would potentially be more representative
of network sentiment than posts on a forum.
Note that this isn't a transparent voting scheme, where other voters can see
the votes made, so this may not be a great voting solution in general.
## Design and implementation
I believe there are two basic building blocks that solve all of the above
issues:
* Signed node tags (with potential values)
* A document signing service
### Signed node tags
The network representation:
```
message Tag {
// Note that there is a single flat namespace of all names per
// signer node id. Signers should be careful to make sure that
// there are no name collisions. For self-signed content-hash
// based values, the name should have the prefix of the content
// hash.
string name = 1;
bytes value = 2; // optional, representation dependent on name.
}
message TagSet {
// must always be set. this is the node the signer is signing for.
bytes node_id = 1;
repeated Tag tags = 2;
// must always be set. this makes sure the signature is signing the
// timestamp inside.
int64 timestamp = 3;
}
message SignedTagSet {
// this is the serialized form of TagSet, serialized so that
// the signature process has something stable to work with.
bytes serialized_tag = 1;
// this is who signed (could be self signed, could be well known).
bytes signer_node_id = 3;
bytes signature = 4;
}
message SignedTagSets {
repeated SignedTagSet tags = 1;
}
```
Note that every tag is signing a name/value pair (value optional) against
a specific node id.
Note also that names are only unique within the namespace of a given signer.
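
The `tag-signer` utility included in this changeset demonstrates the flow end
to end: it builds a `NodeTagSet` from `key=value` arguments, signs it with
`nodetag.Sign`, wraps the result in a `SignedNodeTagSets`, and base64-encodes
it for transport.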
The database representation on the Satellite. N.B.: nothing should be entered
into this database without validation:
```
model signed_tags (
field node_id blob
field name text
field value blob
field timestamp int64
field signer_node_id blob
)
```
The "signer_node_id" is worth more explanation. Every signer should have a
stable node id. Satellites and storage nodes already have one, but any other
service that validates node tags would also need one.
In particular, the document signing service (below) would have its own unique
node id for signing tags, whereas for voting-style tags or tags based on a
content-addressed identifier (e.g. a hash of a document), the nodes would
self-sign.
### Document signing service
We would start a small web service, where users can log in and sign and fill
out documents. This web service would then create a unique activation code
that storage node operators could run on their storage nodes for activation and
signing. They could run `storagenode activate <code>` and then the node would
reach out to the signing service and get a `SignedTag` related to that node
given the information the user provided. The node could then present these
to the satellite.
Ultimately, the document signing service will require a separate design doc,
but here are some considerations for it:
Activation codes must expire shortly. Even Netflix has two hours of validity
for their service code - for a significantly less critical use case. What would
be a usable validity time for our use case? 15 minutes? 1 hour? Should we make
it configurable?
We want to still keep usability in mind for a SNO who needs to activate 500
nodes.
It would be even better if the SNO could force invalidating the activation code
when they are done with it.
As activation codes expire, the SNO should be able to generate a new activation
code if they want to associate a new node to an already signed document.
It should be hard to brute-force activation codes. They shouldn't be simple
numbers (4-digit or 6-digit) but something as complex as a UUID.
It's also possible that the SNO uses some signature mechanism during signing
service authentication, and the same signature is used for activation. If the
same signature mechanism is used during activation, then no token is necessary.
### Update node selection
Once the above two building blocks exist, many problems become much more easily
solvable.
We would want to extend node selection to be able to do queries,
given project-specific configuration, based on these signed_tag values.
Because node selection mostly happens in memory from cached node table data,
it should be easy to add some denormalized data for certain selected cases,
such as:
* Document hashes nodes have self signed.
* Approval states based on well known third party signer nodes (a KYC service).
Once these fields exist, then node selection can happen as before, filtering
for the appropriate value given project settings.
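
As a minimal sketch of such a filter (the `CachedNode` type, its `Tags` map,
and the key format are hypothetical illustrations, not the actual overlay
types):

```
package nodeselection

// CachedNode stands in for whatever struct the node cache actually uses;
// Tags holds validated tag values, denormalized from the signed_tags table
// after signature validation, keyed by "signer_node_id/name".
type CachedNode struct {
	ID   string
	Tags map[string]string
}

// filterByTag keeps only the nodes whose validated tags contain the
// required key with the required value.
func filterByTag(nodes []CachedNode, key, want string) []CachedNode {
	var out []CachedNode
	for _, n := range nodes {
		if got, ok := n.Tags[key]; ok && got == want {
			out = append(out, n)
		}
	}
	return out
}
```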
## How these building blocks work for the example use cases
### 1099 KYC
The document signing service would have a KYC (Know Your Customer) form. Once
filled out, the document signing service would make a `TagSet` that includes all
of the answers to the KYC questions, for the given node id, signed by the
document signing service's node id.
The node would hang on to this `SignedTagSet` and submit it along with others
in a `SignedTagSets` to Satellites occasionally (maybe once a month during
node CheckIn).
### Private storage node networks
Storage node provisioning would provide nodes with a signed `SignedTagSet`
from a provisioning service that had its own node id. Then a private Satellite
could be configured to require that all nodes present a `SignedTagSet` signed
by the configured provisioning service that has that node's id in it.
Notably - this functionality could also be solved by the older waitlist node
identity signing certificate process, but we are slowly removing what remains
of that feature over time.
This functionality could also be solved by setting the Satellite's minimum
allowable node id difficulty to the maximum possible difficulty, thus preventing
any automatic node registration, and manually inserting node ids into the
database. This is what we are currently doing for private network trials, but
if `SignedTagSet`s existed, that would be easier.
### SOC2/HIPAA/etc node certification
For any type of document that doesn't require any third party service
(such as government id validation, etc), the document and its fields can be
filled out and self signed by the node, along with a content hash of the
document in question.
The node would create a `TagSet`, where one field is the hash of the legal
document that was agreed upon, and the remaining fields (with names prefixed
by the document's content hash) would be form fields
that the node operator filled in and ascribed to the document. Then, the
`TagSet` would be signed by the node itself. The cryptographic nature of the
content hash inside the `TagSet` would validate what the node operator had
agreed to.
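
As a purely illustrative example (the hash prefix and field names are made
up), such a self-signed `TagSet` might contain:

```
"2fd4e1c6.../signed": "true"
"2fd4e1c6.../company": "Example Storage LLC"
```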
### Voting and operator signaling
Node operators could self sign additional `Tag`s inside of a miscellaneous
`TagSet`, including `Tag`s such as
```
"storage-node-vote-20230611-network-change": "yes"
```
Or similar.
## Open problems
* Revocation? - `TagSets` have a timestamp inside that must be filled out. In
the future, certain tags could have an expiry or updated values or similar.
## Other options
## Wrapup
## Related work

View File

@@ -0,0 +1,25 @@
# Mini Cowbell Testplan
## Background
We want to deploy the entire Storj stack on environments that have kubernetes running on 5 NUCs.
## Pre-condition
Configuration for satellites that only have 5 nodes; the recommended RS scheme is [2,3,4,4], where:
- 2 is the number of required pieces to reconstitute the segment.
- 3 is the repair threshold, i.e. if a segment remains with only 3 healthy pieces, it will be repaired.
- 4 is the success threshold, i.e. the number of pieces required for a successful upload or repair.
- 4 is the number of total erasure-coded pieces that will be generated.
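
A quick worked example of what this scheme implies on 5 nodes: each segment is stored as 4 pieces on 4 of the 5 nodes, so a single offline node holds a piece of roughly 4/5 = 80% of all segments, which is where the 80% figures in the table below come from.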
| Test Scenario | Test Case | Description | Comments |
|---------------|-----------|-------------|----------|
| Upload | Upload with all nodes online | Every file is uploaded to 4 nodes with a 2x expansion factor, so one node holds no piece of a given file. | Happy path scenario |
| | Upload with one node offline | If one of five nodes fails and goes offline, 80% of the stored data will lose one erasure-coded piece. The health status of these segments will be reduced from 4 pieces to 3 pieces, and these segments will be marked for repair. (overlay.node.online-window: 4h0m0s -> for about 4 hours the node will still be selected for uploads.) | Uploads will continue uninterrupted if the client uses the new refactored upload path. This improved upload logic will request a new node from the satellite if the satellite selects the offline node for the upload, unaware it is already offline. If the client uses the old upload logic, uploads may fail if the satellite selects the offline node (20% chance). Once the satellite detects the offline node, all uploads will be successful. |
| Download | Download with one node offline | If one of five nodes fails and goes offline, 80% of the stored data will lose one erasure-coded piece. The health status of these segments will be reduced from 4 pieces to 3 pieces, and these segments will be marked for repair. (overlay.node.online-window: 4h0m0s -> for about 4 hours the node will still be selected for downloads.) | |
| Repair | Repair with 2 nodes disqualified | Disqualify 2 nodes so that repair downloads are still possible but no node is available for an upload; the repairer shouldn't consume download bandwidth and should error out early. Only spend download bandwidth when there is at least one node available for an upload. | If two nodes go offline, segments are left with only the 2 required pieces in the worst case; these cannot be repaired, which is a de facto data loss if the offline nodes are damaged. |
| Audit | | Audits can't identify corrupted pieces with just the minimum number of pieces; reputation should not increase. Audits should be able to identify corrupted pieces with minimum + 1 pieces; reputation should decrease. | |
| Upgrades | Nodes restart for upgrades | No more than a single node goes offline for maintenance. Otherwise, normal operation of the network cannot be ensured. | Occasionally, nodes may need to restart due to software updates. This brings the node offline for some period of time. |

go.mod
View File

@@ -22,6 +22,7 @@ require (
	github.com/jackc/pgx/v5 v5.3.1
	github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3
	github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d
+	github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b
	github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6
	github.com/loov/hrtime v1.0.3
	github.com/mattn/go-sqlite3 v1.14.12
@@ -60,10 +61,10 @@ require (
	golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
	gopkg.in/segmentio/analytics-go.v3 v3.1.0
	gopkg.in/yaml.v3 v3.0.1
-	storj.io/common v0.0.0-20230602145716-d6ea82d58b3d
+	storj.io/common v0.0.0-20230719104100-cb5eec2edc30
	storj.io/drpc v0.0.33
	storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41
-	storj.io/private v0.0.0-20230627140631-807a2f00d0e1
+	storj.io/private v0.0.0-20230703113355-ccd4db5ae659
	storj.io/uplink v1.10.1-0.20230626081029-035890d408c2
)

go.sum
View File

@@ -324,6 +324,8 @@ github.com/jtolds/tracetagger/v2 v2.0.0-rc5 h1:SriMFVtftPsQmG+0xaABotz9HnoKoo1QM
github.com/jtolds/tracetagger/v2 v2.0.0-rc5/go.mod h1:61Fh+XhbBONy+RsqkA+xTtmaFbEVL040m9FAF/hTrjQ=
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d h1:MAGZUXA8MLSA5oJT1Gua3nLSyTYF2uvBgM4Sfs5+jts=
github.com/jtolio/eventkit v0.0.0-20230607152326-4668f79ff72d/go.mod h1:PXFUrknJu7TkBNyL8t7XWDPtDFFLFrNQQAdsXv9YfJE=
+github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b h1:HKvXTXZTeUHXRibg2ilZlkGSQP6A3cs0zXrBd4xMi6M=
+github.com/jtolio/mito v0.0.0-20230523171229-d78ef06bb77b/go.mod h1:Mrym6OnPMkBKvN8/uXSkyhFSh6ndKKYE+Q4kxCfQ4V0=
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6 h1:iVMQyk78uOpX/UKjEbzyBdptXgEz6jwGwo7kM9IQ+3U=
github.com/jtolio/noiseconn v0.0.0-20230301220541-88105e6c8ac6/go.mod h1:MEkhEPFwP3yudWO0lj6vfYpLIB+3eIcuIW+e0AZzUQk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@@ -1013,8 +1015,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
storj.io/common v0.0.0-20220719163320-cd2ef8e1b9b0/go.mod h1:mCYV6Ud5+cdbuaxdPD5Zht/HYaIn0sffnnws9ErkrMQ=
-storj.io/common v0.0.0-20230602145716-d6ea82d58b3d h1:AXdJxmg4Jqdz1nmogSrImKOHAU+bn8JCy8lHYnTwP0Y=
-storj.io/common v0.0.0-20230602145716-d6ea82d58b3d/go.mod h1:zu2L8WdpvfIBrCbBTgPsz4qhHSArYSiDgRcV1RLlIF8=
+storj.io/common v0.0.0-20230719104100-cb5eec2edc30 h1:xso8DyZExwYO2SFV0C/vt7unT/Vg3jQV2mtESiVEpUY=
+storj.io/common v0.0.0-20230719104100-cb5eec2edc30/go.mod h1:zu2L8WdpvfIBrCbBTgPsz4qhHSArYSiDgRcV1RLlIF8=
storj.io/drpc v0.0.32/go.mod h1:6rcOyR/QQkSTX/9L5ZGtlZaE2PtXTTZl8d+ulSeeYEg=
storj.io/drpc v0.0.33 h1:yCGZ26r66ZdMP0IcTYsj7WDAUIIjzXk6DJhbhvt9FHI=
storj.io/drpc v0.0.33/go.mod h1:vR804UNzhBa49NOJ6HeLjd2H3MakC1j5Gv8bsOQT6N4=
@@ -1022,7 +1024,7 @@ storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41 h1:SVuEocEhZfFc13J1Aml
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41/go.mod h1:iK+dmHZZXQlW7ahKdNSOo+raMk5BDL2wbD62FIeXLWs=
storj.io/picobuf v0.0.1 h1:ekEvxSQCbEjTVIi/qxj2za13SJyfRE37yE30IBkZeT0=
storj.io/picobuf v0.0.1/go.mod h1:7ZTAMs6VesgTHbbhFU79oQ9hDaJ+MD4uoFQZ1P4SEz0=
-storj.io/private v0.0.0-20230627140631-807a2f00d0e1 h1:O2+Xjq8H4TKad2cnhvjitK3BtwkGtJ2TfRCHOIN8e7w=
-storj.io/private v0.0.0-20230627140631-807a2f00d0e1/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
+storj.io/private v0.0.0-20230703113355-ccd4db5ae659 h1:J72VWwbpllfolJoCsjVMr3YnscUUOQAruzFTsivqIqY=
+storj.io/private v0.0.0-20230703113355-ccd4db5ae659/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2 h1:XnJR9egrqvAqx5oCRu2b13ubK0iu0qTX12EAa6lAPhg=
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=

View File

@@ -69,7 +69,9 @@ type DiskSpace struct {
	Allocated int64 `json:"allocated"`
	Used      int64 `json:"usedPieces"`
	Trash     int64 `json:"usedTrash"`
+	// Free is the actual amount of free space on the whole disk, not just allocated disk space, in bytes.
	Free int64 `json:"free"`
+	// Available is the amount of free space on the allocated disk space, in bytes.
	Available int64 `json:"available"`
	Overused  int64 `json:"overused"`
}

View File

@@ -27,7 +27,9 @@ message DiskSpaceResponse {
	int64 allocated = 1;
	int64 used_pieces = 2;
	int64 used_trash = 3;
+	// Free is the actual amount of free space on the whole disk, not just allocated disk space, in bytes.
	int64 free = 4;
+	// Available is the amount of free space on the allocated disk space, in bytes.
	int64 available = 5;
	int64 overused = 6;
}

View File

@@ -66,10 +66,10 @@ type Satellite struct {
	Core *satellite.Core
	API  *satellite.API
+	UI   *satellite.UI
	Repairer *satellite.Repairer
	Auditor  *satellite.Auditor
	Admin    *satellite.Admin
-	GC       *satellite.GarbageCollection
	GCBF       *satellite.GarbageCollectionBF
	RangedLoop *satellite.RangedLoop
@@ -173,12 +173,17 @@ type Satellite struct {
		Service *mailservice.Service
	}
-	Console struct {
+	ConsoleBackend struct {
		Listener net.Listener
		Service  *console.Service
		Endpoint *consoleweb.Server
	}
+	ConsoleFrontend struct {
+		Listener net.Listener
+		Endpoint *consoleweb.Server
+	}
	NodeStats struct {
		Endpoint *nodestats.Endpoint
	}
@@ -285,7 +290,6 @@ func (system *Satellite) Close() error {
		system.Repairer.Close(),
		system.Auditor.Close(),
		system.Admin.Close(),
-		system.GC.Close(),
		system.GCBF.Close(),
	)
}
@@ -300,6 +304,11 @@ func (system *Satellite) Run(ctx context.Context) (err error) {
	group.Go(func() error {
		return errs2.IgnoreCanceled(system.API.Run(ctx))
	})
+	if system.UI != nil {
+		group.Go(func() error {
+			return errs2.IgnoreCanceled(system.UI.Run(ctx))
+		})
+	}
	group.Go(func() error {
		return errs2.IgnoreCanceled(system.Repairer.Run(ctx))
	})
@@ -309,9 +318,6 @@ func (system *Satellite) Run(ctx context.Context) (err error) {
	group.Go(func() error {
		return errs2.IgnoreCanceled(system.Admin.Run(ctx))
	})
-	group.Go(func() error {
-		return errs2.IgnoreCanceled(system.GC.Run(ctx))
-	})
	group.Go(func() error {
		return errs2.IgnoreCanceled(system.GCBF.Run(ctx))
	})
@@ -524,6 +530,15 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
		return nil, errs.Wrap(err)
	}
+	// only run if front-end endpoints on console back-end server are disabled.
+	var ui *satellite.UI
+	if !config.Console.FrontendEnable {
+		ui, err = planet.newUI(ctx, index, identity, config, api.ExternalAddress, api.Console.Listener.Addr().String())
+		if err != nil {
+			return nil, errs.Wrap(err)
+		}
+	}
	adminPeer, err := planet.newAdmin(ctx, index, identity, db, metabaseDB, config, versionInfo)
	if err != nil {
		return nil, errs.Wrap(err)
@@ -539,11 +554,6 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
		return nil, errs.Wrap(err)
	}
-	gcPeer, err := planet.newGarbageCollection(ctx, index, identity, db, metabaseDB, config, versionInfo)
-	if err != nil {
-		return nil, errs.Wrap(err)
-	}
	gcBFPeer, err := planet.newGarbageCollectionBF(ctx, index, db, metabaseDB, config, versionInfo)
	if err != nil {
		return nil, errs.Wrap(err)
@@ -558,23 +568,23 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
		peer.Mail.EmailReminders.TestSetLinkAddress("http://" + api.Console.Listener.Addr().String() + "/")
	}
-	return createNewSystem(prefix, log, config, peer, api, repairerPeer, auditorPeer, adminPeer, gcPeer, gcBFPeer, rangedLoopPeer), nil
+	return createNewSystem(prefix, log, config, peer, api, ui, repairerPeer, auditorPeer, adminPeer, gcBFPeer, rangedLoopPeer), nil
}
// createNewSystem makes a new Satellite System and exposes the same interface from
// before we split out the API. In the short term this will help keep all the tests passing
// without much modification needed. However long term, we probably want to rework this
// so it represents how the satellite will run when it is made up of many processes.
-func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer *satellite.Core, api *satellite.API, repairerPeer *satellite.Repairer, auditorPeer *satellite.Auditor, adminPeer *satellite.Admin, gcPeer *satellite.GarbageCollection, gcBFPeer *satellite.GarbageCollectionBF, rangedLoopPeer *satellite.RangedLoop) *Satellite {
+func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer *satellite.Core, api *satellite.API, ui *satellite.UI, repairerPeer *satellite.Repairer, auditorPeer *satellite.Auditor, adminPeer *satellite.Admin, gcBFPeer *satellite.GarbageCollectionBF, rangedLoopPeer *satellite.RangedLoop) *Satellite {
	system := &Satellite{
		Name:   name,
		Config: config,
		Core:   peer,
		API:    api,
+		UI:     ui,
		Repairer: repairerPeer,
		Auditor:  auditorPeer,
		Admin:    adminPeer,
-		GC:       gcPeer,
		GCBF:       gcBFPeer,
		RangedLoop: rangedLoopPeer,
	}
@@ -622,7 +632,7 @@ func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer
	system.Audit.Reporter = auditorPeer.Audit.Reporter
	system.Audit.ContainmentSyncChore = peer.Audit.ContainmentSyncChore
-	system.GarbageCollection.Sender = gcPeer.GarbageCollection.Sender
+	system.GarbageCollection.Sender = peer.GarbageCollection.Sender
	system.ExpiredDeletion.Chore = peer.ExpiredDeletion.Chore
	system.ZombieDeletion.Chore = peer.ZombieDeletion.Chore
@@ -666,6 +676,15 @@ func (planet *Planet) newAPI(ctx context.Context, index int, identity *identity.
	return satellite.NewAPI(log, identity, db, metabaseDB, revocationDB, liveAccounting, rollupsWriteCache, &config, versionInfo, nil)
}
+func (planet *Planet) newUI(ctx context.Context, index int, identity *identity.FullIdentity, config satellite.Config, satelliteAddr, consoleAPIAddr string) (_ *satellite.UI, err error) {
+	defer mon.Task()(&ctx)(&err)
+	prefix := "satellite-ui" + strconv.Itoa(index)
+	log := planet.log.Named(prefix)
+	return satellite.NewUI(log, identity, &config, nil, satelliteAddr, consoleAPIAddr)
+}
func (planet *Planet) newAdmin(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, metabaseDB *metabase.DB, config satellite.Config, versionInfo version.Info) (_ *satellite.Admin, err error) {
	defer mon.Task()(&ctx)(&err)
@@ -713,20 +732,6 @@ func (cache rollupsWriteCacheCloser) Close() error {
	return cache.RollupsWriteCache.CloseAndFlush(context.TODO())
}
-func (planet *Planet) newGarbageCollection(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, metabaseDB *metabase.DB, config satellite.Config, versionInfo version.Info) (_ *satellite.GarbageCollection, err error) {
-	defer mon.Task()(&ctx)(&err)
-	prefix := "satellite-gc" + strconv.Itoa(index)
-	log := planet.log.Named(prefix)
-	revocationDB, err := revocation.OpenDBFromCfg(ctx, config.Server.Config)
-	if err != nil {
-		return nil, errs.Wrap(err)
-	}
-	planet.databases = append(planet.databases, revocationDB)
-	return satellite.NewGarbageCollection(log, identity, db, metabaseDB, revocationDB, versionInfo, &config, nil)
-}
func (planet *Planet) newGarbageCollectionBF(ctx context.Context, index int, db satellite.DB, metabaseDB *metabase.DB, config satellite.Config, versionInfo version.Info) (_ *satellite.GarbageCollectionBF, err error) {
	defer mon.Task()(&ctx)(&err)
@@ -746,7 +751,6 @@ func (planet *Planet) newRangedLoop(ctx context.Context, index int, db satellite
	prefix := "satellite-ranged-loop" + strconv.Itoa(index)
	log := planet.log.Named(prefix)
	return satellite.NewRangedLoop(log, db, metabaseDB, &config, nil)
}

View File

@@ -21,6 +21,7 @@ import (
	"storj.io/common/peertls/tlsopts"
	"storj.io/common/storj"
	"storj.io/private/debug"
+	"storj.io/storj/cmd/storagenode/internalcmd"
	"storj.io/storj/private/revocation"
	"storj.io/storj/private/server"
	"storj.io/storj/storagenode"
@@ -215,6 +216,10 @@ func (planet *Planet) newStorageNode(ctx context.Context, prefix string, index,
			MinDownloadTimeout: 2 * time.Minute,
		},
	}
+	// enable the lazy filewalker
+	config.Pieces.EnableLazyFilewalker = true
	if planet.config.Reconfigure.StorageNode != nil {
		planet.config.Reconfigure.StorageNode(index, &config)
	}
@@ -275,6 +280,21 @@ func (planet *Planet) newStorageNode(ctx context.Context, prefix string, index,
		return nil, errs.New("error while trying to issue new api key: %v", err)
	}
+	{
+		// set up the used-space lazyfilewalker
+		cmd := internalcmd.NewUsedSpaceFilewalkerCmd()
+		cmd.Logger = log.Named("used-space-filewalker")
+		cmd.Ctx = ctx
+		peer.Storage2.LazyFileWalker.TestingSetUsedSpaceCmd(cmd)
+	}
+	{
+		// set up the GC lazyfilewalker
+		cmd := internalcmd.NewGCFilewalkerCmd()
+		cmd.Logger = log.Named("gc-filewalker")
+		cmd.Ctx = ctx
+		peer.Storage2.LazyFileWalker.TestingSetGCCmd(cmd)
+	}
	return &StorageNode{
		Name:   prefix,
		Config: config,

View File

@@ -105,9 +105,9 @@ func TestDownloadWithSomeNodesOffline(t *testing.T) {
	}
	// confirm that we marked the correct number of storage nodes as offline
-	nodes, err := satellite.Overlay.Service.Reliable(ctx)
+	online, _, err := satellite.Overlay.Service.Reliable(ctx)
	require.NoError(t, err)
-	require.Len(t, nodes, len(planet.StorageNodes)-toKill)
+	require.Len(t, online, len(planet.StorageNodes)-toKill)
	// we should be able to download data without any of the original nodes
	newData, err := ul.Download(ctx, satellite, "testbucket", "test/path")

View File

@@ -6,16 +6,16 @@ package version
import _ "unsafe" // needed for go:linkname
//go:linkname buildTimestamp storj.io/private/version.buildTimestamp
-var buildTimestamp string
+var buildTimestamp string = "1690910649"
//go:linkname buildCommitHash storj.io/private/version.buildCommitHash
-var buildCommitHash string
+var buildCommitHash string = "bf0f3b829f699bc5fc7029c4acf747e7857e13d8"
//go:linkname buildVersion storj.io/private/version.buildVersion
-var buildVersion string
+var buildVersion string = "v1.84.2"
//go:linkname buildRelease storj.io/private/version.buildRelease
-var buildRelease string
+var buildRelease string = "true"
// ensure that linter understands that the variables are being used.
func init() { use(buildTimestamp, buildCommitHash, buildVersion, buildRelease) }

View File

@@ -4,24 +4,34 @@
package web
import (
+	"context"
	"encoding/json"
+	"fmt"
	"net/http"
	"go.uber.org/zap"
+	"storj.io/common/http/requestid"
)
// ServeJSONError writes a JSON error to the response output stream.
-func ServeJSONError(log *zap.Logger, w http.ResponseWriter, status int, err error) {
-	ServeCustomJSONError(log, w, status, err, err.Error())
+func ServeJSONError(ctx context.Context, log *zap.Logger, w http.ResponseWriter, status int, err error) {
+	ServeCustomJSONError(ctx, log, w, status, err, err.Error())
}
// ServeCustomJSONError writes a JSON error with a custom message to the response output stream.
-func ServeCustomJSONError(log *zap.Logger, w http.ResponseWriter, status int, err error, msg string) {
+func ServeCustomJSONError(ctx context.Context, log *zap.Logger, w http.ResponseWriter, status int, err error, msg string) {
	fields := []zap.Field{
		zap.Int("code", status),
		zap.String("message", msg),
		zap.Error(err),
	}
+	if requestID := requestid.FromContext(ctx); requestID != "" {
+		fields = append(fields, zap.String("requestID", requestID))
+		msg += fmt.Sprintf(" (request id: %s)", requestID)
+	}
	switch status {
	case http.StatusNoContent:
		return
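
For callers, the change is just threading the request context through. A minimal hypothetical handler (the `server` type and `loadThing` helper are invented for illustration, and the import path for the `web` package is assumed; only the `ServeJSONError` signature comes from the change above) might look like:

```
package example

import (
	"context"
	"encoding/json"
	"net/http"

	"go.uber.org/zap"

	"storj.io/storj/private/web"
)

type server struct{ log *zap.Logger }

// loadThing is a stand-in for a real data lookup.
func loadThing(ctx context.Context) (string, error) { return "thing", nil }

func (s *server) getThing(w http.ResponseWriter, r *http.Request) {
	thing, err := loadThing(r.Context())
	if err != nil {
		// Passing r.Context() lets the helper attach the request ID, when
		// present, to both the log fields and the JSON error message.
		web.ServeJSONError(r.Context(), s.log, w, http.StatusInternalServerError, err)
		return
	}
	_ = json.NewEncoder(w).Encode(thing)
}
```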

View File

@@ -87,12 +87,12 @@ func (rl *RateLimiter) Limit(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		key, err := rl.keyFunc(r)
		if err != nil {
-			ServeCustomJSONError(rl.log, w, http.StatusInternalServerError, err, internalServerErrMsg)
+			ServeCustomJSONError(r.Context(), rl.log, w, http.StatusInternalServerError, err, internalServerErrMsg)
			return
		}
		limit := rl.getUserLimit(key)
		if !limit.Allow() {
-			ServeJSONError(rl.log, w, http.StatusTooManyRequests, errs.New(rateLimitErrMsg))
+			ServeJSONError(r.Context(), rl.log, w, http.StatusTooManyRequests, errs.New(rateLimitErrMsg))
			return
		}
		next.ServeHTTP(w, r)

View File

@@ -219,6 +219,8 @@ type ProjectAccounting interface {
	GetProjectSettledBandwidthTotal(ctx context.Context, projectID uuid.UUID, from time.Time) (_ int64, err error)
	// GetProjectBandwidth returns project allocated bandwidth for the specified year, month and day.
	GetProjectBandwidth(ctx context.Context, projectID uuid.UUID, year int, month time.Month, day int, asOfSystemInterval time.Duration) (int64, error)
+	// GetProjectSettledBandwidth returns the used settled bandwidth for the specified year and month.
+	GetProjectSettledBandwidth(ctx context.Context, projectID uuid.UUID, year int, month time.Month, asOfSystemInterval time.Duration) (int64, error)
	// GetProjectDailyBandwidth returns bandwidth (allocated and settled) for the specified day.
	GetProjectDailyBandwidth(ctx context.Context, projectID uuid.UUID, year int, month time.Month, day int) (int64, int64, int64, error)
	// DeleteProjectBandwidthBefore deletes project bandwidth rollups before the given time

View File

@@ -218,6 +218,17 @@ func (usage *Service) GetProjectBandwidthTotals(ctx context.Context, projectID u
	return total, ErrProjectUsage.Wrap(err)
}
+// GetProjectSettledBandwidth returns total amount of settled bandwidth used for past 30 days.
+func (usage *Service) GetProjectSettledBandwidth(ctx context.Context, projectID uuid.UUID) (_ int64, err error) {
+	defer mon.Task()(&ctx, projectID)(&err)
+
+	// from the beginning of the current month
+	year, month, _ := usage.nowFn().Date()
+	total, err := usage.projectAccountingDB.GetProjectSettledBandwidth(ctx, projectID, year, month, usage.asOfSystemInterval)
+	return total, ErrProjectUsage.Wrap(err)
+}
// GetProjectSegmentTotals returns total amount of allocated segments used for past 30 days.
func (usage *Service) GetProjectSegmentTotals(ctx context.Context, projectID uuid.UUID) (total int64, err error) {
	defer mon.Task()(&ctx, projectID)(&err)

View File

@@ -182,7 +182,8 @@ func TestProjectSegmentLimit(t *testing.T) {
		},
	},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
-	data := testrand.Bytes(160 * memory.KiB)
+	// tally self-corrects live accounting, however, it may cause things to be temporarily off by a few segments.
+	planet.Satellites[0].Accounting.Tally.Loop.Pause()
	// set limit manually to 10 segments
	accountingDB := planet.Satellites[0].DB.ProjectAccounting()
@@ -190,6 +191,7 @@
	require.NoError(t, err)
	// successful upload
+	data := testrand.Bytes(160 * memory.KiB)
	err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path/0", data)
	require.NoError(t, err)
@@ -203,14 +205,17 @@
func TestProjectSegmentLimitInline(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
-		SatelliteCount: 1, UplinkCount: 1}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
-		data := testrand.Bytes(1 * memory.KiB)
+		SatelliteCount: 1, UplinkCount: 1,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		// tally self-corrects live accounting, however, it may cause things to be temporarily off by a few segments.
+		planet.Satellites[0].Accounting.Tally.Loop.Pause()
		// set limit manually to 10 segments
		accountingDB := planet.Satellites[0].DB.ProjectAccounting()
		err := accountingDB.UpdateProjectSegmentLimit(ctx, planet.Uplinks[0].Projects[0].ID, 10)
		require.NoError(t, err)
+		data := testrand.Bytes(1 * memory.KiB)
		for i := 0; i < 10; i++ {
			// successful upload
			err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path/"+strconv.Itoa(i), data)
@@ -260,14 +265,17 @@ func TestProjectBandwidthLimitWithoutCache(t *testing.T) {
func TestProjectSegmentLimitMultipartUpload(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
-		SatelliteCount: 1, UplinkCount: 1}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
-		data := testrand.Bytes(1 * memory.KiB)
+		SatelliteCount: 1, UplinkCount: 1,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		// tally self-corrects live accounting, however, it may cause things to be temporarily off by a few segments.
+		planet.Satellites[0].Accounting.Tally.Loop.Pause()
		// set limit manually to 4 segments
		accountingDB := planet.Satellites[0].DB.ProjectAccounting()
		err := accountingDB.UpdateProjectSegmentLimit(ctx, planet.Uplinks[0].Projects[0].ID, 4)
		require.NoError(t, err)
+		data := testrand.Bytes(1 * memory.KiB)
		for i := 0; i < 4; i++ {
			// successful upload
			err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path/"+strconv.Itoa(i), data)

View File

@@ -151,6 +151,7 @@ func NewServer(log *zap.Logger, listener net.Listener, db DB, buckets *buckets.S
	limitUpdateAPI.HandleFunc("/users/{useremail}/limits", server.updateLimits).Methods("PUT")
	limitUpdateAPI.HandleFunc("/users/{useremail}/freeze", server.freezeUser).Methods("PUT")
	limitUpdateAPI.HandleFunc("/users/{useremail}/freeze", server.unfreezeUser).Methods("DELETE")
+	limitUpdateAPI.HandleFunc("/users/{useremail}/warning", server.unWarnUser).Methods("DELETE")
	limitUpdateAPI.HandleFunc("/projects/{project}/limit", server.getProjectLimit).Methods("GET")
	limitUpdateAPI.HandleFunc("/projects/{project}/limit", server.putProjectLimit).Methods("PUT", "POST")

View File

@@ -249,7 +249,7 @@ export class Admin {
	desc: 'Get the API keys of a specific project',
	params: [['Project ID', new InputText('text', true)]],
	func: async (projectId: string): Promise<Record<string, unknown>> => {
-		return this.fetch('GET', `projects/${projectId}/apiKeys`);
+		return this.fetch('GET', `projects/${projectId}/apikeys`);
	}
},
{
@@ -464,6 +464,14 @@ Blank fields will not be updated.`,
	func: async (email: string): Promise<null> => {
		return this.fetch('DELETE', `users/${email}/freeze`) as Promise<null>;
	}
+},
+{
+	name: 'unwarn user',
+	desc: "Remove a user's warning status",
+	params: [['email', new InputText('email', true)]],
+	func: async (email: string): Promise<null> => {
+		return this.fetch('DELETE', `users/${email}/warning`) as Promise<null>;
+	}
}
],
rest_api_keys: [


@@ -630,6 +630,35 @@ func (server *Server) unfreezeUser(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
+func (server *Server) unWarnUser(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
+	vars := mux.Vars(r)
+	userEmail, ok := vars["useremail"]
+	if !ok {
+		sendJSONError(w, "user-email missing", "", http.StatusBadRequest)
+		return
+	}
+
+	u, err := server.db.Console().Users().GetByEmail(ctx, userEmail)
+	if err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			sendJSONError(w, fmt.Sprintf("user with email %q does not exist", userEmail),
+				"", http.StatusNotFound)
+			return
+		}
+		sendJSONError(w, "failed to get user details",
+			err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	if err = server.freezeAccounts.UnWarnUser(ctx, u.ID); err != nil {
+		sendJSONError(w, "failed to unwarn user",
+			err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
 func (server *Server) deleteUser(w http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()


@@ -428,6 +428,43 @@ func TestFreezeUnfreezeUser(t *testing.T) {
 	})
 }
 
+func TestWarnUnwarnUser(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount:   1,
+		StorageNodeCount: 0,
+		UplinkCount:      1,
+		Reconfigure: testplanet.Reconfigure{
+			Satellite: func(_ *zap.Logger, _ int, config *satellite.Config) {
+				config.Admin.Address = "127.0.0.1:0"
+			},
+		},
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		address := planet.Satellites[0].Admin.Admin.Listener.Addr()
+		user, err := planet.Satellites[0].DB.Console().Users().Get(ctx, planet.Uplinks[0].Projects[0].Owner.ID)
+		require.NoError(t, err)
+
+		err = planet.Satellites[0].Admin.FreezeAccounts.Service.WarnUser(ctx, user.ID)
+		require.NoError(t, err)
+
+		freeze, warning, err := planet.Satellites[0].DB.Console().AccountFreezeEvents().GetAll(ctx, user.ID)
+		require.NoError(t, err)
+		require.Nil(t, freeze)
+		require.NotNil(t, warning)
+
+		link := fmt.Sprintf("http://"+address.String()+"/api/users/%s/warning", user.Email)
+		body := assertReq(ctx, t, link, http.MethodDelete, "", http.StatusOK, "", planet.Satellites[0].Config.Console.AuthToken)
+		require.Len(t, body, 0)
+
+		freeze, warning, err = planet.Satellites[0].DB.Console().AccountFreezeEvents().GetAll(ctx, user.ID)
+		require.NoError(t, err)
+		require.Nil(t, freeze)
+		require.Nil(t, warning)
+
+		body = assertReq(ctx, t, link, http.MethodDelete, "", http.StatusInternalServerError, "", planet.Satellites[0].Config.Console.AuthToken)
+		require.Contains(t, string(body), "user is not warned")
+	})
+}
+
 func TestUserDelete(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1,


@@ -84,6 +84,7 @@ const (
 	eventAccountUnwarned           = "Account Unwarned"
 	eventAccountFreezeWarning      = "Account Freeze Warning"
 	eventUnpaidLargeInvoice        = "Large Invoice Unpaid"
+	eventUnpaidStorjscanInvoice    = "Storjscan Invoice Unpaid"
 	eventExpiredCreditNeedsRemoval = "Expired Credit Needs Removal"
 	eventExpiredCreditRemoved      = "Expired Credit Removed"
 	eventProjectInvitationAccepted = "Project Invitation Accepted"
@@ -122,6 +123,9 @@ type FreezeTracker interface {
 	// TrackLargeUnpaidInvoice sends an event to Segment indicating that a user has not paid a large invoice.
 	TrackLargeUnpaidInvoice(invID string, userID uuid.UUID, email string)
+
+	// TrackStorjscanUnpaidInvoice sends an event to Segment indicating that a user has not paid an invoice, but has storjscan transaction history.
+	TrackStorjscanUnpaidInvoice(invID string, userID uuid.UUID, email string)
 }
 
 // Service for sending analytics.
@@ -418,6 +422,23 @@ func (service *Service) TrackLargeUnpaidInvoice(invID string, userID uuid.UUID,
 	})
 }
 
+// TrackStorjscanUnpaidInvoice sends an event to Segment indicating that a user has not paid an invoice, but has storjscan transaction history.
+func (service *Service) TrackStorjscanUnpaidInvoice(invID string, userID uuid.UUID, email string) {
+	if !service.config.Enabled {
+		return
+	}
+
+	props := segment.NewProperties()
+	props.Set("email", email)
+	props.Set("invoice", invID)
+
+	service.enqueueMessage(segment.Track{
+		UserId:     userID.String(),
+		Event:      service.satelliteName + " " + eventUnpaidStorjscanInvoice,
+		Properties: props,
+	})
+}
+
 // TrackAccessGrantCreated sends an "Access Grant Created" event to Segment.
 func (service *Service) TrackAccessGrantCreated(userID uuid.UUID, email string) {
 	if !service.config.Enabled {
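How a billing chore might choose between the two events is not shown in this changeset; the sketch below is a hypothetical caller, under the assumption that something upstream knows which users have storjscan token history. Invoice, reportUnpaid, and hasStorjscanHistory are illustrative names; only the two FreezeTracker methods come from the diff above.

package billing

import "storj.io/common/uuid"

// FreezeTracker mirrors just the two methods relevant here.
type FreezeTracker interface {
	TrackLargeUnpaidInvoice(invID string, userID uuid.UUID, email string)
	TrackStorjscanUnpaidInvoice(invID string, userID uuid.UUID, email string)
}

// Invoice is a hypothetical stand-in for whatever the chore iterates over.
type Invoice struct {
	ID     string
	UserID uuid.UUID
	Email  string
}

// reportUnpaid emits the storjscan-specific event for users with token
// transaction history, and falls back to the pre-existing event otherwise.
func reportUnpaid(t FreezeTracker, unpaid []Invoice, hasStorjscanHistory func(uuid.UUID) bool) {
	for _, inv := range unpaid {
		if hasStorjscanHistory(inv.UserID) {
			t.TrackStorjscanUnpaidInvoice(inv.ID, inv.UserID, inv.Email)
		} else {
			t.TrackLargeUnpaidInvoice(inv.ID, inv.UserID, inv.Email)
		}
	}
}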


@@ -16,6 +16,7 @@ import (
 	"golang.org/x/sync/errgroup"
 
 	"storj.io/common/identity"
+	"storj.io/common/nodetag"
 	"storj.io/common/pb"
 	"storj.io/common/peertls/extensions"
 	"storj.io/common/peertls/tlsopts"
@@ -281,7 +282,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
 	{ // setup overlay
 		peer.Overlay.DB = peer.DB.OverlayCache()
-		peer.Overlay.Service, err = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, peer.DB.NodeEvents(), config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
+		peer.Overlay.Service, err = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, peer.DB.NodeEvents(), config.Placement.CreateFilters, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
 		if err != nil {
 			return nil, errs.Combine(err, peer.Close())
 		}
@@ -325,7 +326,12 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
 			Type:    pb.NodeType_SATELLITE,
 			Version: *pbVersion,
 		}
-		peer.Contact.Service = contact.NewService(peer.Log.Named("contact:service"), self, peer.Overlay.Service, peer.DB.PeerIdentities(), peer.Dialer, config.Contact)
+
+		var authority nodetag.Authority
+		peerIdentity := full.PeerIdentity()
+		authority = append(authority, signing.SigneeFromPeerIdentity(peerIdentity))
+
+		peer.Contact.Service = contact.NewService(peer.Log.Named("contact:service"), self, peer.Overlay.Service, peer.DB.PeerIdentities(), peer.Dialer, authority, config.Contact)
 		peer.Contact.Endpoint = contact.NewEndpoint(peer.Log.Named("contact:endpoint"), peer.Contact.Service)
 		if err := pb.DRPCRegisterNode(peer.Server.DRPC(), peer.Contact.Endpoint); err != nil {
 			return nil, errs.Combine(err, peer.Close())
@@ -381,6 +387,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
 			signing.SignerFromFullIdentity(peer.Identity),
 			peer.Overlay.Service,
 			peer.Orders.DB,
+			config.Placement.CreateFilters,
 			config.Orders,
 		)
 		if err != nil {
@@ -540,7 +547,9 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
 		peer.Payments.StorjscanService = storjscan.NewService(log.Named("storjscan-service"),
 			peer.DB.Wallets(),
 			peer.DB.StorjscanPayments(),
-			peer.Payments.StorjscanClient)
+			peer.Payments.StorjscanClient,
+			pc.Storjscan.Confirmations,
+			pc.BonusRate)
 		if err != nil {
 			return nil, errs.Combine(err, peer.Close())
 		}
@@ -603,6 +612,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
 			accountFreezeService,
 			peer.Console.Listener,
 			config.Payments.StripeCoinPayments.StripePublicKey,
+			config.Payments.Storjscan.Confirmations,
 			peer.URL(),
 			config.Payments.PackagePlans,
 		)


@@ -141,7 +141,7 @@ func NewAuditor(log *zap.Logger, full *identity.FullIdentity,
 	{ // setup overlay
 		var err error
-		peer.Overlay, err = overlay.NewService(log.Named("overlay"), overlayCache, nodeEvents, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
+		peer.Overlay, err = overlay.NewService(log.Named("overlay"), overlayCache, nodeEvents, config.Placement.CreateFilters, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
 		if err != nil {
 			return nil, errs.Combine(err, peer.Close())
 		}
@@ -183,6 +183,7 @@ func NewAuditor(log *zap.Logger, full *identity.FullIdentity,
 			// PUT and GET actions which are not used by
 			// auditor so we can set noop implementation.
 			orders.NewNoopDB(),
+			config.Placement.CreateFilters,
 			config.Orders,
 		)
 		if err != nil {


@@ -48,6 +48,7 @@ type FrontendConfig struct {
 	PricingPackagesEnabled         bool `json:"pricingPackagesEnabled"`
 	NewUploadModalEnabled          bool `json:"newUploadModalEnabled"`
 	GalleryViewEnabled             bool `json:"galleryViewEnabled"`
+	NeededTransactionConfirmations int  `json:"neededTransactionConfirmations"`
 }
 
 // Satellites is a configuration value that contains a list of satellite names and addresses.
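The new field plausibly carries config.Payments.Storjscan.Confirmations (passed into the console server in the NewAPI hunk earlier) down to the web app, so the UI can tell users how many confirmations a token deposit needs. A minimal sketch of the resulting JSON, using a trimmed mirror of the struct since the real FrontendConfig has many more fields:

package console

import "encoding/json"

// frontendConfigSketch is a two-field stand-in for FrontendConfig above.
type frontendConfigSketch struct {
	GalleryViewEnabled             bool `json:"galleryViewEnabled"`
	NeededTransactionConfirmations int  `json:"neededTransactionConfirmations"`
}

// marshalFrontendConfig shows the shape the web app would receive, e.g.
// {"galleryViewEnabled":true,"neededTransactionConfirmations":15}.
// The confirmations value would come from config.Payments.Storjscan.Confirmations.
func marshalFrontendConfig(galleryView bool, confirmations int) ([]byte, error) {
	return json.Marshal(frontendConfigSketch{
		GalleryViewEnabled:             galleryView,
		NeededTransactionConfirmations: confirmations,
	})
}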


@@ -41,13 +41,13 @@ func (a *ABTesting) GetABValues(w http.ResponseWriter, r *http.Request) {
 	user, err := console.GetUser(ctx)
 	if err != nil {
-		web.ServeJSONError(a.log, w, http.StatusUnauthorized, err)
+		web.ServeJSONError(ctx, a.log, w, http.StatusUnauthorized, err)
 		return
 	}
 
 	values, err := a.service.GetABValues(ctx, *user)
 	if err != nil {
-		web.ServeJSONError(a.log, w, http.StatusInternalServerError, err)
+		web.ServeJSONError(ctx, a.log, w, http.StatusInternalServerError, err)
 	}
 
 	w.Header().Set("Content-Type", "application/json")
@@ -66,13 +66,13 @@ func (a *ABTesting) SendHit(w http.ResponseWriter, r *http.Request) {
 	action := mux.Vars(r)["action"]
 	if action == "" {
-		web.ServeJSONError(a.log, w, http.StatusBadRequest, errs.New("parameter 'action' can't be empty"))
+		web.ServeJSONError(ctx, a.log, w, http.StatusBadRequest, errs.New("parameter 'action' can't be empty"))
 		return
 	}
 
 	user, err := console.GetUser(ctx)
 	if err != nil {
-		web.ServeJSONError(a.log, w, http.StatusUnauthorized, err)
+		web.ServeJSONError(ctx, a.log, w, http.StatusUnauthorized, err)
 		return
 	}


@@ -4,6 +4,7 @@
 package consoleapi
 
 import (
+	"context"
 	"encoding/json"
 	"io"
 	"net/http"
@@ -54,17 +55,17 @@ func (a *Analytics) EventTriggered(w http.ResponseWriter, r *http.Request) {
 	body, err := io.ReadAll(r.Body)
 	if err != nil {
-		a.serveJSONError(w, http.StatusInternalServerError, err)
+		a.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 	}
 
 	var et eventTriggeredBody
 	err = json.Unmarshal(body, &et)
 	if err != nil {
-		a.serveJSONError(w, http.StatusInternalServerError, err)
+		a.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 	}
 
 	user, err := console.GetUser(ctx)
 	if err != nil {
-		a.serveJSONError(w, http.StatusUnauthorized, err)
+		a.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 		return
 	}
@@ -86,17 +87,17 @@ func (a *Analytics) PageEventTriggered(w http.ResponseWriter, r *http.Request) {
 	body, err := io.ReadAll(r.Body)
 	if err != nil {
-		a.serveJSONError(w, http.StatusInternalServerError, err)
+		a.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 	}
 
 	var pv pageVisitBody
 	err = json.Unmarshal(body, &pv)
 	if err != nil {
-		a.serveJSONError(w, http.StatusInternalServerError, err)
+		a.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 	}
 
 	user, err := console.GetUser(ctx)
 	if err != nil {
-		a.serveJSONError(w, http.StatusUnauthorized, err)
+		a.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 		return
 	}
@@ -106,6 +107,6 @@ func (a *Analytics) PageEventTriggered(w http.ResponseWriter, r *http.Request) {
 }
 
 // serveJSONError writes JSON error to response output stream.
-func (a *Analytics) serveJSONError(w http.ResponseWriter, status int, err error) {
-	web.ServeJSONError(a.log, w, status, err)
+func (a *Analytics) serveJSONError(ctx context.Context, w http.ResponseWriter, status int, err error) {
+	web.ServeJSONError(ctx, a.log, w, status, err)
 }
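This file, and several console API files below, apply the same mechanical refactor: every error-serving helper now takes the request context so that web.ServeJSONError can use request-scoped data when logging. The real implementation of that helper is not shown in this diff; the following is only an illustrative sketch of the pattern, with a body invented for demonstration.

package web

import (
	"context"
	"encoding/json"
	"net/http"

	"go.uber.org/zap"
)

// ServeJSONError sketch: the new signature threads ctx through so the
// logger can attach request-scoped information (e.g. tracing data).
// The body below is illustrative, not the actual implementation.
func ServeJSONError(ctx context.Context, log *zap.Logger, w http.ResponseWriter, status int, err error) {
	_ = ctx // the real helper derives request-scoped fields from ctx

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	_ = json.NewEncoder(w).Encode(map[string]string{"error": err.Error()})

	log.Warn("served JSON error", zap.Int("status", status), zap.Error(err))
}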


@@ -4,6 +4,7 @@
 package consoleapi
 
 import (
+	"context"
 	"encoding/json"
 	"net/http"
@@ -42,24 +43,24 @@ func (keys *APIKeys) GetAllAPIKeyNames(w http.ResponseWriter, r *http.Request) {
 	projectIDString := r.URL.Query().Get("projectID")
 	if projectIDString == "" {
-		keys.serveJSONError(w, http.StatusBadRequest, errs.New("Project ID was not provided."))
+		keys.serveJSONError(ctx, w, http.StatusBadRequest, errs.New("Project ID was not provided."))
 		return
 	}
 
 	projectID, err := uuid.FromString(projectIDString)
 	if err != nil {
-		keys.serveJSONError(w, http.StatusBadRequest, err)
+		keys.serveJSONError(ctx, w, http.StatusBadRequest, err)
 		return
 	}
 
 	apiKeyNames, err := keys.service.GetAllAPIKeyNamesByProjectID(ctx, projectID)
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			keys.serveJSONError(w, http.StatusUnauthorized, err)
+			keys.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
-		keys.serveJSONError(w, http.StatusInternalServerError, err)
+		keys.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
@@ -81,7 +82,7 @@ func (keys *APIKeys) DeleteByNameAndProjectID(w http.ResponseWriter, r *http.Request) {
 	publicIDString := r.URL.Query().Get("publicID")
 
 	if name == "" {
-		keys.serveJSONError(w, http.StatusBadRequest, err)
+		keys.serveJSONError(ctx, w, http.StatusBadRequest, err)
 		return
 	}
@@ -89,38 +90,38 @@ func (keys *APIKeys) DeleteByNameAndProjectID(w http.ResponseWriter, r *http.Request) {
 	if projectIDString != "" {
 		projectID, err = uuid.FromString(projectIDString)
 		if err != nil {
-			keys.serveJSONError(w, http.StatusBadRequest, err)
+			keys.serveJSONError(ctx, w, http.StatusBadRequest, err)
 			return
 		}
 	} else if publicIDString != "" {
 		projectID, err = uuid.FromString(publicIDString)
 		if err != nil {
-			keys.serveJSONError(w, http.StatusBadRequest, err)
+			keys.serveJSONError(ctx, w, http.StatusBadRequest, err)
 			return
 		}
 	} else {
-		keys.serveJSONError(w, http.StatusBadRequest, errs.New("Project ID was not provided."))
+		keys.serveJSONError(ctx, w, http.StatusBadRequest, errs.New("Project ID was not provided."))
 		return
 	}
 
 	err = keys.service.DeleteAPIKeyByNameAndProjectID(ctx, name, projectID)
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			keys.serveJSONError(w, http.StatusUnauthorized, err)
+			keys.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
 		if console.ErrNoAPIKey.Has(err) {
-			keys.serveJSONError(w, http.StatusNoContent, err)
+			keys.serveJSONError(ctx, w, http.StatusNoContent, err)
 			return
 		}
 
-		keys.serveJSONError(w, http.StatusInternalServerError, err)
+		keys.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
 }
 
 // serveJSONError writes JSON error to response output stream.
-func (keys *APIKeys) serveJSONError(w http.ResponseWriter, status int, err error) {
-	web.ServeJSONError(keys.log, w, status, err)
+func (keys *APIKeys) serveJSONError(ctx context.Context, w http.ResponseWriter, status int, err error) {
+	web.ServeJSONError(ctx, keys.log, w, status, err)
 }


@@ -4,6 +4,7 @@
 package consoleapi
 
 import (
+	"context"
 	"encoding/json"
 	"errors"
 	"net/http"
@@ -53,7 +54,7 @@ type Auth struct {
 }
 
 // NewAuth is a constructor for api auth controller.
-func NewAuth(log *zap.Logger, service *console.Service, accountFreezeService *console.AccountFreezeService, mailService *mailservice.Service, cookieAuth *consolewebauth.CookieAuth, analytics *analytics.Service, satelliteName string, externalAddress string, letUsKnowURL string, termsAndConditionsURL string, contactInfoURL string, generalRequestURL string) *Auth {
+func NewAuth(log *zap.Logger, service *console.Service, accountFreezeService *console.AccountFreezeService, mailService *mailservice.Service, cookieAuth *consolewebauth.CookieAuth, analytics *analytics.Service, satelliteName, externalAddress, letUsKnowURL, termsAndConditionsURL, contactInfoURL, generalRequestURL string) *Auth {
 	return &Auth{
 		log:             log,
 		ExternalAddress: externalAddress,
@@ -82,24 +83,24 @@ func (a *Auth) Token(w http.ResponseWriter, r *http.Request) {
 	tokenRequest := console.AuthUser{}
 	err = json.NewDecoder(r.Body).Decode(&tokenRequest)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	tokenRequest.UserAgent = r.UserAgent()
 	tokenRequest.IP, err = web.GetRequestIP(r)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	tokenInfo, err := a.service.Token(ctx, tokenRequest)
 	if err != nil {
 		if console.ErrMFAMissing.Has(err) {
-			web.ServeCustomJSONError(a.log, w, http.StatusOK, err, a.getUserErrorMessage(err))
+			web.ServeCustomJSONError(ctx, a.log, w, http.StatusOK, err, a.getUserErrorMessage(err))
 		} else {
 			a.log.Info("Error authenticating token request", zap.String("email", tokenRequest.Email), zap.Error(ErrAuthAPI.Wrap(err)))
-			a.serveJSONError(w, err)
+			a.serveJSONError(ctx, w, err)
 		}
 		return
 	}
@@ -126,7 +127,7 @@ func (a *Auth) TokenByAPIKey(w http.ResponseWriter, r *http.Request) {
 	authToken := r.Header.Get("Authorization")
 	if !(strings.HasPrefix(authToken, "Bearer ")) {
 		a.log.Info("authorization key format is incorrect. Should be 'Bearer <key>'")
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -135,14 +136,14 @@ func (a *Auth) TokenByAPIKey(w http.ResponseWriter, r *http.Request) {
 	userAgent := r.UserAgent()
 	ip, err := web.GetRequestIP(r)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	tokenInfo, err := a.service.TokenByAPIKey(ctx, userAgent, ip, apiKey)
 	if err != nil {
 		a.log.Info("Error authenticating token request", zap.Error(ErrAuthAPI.Wrap(err)))
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -184,13 +185,13 @@ func (a *Auth) Logout(w http.ResponseWriter, r *http.Request) {
 	sessionID, err := a.getSessionID(r)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	err = a.service.DeleteSession(ctx, sessionID)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -225,7 +226,7 @@ func (a *Auth) Register(w http.ResponseWriter, r *http.Request) {
 	err = json.NewDecoder(r.Body).Decode(&registerData)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -234,23 +235,23 @@ func (a *Auth) Register(w http.ResponseWriter, r *http.Request) {
 	isValidEmail := utils.ValidateEmail(registerData.Email)
 	if !isValidEmail {
-		a.serveJSONError(w, console.ErrValidation.Wrap(errs.New("Invalid email.")))
+		a.serveJSONError(ctx, w, console.ErrValidation.Wrap(errs.New("Invalid email.")))
 		return
 	}
 
 	if len([]rune(registerData.Partner)) > 100 {
-		a.serveJSONError(w, console.ErrValidation.Wrap(errs.New("Partner must be less than or equal to 100 characters")))
+		a.serveJSONError(ctx, w, console.ErrValidation.Wrap(errs.New("Partner must be less than or equal to 100 characters")))
 		return
 	}
 
 	if len([]rune(registerData.SignupPromoCode)) > 100 {
-		a.serveJSONError(w, console.ErrValidation.Wrap(errs.New("Promo code must be less than or equal to 100 characters")))
+		a.serveJSONError(ctx, w, console.ErrValidation.Wrap(errs.New("Promo code must be less than or equal to 100 characters")))
 		return
 	}
 
 	verified, unverified, err := a.service.GetUserByEmailWithUnverified(ctx, registerData.Email)
 	if err != nil && !console.ErrEmailNotFound.Has(err) {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -279,7 +280,7 @@ func (a *Auth) Register(w http.ResponseWriter, r *http.Request) {
 	} else {
 		secret, err := console.RegistrationSecretFromBase64(registerData.SecretInput)
 		if err != nil {
-			a.serveJSONError(w, err)
+			a.serveJSONError(ctx, w, err)
 			return
 		}
@@ -289,7 +290,7 @@ func (a *Auth) Register(w http.ResponseWriter, r *http.Request) {
 	ip, err := web.GetRequestIP(r)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -312,7 +313,7 @@ func (a *Auth) Register(w http.ResponseWriter, r *http.Request) {
 		secret,
 	)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -351,7 +352,7 @@ func (a *Auth) Register(w http.ResponseWriter, r *http.Request) {
 		token, err := a.service.GenerateActivationToken(ctx, user.ID, user.Email)
 		if err != nil {
-			a.serveJSONError(w, err)
+			a.serveJSONError(ctx, w, err)
 			return
 		}
@@ -390,13 +391,13 @@ func (a *Auth) GetFreezeStatus(w http.ResponseWriter, r *http.Request) {
 	userID, err := a.service.GetUserID(ctx)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	freeze, warning, err := a.accountFreezeService.GetAll(ctx, userID)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -424,12 +425,12 @@ func (a *Auth) UpdateAccount(w http.ResponseWriter, r *http.Request) {
 	err = json.NewDecoder(r.Body).Decode(&updatedInfo)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	if err = a.service.UpdateAccount(ctx, updatedInfo.FullName, updatedInfo.ShortName); err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 	}
 }
@@ -440,27 +441,29 @@ func (a *Auth) GetAccount(w http.ResponseWriter, r *http.Request) {
 	defer mon.Task()(&ctx)(&err)
 
 	var user struct {
 		ID                    uuid.UUID `json:"id"`
 		FullName              string    `json:"fullName"`
 		ShortName             string    `json:"shortName"`
 		Email                 string    `json:"email"`
 		Partner               string    `json:"partner"`
 		ProjectLimit          int       `json:"projectLimit"`
 		ProjectStorageLimit   int64     `json:"projectStorageLimit"`
-		IsProfessional        bool      `json:"isProfessional"`
-		Position              string    `json:"position"`
-		CompanyName           string    `json:"companyName"`
-		EmployeeCount         string    `json:"employeeCount"`
-		HaveSalesContact      bool      `json:"haveSalesContact"`
-		PaidTier              bool      `json:"paidTier"`
-		MFAEnabled            bool      `json:"isMFAEnabled"`
-		MFARecoveryCodeCount  int       `json:"mfaRecoveryCodeCount"`
-		CreatedAt             time.Time `json:"createdAt"`
+		ProjectBandwidthLimit int64     `json:"projectBandwidthLimit"`
+		ProjectSegmentLimit   int64     `json:"projectSegmentLimit"`
+		IsProfessional        bool      `json:"isProfessional"`
+		Position              string    `json:"position"`
+		CompanyName           string    `json:"companyName"`
+		EmployeeCount         string    `json:"employeeCount"`
+		HaveSalesContact      bool      `json:"haveSalesContact"`
+		PaidTier              bool      `json:"paidTier"`
+		MFAEnabled            bool      `json:"isMFAEnabled"`
+		MFARecoveryCodeCount  int       `json:"mfaRecoveryCodeCount"`
+		CreatedAt             time.Time `json:"createdAt"`
 	}
 
 	consoleUser, err := console.GetUser(ctx)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -473,6 +476,8 @@ func (a *Auth) GetAccount(w http.ResponseWriter, r *http.Request) {
 	}
 	user.ProjectLimit = consoleUser.ProjectLimit
 	user.ProjectStorageLimit = consoleUser.ProjectStorageLimit
+	user.ProjectBandwidthLimit = consoleUser.ProjectBandwidthLimit
+	user.ProjectSegmentLimit = consoleUser.ProjectSegmentLimit
 	user.IsProfessional = consoleUser.IsProfessional
 	user.CompanyName = consoleUser.CompanyName
 	user.Position = consoleUser.Position
@@ -497,7 +502,7 @@ func (a *Auth) DeleteAccount(w http.ResponseWriter, r *http.Request) {
 	defer mon.Task()(&ctx)(&errNotImplemented)
 
 	// We do not want to allow account deletion via API currently.
-	a.serveJSONError(w, errNotImplemented)
+	a.serveJSONError(ctx, w, errNotImplemented)
 }
 
 // ChangeEmail auth user, changes users email for a new one.
@@ -512,13 +517,13 @@ func (a *Auth) ChangeEmail(w http.ResponseWriter, r *http.Request) {
 	err = json.NewDecoder(r.Body).Decode(&emailChange)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	err = a.service.ChangeEmail(ctx, emailChange.NewEmail)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 }
@@ -536,13 +541,13 @@ func (a *Auth) ChangePassword(w http.ResponseWriter, r *http.Request) {
 	err = json.NewDecoder(r.Body).Decode(&passwordChange)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	err = a.service.ChangePassword(ctx, passwordChange.CurrentPassword, passwordChange.NewPassword)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 }
@@ -560,23 +565,23 @@ func (a *Auth) ForgotPassword(w http.ResponseWriter, r *http.Request) {
 	err = json.NewDecoder(r.Body).Decode(&forgotPassword)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	ip, err := web.GetRequestIP(r)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	valid, err := a.service.VerifyForgotPasswordCaptcha(ctx, forgotPassword.CaptchaResponse, ip)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 	if !valid {
-		a.serveJSONError(w, console.ErrCaptcha.New("captcha validation unsuccessful"))
+		a.serveJSONError(ctx, w, console.ErrCaptcha.New("captcha validation unsuccessful"))
 		return
 	}
@@ -608,7 +613,7 @@ func (a *Auth) ForgotPassword(w http.ResponseWriter, r *http.Request) {
 	recoveryToken, err := a.service.GeneratePasswordRecoveryToken(ctx, user.ID)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -659,7 +664,7 @@ func (a *Auth) ResendEmail(w http.ResponseWriter, r *http.Request) {
 	if verified != nil {
 		recoveryToken, err := a.service.GeneratePasswordRecoveryToken(ctx, verified.ID)
 		if err != nil {
-			a.serveJSONError(w, err)
+			a.serveJSONError(ctx, w, err)
 			return
 		}
@@ -688,7 +693,7 @@ func (a *Auth) ResendEmail(w http.ResponseWriter, r *http.Request) {
 	token, err := a.service.GenerateActivationToken(ctx, user.ID, user.Email)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -719,31 +724,31 @@ func (a *Auth) EnableUserMFA(w http.ResponseWriter, r *http.Request) {
 	}
 	err = json.NewDecoder(r.Body).Decode(&data)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	err = a.service.EnableUserMFA(ctx, data.Passcode, time.Now())
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	sessionID, err := a.getSessionID(r)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	consoleUser, err := console.GetUser(ctx)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
		return
 	}
 
 	err = a.service.DeleteAllSessionsByUserIDExcept(ctx, consoleUser.ID, sessionID)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 }
@@ -760,31 +765,31 @@ func (a *Auth) DisableUserMFA(w http.ResponseWriter, r *http.Request) {
 	}
 	err = json.NewDecoder(r.Body).Decode(&data)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	err = a.service.DisableUserMFA(ctx, data.Passcode, time.Now(), data.RecoveryCode)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	sessionID, err := a.getSessionID(r)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	consoleUser, err := console.GetUser(ctx)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	err = a.service.DeleteAllSessionsByUserIDExcept(ctx, consoleUser.ID, sessionID)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 }
@@ -797,7 +802,7 @@ func (a *Auth) GenerateMFASecretKey(w http.ResponseWriter, r *http.Request) {
 	key, err := a.service.ResetMFASecretKey(ctx)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -817,7 +822,7 @@ func (a *Auth) GenerateMFARecoveryCodes(w http.ResponseWriter, r *http.Request) {
 	codes, err := a.service.ResetMFARecoveryCodes(ctx)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -844,7 +849,7 @@ func (a *Auth) ResetPassword(w http.ResponseWriter, r *http.Request) {
 	err = json.NewDecoder(r.Body).Decode(&resetPassword)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 	}
 
 	err = a.service.ResetPassword(ctx, resetPassword.RecoveryToken, resetPassword.NewPassword, resetPassword.MFAPasscode, resetPassword.MFARecoveryCode, time.Now())
@@ -882,7 +887,7 @@ func (a *Auth) ResetPassword(w http.ResponseWriter, r *http.Request) {
 	}
 
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 	} else {
 		a.cookieAuth.RemoveTokenCookie(w)
 	}
@@ -896,19 +901,19 @@ func (a *Auth) RefreshSession(w http.ResponseWriter, r *http.Request) {
 	tokenInfo, err := a.cookieAuth.GetToken(r)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	id, err := uuid.FromBytes(tokenInfo.Token.Payload)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 
 	tokenInfo.ExpiresAt, err = a.service.RefreshSession(ctx, id)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -929,7 +934,7 @@ func (a *Auth) GetUserSettings(w http.ResponseWriter, r *http.Request) {
 	settings, err := a.service.GetUserSettings(ctx)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -954,7 +959,7 @@ func (a *Auth) SetOnboardingStatus(w http.ResponseWriter, r *http.Request) {
 	err = json.NewDecoder(r.Body).Decode(&updateInfo)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -964,7 +969,7 @@ func (a *Auth) SetOnboardingStatus(w http.ResponseWriter, r *http.Request) {
 		OnboardingStep:   updateInfo.OnboardingStep,
 	})
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
 }
@@ -985,7 +990,7 @@ func (a *Auth) SetUserSettings(w http.ResponseWriter, r *http.Request) {
 	err = json.NewDecoder(r.Body).Decode(&updateInfo)
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -1006,7 +1011,7 @@ func (a *Auth) SetUserSettings(w http.ResponseWriter, r *http.Request) {
 		SessionDuration: newDuration,
 	})
 	if err != nil {
-		a.serveJSONError(w, err)
+		a.serveJSONError(ctx, w, err)
 		return
 	}
@@ -1018,9 +1023,9 @@ func (a *Auth) SetUserSettings(w http.ResponseWriter, r *http.Request) {
 }
 
 // serveJSONError writes JSON error to response output stream.
-func (a *Auth) serveJSONError(w http.ResponseWriter, err error) {
+func (a *Auth) serveJSONError(ctx context.Context, w http.ResponseWriter, err error) {
 	status := a.getStatusCode(err)
-	web.ServeCustomJSONError(a.log, w, status, err, a.getUserErrorMessage(err))
+	web.ServeCustomJSONError(ctx, a.log, w, status, err, a.getUserErrorMessage(err))
 }
 
 // getStatusCode returns http.StatusCode depends on console error class.


@@ -4,6 +4,7 @@
 package consoleapi
 
 import (
+	"context"
 	"encoding/json"
 	"net/http"
@@ -49,28 +50,28 @@ func (b *Buckets) AllBucketNames(w http.ResponseWriter, r *http.Request) {
 	if projectIDString != "" {
 		projectID, err = uuid.FromString(projectIDString)
 		if err != nil {
-			b.serveJSONError(w, http.StatusBadRequest, err)
+			b.serveJSONError(ctx, w, http.StatusBadRequest, err)
 			return
 		}
 	} else if publicIDString != "" {
 		projectID, err = uuid.FromString(publicIDString)
 		if err != nil {
-			b.serveJSONError(w, http.StatusBadRequest, err)
+			b.serveJSONError(ctx, w, http.StatusBadRequest, err)
 			return
 		}
 	} else {
-		b.serveJSONError(w, http.StatusBadRequest, errs.New("Project ID was not provided."))
+		b.serveJSONError(ctx, w, http.StatusBadRequest, errs.New("Project ID was not provided."))
 		return
 	}
 
 	bucketNames, err := b.service.GetAllBucketNames(ctx, projectID)
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			b.serveJSONError(w, http.StatusUnauthorized, err)
+			b.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
-		b.serveJSONError(w, http.StatusInternalServerError, err)
+		b.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
@@ -81,6 +82,6 @@ func (b *Buckets) AllBucketNames(w http.ResponseWriter, r *http.Request) {
 }
 
 // serveJSONError writes JSON error to response output stream.
-func (b *Buckets) serveJSONError(w http.ResponseWriter, status int, err error) {
-	web.ServeJSONError(b.log, w, status, err)
+func (b *Buckets) serveJSONError(ctx context.Context, w http.ResponseWriter, status int, err error) {
+	web.ServeJSONError(ctx, b.log, w, status, err)
 }


@@ -58,11 +58,11 @@ func (p *Payments) SetupAccount(w http.ResponseWriter, r *http.Request) {
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			p.serveJSONError(w, http.StatusUnauthorized, err)
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
@@ -83,11 +83,11 @@ func (p *Payments) AccountBalance(w http.ResponseWriter, r *http.Request) {
 	balance, err := p.service.Payments().AccountBalance(ctx)
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			p.serveJSONError(w, http.StatusUnauthorized, err)
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
@@ -112,12 +112,12 @@ func (p *Payments) ProjectsCharges(w http.ResponseWriter, r *http.Request) {
 	sinceStamp, err := strconv.ParseInt(r.URL.Query().Get("from"), 10, 64)
 	if err != nil {
-		p.serveJSONError(w, http.StatusBadRequest, err)
+		p.serveJSONError(ctx, w, http.StatusBadRequest, err)
 		return
 	}
 
 	beforeStamp, err := strconv.ParseInt(r.URL.Query().Get("to"), 10, 64)
 	if err != nil {
-		p.serveJSONError(w, http.StatusBadRequest, err)
+		p.serveJSONError(ctx, w, http.StatusBadRequest, err)
 		return
 	}
@@ -127,11 +127,11 @@ func (p *Payments) ProjectsCharges(w http.ResponseWriter, r *http.Request) {
 	charges, err := p.service.Payments().ProjectsCharges(ctx, since, before)
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			p.serveJSONError(w, http.StatusUnauthorized, err)
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
@@ -155,8 +155,8 @@ func (p *Payments) ProjectsCharges(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
-// triggerAttemptPaymentIfFrozenOrWarned checks if the account is frozen and if frozen, will trigger attempt to pay outstanding invoices.
-func (p *Payments) triggerAttemptPaymentIfFrozenOrWarned(ctx context.Context) (err error) {
+// triggerAttemptPayment attempts payment and unfreezes/unwarn user if needed.
+func (p *Payments) triggerAttemptPayment(ctx context.Context) (err error) {
 	defer mon.Task()(&ctx)(&err)
 
 	userID, err := p.service.GetUserID(ctx)
@@ -169,12 +169,11 @@ func (p *Payments) triggerAttemptPayment(ctx context.Context) (err error) {
 		return err
 	}
 
-	if freeze != nil || warning != nil {
-		err = p.service.Payments().AttemptPayOverdueInvoices(ctx)
-		if err != nil {
-			return err
-		}
+	err = p.service.Payments().AttemptPayOverdueInvoices(ctx)
+	if err != nil {
+		return err
 	}
 
 	if freeze != nil {
 		err = p.accountFreezeService.UnfreezeUser(ctx, userID)
 		if err != nil {
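Taken together, the behavioral change is that overdue invoices are now paid unconditionally whenever the helper runs, rather than only for accounts that were already frozen or warned. A self-contained sketch of the assembled flow follows; the freeze/warning lookup and the unfreeze/unwarn tail are inferred from surrounding context (the real code uses uuid.UUID and concrete services, not the string stand-in and interfaces used here).

package sketch

import "context"

// freezeService and paymentsService are illustrative interfaces standing in
// for the console's AccountFreezeService and Payments service.
type freezeService interface {
	GetAll(ctx context.Context, userID string) (freeze, warning *struct{}, err error)
	UnfreezeUser(ctx context.Context, userID string) error
	UnWarnUser(ctx context.Context, userID string) error
}

type paymentsService interface {
	AttemptPayOverdueInvoices(ctx context.Context) error
}

func triggerAttemptPaymentSketch(ctx context.Context, userID string, freezes freezeService, payments paymentsService) error {
	freeze, warning, err := freezes.GetAll(ctx, userID)
	if err != nil {
		return err
	}

	// Key change: overdue invoices are paid unconditionally, not only
	// when the account is already frozen or warned.
	if err := payments.AttemptPayOverdueInvoices(ctx); err != nil {
		return err
	}

	if freeze != nil {
		return freezes.UnfreezeUser(ctx, userID)
	}
	if warning != nil {
		return freezes.UnWarnUser(ctx, userID)
	}
	return nil
}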
@@ -197,7 +196,7 @@ func (p *Payments) AddCreditCard(w http.ResponseWriter, r *http.Request) {
 	bodyBytes, err := io.ReadAll(r.Body)
 	if err != nil {
-		p.serveJSONError(w, http.StatusBadRequest, err)
+		p.serveJSONError(ctx, w, http.StatusBadRequest, err)
 		return
 	}
@@ -206,17 +205,17 @@ func (p *Payments) AddCreditCard(w http.ResponseWriter, r *http.Request) {
 	_, err = p.service.Payments().AddCreditCard(ctx, token)
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			p.serveJSONError(w, http.StatusUnauthorized, err)
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
 
-	err = p.triggerAttemptPaymentIfFrozenOrWarned(ctx)
+	err = p.triggerAttemptPayment(ctx)
 	if err != nil {
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
 }
@@ -232,11 +231,11 @@ func (p *Payments) ListCreditCards(w http.ResponseWriter, r *http.Request) {
 	cards, err := p.service.Payments().ListCreditCards(ctx)
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			p.serveJSONError(w, http.StatusUnauthorized, err)
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
@@ -259,24 +258,24 @@ func (p *Payments) MakeCreditCardDefault(w http.ResponseWriter, r *http.Request) {
 	cardID, err := io.ReadAll(r.Body)
 	if err != nil {
-		p.serveJSONError(w, http.StatusBadRequest, err)
+		p.serveJSONError(ctx, w, http.StatusBadRequest, err)
 		return
 	}
 
 	err = p.service.Payments().MakeCreditCardDefault(ctx, string(cardID))
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			p.serveJSONError(w, http.StatusUnauthorized, err)
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
 
-	err = p.triggerAttemptPaymentIfFrozenOrWarned(ctx)
+	err = p.triggerAttemptPayment(ctx)
 	if err != nil {
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
 }
@@ -291,18 +290,18 @@ func (p *Payments) RemoveCreditCard(w http.ResponseWriter, r *http.Request) {
 	cardID := vars["cardId"]
 
 	if cardID == "" {
-		p.serveJSONError(w, http.StatusBadRequest, err)
+		p.serveJSONError(ctx, w, http.StatusBadRequest, err)
 		return
 	}
 
 	err = p.service.Payments().RemoveCreditCard(ctx, cardID)
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			p.serveJSONError(w, http.StatusUnauthorized, err)
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
 }
@@ -318,11 +317,11 @@ func (p *Payments) BillingHistory(w http.ResponseWriter, r *http.Request) {
 	billingHistory, err := p.service.Payments().BillingHistory(ctx)
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			p.serveJSONError(w, http.StatusUnauthorized, err)
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
@@ -345,7 +344,7 @@ func (p *Payments) ApplyCouponCode(w http.ResponseWriter, r *http.Request) {
 	bodyBytes, err := io.ReadAll(r.Body)
 	if err != nil {
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
 	couponCode := string(bodyBytes)
@@ -358,7 +357,7 @@ func (p *Payments) ApplyCouponCode(w http.ResponseWriter, r *http.Request) {
 		} else if payments.ErrCouponConflict.Has(err) {
 			status = http.StatusConflict
 		}
-		p.serveJSONError(w, status, err)
+		p.serveJSONError(ctx, w, status, err)
 		return
 	}
@@ -378,11 +377,11 @@ func (p *Payments) GetCoupon(w http.ResponseWriter, r *http.Request) {
 	coupon, err := p.service.Payments().GetCoupon(ctx)
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			p.serveJSONError(w, http.StatusUnauthorized, err)
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
@@ -402,15 +401,15 @@ func (p *Payments) GetWallet(w http.ResponseWriter, r *http.Request) {
 	walletInfo, err := p.service.Payments().GetWallet(ctx)
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			p.serveJSONError(w, http.StatusUnauthorized, err)
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 		if errs.Is(err, billing.ErrNoWallet) {
-			p.serveJSONError(w, http.StatusNotFound, err)
+			p.serveJSONError(ctx, w, http.StatusNotFound, err)
 			return
 		}
 
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
@@ -430,11 +429,11 @@ func (p *Payments) ClaimWallet(w http.ResponseWriter, r *http.Request) {
 	walletInfo, err := p.service.Payments().ClaimWallet(ctx)
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			p.serveJSONError(w, http.StatusUnauthorized, err)
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
@@ -454,11 +453,11 @@ func (p *Payments) WalletPayments(w http.ResponseWriter, r *http.Request) {
 	walletPayments, err := p.service.Payments().WalletPayments(ctx)
 	if err != nil {
 		if console.ErrUnauthorized.Has(err) {
-			p.serveJSONError(w, http.StatusUnauthorized, err)
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 			return
 		}
 
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
@@ -467,6 +466,30 @@ func (p *Payments) WalletPayments(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
+// WalletPaymentsWithConfirmations returns with the list of storjscan transactions (including confirmations count) for user`s wallet.
+func (p *Payments) WalletPaymentsWithConfirmations(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	var err error
+	defer mon.Task()(&ctx)(&err)
+
+	w.Header().Set("Content-Type", "application/json")
+
+	walletPayments, err := p.service.Payments().WalletPaymentsWithConfirmations(ctx)
+	if err != nil {
+		if console.ErrUnauthorized.Has(err) {
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
+			return
+		}
+
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
+		return
+	}
+
+	if err = json.NewEncoder(w).Encode(walletPayments); err != nil {
+		p.log.Error("failed to encode wallet payments with confirmations", zap.Error(ErrPaymentsAPI.Wrap(err)))
+	}
+}
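The new handler lets the console poll pending token deposits as they gather confirmations. Neither the route path nor the response shape is shown in this diff, so both are assumptions in the client-side sketch below; only the handler name WalletPaymentsWithConfirmations comes from the change itself.

package client

import (
	"context"
	"encoding/json"
	"net/http"
	"time"
)

// PaymentWithConfirmations is a guess at the response shape; the actual
// field set is defined elsewhere in the payments package.
type PaymentWithConfirmations struct {
	From          string    `json:"from"`
	TokenValue    string    `json:"tokenValue"`
	Status        string    `json:"status"`
	Timestamp     time.Time `json:"timestamp"`
	Confirmations int64     `json:"confirmations"`
}

// FetchWalletPayments polls the endpoint so a UI could show pending
// transactions progressing toward the needed confirmation count.
// The route path is an assumption.
func FetchWalletPayments(ctx context.Context, baseURL string, client *http.Client) ([]PaymentWithConfirmations, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, baseURL+"/api/v0/payments/wallet/payments-with-confirmations", nil)
	if err != nil {
		return nil, err
	}

	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer func() { _ = resp.Body.Close() }()

	var payments []PaymentWithConfirmations
	return payments, json.NewDecoder(resp.Body).Decode(&payments)
}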
 // GetProjectUsagePriceModel returns the project usage price model for the user.
 func (p *Payments) GetProjectUsagePriceModel(w http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()
@@ -477,7 +500,7 @@ func (p *Payments) GetProjectUsagePriceModel(w http.ResponseWriter, r *http.Request) {
 	user, err := console.GetUser(ctx)
 	if err != nil {
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
@@ -496,7 +519,7 @@ func (p *Payments) PurchasePackage(w http.ResponseWriter, r *http.Request) {
 	bodyBytes, err := io.ReadAll(r.Body)
 	if err != nil {
-		p.serveJSONError(w, http.StatusBadRequest, err)
+		p.serveJSONError(ctx, w, http.StatusBadRequest, err)
 		return
 	}
@@ -504,13 +527,13 @@ func (p *Payments) PurchasePackage(w http.ResponseWriter, r *http.Request) {
 	u, err := console.GetUser(ctx)
 	if err != nil {
-		p.serveJSONError(w, http.StatusUnauthorized, err)
+		p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 		return
 	}
 
 	pkg, err := p.packagePlans.Get(u.UserAgent)
 	if err != nil {
-		p.serveJSONError(w, http.StatusNotFound, err)
+		p.serveJSONError(ctx, w, http.StatusNotFound, err)
 		return
 	}
@@ -518,9 +541,9 @@ func (p *Payments) PurchasePackage(w http.ResponseWriter, r *http.Request) {
 	if err != nil {
 		switch {
 		case console.ErrUnauthorized.Has(err):
-			p.serveJSONError(w, http.StatusUnauthorized, err)
+			p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 		default:
-			p.serveJSONError(w, http.StatusInternalServerError, err)
+			p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		}
 		return
 	}
@@ -529,19 +552,19 @@ func (p *Payments) PurchasePackage(w http.ResponseWriter, r *http.Request) {
 	err = p.service.Payments().UpdatePackage(ctx, description, time.Now())
 	if err != nil {
 		if !console.ErrAlreadyHasPackage.Has(err) {
-			p.serveJSONError(w, http.StatusInternalServerError, err)
+			p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 			return
 		}
 	}
 
 	err = p.service.Payments().Purchase(ctx, pkg.Price, description, card.ID)
 	if err != nil {
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
 
 	if err = p.service.Payments().ApplyCredit(ctx, pkg.Credit, description); err != nil {
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
 		return
 	}
 }
@@ -554,7 +577,7 @@ func (p *Payments) PackageAvailable(w http.ResponseWriter, r *http.Request) {
 	u, err := console.GetUser(ctx)
 	if err != nil {
-		p.serveJSONError(w, http.StatusUnauthorized, err)
+		p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
 		return
 	}
@@ -567,6 +590,6 @@ func (p *Payments) PackageAvailable(w http.ResponseWriter, r *http.Request) {
 }
 
 // serveJSONError writes JSON error to response output stream.
-func (p *Payments) serveJSONError(w http.ResponseWriter, status int, err error) {
-	web.ServeJSONError(p.log, w, status, err)
+func (p *Payments) serveJSONError(ctx context.Context, w http.ResponseWriter, status int, err error) {
+	web.ServeJSONError(ctx, p.log, w, status, err)
 }

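The recurring change in this file is mechanical: every serveJSONError call gains a context.Context argument so the shared web.ServeJSONError helper can read request-scoped values. A minimal sketch of the pattern, assuming a helper shaped roughly like the one these handlers delegate to (the requestID field name and the response shape are illustrative, not Storj's actual wire format):

package web

import (
	"context"
	"encoding/json"
	"net/http"

	"go.uber.org/zap"

	"storj.io/common/http/requestid"
)

// ServeJSONError sketches why ctx is now threaded through: the request id
// placed in the context by the requestid middleware can be echoed back to
// the client alongside the error, making log correlation possible.
func ServeJSONError(ctx context.Context, log *zap.Logger, w http.ResponseWriter, status int, err error) {
	body := map[string]string{"error": err.Error()}
	if id := requestid.FromContext(ctx); id != "" {
		body["requestID"] = id // illustrative field name, an assumption
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	if encodeErr := json.NewEncoder(w).Encode(body); encodeErr != nil {
		log.Error("failed to write json error", zap.Error(encodeErr))
	}
}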

@@ -4,9 +4,11 @@
package consoleapi

import (
+	"context"
	"encoding/base64"
	"encoding/json"
	"net/http"
+	"strings"
	"time"

	"github.com/gorilla/mux"
@@ -42,18 +44,18 @@ func (p *Projects) GetSalt(w http.ResponseWriter, r *http.Request) {
	idParam, ok := mux.Vars(r)["id"]
	if !ok {
-		p.serveJSONError(w, http.StatusBadRequest, errs.New("missing id route param"))
+		p.serveJSONError(ctx, w, http.StatusBadRequest, errs.New("missing id route param"))
		return
	}

	id, err := uuid.FromString(idParam)
	if err != nil {
-		p.serveJSONError(w, http.StatusBadRequest, err)
+		p.serveJSONError(ctx, w, http.StatusBadRequest, err)
	}

	salt, err := p.service.GetSalt(ctx, id)
	if err != nil {
-		p.serveJSONError(w, http.StatusUnauthorized, err)
+		p.serveJSONError(ctx, w, http.StatusUnauthorized, err)
		return
	}
@@ -61,7 +63,7 @@ func (p *Projects) GetSalt(w http.ResponseWriter, r *http.Request) {
	err = json.NewEncoder(w).Encode(b64SaltString)
	if err != nil {
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
	}
}
@@ -73,12 +75,12 @@ func (p *Projects) InviteUsers(w http.ResponseWriter, r *http.Request) {
	idParam, ok := mux.Vars(r)["id"]
	if !ok {
-		p.serveJSONError(w, http.StatusBadRequest, errs.New("missing project id route param"))
+		p.serveJSONError(ctx, w, http.StatusBadRequest, errs.New("missing project id route param"))
		return
	}

	id, err := uuid.FromString(idParam)
	if err != nil {
-		p.serveJSONError(w, http.StatusBadRequest, err)
+		p.serveJSONError(ctx, w, http.StatusBadRequest, err)
	}

	var data struct {
@@ -87,13 +89,17 @@ func (p *Projects) InviteUsers(w http.ResponseWriter, r *http.Request) {
	err = json.NewDecoder(r.Body).Decode(&data)
	if err != nil {
-		p.serveJSONError(w, http.StatusBadRequest, err)
+		p.serveJSONError(ctx, w, http.StatusBadRequest, err)
		return
	}

+	for i, email := range data.Emails {
+		data.Emails[i] = strings.TrimSpace(email)
+	}
+
	_, err = p.service.InviteProjectMembers(ctx, id, data.Emails)
	if err != nil {
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
	}
}
@@ -104,28 +110,28 @@ func (p *Projects) GetInviteLink(w http.ResponseWriter, r *http.Request) {
	defer mon.Task()(&ctx)(&err)

	idParam, ok := mux.Vars(r)["id"]
	if !ok {
-		p.serveJSONError(w, http.StatusBadRequest, errs.New("missing project id route param"))
+		p.serveJSONError(ctx, w, http.StatusBadRequest, errs.New("missing project id route param"))
		return
	}

	id, err := uuid.FromString(idParam)
	if err != nil {
-		p.serveJSONError(w, http.StatusBadRequest, err)
+		p.serveJSONError(ctx, w, http.StatusBadRequest, err)
	}

	email := r.URL.Query().Get("email")
	if email == "" {
-		p.serveJSONError(w, http.StatusBadRequest, errs.New("missing email query param"))
+		p.serveJSONError(ctx, w, http.StatusBadRequest, errs.New("missing email query param"))
		return
	}

	link, err := p.service.GetInviteLink(ctx, id, email)
	if err != nil {
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
	}

	err = json.NewEncoder(w).Encode(link)
	if err != nil {
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
	}
}
@@ -139,7 +145,7 @@ func (p *Projects) GetUserInvitations(w http.ResponseWriter, r *http.Request) {
	invites, err := p.service.GetUserProjectInvitations(ctx)
	if err != nil {
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
		return
	}
@@ -156,7 +162,7 @@ func (p *Projects) GetUserInvitations(w http.ResponseWriter, r *http.Request) {
	for _, invite := range invites {
		proj, err := p.service.GetProjectNoAuth(ctx, invite.ProjectID)
		if err != nil {
-			p.serveJSONError(w, http.StatusInternalServerError, err)
+			p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
			return
		}
@@ -170,7 +176,7 @@ func (p *Projects) GetUserInvitations(w http.ResponseWriter, r *http.Request) {
		if invite.InviterID != nil {
			inviter, err := p.service.GetUser(ctx, *invite.InviterID)
			if err != nil {
-				p.serveJSONError(w, http.StatusInternalServerError, err)
+				p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
				return
			}
			respInvite.InviterEmail = inviter.Email
@@ -181,7 +187,7 @@ func (p *Projects) GetUserInvitations(w http.ResponseWriter, r *http.Request) {
	err = json.NewEncoder(w).Encode(response)
	if err != nil {
-		p.serveJSONError(w, http.StatusInternalServerError, err)
+		p.serveJSONError(ctx, w, http.StatusInternalServerError, err)
	}
}
@@ -195,13 +201,13 @@ func (p *Projects) RespondToInvitation(w http.ResponseWriter, r *http.Request) {
	var idParam string
	if idParam, ok = mux.Vars(r)["id"]; !ok {
-		p.serveJSONError(w, http.StatusBadRequest, errs.New("missing project id route param"))
+		p.serveJSONError(ctx, w, http.StatusBadRequest, errs.New("missing project id route param"))
		return
	}

	id, err := uuid.FromString(idParam)
	if err != nil {
-		p.serveJSONError(w, http.StatusBadRequest, err)
+		p.serveJSONError(ctx, w, http.StatusBadRequest, err)
	}

	var payload struct {
@@ -210,7 +216,7 @@ func (p *Projects) RespondToInvitation(w http.ResponseWriter, r *http.Request) {
	err = json.NewDecoder(r.Body).Decode(&payload)
	if err != nil {
-		p.serveJSONError(w, http.StatusBadRequest, err)
+		p.serveJSONError(ctx, w, http.StatusBadRequest, err)
		return
	}
@@ -225,11 +231,11 @@ func (p *Projects) RespondToInvitation(w http.ResponseWriter, r *http.Request) {
		case console.ErrValidation.Has(err):
			status = http.StatusBadRequest
		}

-		p.serveJSONError(w, status, err)
+		p.serveJSONError(ctx, w, status, err)
	}
}

// serveJSONError writes JSON error to response output stream.
-func (p *Projects) serveJSONError(w http.ResponseWriter, status int, err error) {
-	web.ServeJSONError(p.log, w, status, err)
+func (p *Projects) serveJSONError(ctx context.Context, w http.ResponseWriter, status int, err error) {
+	web.ServeJSONError(ctx, p.log, w, status, err)
}


@@ -4,6 +4,7 @@
package consoleapi

import (
+	"context"
	"encoding/json"
	"net/http"
	"strconv"
@@ -50,13 +51,13 @@ func (ul *UsageLimits) ProjectUsageLimits(w http.ResponseWriter, r *http.Request
	var idParam string
	if idParam, ok = mux.Vars(r)["id"]; !ok {
-		ul.serveJSONError(w, http.StatusBadRequest, errs.New("missing project id route param"))
+		ul.serveJSONError(ctx, w, http.StatusBadRequest, errs.New("missing project id route param"))
		return
	}

	projectID, err := uuid.FromString(idParam)
	if err != nil {
-		ul.serveJSONError(w, http.StatusBadRequest, errs.New("invalid project id: %v", err))
+		ul.serveJSONError(ctx, w, http.StatusBadRequest, errs.New("invalid project id: %v", err))
		return
	}
@@ -64,13 +65,13 @@ func (ul *UsageLimits) ProjectUsageLimits(w http.ResponseWriter, r *http.Request
	if err != nil {
		switch {
		case console.ErrUnauthorized.Has(err):
-			ul.serveJSONError(w, http.StatusUnauthorized, err)
+			ul.serveJSONError(ctx, w, http.StatusUnauthorized, err)
			return
		case accounting.ErrInvalidArgument.Has(err):
-			ul.serveJSONError(w, http.StatusBadRequest, err)
+			ul.serveJSONError(ctx, w, http.StatusBadRequest, err)
			return
		default:
-			ul.serveJSONError(w, http.StatusInternalServerError, err)
+			ul.serveJSONError(ctx, w, http.StatusInternalServerError, err)
			return
		}
	}
@@ -90,11 +91,11 @@ func (ul *UsageLimits) TotalUsageLimits(w http.ResponseWriter, r *http.Request)
	usageLimits, err := ul.service.GetTotalUsageLimits(ctx)
	if err != nil {
		if console.ErrUnauthorized.Has(err) {
-			ul.serveJSONError(w, http.StatusUnauthorized, err)
+			ul.serveJSONError(ctx, w, http.StatusUnauthorized, err)
			return
		}

-		ul.serveJSONError(w, http.StatusInternalServerError, err)
+		ul.serveJSONError(ctx, w, http.StatusInternalServerError, err)
		return
	}
@@ -114,23 +115,23 @@ func (ul *UsageLimits) DailyUsage(w http.ResponseWriter, r *http.Request) {
	var idParam string
	if idParam, ok = mux.Vars(r)["id"]; !ok {
-		ul.serveJSONError(w, http.StatusBadRequest, errs.New("missing project id route param"))
+		ul.serveJSONError(ctx, w, http.StatusBadRequest, errs.New("missing project id route param"))
		return
	}
	projectID, err := uuid.FromString(idParam)
	if err != nil {
-		ul.serveJSONError(w, http.StatusBadRequest, errs.New("invalid project id: %v", err))
+		ul.serveJSONError(ctx, w, http.StatusBadRequest, errs.New("invalid project id: %v", err))
		return
	}

	sinceStamp, err := strconv.ParseInt(r.URL.Query().Get("from"), 10, 64)
	if err != nil {
-		ul.serveJSONError(w, http.StatusBadRequest, err)
+		ul.serveJSONError(ctx, w, http.StatusBadRequest, err)
		return
	}
	beforeStamp, err := strconv.ParseInt(r.URL.Query().Get("to"), 10, 64)
	if err != nil {
-		ul.serveJSONError(w, http.StatusBadRequest, err)
+		ul.serveJSONError(ctx, w, http.StatusBadRequest, err)
		return
	}
@@ -140,11 +141,11 @@ func (ul *UsageLimits) DailyUsage(w http.ResponseWriter, r *http.Request) {
	dailyUsage, err := ul.service.GetDailyProjectUsage(ctx, projectID, since, before)
	if err != nil {
		if console.ErrUnauthorized.Has(err) {
-			ul.serveJSONError(w, http.StatusUnauthorized, err)
+			ul.serveJSONError(ctx, w, http.StatusUnauthorized, err)
			return
		}

-		ul.serveJSONError(w, http.StatusInternalServerError, err)
+		ul.serveJSONError(ctx, w, http.StatusInternalServerError, err)
		return
	}
@@ -155,6 +156,6 @@ func (ul *UsageLimits) DailyUsage(w http.ResponseWriter, r *http.Request) {
}

// serveJSONError writes JSON error to response output stream.
-func (ul *UsageLimits) serveJSONError(w http.ResponseWriter, status int, err error) {
-	web.ServeJSONError(ul.log, w, status, err)
+func (ul *UsageLimits) serveJSONError(ctx context.Context, w http.ResponseWriter, status int, err error) {
+	web.ServeJSONError(ctx, ul.log, w, status, err)
}

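The usage-limits handlers above map service-layer error classes to HTTP statuses (console.ErrUnauthorized to 401, accounting.ErrInvalidArgument to 400, everything else to 500). A small self-contained illustration of the zeebo/errs class pattern they rely on:

package main

import (
	"fmt"
	"net/http"

	"github.com/zeebo/errs"
)

var ErrUnauthorized = errs.Class("unauthorized")

// statusFor mirrors the switch used by the handlers: classes wrap errors,
// and Has reports whether an error belongs to a class anywhere in its chain.
func statusFor(err error) int {
	switch {
	case ErrUnauthorized.Has(err):
		return http.StatusUnauthorized
	default:
		return http.StatusInternalServerError
	}
}

func main() {
	fmt.Println(statusFor(ErrUnauthorized.New("no session"))) // 401
	fmt.Println(statusFor(errs.New("db down")))               // 500
}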

@@ -15,6 +15,7 @@ import (
	"mime"
	"net"
	"net/http"
+	"net/http/httputil"
	"net/url"
	"os"
	"path/filepath"
@@ -32,6 +33,7 @@ import (
	"golang.org/x/sync/errgroup"

	"storj.io/common/errs2"
+	"storj.io/common/http/requestid"
	"storj.io/common/memory"
	"storj.io/common/storj"
	"storj.io/storj/private/web"
@@ -62,10 +64,14 @@ var (
// Config contains configuration for console web server.
type Config struct {
	Address string `help:"server address of the graphql api gateway and frontend app" devDefault:"127.0.0.1:0" releaseDefault:":10100"`
-	StaticDir string `help:"path to static resources" default:""`
-	Watch bool `help:"whether to load templates on each request" default:"false" devDefault:"true"`
-	ExternalAddress string `help:"external endpoint of the satellite if hosted" default:""`
+	FrontendAddress string `help:"server address of the front-end app" devDefault:"127.0.0.1:0" releaseDefault:":10200"`
+	ExternalAddress string `help:"external endpoint of the satellite if hosted" default:""`
+	FrontendEnable bool `help:"feature flag to toggle whether console back-end server should also serve front-end endpoints" default:"true"`
+	BackendReverseProxy string `help:"the target URL of console back-end reverse proxy for local development when running a UI server" default:""`
+	StaticDir string `help:"path to static resources" default:""`
+	Watch bool `help:"whether to load templates on each request" default:"false" devDefault:"true"`

	AuthToken string `help:"auth token needed for access to registration token creation endpoint" default:"" testDefault:"very-secret-token"`
	AuthTokenSecret string `help:"secret used to sign auth tokens" releaseDefault:"" devDefault:"my-suppa-secret-key"`
@@ -138,7 +144,8 @@ type Server struct {
	userIDRateLimiter *web.RateLimiter
	nodeURL storj.NodeURL
	stripePublicKey string
+	neededTokenPaymentConfirmations int

	packagePlans paymentsconfig.PackagePlans
@@ -204,23 +211,24 @@ func (a *apiAuth) RemoveAuthCookie(w http.ResponseWriter) {
}

// NewServer creates new instance of console server.
-func NewServer(logger *zap.Logger, config Config, service *console.Service, oidcService *oidc.Service, mailService *mailservice.Service, analytics *analytics.Service, abTesting *abtesting.Service, accountFreezeService *console.AccountFreezeService, listener net.Listener, stripePublicKey string, nodeURL storj.NodeURL, packagePlans paymentsconfig.PackagePlans) *Server {
+func NewServer(logger *zap.Logger, config Config, service *console.Service, oidcService *oidc.Service, mailService *mailservice.Service, analytics *analytics.Service, abTesting *abtesting.Service, accountFreezeService *console.AccountFreezeService, listener net.Listener, stripePublicKey string, neededTokenPaymentConfirmations int, nodeURL storj.NodeURL, packagePlans paymentsconfig.PackagePlans) *Server {
	server := Server{
		log: logger,
		config: config,
		listener: listener,
		service: service,
		mailService: mailService,
		analytics: analytics,
		abTesting: abTesting,
		stripePublicKey: stripePublicKey,
+		neededTokenPaymentConfirmations: neededTokenPaymentConfirmations,
		ipRateLimiter: web.NewIPRateLimiter(config.RateLimit, logger),
		userIDRateLimiter: NewUserIDRateLimiter(config.RateLimit, logger),
		nodeURL: nodeURL,
		packagePlans: packagePlans,
	}

-	logger.Debug("Starting Satellite UI.", zap.Stringer("Address", server.listener.Addr()))
+	logger.Debug("Starting Satellite Console server.", zap.Stringer("Address", server.listener.Addr()))

	server.cookieAuth = consolewebauth.NewCookieAuth(consolewebauth.CookieSettings{
		Name: "_tokenKey",
@@ -245,6 +253,8 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, oidc
	// the earliest in the HTTP chain.
	router.Use(newTraceRequestMiddleware(logger, router))

+	router.Use(requestid.AddToContext)

	// limit body size
	router.Use(newBodyLimiterMiddleware(logger.Named("body-limiter-middleware"), config.BodySizeLimit))
@@ -324,6 +334,7 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, oidc
	paymentsRouter.HandleFunc("/wallet", paymentController.GetWallet).Methods(http.MethodGet, http.MethodOptions)
	paymentsRouter.HandleFunc("/wallet", paymentController.ClaimWallet).Methods(http.MethodPost, http.MethodOptions)
	paymentsRouter.HandleFunc("/wallet/payments", paymentController.WalletPayments).Methods(http.MethodGet, http.MethodOptions)
+	paymentsRouter.HandleFunc("/wallet/payments-with-confirmations", paymentController.WalletPaymentsWithConfirmations).Methods(http.MethodGet, http.MethodOptions)
	paymentsRouter.HandleFunc("/billing-history", paymentController.BillingHistory).Methods(http.MethodGet, http.MethodOptions)
	paymentsRouter.Handle("/coupon/apply", server.userIDRateLimiter.Limit(http.HandlerFunc(paymentController.ApplyCouponCode))).Methods(http.MethodPatch, http.MethodOptions)
	paymentsRouter.HandleFunc("/coupon", paymentController.GetCoupon).Methods(http.MethodGet, http.MethodOptions)
@@ -353,30 +364,26 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, oidc
	analyticsRouter.HandleFunc("/event", analyticsController.EventTriggered).Methods(http.MethodPost, http.MethodOptions)
	analyticsRouter.HandleFunc("/page", analyticsController.PageEventTriggered).Methods(http.MethodPost, http.MethodOptions)

-	if server.config.StaticDir != "" {
-		oidc := oidc.NewEndpoint(
-			server.nodeURL, server.config.ExternalAddress,
-			logger, oidcService, service,
-			server.config.OauthCodeExpiry, server.config.OauthAccessTokenExpiry, server.config.OauthRefreshTokenExpiry,
-		)
+	oidc := oidc.NewEndpoint(
+		server.nodeURL, server.config.ExternalAddress,
+		logger, oidcService, service,
+		server.config.OauthCodeExpiry, server.config.OauthAccessTokenExpiry, server.config.OauthRefreshTokenExpiry,
+	)

	router.HandleFunc("/.well-known/openid-configuration", oidc.WellKnownConfiguration)
	router.Handle("/oauth/v2/authorize", server.withAuth(http.HandlerFunc(oidc.AuthorizeUser))).Methods(http.MethodPost)
	router.Handle("/oauth/v2/tokens", server.ipRateLimiter.Limit(http.HandlerFunc(oidc.Tokens))).Methods(http.MethodPost)
	router.Handle("/oauth/v2/userinfo", server.ipRateLimiter.Limit(http.HandlerFunc(oidc.UserInfo))).Methods(http.MethodGet)
	router.Handle("/oauth/v2/clients/{id}", server.withAuth(http.HandlerFunc(oidc.GetClient))).Methods(http.MethodGet)

+	router.HandleFunc("/invited", server.handleInvited)
+	router.HandleFunc("/activation", server.accountActivationHandler)
+	router.HandleFunc("/cancel-password-recovery", server.cancelPasswordRecoveryHandler)

+	if server.config.StaticDir != "" && server.config.FrontendEnable {
		fs := http.FileServer(http.Dir(server.config.StaticDir))
		router.PathPrefix("/static/").Handler(server.withCORS(server.brotliMiddleware(http.StripPrefix("/static", fs))))

-		router.HandleFunc("/invited", server.handleInvited)
-		// These paths previously required a trailing slash, so we support both forms for now
-		slashRouter := router.NewRoute().Subrouter()
-		slashRouter.StrictSlash(true)
-		slashRouter.HandleFunc("/activation", server.accountActivationHandler)
-		slashRouter.HandleFunc("/cancel-password-recovery", server.cancelPasswordRecoveryHandler)

		if server.config.UseVuetifyProject {
			router.PathPrefix("/vuetifypoc").Handler(server.withCORS(http.HandlerFunc(server.vuetifyAppHandler)))
		}
@@ -427,6 +434,100 @@ func (server *Server) Run(ctx context.Context) (err error) {
	return group.Wait()
}

+// NewFrontendServer creates new instance of console front-end server.
+// NB: The return type is currently consoleweb.Server, but it does not contain all the dependencies.
+// It should only be used with RunFrontEnd and Close. We plan on moving this to its own type, but
+// right now since we have a feature flag to allow the backend server to continue serving the frontend, it
+// makes it easier if they are the same type.
+func NewFrontendServer(logger *zap.Logger, config Config, listener net.Listener, nodeURL storj.NodeURL, stripePublicKey string) (server *Server, err error) {
+	server = &Server{
+		log: logger,
+		config: config,
+		listener: listener,
+		nodeURL: nodeURL,
+		stripePublicKey: stripePublicKey,
+	}
+
+	logger.Debug("Starting Satellite UI server.", zap.Stringer("Address", server.listener.Addr()))
+
+	router := mux.NewRouter()
+
+	// N.B. This middleware has to be the first one because it has to be called
+	// the earliest in the HTTP chain.
+	router.Use(newTraceRequestMiddleware(logger, router))
+
+	// in local development, proxy certain requests to the console back-end server
+	if config.BackendReverseProxy != "" {
+		target, err := url.Parse(config.BackendReverseProxy)
+		if err != nil {
+			return nil, Error.Wrap(err)
+		}
+		proxy := httputil.NewSingleHostReverseProxy(target)
+		logger.Debug("Reverse proxy targeting", zap.String("address", config.BackendReverseProxy))
+
+		router.PathPrefix("/api").Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			proxy.ServeHTTP(w, r)
+		}))
+		router.PathPrefix("/oauth").Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			proxy.ServeHTTP(w, r)
+		}))
+		router.HandleFunc("/.well-known/openid-configuration", func(w http.ResponseWriter, r *http.Request) {
+			proxy.ServeHTTP(w, r)
+		})
+		router.HandleFunc("/invited", func(w http.ResponseWriter, r *http.Request) {
+			proxy.ServeHTTP(w, r)
+		})
+		router.HandleFunc("/activation", func(w http.ResponseWriter, r *http.Request) {
+			proxy.ServeHTTP(w, r)
+		})
+		router.HandleFunc("/cancel-password-recovery", func(w http.ResponseWriter, r *http.Request) {
+			proxy.ServeHTTP(w, r)
+		})
+		router.HandleFunc("/registrationToken/", func(w http.ResponseWriter, r *http.Request) {
+			proxy.ServeHTTP(w, r)
+		})
+		router.HandleFunc("/robots.txt", func(w http.ResponseWriter, r *http.Request) {
+			proxy.ServeHTTP(w, r)
+		})
+	}
+
+	fs := http.FileServer(http.Dir(server.config.StaticDir))
+	router.HandleFunc("/robots.txt", server.seoHandler)
+	router.PathPrefix("/static/").Handler(server.brotliMiddleware(http.StripPrefix("/static", fs)))
+	router.HandleFunc("/config", server.frontendConfigHandler)
+	if server.config.UseVuetifyProject {
+		router.PathPrefix("/vuetifypoc").Handler(http.HandlerFunc(server.vuetifyAppHandler))
+	}
+	router.PathPrefix("/").Handler(http.HandlerFunc(server.appHandler))
+
+	server.server = http.Server{
+		Handler: server.withRequest(router),
+		MaxHeaderBytes: ContentLengthLimit.Int(),
+	}
+
+	return server, nil
+}

+// RunFrontend starts the server that runs the webapp.
+func (server *Server) RunFrontend(ctx context.Context) (err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	ctx, cancel := context.WithCancel(ctx)
+	var group errgroup.Group
+
+	group.Go(func() error {
+		<-ctx.Done()
+		return server.server.Shutdown(context.Background())
+	})
+	group.Go(func() error {
+		defer cancel()
+		err := server.server.Serve(server.listener)
+		if errs2.IsCanceled(err) || errors.Is(err, http.ErrServerClosed) {
+			err = nil
+		}
+		return err
+	})
+
+	return group.Wait()
+}

// Close closes server and underlying listener.
func (server *Server) Close() error {
	return server.server.Close()
@@ -550,7 +651,7 @@ func (server *Server) withAuth(handler http.Handler) http.Handler {
		defer func() {
			if err != nil {
-				web.ServeJSONError(server.log, w, http.StatusUnauthorized, console.ErrUnauthorized.Wrap(err))
+				web.ServeJSONError(ctx, server.log, w, http.StatusUnauthorized, console.ErrUnauthorized.Wrap(err))
				server.cookieAuth.RemoveTokenCookie(w)
			}
		}()
@@ -620,6 +721,7 @@ func (server *Server) frontendConfigHandler(w http.ResponseWriter, r *http.Reque
		PricingPackagesEnabled: server.config.PricingPackagesEnabled,
		NewUploadModalEnabled: server.config.NewUploadModalEnabled,
		GalleryViewEnabled: server.config.GalleryViewEnabled,
+		NeededTransactionConfirmations: server.neededTokenPaymentConfirmations,
	}

	err := json.NewEncoder(w).Encode(&cfg)
@@ -783,7 +885,7 @@ func (server *Server) handleInvited(w http.ResponseWriter, r *http.Request) {
		return
	}

	if user != nil {
-		http.Redirect(w, r, loginLink+"?email="+user.Email, http.StatusTemporaryRedirect)
+		http.Redirect(w, r, loginLink+"?email="+url.QueryEscape(user.Email), http.StatusTemporaryRedirect)
		return
	}
@@ -829,6 +931,10 @@ func (server *Server) graphqlHandler(w http.ResponseWriter, r *http.Request) {
		jsonError.Error = err.Error()

+		if requestID := requestid.FromContext(ctx); requestID != "" {
+			jsonError.Error += fmt.Sprintf(" (request id: %s)", requestID)
+		}

		if err := json.NewEncoder(w).Encode(jsonError); err != nil {
			server.log.Error("error graphql error", zap.Error(err))
		}
@@ -893,6 +999,10 @@ func (server *Server) graphqlHandler(w http.ResponseWriter, r *http.Request) {
			jsonError.Errors = append(jsonError.Errors, err.Message)
		}

+		if requestID := requestid.FromContext(ctx); requestID != "" {
+			jsonError.Errors = append(jsonError.Errors, fmt.Sprintf("request id: %s", requestID))
+		}

		if err := json.NewEncoder(w).Encode(jsonError); err != nil {
			server.log.Error("error graphql error", zap.Error(err))
		}
@@ -1124,7 +1234,7 @@ func newBodyLimiterMiddleware(log *zap.Logger, limit memory.Size) mux.Middleware
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.ContentLength > limit.Int64() {
-				web.ServeJSONError(log, w, http.StatusRequestEntityTooLarge, errs.New("Request body is too large"))
+				web.ServeJSONError(r.Context(), log, w, http.StatusRequestEntityTooLarge, errs.New("Request body is too large"))
				return
			}

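NewFrontendServer's reverse-proxy branch exists so that, during local development, a standalone UI server can forward API traffic to a separately running console back-end. The mechanism is the standard library's httputil.NewSingleHostReverseProxy; a stripped-down, runnable sketch (addresses and paths are placeholders, not the satellite's defaults):

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// hypothetical back-end address for local development
	target, err := url.Parse("http://127.0.0.1:10100")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(target)

	mux := http.NewServeMux()
	mux.Handle("/api/", proxy)   // forward console API calls to the back-end
	mux.Handle("/oauth/", proxy) // forward OAuth endpoints as well
	mux.Handle("/", http.FileServer(http.Dir("./static"))) // everything else: static front-end assets

	log.Fatal(http.ListenAndServe("127.0.0.1:10200", mux))
}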

@@ -5,6 +5,7 @@ package consoleweb_test
import (
	"bytes"
+	"context"
	"fmt"
	"net/http"
	"testing"
@@ -140,14 +141,14 @@ func TestInvitedRouting(t *testing.T) {
		params := "email=invited%40mail.test&inviter=Project+Owner&inviter_email=owner%40mail.test&project=Test+Project"
		checkInvitedRedirect("Invited - Nonexistent user", baseURL+"signup?"+params, token)

-		invitedUser, err := sat.AddUser(ctx, console.CreateUser{
+		_, err = sat.AddUser(ctx, console.CreateUser{
			FullName: "Invited User",
			Email: invitedEmail,
		}, 1)
		require.NoError(t, err)

		// valid invite should redirect to login page with email.
-		checkInvitedRedirect("Invited - User invited", loginURL+"?email="+invitedUser.Email, token)
+		checkInvitedRedirect("Invited - User invited", loginURL+"?email=invited%40mail.test", token)
	})
}
@@ -219,3 +220,56 @@ func TestUserIDRateLimiter(t *testing.T) {
		require.Equal(t, http.StatusTooManyRequests, applyCouponStatus(firstToken))
	})
}

+func TestConsoleBackendWithDisabledFrontEnd(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
+		Reconfigure: testplanet.Reconfigure{
+			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+				config.Console.FrontendEnable = false
+				config.Console.UseVuetifyProject = true
+			},
+		},
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		apiAddr := planet.Satellites[0].API.Console.Listener.Addr().String()
+		uiAddr := planet.Satellites[0].UI.Console.Listener.Addr().String()
+
+		testEndpoint(ctx, t, apiAddr, "/", http.StatusNotFound)
+		testEndpoint(ctx, t, apiAddr, "/vuetifypoc", http.StatusNotFound)
+		testEndpoint(ctx, t, apiAddr, "/static/", http.StatusNotFound)
+
+		testEndpoint(ctx, t, uiAddr, "/", http.StatusOK)
+		testEndpoint(ctx, t, uiAddr, "/vuetifypoc", http.StatusOK)
+		testEndpoint(ctx, t, uiAddr, "/static/", http.StatusOK)
+	})
+}
+
+func TestConsoleBackendWithEnabledFrontEnd(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
+		Reconfigure: testplanet.Reconfigure{
+			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+				config.Console.UseVuetifyProject = true
+			},
+		},
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		apiAddr := planet.Satellites[0].API.Console.Listener.Addr().String()
+
+		testEndpoint(ctx, t, apiAddr, "/", http.StatusOK)
+		testEndpoint(ctx, t, apiAddr, "/vuetifypoc", http.StatusOK)
+		testEndpoint(ctx, t, apiAddr, "/static/", http.StatusOK)
+	})
+}
+
+func testEndpoint(ctx context.Context, t *testing.T, addr, endpoint string, expectedStatus int) {
+	client := http.Client{}
+	url := "http://" + addr + endpoint
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
+	require.NoError(t, err)
+
+	result, err := client.Do(req)
+	require.NoError(t, err)
+	require.Equal(t, expectedStatus, result.StatusCode)
+	require.NoError(t, result.Body.Close())
+}

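The TestInvitedRouting change above pins down the behavioral fix in handleInvited: the email is now percent-encoded before being placed in the redirect query string, since characters such as '@' and '+' are meaningful there. A two-line demonstration:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	fmt.Println("/login?email=" + "invited@mail.test")                  // raw: /login?email=invited@mail.test
	fmt.Println("/login?email=" + url.QueryEscape("invited@mail.test")) // escaped: /login?email=invited%40mail.test
}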

@ -0,0 +1,89 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package console
import (
"context"
"storj.io/common/memory"
"storj.io/storj/satellite/payments/billing"
)
var _ billing.Observer = (*UpgradeUserObserver)(nil)
// UpgradeUserObserver used to upgrade user if their balance is more than $10 after confirmed token transaction.
type UpgradeUserObserver struct {
consoleDB DB
transactionsDB billing.TransactionsDB
usageLimitsConfig UsageLimitsConfig
userBalanceForUpgrade int64
}
// NewUpgradeUserObserver creates new observer instance.
func NewUpgradeUserObserver(consoleDB DB, transactionsDB billing.TransactionsDB, usageLimitsConfig UsageLimitsConfig, userBalanceForUpgrade int64) *UpgradeUserObserver {
return &UpgradeUserObserver{
consoleDB: consoleDB,
transactionsDB: transactionsDB,
usageLimitsConfig: usageLimitsConfig,
userBalanceForUpgrade: userBalanceForUpgrade,
}
}
// Process puts user into the paid tier and converts projects to upgraded limits.
func (o *UpgradeUserObserver) Process(ctx context.Context, transaction billing.Transaction) (err error) {
defer mon.Task()(&ctx)(&err)
user, err := o.consoleDB.Users().Get(ctx, transaction.UserID)
if err != nil {
return err
}
if user.PaidTier {
return nil
}
balance, err := o.transactionsDB.GetBalance(ctx, user.ID)
if err != nil {
return err
}
// check if user's balance is less than needed amount for upgrade.
if balance.BaseUnits() < o.userBalanceForUpgrade {
return nil
}
err = o.consoleDB.Users().UpdatePaidTier(ctx, user.ID, true,
o.usageLimitsConfig.Bandwidth.Paid,
o.usageLimitsConfig.Storage.Paid,
o.usageLimitsConfig.Segment.Paid,
o.usageLimitsConfig.Project.Paid,
)
if err != nil {
return err
}
projects, err := o.consoleDB.Projects().GetOwn(ctx, user.ID)
if err != nil {
return err
}
for _, project := range projects {
if project.StorageLimit == nil || *project.StorageLimit < o.usageLimitsConfig.Storage.Paid {
project.StorageLimit = new(memory.Size)
*project.StorageLimit = o.usageLimitsConfig.Storage.Paid
}
if project.BandwidthLimit == nil || *project.BandwidthLimit < o.usageLimitsConfig.Bandwidth.Paid {
project.BandwidthLimit = new(memory.Size)
*project.BandwidthLimit = o.usageLimitsConfig.Bandwidth.Paid
}
		if project.SegmentLimit == nil || *project.SegmentLimit < o.usageLimitsConfig.Segment.Paid {
			// allocate before assigning, matching the two branches above,
			// so a nil SegmentLimit cannot be dereferenced
			project.SegmentLimit = new(int64)
			*project.SegmentLimit = o.usageLimitsConfig.Segment.Paid
		}
		err = o.consoleDB.Projects().Update(ctx, &project)
		if err != nil {
			return err
		}
	}

	return nil
}

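For scale: the observer's threshold is expressed in base units of currency.USDollarsMicro, which carries six decimal places, so the default of 10,000,000 base units (see the UserBalanceForUpgrade config in the next file) is exactly $10. A quick check using the currency helpers that appear elsewhere in this changeset:

package main

import (
	"fmt"

	"storj.io/common/currency"
)

func main() {
	threshold := currency.AmountFromBaseUnits(10000000, currency.USDollarsMicro)
	fmt.Println(threshold.AsDecimal()) // 10, i.e. ten US dollars
}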

@@ -24,6 +24,7 @@ import (
	"golang.org/x/crypto/bcrypt"

	"storj.io/common/currency"
+	"storj.io/common/http/requestid"
	"storj.io/common/macaroon"
	"storj.io/common/memory"
	"storj.io/common/uuid"
@@ -75,7 +76,7 @@ const (
	projInviteInvalidErrMsg = "The invitation has expired or is invalid"
	projInviteAlreadyMemberErrMsg = "You are already a member of the project"
	projInviteResponseInvalidErrMsg = "Invalid project member invitation response"
-	projInviteActiveErrMsg = "The invitation for '%s' has not expired yet"
+	projInviteExistsErrMsg = "An active invitation for '%s' already exists"
)

var (
@@ -143,8 +144,8 @@ var (
	// or has expired.
	ErrProjectInviteInvalid = errs.Class("invalid project invitation")

-	// ErrProjectInviteActive occurs when trying to reinvite a user whose invitation hasn't expired yet.
-	ErrProjectInviteActive = errs.Class("project invitation active")
+	// ErrAlreadyInvited occurs when trying to invite a user who already has an unexpired invitation.
+	ErrAlreadyInvited = errs.Class("user is already invited")
)

// Service is handling accounts related logic.
@@ -193,6 +194,7 @@ type Config struct {
	LoginAttemptsWithoutPenalty int `help:"number of times user can try to login without penalty" default:"3"`
	FailedLoginPenalty float64 `help:"incremental duration of penalty for failed login attempts in minutes" default:"2.0"`
	ProjectInvitationExpiration time.Duration `help:"duration that project member invitations are valid for" default:"168h"`
+	UserBalanceForUpgrade int64 `help:"amount of base units of US micro dollars needed to upgrade user's tier status" default:"10000000"`
	UsageLimits UsageLimitsConfig
	Captcha CaptchaConfig
	Session SessionConfig
@@ -302,6 +304,10 @@ func (s *Service) auditLog(ctx context.Context, operation string, userID *uuid.U
	if email != "" {
		fields = append(fields, zap.String("email", email))
	}

+	if requestID := requestid.FromContext(ctx); requestID != "" {
+		fields = append(fields, zap.String("requestID", requestID))
+	}

	fields = append(fields, fields...)
	s.auditLogger.Info("console activity", fields...)
}
@@ -2725,7 +2731,7 @@ func (s *Service) GetProjectUsageLimits(ctx context.Context, projectID uuid.UUID
		return nil, Error.Wrap(err)
	}

-	prUsageLimits, err := s.getProjectUsageLimits(ctx, isMember.project.ID)
+	prUsageLimits, err := s.getProjectUsageLimits(ctx, isMember.project.ID, true)
	if err != nil {
		return nil, Error.Wrap(err)
	}
@@ -2767,7 +2773,7 @@ func (s *Service) GetTotalUsageLimits(ctx context.Context) (_ *ProjectUsageLimit
	var totalBandwidthUsed int64

	for _, pr := range projects {
-		prUsageLimits, err := s.getProjectUsageLimits(ctx, pr.ID)
+		prUsageLimits, err := s.getProjectUsageLimits(ctx, pr.ID, false)
		if err != nil {
			return nil, Error.Wrap(err)
		}
@@ -2786,7 +2792,7 @@ func (s *Service) GetTotalUsageLimits(ctx context.Context) (_ *ProjectUsageLimit
	}, nil
}

-func (s *Service) getProjectUsageLimits(ctx context.Context, projectID uuid.UUID) (_ *ProjectUsageLimits, err error) {
+func (s *Service) getProjectUsageLimits(ctx context.Context, projectID uuid.UUID, onlySettledBandwidth bool) (_ *ProjectUsageLimits, err error) {
	defer mon.Task()(&ctx)(&err)

	storageLimit, err := s.projectUsage.GetProjectStorageLimit(ctx, projectID)
@@ -2806,10 +2812,17 @@ func (s *Service) getProjectUsageLimits(ctx context.Context, projectID uuid.UUID
	if err != nil {
		return nil, err
	}

-	bandwidthUsed, err := s.projectUsage.GetProjectBandwidthTotals(ctx, projectID)
+	var bandwidthUsed int64
+	if onlySettledBandwidth {
+		bandwidthUsed, err = s.projectUsage.GetProjectSettledBandwidth(ctx, projectID)
+	} else {
+		bandwidthUsed, err = s.projectUsage.GetProjectBandwidthTotals(ctx, projectID)
+	}
	if err != nil {
		return nil, err
	}

	segmentUsed, err := s.projectUsage.GetProjectSegmentTotals(ctx, projectID)
	if err != nil {
		return nil, err
@@ -2923,7 +2936,7 @@ func (s *Service) checkProjectLimit(ctx context.Context, userID uuid.UUID) (curr
		return 0, Error.Wrap(err)
	}

-	projects, err := s.GetUsersProjects(ctx)
+	projects, err := s.store.Projects().GetOwn(ctx, userID)
	if err != nil {
		return 0, Error.Wrap(err)
	}
@@ -3078,6 +3091,12 @@ func EtherscanURL(tx string) string {
// ErrWalletNotClaimed shows that no address is claimed by the user.
var ErrWalletNotClaimed = errs.Class("wallet is not claimed")

+// TestSwapDepositWallets replaces the existing handler for deposit wallets with
+// the one specified for use in testing.
+func (payment Payments) TestSwapDepositWallets(dw payments.DepositWallets) {
+	payment.service.depositWallets = dw
+}

// ClaimWallet requests a new wallet for the users to be used for payments. If wallet is already claimed,
// it will return with the info without error.
func (payment Payments) ClaimWallet(ctx context.Context) (_ WalletInfo, err error) {
@@ -3198,6 +3217,27 @@ func (payment Payments) WalletPayments(ctx context.Context) (_ WalletPayments, e
	}, nil
}

+// WalletPaymentsWithConfirmations returns with all the native blockchain payments (including pending) for a user's wallet.
+func (payment Payments) WalletPaymentsWithConfirmations(ctx context.Context) (paymentsWithConfirmations []payments.WalletPaymentWithConfirmations, err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	user, err := GetUser(ctx)
+	if err != nil {
+		return nil, Error.Wrap(err)
+	}
+
+	address, err := payment.service.depositWallets.Get(ctx, user.ID)
+	if err != nil {
+		return nil, Error.Wrap(err)
+	}
+
+	paymentsWithConfirmations, err = payment.service.depositWallets.PaymentsWithConfirmations(ctx, address)
+	if err != nil {
+		return nil, Error.Wrap(err)
+	}
+
+	return
+}

// Purchase makes a purchase of `price` amount with description of `desc` and payment method with id of `paymentMethodID`.
// If a paid invoice with the same description exists, then we assume this is a retried request and don't create and pay
// another invoice.
@@ -3563,7 +3603,6 @@ func (s *Service) RespondToProjectInvitation(ctx context.Context, projectID uuid
// InviteProjectMembers invites users by email to given project.
// If an invitation already exists and has expired, it will be replaced and the user will be sent a new email.
-// Email addresses not belonging to a user are ignored.
// projectID here may be project.PublicID or project.ID.
func (s *Service) InviteProjectMembers(ctx context.Context, projectID uuid.UUID, emails []string) (invites []ProjectInvitation, err error) {
	defer mon.Task()(&ctx)(&err)
@@ -3581,6 +3620,14 @@ func (s *Service) InviteProjectMembers(ctx context.Context, projectID uuid.UUID,
	var users []*User
	var newUserEmails []string
	for _, email := range emails {
+		invite, err := s.store.ProjectInvitations().Get(ctx, projectID, email)
+		if err != nil && !errs.Is(err, sql.ErrNoRows) {
+			return nil, Error.Wrap(err)
+		}
+		if invite != nil && !s.IsProjectInvitationExpired(invite) {
+			return nil, ErrAlreadyInvited.New(projInviteExistsErrMsg, email)
+		}
+
		invitedUser, err := s.store.Users().GetByEmail(ctx, email)
		if err == nil {
			_, err = s.isProjectMember(ctx, invitedUser.ID, projectID)
@@ -3589,14 +3636,6 @@ func (s *Service) InviteProjectMembers(ctx context.Context, projectID uuid.UUID,
			} else if err == nil {
				return nil, ErrAlreadyMember.New("%s is already a member", email)
			}

-			invite, err := s.store.ProjectInvitations().Get(ctx, projectID, email)
-			if err != nil && !errs.Is(err, sql.ErrNoRows) {
-				return nil, Error.Wrap(err)
-			}
-			if invite != nil && !s.IsProjectInvitationExpired(invite) {
-				return nil, ErrProjectInviteActive.New(projInviteActiveErrMsg, invitedUser.Email)
-			}

			users = append(users, invitedUser)
		} else if errs.Is(err, sql.ErrNoRows) {
			newUserEmails = append(newUserEmails, email)

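Two behavioral changes in this file deserve a note. First, the single-project limits path now reports settled bandwidth (roughly: bandwidth confirmed through submitted orders) rather than allocated bandwidth (reserved up front and possibly never used), while the cross-project totals keep the allocated figure; checkProjectLimit likewise now counts projects fetched directly from the store by owner id instead of going through the session-scoped GetUsersProjects. An annotated restatement of the selector, using only names from the diff above:

	var bandwidthUsed int64
	if onlySettledBandwidth {
		// settled: usage confirmed by order processing; what a single
		// project's usage view now shows (see the test in the next file).
		bandwidthUsed, err = s.projectUsage.GetProjectSettledBandwidth(ctx, projectID)
	} else {
		// allocated: bandwidth reserved when orders were created, which can
		// overstate real usage; still used for the all-projects totals.
		bandwidthUsed, err = s.projectUsage.GetProjectBandwidthTotals(ctx, projectID)
	}

Second, InviteProjectMembers now checks for an unexpired invitation before looking the user up, so re-inviting an address with a pending invite fails with ErrAlreadyInvited even when no account exists for that email yet; that is also why the "addresses not belonging to a user are ignored" doc line was dropped.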

@@ -23,6 +23,7 @@ import (
	"storj.io/common/currency"
	"storj.io/common/macaroon"
	"storj.io/common/memory"
+	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
@@ -434,6 +435,20 @@ func TestService(t *testing.T) {
		require.Equal(t, updatedBandwidthLimit.Int64(), limits1.BandwidthLimit)
		require.Equal(t, updatedStorageLimit.Int64(), limits2.StorageLimit)
		require.Equal(t, updatedBandwidthLimit.Int64(), limits2.BandwidthLimit)

+		bucket := "testbucket1"
+		err = planet.Uplinks[1].CreateBucket(ctx, sat, bucket)
+		require.NoError(t, err)
+
+		now := time.Now().UTC()
+		startOfMonth := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, time.UTC)
+		err = sat.DB.Orders().UpdateBucketBandwidthAllocation(ctx, up2Proj.ID, []byte(bucket), pb.PieceAction_GET, 1000, startOfMonth)
+		require.NoError(t, err)
+
+		limits2, err = service.GetProjectUsageLimits(userCtx2, up2Proj.PublicID)
+		require.NoError(t, err)
+		require.NotNil(t, limits2)
+		require.Equal(t, int64(0), limits2.BandwidthUsed)
	})

	t.Run("ChangeEmail", func(t *testing.T) {
@@ -1687,6 +1702,86 @@ func TestPaymentsWalletPayments(t *testing.T) {
	})
}

+type mockDepositWallets struct {
+	address blockchain.Address
+	payments []payments.WalletPaymentWithConfirmations
+}
+
+func (dw mockDepositWallets) Claim(_ context.Context, _ uuid.UUID) (blockchain.Address, error) {
+	return dw.address, nil
+}
+
+func (dw mockDepositWallets) Get(_ context.Context, _ uuid.UUID) (blockchain.Address, error) {
+	return dw.address, nil
+}
+
+func (dw mockDepositWallets) Payments(_ context.Context, _ blockchain.Address, _ int, _ int64) (p []payments.WalletPayment, err error) {
+	return
+}
+
+func (dw mockDepositWallets) PaymentsWithConfirmations(_ context.Context, _ blockchain.Address) ([]payments.WalletPaymentWithConfirmations, error) {
+	return dw.payments, nil
+}
+
+func TestWalletPaymentsWithConfirmations(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		sat := planet.Satellites[0]
+		service := sat.API.Console.Service
+		paymentsService := service.Payments()
+
+		user, err := sat.AddUser(ctx, console.CreateUser{
+			FullName: "Test User",
+			Email: "test@mail.test",
+			Password: "example",
+		}, 1)
+		require.NoError(t, err)
+
+		now := time.Now()
+		wallet := blockchaintest.NewAddress()
+
+		var expected []payments.WalletPaymentWithConfirmations
+		for i := 0; i < 3; i++ {
+			expected = append(expected, payments.WalletPaymentWithConfirmations{
+				From: blockchaintest.NewAddress().Hex(),
+				To: wallet.Hex(),
+				TokenValue: currency.AmountFromBaseUnits(int64(i), currency.StorjToken).AsDecimal(),
+				USDValue: currency.AmountFromBaseUnits(int64(i), currency.USDollarsMicro).AsDecimal(),
+				Status: payments.PaymentStatusConfirmed,
+				BlockHash: blockchaintest.NewHash().Hex(),
+				BlockNumber: int64(i),
+				Transaction: blockchaintest.NewHash().Hex(),
+				LogIndex: i,
+				Timestamp: now,
+				Confirmations: int64(i),
+				BonusTokens: decimal.NewFromInt(int64(i)),
+			})
+		}
+
+		paymentsService.TestSwapDepositWallets(mockDepositWallets{address: wallet, payments: expected})
+
+		reqCtx := console.WithUser(ctx, user)
+
+		walletPayments, err := paymentsService.WalletPaymentsWithConfirmations(reqCtx)
+		require.NoError(t, err)
+		require.NotZero(t, len(walletPayments))
+
+		for i, wp := range walletPayments {
+			require.Equal(t, expected[i].From, wp.From)
+			require.Equal(t, expected[i].To, wp.To)
+			require.Equal(t, expected[i].TokenValue, wp.TokenValue)
+			require.Equal(t, expected[i].USDValue, wp.USDValue)
+			require.Equal(t, expected[i].Status, wp.Status)
+			require.Equal(t, expected[i].BlockHash, wp.BlockHash)
+			require.Equal(t, expected[i].BlockNumber, wp.BlockNumber)
+			require.Equal(t, expected[i].Transaction, wp.Transaction)
+			require.Equal(t, expected[i].LogIndex, wp.LogIndex)
+			require.Equal(t, expected[i].Timestamp, wp.Timestamp)
+		}
+	})
+}

func TestPaymentsPurchase(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
@@ -2049,7 +2144,7 @@ func TestProjectInvitations(t *testing.T) {
		// resending an active invitation should fail.
		invites, err = service.InviteProjectMembers(ctx2, project.ID, []string{user3.Email})
-		require.True(t, console.ErrProjectInviteActive.Has(err))
+		require.True(t, console.ErrAlreadyInvited.Has(err))
		require.Empty(t, invites)

		// expire the invitation.

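TestSwapDepositWallets plus mockDepositWallets is a plain interface seam: anything satisfying payments.DepositWallets can stand in for the real wallet backend, which is what lets the test above fabricate confirmed payments without a blockchain. A compile-time assertion (a common Go idiom, assumed here rather than shown in the diff) keeps the mock honest if the interface grows:

var _ payments.DepositWallets = mockDepositWallets{}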

@@ -7,17 +7,22 @@ import (
	"crypto/tls"
	"crypto/x509"
	"net"
+	"sort"
	"testing"

	"github.com/stretchr/testify/require"

+	"storj.io/common/identity/testidentity"
+	"storj.io/common/nodetag"
	"storj.io/common/pb"
	"storj.io/common/rpc/rpcpeer"
	"storj.io/common/rpc/rpcstatus"
+	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/storagenode"
+	"storj.io/storj/storagenode/contact"
)

func TestSatelliteContactEndpoint(t *testing.T) {
@@ -177,3 +182,143 @@ func TestSatellitePingMe_Failure(t *testing.T) {
		require.Nil(t, resp)
	})
}

+func TestSatelliteContactEndpoint_WithNodeTags(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
+		Reconfigure: testplanet.Reconfigure{
+			StorageNode: func(index int, config *storagenode.Config) {
+				config.Server.DisableQUIC = true
+				config.Contact.Tags = contact.SignedTags(pb.SignedNodeTagSets{
+					Tags: []*pb.SignedNodeTagSet{},
+				})
+			},
+		},
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		nodeInfo := planet.StorageNodes[0].Contact.Service.Local()
+		ident := planet.StorageNodes[0].Identity
+
+		peer := rpcpeer.Peer{
+			Addr: &net.TCPAddr{
+				IP: net.ParseIP(nodeInfo.Address),
+				Port: 5,
+			},
+			State: tls.ConnectionState{
+				PeerCertificates: []*x509.Certificate{ident.Leaf, ident.CA},
+			},
+		}
+
+		unsignedTags := &pb.NodeTagSet{
+			NodeId: ident.ID.Bytes(),
+			Tags: []*pb.Tag{
+				{
+					Name: "soc",
+					Value: []byte{1},
+				},
+				{
+					Name: "foo",
+					Value: []byte("bar"),
+				},
+			},
+		}
+
+		signedTags, err := nodetag.Sign(ctx, unsignedTags, signing.SignerFromFullIdentity(planet.Satellites[0].Identity))
+		require.NoError(t, err)
+
+		peerCtx := rpcpeer.NewContext(ctx, &peer)
+		resp, err := planet.Satellites[0].Contact.Endpoint.CheckIn(peerCtx, &pb.CheckInRequest{
+			Address: nodeInfo.Address,
+			Version: &nodeInfo.Version,
+			Capacity: &nodeInfo.Capacity,
+			Operator: &nodeInfo.Operator,
+			DebounceLimit: 3,
+			Features: 0xf,
+			SignedTags: &pb.SignedNodeTagSets{
+				Tags: []*pb.SignedNodeTagSet{
+					signedTags,
+				},
+			},
+		})
+		require.NoError(t, err)
+		require.NotNil(t, resp)
+
+		tags, err := planet.Satellites[0].DB.OverlayCache().GetNodeTags(ctx, ident.ID)
+		require.NoError(t, err)
+		require.Len(t, tags, 2)
+		sort.Slice(tags, func(i, j int) bool {
+			return tags[i].Name < tags[j].Name
+		})
+		require.Equal(t, "foo", tags[0].Name)
+		require.Equal(t, "bar", string(tags[0].Value))
+		require.Equal(t, "soc", tags[1].Name)
+		require.Equal(t, []byte{1}, tags[1].Value)
+	})
+}
+
+func TestSatelliteContactEndpoint_WithWrongNodeTags(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
+		Reconfigure: testplanet.Reconfigure{
+			StorageNode: func(index int, config *storagenode.Config) {
+				config.Server.DisableQUIC = true
+				config.Contact.Tags = contact.SignedTags(pb.SignedNodeTagSets{
+					Tags: []*pb.SignedNodeTagSet{},
+				})
+			},
+		},
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		nodeInfo := planet.StorageNodes[0].Contact.Service.Local()
+		ident := planet.StorageNodes[0].Identity
+
+		peer := rpcpeer.Peer{
+			Addr: &net.TCPAddr{
+				IP: net.ParseIP(nodeInfo.Address),
+				Port: 5,
+			},
+			State: tls.ConnectionState{
+				PeerCertificates: []*x509.Certificate{ident.Leaf, ident.CA},
+			},
+		}
+
+		wrongNodeID := testidentity.MustPregeneratedIdentity(99, storj.LatestIDVersion()).ID
+		unsignedTags := &pb.NodeTagSet{
+			NodeId: wrongNodeID.Bytes(),
+			Tags: []*pb.Tag{
+				{
+					Name: "soc",
+					Value: []byte{1},
+				},
+				{
+					Name: "foo",
+					Value: []byte("bar"),
+				},
+			},
+		}
+
+		signedTags, err := nodetag.Sign(ctx, unsignedTags, signing.SignerFromFullIdentity(planet.Satellites[0].Identity))
+		require.NoError(t, err)
+
+		peerCtx := rpcpeer.NewContext(ctx, &peer)
+		resp, err := planet.Satellites[0].Contact.Endpoint.CheckIn(peerCtx, &pb.CheckInRequest{
+			Address: nodeInfo.Address,
+			Version: &nodeInfo.Version,
+			Capacity: &nodeInfo.Capacity,
+			Operator: &nodeInfo.Operator,
+			DebounceLimit: 3,
+			Features: 0xf,
+			SignedTags: &pb.SignedNodeTagSets{
+				Tags: []*pb.SignedNodeTagSet{
+					signedTags,
+				},
+			},
+		})
+		require.NoError(t, err)
+		require.NotNil(t, resp)
+
+		tags, err := planet.Satellites[0].DB.OverlayCache().GetNodeTags(ctx, ident.ID)
+		require.NoError(t, err)
+		require.Len(t, tags, 0)
+	})
+}


@@ -114,6 +114,10 @@ func (endpoint *Endpoint) CheckIn(ctx context.Context, req *pb.CheckInRequest) (
			req.Operator.WalletFeatures = nil
		}
	}

+	err = endpoint.service.processNodeTags(ctx, nodeID, req.SignedTags)
+	if err != nil {
+		endpoint.log.Info("failed to update node tags", zap.String("node address", req.Address), zap.Stringer("Node ID", nodeID), zap.Error(err))
+	}

	nodeInfo := overlay.NodeCheckInInfo{
		NodeID: peerID.ID,


@ -12,11 +12,13 @@ import (
"github.com/zeebo/errs" "github.com/zeebo/errs"
"go.uber.org/zap" "go.uber.org/zap"
"storj.io/common/nodetag"
"storj.io/common/pb" "storj.io/common/pb"
"storj.io/common/rpc" "storj.io/common/rpc"
"storj.io/common/rpc/quic" "storj.io/common/rpc/quic"
"storj.io/common/rpc/rpcstatus" "storj.io/common/rpc/rpcstatus"
"storj.io/common/storj" "storj.io/common/storj"
"storj.io/storj/satellite/nodeselection"
"storj.io/storj/satellite/overlay" "storj.io/storj/satellite/overlay"
) )
@ -49,19 +51,22 @@ type Service struct {
timeout time.Duration
idLimiter *RateLimiter
allowPrivateIP bool
nodeTagAuthority nodetag.Authority
}
// NewService creates a new contact service.
func NewService(log *zap.Logger, self *overlay.NodeDossier, overlay *overlay.Service, peerIDs overlay.PeerIdentities, dialer rpc.Dialer, authority nodetag.Authority, config Config) *Service {
return &Service{
log: log,
self: self,
overlay: overlay,
peerIDs: peerIDs,
dialer: dialer,
timeout: config.Timeout,
idLimiter: NewRateLimiter(config.RateLimitInterval, config.RateLimitBurst, config.RateLimitCacheSize),
allowPrivateIP: config.AllowPrivateIP,
nodeTagAuthority: authority,
}
}
@ -151,3 +156,56 @@ func (service *Service) pingNodeQUIC(ctx context.Context, nodeurl storj.NodeURL)
return nil
}
func (service *Service) processNodeTags(ctx context.Context, nodeID storj.NodeID, req *pb.SignedNodeTagSets) error {
if req != nil {
tags := nodeselection.NodeTags{}
for _, t := range req.Tags {
verifiedTags, signerID, err := verifyTags(ctx, service.nodeTagAuthority, nodeID, t)
if err != nil {
service.log.Info("Failed to verify tags.", zap.Error(err), zap.Stringer("NodeID", nodeID))
continue
}
ts := time.Unix(verifiedTags.Timestamp, 0)
for _, vt := range verifiedTags.Tags {
tags = append(tags, nodeselection.NodeTag{
NodeID: nodeID,
Name: vt.Name,
Value: vt.Value,
SignedAt: ts,
Signer: signerID,
})
}
}
if len(tags) > 0 {
err := service.overlay.UpdateNodeTags(ctx, tags)
if err != nil {
return Error.Wrap(err)
}
}
}
return nil
}
func verifyTags(ctx context.Context, authority nodetag.Authority, nodeID storj.NodeID, t *pb.SignedNodeTagSet) (*pb.NodeTagSet, storj.NodeID, error) {
signerID, err := storj.NodeIDFromBytes(t.SignerNodeId)
if err != nil {
return nil, signerID, errs.New("failed to parse signerNodeID from verifiedTags: '%x', %s", t.SignerNodeId, err.Error())
}
verifiedTags, err := authority.Verify(ctx, t)
if err != nil {
return nil, signerID, errs.New("received node tags with wrong/unknown signature: '%x', %s", t.Signature, err.Error())
}
signedNodeID, err := storj.NodeIDFromBytes(verifiedTags.NodeId)
if err != nil {
return nil, signerID, errs.New("failed to parse nodeID from verifiedTags: '%x', %s", verifiedTags.NodeId, err.Error())
}
if signedNodeID != nodeID {
return nil, signerID, errs.New("the tag is signed for a different node. Expected NodeID: '%s', Received NodeID: '%s'", nodeID, signedNodeID)
}
return verifiedTags, signerID, nil
}
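Taken together, CheckIn now accepts optional signed tag sets: the node submits pb.SignedNodeTagSets, processNodeTags verifies each set against the satellite's configured nodetag.Authority, and verified tags are persisted through overlay.UpdateNodeTags. A minimal sketch of that round trip, using only the helpers visible above (the identity, node ID, and tag values are illustrative assumptions, and the usual imports are omitted):

// Sketch: produce a SignedNodeTagSet that verifyTags above would accept.
// signerIdent is assumed to be registered in the satellite's nodetag.Authority;
// nodeID is the node the tags describe.
func signAndVerifyExample(ctx context.Context, signerIdent *identity.FullIdentity, nodeID storj.NodeID) error {
	unsigned := &pb.NodeTagSet{
		NodeId: nodeID.Bytes(),
		Tags:   []*pb.Tag{{Name: "soc", Value: []byte{1}}},
	}
	signed, err := nodetag.Sign(ctx, unsigned, signing.SignerFromFullIdentity(signerIdent))
	if err != nil {
		return err
	}
	authority := nodetag.Authority{signing.SignerFromFullIdentity(signerIdent)}
	verified, signerID, err := verifyTags(ctx, authority, nodeID, signed)
	if err != nil {
		return err
	}
	_ = verified // verified.Tags carries the accepted tag list
	_ = signerID // identifies which trusted signer produced the set
	return nil
}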

View File

@ -0,0 +1,149 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package contact
import (
"testing"
"github.com/stretchr/testify/require"
"storj.io/common/identity/testidentity"
"storj.io/common/nodetag"
"storj.io/common/pb"
"storj.io/common/signing"
"storj.io/common/storj"
"storj.io/common/testcontext"
)
func TestVerifyTags(t *testing.T) {
ctx := testcontext.New(t)
snIdentity := testidentity.MustPregeneratedIdentity(0, storj.LatestIDVersion())
signerIdentity := testidentity.MustPregeneratedIdentity(1, storj.LatestIDVersion())
signer := signing.SignerFromFullIdentity(signerIdentity)
authority := nodetag.Authority{
signing.SignerFromFullIdentity(signerIdentity),
}
t.Run("ok tags", func(t *testing.T) {
tags, err := nodetag.Sign(ctx, &pb.NodeTagSet{
NodeId: snIdentity.ID.Bytes(),
Tags: []*pb.Tag{
{
Name: "foo",
Value: []byte("bar"),
},
},
}, signer)
require.NoError(t, err)
verifiedTags, signerID, err := verifyTags(ctx, authority, snIdentity.ID, tags)
require.NoError(t, err)
require.Equal(t, signerIdentity.ID, signerID)
require.Len(t, verifiedTags.Tags, 1)
require.Equal(t, "foo", verifiedTags.Tags[0].Name)
require.Equal(t, []byte("bar"), verifiedTags.Tags[0].Value)
})
t.Run("wrong signer ID", func(t *testing.T) {
tags, err := nodetag.Sign(ctx, &pb.NodeTagSet{
NodeId: snIdentity.ID.Bytes(),
Tags: []*pb.Tag{
{
Name: "foo",
Value: []byte("bar"),
},
},
}, signer)
require.NoError(t, err)
tags.SignerNodeId = []byte{1, 2, 3, 4}
_, _, err = verifyTags(ctx, authority, snIdentity.ID, tags)
require.Error(t, err)
require.ErrorContains(t, err, "01020304")
require.ErrorContains(t, err, "failed to parse signerNodeID")
})
t.Run("wrong signature", func(t *testing.T) {
tags, err := nodetag.Sign(ctx, &pb.NodeTagSet{
NodeId: snIdentity.ID.Bytes(),
Tags: []*pb.Tag{
{
Name: "foo",
Value: []byte("bar"),
},
},
}, signer)
require.NoError(t, err)
tags.Signature = []byte{4, 3, 2, 1}
_, _, err = verifyTags(ctx, authority, snIdentity.ID, tags)
require.Error(t, err)
require.ErrorContains(t, err, "04030201")
require.ErrorContains(t, err, "wrong/unknown signature")
})
t.Run("unknown signer", func(t *testing.T) {
otherSignerIdentity := testidentity.MustPregeneratedIdentity(2, storj.LatestIDVersion())
otherSigner := signing.SignerFromFullIdentity(otherSignerIdentity)
tags, err := nodetag.Sign(ctx, &pb.NodeTagSet{
NodeId: snIdentity.ID.Bytes(),
Tags: []*pb.Tag{
{
Name: "foo",
Value: []byte("bar"),
},
},
}, otherSigner)
require.NoError(t, err)
_, _, err = verifyTags(ctx, authority, snIdentity.ID, tags)
require.Error(t, err)
require.ErrorContains(t, err, "wrong/unknown signature")
})
t.Run("signed for different node", func(t *testing.T) {
otherNodeID := testidentity.MustPregeneratedIdentity(3, storj.LatestIDVersion()).ID
tags, err := nodetag.Sign(ctx, &pb.NodeTagSet{
NodeId: otherNodeID.Bytes(),
Tags: []*pb.Tag{
{
Name: "foo",
Value: []byte("bar"),
},
},
}, signer)
require.NoError(t, err)
_, _, err = verifyTags(ctx, authority, snIdentity.ID, tags)
require.Error(t, err)
require.ErrorContains(t, err, snIdentity.ID.String())
require.ErrorContains(t, err, "the tag is signed for a different node")
})
t.Run("wrong NodeID", func(t *testing.T) {
tags, err := nodetag.Sign(ctx, &pb.NodeTagSet{
NodeId: []byte{4, 4, 4},
Tags: []*pb.Tag{
{
Name: "foo",
Value: []byte("bar"),
},
},
}, signer)
require.NoError(t, err)
_, _, err = verifyTags(ctx, authority, snIdentity.ID, tags)
require.Error(t, err)
require.ErrorContains(t, err, "040404")
require.ErrorContains(t, err, "failed to parse nodeID")
})
}

View File

@ -34,6 +34,7 @@ import (
"storj.io/storj/satellite/console/consoleauth" "storj.io/storj/satellite/console/consoleauth"
"storj.io/storj/satellite/console/dbcleanup" "storj.io/storj/satellite/console/dbcleanup"
"storj.io/storj/satellite/console/emailreminders" "storj.io/storj/satellite/console/emailreminders"
"storj.io/storj/satellite/gc/sender"
"storj.io/storj/satellite/mailservice" "storj.io/storj/satellite/mailservice"
"storj.io/storj/satellite/metabase" "storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/metabase/zombiedeletion" "storj.io/storj/satellite/metabase/zombiedeletion"
@ -142,6 +143,10 @@ type Core struct {
ConsoleDBCleanup struct {
Chore *dbcleanup.Chore
}
GarbageCollection struct {
Sender *sender.Service
}
}
// New creates a new satellite.
@ -244,7 +249,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
{ // setup overlay
peer.Overlay.DB = peer.DB.OverlayCache()
peer.Overlay.Service, err = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, peer.DB.NodeEvents(), config.Placement.CreateFilters, config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
if err != nil {
return nil, errs.Combine(err, peer.Close())
}
@ -491,7 +496,9 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
peer.Payments.StorjscanService = storjscan.NewService(log.Named("storjscan-service"),
peer.DB.Wallets(),
peer.DB.StorjscanPayments(),
peer.Payments.StorjscanClient,
pc.Storjscan.Confirmations,
pc.BonusRate)
if err != nil {
return nil, errs.Combine(err, peer.Close())
}
@ -512,6 +519,10 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
debug.Cycle("Payments Storjscan", peer.Payments.StorjscanChore.TransactionCycle), debug.Cycle("Payments Storjscan", peer.Payments.StorjscanChore.TransactionCycle),
) )
choreObservers := map[billing.ObserverBilling]billing.Observer{
billing.ObserverUpgradeUser: console.NewUpgradeUserObserver(peer.DB.Console(), peer.DB.Billing(), config.Console.UsageLimits, config.Console.UserBalanceForUpgrade),
}
peer.Payments.BillingChore = billing.NewChore(
peer.Log.Named("payments.billing:chore"),
[]billing.PaymentType{peer.Payments.StorjscanService},
@ -519,6 +530,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
config.Payments.BillingConfig.Interval,
config.Payments.BillingConfig.DisableLoop,
config.Payments.BonusRate,
choreObservers,
)
peer.Services.Add(lifecycle.Item{
Name: "billing:chore",
@ -534,6 +546,8 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
peer.DB.StripeCoinPayments(),
peer.Payments.Accounts,
peer.DB.Console().Users(),
peer.DB.Wallets(),
peer.DB.StorjscanPayments(),
console.NewAccountFreezeService(db.Console().AccountFreezeEvents(), db.Console().Users(), db.Console().Projects(), peer.Analytics.Service),
peer.Analytics.Service,
config.AccountFreeze,
@ -562,6 +576,22 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
})
}
{ // setup garbage collection
peer.GarbageCollection.Sender = sender.NewService(
peer.Log.Named("gc-sender"),
config.GarbageCollection,
peer.Dialer,
peer.Overlay.DB,
)
peer.Services.Add(lifecycle.Item{
Name: "gc-sender",
Run: peer.GarbageCollection.Sender.Run,
})
peer.Debug.Server.Panel.Add(
debug.Cycle("Garbage Collection", peer.GarbageCollection.Sender.Loop))
}
return peer, nil
}

View File

@ -22,6 +22,7 @@ import (
"storj.io/common/testcontext" "storj.io/common/testcontext"
"storj.io/common/testrand" "storj.io/common/testrand"
"storj.io/storj/private/testplanet" "storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/gc/bloomfilter" "storj.io/storj/satellite/gc/bloomfilter"
"storj.io/storj/satellite/metabase" "storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/metabase/rangedloop" "storj.io/storj/satellite/metabase/rangedloop"
@ -299,6 +300,157 @@ func TestGarbageCollectionWithCopies(t *testing.T) {
})
}
// TestGarbageCollectionWithCopiesWithDuplicateMetadata checks that server-side copy elements
// with duplicated metadata do not affect GC and that nothing unexpected is deleted from storage nodes.
func TestGarbageCollectionWithCopiesWithDuplicateMetadata(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 3, 4, 4),
func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.ServerSideCopyDuplicateMetadata = true
},
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
access := planet.Uplinks[0].Access[planet.Satellites[0].NodeURL().ID]
accessString, err := access.Serialize()
require.NoError(t, err)
gcsender := planet.Satellites[0].GarbageCollection.Sender
gcsender.Config.AccessGrant = accessString
// configure filter uploader
config := planet.Satellites[0].Config.GarbageCollectionBF
config.AccessGrant = accessString
project, err := planet.Uplinks[0].OpenProject(ctx, satellite)
require.NoError(t, err)
defer ctx.Check(project.Close)
allSpaceUsedForPieces := func() (all int64) {
for _, node := range planet.StorageNodes {
_, piecesContent, _, err := node.Storage2.Store.SpaceUsedTotalAndBySatellite(ctx)
require.NoError(t, err)
all += piecesContent
}
return all
}
expectedRemoteData := testrand.Bytes(8 * memory.KiB)
expectedInlineData := testrand.Bytes(1 * memory.KiB)
encryptedSize, err := encryption.CalcEncryptedSize(int64(len(expectedRemoteData)), storj.EncryptionParameters{
CipherSuite: storj.EncAESGCM,
BlockSize: 29 * 256 * memory.B.Int32(), // hardcoded value from uplink
})
require.NoError(t, err)
redundancyStrategy, err := planet.Satellites[0].Config.Metainfo.RS.RedundancyStrategy()
require.NoError(t, err)
pieceSize := eestream.CalcPieceSize(encryptedSize, redundancyStrategy.ErasureScheme)
singleRemoteUsed := pieceSize * int64(len(planet.StorageNodes))
totalUsedByNodes := 2 * singleRemoteUsed // two remote objects
require.NoError(t, planet.Uplinks[0].Upload(ctx, satellite, "testbucket", "remote", expectedRemoteData))
require.NoError(t, planet.Uplinks[0].Upload(ctx, satellite, "testbucket", "inline", expectedInlineData))
require.NoError(t, planet.Uplinks[0].Upload(ctx, satellite, "testbucket", "remote-no-copy", expectedRemoteData))
_, err = project.CopyObject(ctx, "testbucket", "remote", "testbucket", "remote-copy", nil)
require.NoError(t, err)
_, err = project.CopyObject(ctx, "testbucket", "inline", "testbucket", "inline-copy", nil)
require.NoError(t, err)
require.NoError(t, planet.WaitForStorageNodeEndpoints(ctx))
afterTotalUsedByNodes := allSpaceUsedForPieces()
require.Equal(t, totalUsedByNodes, afterTotalUsedByNodes)
// Wait for bloom filter observer to finish
rangedloopConfig := planet.Satellites[0].Config.RangedLoop
observer := bloomfilter.NewObserver(zaptest.NewLogger(t), config, planet.Satellites[0].Overlay.DB)
segments := rangedloop.NewMetabaseRangeSplitter(planet.Satellites[0].Metabase.DB, rangedloopConfig.AsOfSystemInterval, rangedloopConfig.BatchSize)
rangedLoop := rangedloop.NewService(zap.NewNop(), planet.Satellites[0].Config.RangedLoop, segments,
[]rangedloop.Observer{observer})
_, err = rangedLoop.RunOnce(ctx)
require.NoError(t, err)
// send to storagenode
err = gcsender.RunOnce(ctx)
require.NoError(t, err)
for _, node := range planet.StorageNodes {
node.Storage2.RetainService.TestWaitUntilEmpty()
}
// we should see all space used by all objects
afterTotalUsedByNodes = allSpaceUsedForPieces()
require.Equal(t, totalUsedByNodes, afterTotalUsedByNodes)
for _, toDelete := range []string{
// delete ancestors, no change in used space
"remote",
"inline",
// delete object without copy, used space should be decreased
"remote-no-copy",
} {
_, err = project.DeleteObject(ctx, "testbucket", toDelete)
require.NoError(t, err)
}
planet.WaitForStorageNodeDeleters(ctx)
// run GC
_, err = rangedLoop.RunOnce(ctx)
require.NoError(t, err)
// send to storagenode
err = gcsender.RunOnce(ctx)
require.NoError(t, err)
for _, node := range planet.StorageNodes {
node.Storage2.RetainService.TestWaitUntilEmpty()
}
// verify that we deleted only pieces for the "remote-no-copy" object
afterTotalUsedByNodes = allSpaceUsedForPieces()
require.Equal(t, singleRemoteUsed, afterTotalUsedByNodes)
// delete rest of objects to verify that everything will be removed also from SNs
for _, toDelete := range []string{
"remote-copy",
"inline-copy",
} {
_, err = project.DeleteObject(ctx, "testbucket", toDelete)
require.NoError(t, err)
}
planet.WaitForStorageNodeDeleters(ctx)
// run GC
_, err = rangedLoop.RunOnce(ctx)
require.NoError(t, err)
// send to storagenode
err = gcsender.RunOnce(ctx)
require.NoError(t, err)
for _, node := range planet.StorageNodes {
node.Storage2.RetainService.TestWaitUntilEmpty()
}
// verify that everything was removed also from storage nodes after GC
afterTotalUsedByNodes = allSpaceUsedForPieces()
require.EqualValues(t, 0, afterTotalUsedByNodes)
})
}
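For reference, the space accounting this test relies on is straightforward: with RS(2,3,4,4) every remote object is expanded to one piece of pieceSize bytes on each of the four nodes, while a duplicate-metadata copy references the same piece IDs instead of uploading new pieces, so it adds no bytes on the nodes. A restatement of that arithmetic using the test's own names (comments are explanatory, not part of the change):

// Two remote objects ("remote" and "remote-no-copy") put pieces on the nodes;
// "remote-copy" shares remote's piece IDs and inline objects live only in metadata.
singleRemoteUsed := pieceSize * int64(len(planet.StorageNodes)) // one piece per node
totalUsedByNodes := 2 * singleRemoteUsed                        // before any deletion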
func getSegment(ctx *testcontext.Context, t *testing.T, satellite *testplanet.Satellite, upl *testplanet.Uplink, bucket, path string) (_ metabase.ObjectLocation, _ metabase.Segment) {
access := upl.Access[satellite.ID()]

View File

@ -201,13 +201,21 @@ func (cache *NodeAliasCache) EnsurePiecesToAliases(ctx context.Context, pieces P
// ConvertAliasesToPieces converts alias pieces to pieces.
func (cache *NodeAliasCache) ConvertAliasesToPieces(ctx context.Context, aliasPieces AliasPieces) (_ Pieces, err error) {
return cache.convertAliasesToPieces(ctx, aliasPieces, make(Pieces, len(aliasPieces)))
}
// convertAliasesToPieces converts AliasPieces by populating the provided Pieces slice with the converted data.
func (cache *NodeAliasCache) convertAliasesToPieces(ctx context.Context, aliasPieces AliasPieces, pieces Pieces) (_ Pieces, err error) {
if len(aliasPieces) == 0 {
return Pieces{}, nil
}
if len(aliasPieces) != len(pieces) {
return Pieces{}, Error.New("aliasPieces and pieces length must be equal")
}
latest := cache.getLatest()
var missing []NodeAlias
for i, aliasPiece := range aliasPieces {
@ -224,13 +232,13 @@ func (cache *NodeAliasCache) ConvertAliasesToPieces(ctx context.Context, aliasPi
var err error
latest, err = cache.refresh(ctx, nil, missing)
if err != nil {
return Pieces{}, Error.New("failed to refresh node alias db: %w", err)
}
for i, aliasPiece := range aliasPieces {
node, ok := latest.Node(aliasPiece.Alias)
if !ok {
return Pieces{}, Error.New("aliases missing in database: %v", missing)
}
pieces[i].Number = aliasPiece.Number
pieces[i].StorageNode = node
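The exported wrapper keeps the old allocate-per-call behavior, while the unexported convertAliasesToPieces lets an in-package caller supply a reusable destination slice. A self-contained sketch of that reuse idiom (toy types standing in for the metabase API, not the real one):

package main

import "fmt"

// convert stands in for the alias -> piece lookup; it fills dst in place.
func convert(src []int, dst []int) []int {
	for i, v := range src {
		dst[i] = v * 2
	}
	return dst
}

func main() {
	var buf []int
	for _, batch := range [][]int{{1, 2, 3}, {4}, {5, 6, 7, 8}} {
		if cap(buf) < len(batch) {
			buf = make([]int, len(batch)) // grow only when capacity is short
		} else {
			buf = buf[:len(batch)] // otherwise reslice and reuse the backing array
		}
		fmt.Println(convert(batch, buf))
	}
}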

View File

@ -52,6 +52,10 @@ type FinishCopyObject struct {
NewSegmentKeys []EncryptedKeyAndNonce
// If set, copy the object by duplicating the metadata and
// remote_alias_pieces list, rather than using segment_copies.
DuplicateMetadata bool
// VerifyLimits holds a callback by which the caller can interrupt the copy
// if it turns out completing the copy would exceed a limit.
// It will be called only once.
@ -147,47 +151,96 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
plainSizes := make([]int32, sourceObject.SegmentCount)
plainOffsets := make([]int64, sourceObject.SegmentCount)
inlineDatas := make([][]byte, sourceObject.SegmentCount)
placementConstraints := make([]storj.PlacementConstraint, sourceObject.SegmentCount)
remoteAliasPiecesLists := make([][]byte, sourceObject.SegmentCount)
redundancySchemes := make([]int64, sourceObject.SegmentCount)
if opts.DuplicateMetadata {
err = withRows(db.db.QueryContext(ctx, `
SELECT
position,
expires_at,
root_piece_id,
encrypted_size, plain_offset, plain_size,
redundancy,
remote_alias_pieces,
placement,
inline_data
FROM segments
WHERE stream_id = $1
ORDER BY position ASC
LIMIT $2
`, sourceObject.StreamID, sourceObject.SegmentCount))(func(rows tagsql.Rows) error {
index := 0
for rows.Next() {
err := rows.Scan(
&positions[index],
&expiresAts[index],
&rootPieceIDs[index],
&encryptedSizes[index], &plainOffsets[index], &plainSizes[index],
&redundancySchemes[index],
&remoteAliasPiecesLists[index],
&placementConstraints[index],
&inlineDatas[index],
)
if err != nil {
return err
}
index++
}
if err := rows.Err(); err != nil {
return err
}
if index != int(sourceObject.SegmentCount) {
return Error.New("could not load all of the segment information")
}
return nil
})
} else {
err = withRows(db.db.QueryContext(ctx, `
SELECT
position,
expires_at,
root_piece_id,
encrypted_size, plain_offset, plain_size,
redundancy,
inline_data
FROM segments
WHERE stream_id = $1
ORDER BY position ASC
LIMIT $2
`, sourceObject.StreamID, sourceObject.SegmentCount))(func(rows tagsql.Rows) error {
index := 0
for rows.Next() {
err := rows.Scan(
&positions[index],
&expiresAts[index],
&rootPieceIDs[index],
&encryptedSizes[index], &plainOffsets[index], &plainSizes[index],
&redundancySchemes[index],
&inlineDatas[index],
)
if err != nil {
return err
}
index++
}
if err := rows.Err(); err != nil {
return err
}
if index != int(sourceObject.SegmentCount) {
return Error.New("could not load all of the segment information")
}
return nil
})
}
if err != nil {
return Error.New("unable to copy object: %w", err)
}
@ -275,6 +328,7 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
root_piece_id,
redundancy,
encrypted_size, plain_offset, plain_size,
remote_alias_pieces, placement,
inline_data
) SELECT
$1, UNNEST($2::INT8[]), UNNEST($3::timestamptz[]),
@ -282,12 +336,14 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
UNNEST($6::BYTEA[]),
UNNEST($7::INT8[]),
UNNEST($8::INT4[]), UNNEST($9::INT8[]), UNNEST($10::INT4[]),
UNNEST($11::BYTEA[]), UNNEST($12::INT2[]),
UNNEST($13::BYTEA[])
`, opts.NewStreamID, pgutil.Int8Array(newSegments.Positions), pgutil.NullTimestampTZArray(expiresAts),
pgutil.ByteaArray(newSegments.EncryptedKeyNonces), pgutil.ByteaArray(newSegments.EncryptedKeys),
pgutil.ByteaArray(rootPieceIDs),
pgutil.Int8Array(redundancySchemes),
pgutil.Int4Array(encryptedSizes), pgutil.Int8Array(plainOffsets), pgutil.Int4Array(plainSizes),
pgutil.ByteaArray(remoteAliasPiecesLists), pgutil.PlacementConstraintArray(placementConstraints),
pgutil.ByteaArray(inlineDatas),
)
if err != nil {
@ -298,15 +354,17 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
return nil
}
if !opts.DuplicateMetadata {
_, err = tx.ExecContext(ctx, `
INSERT INTO segment_copies (
stream_id, ancestor_stream_id
) VALUES (
$1, $2
)
`, opts.NewStreamID, ancestorStreamID)
if err != nil {
return Error.New("unable to copy object: %w", err)
}
}
return nil
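So with DuplicateMetadata set, the copied segments carry their own remote_alias_pieces and placement values and no segment_copies back-reference is written; without it, the old segment_copies behavior is preserved. A hypothetical call site, showing only the fields visible in this change (newStreamID, newSegmentKeys, and newMetadataKey are placeholder values, and the remaining required FinishCopyObject fields are omitted):

// Hypothetical: opt a server-side copy into duplicated metadata.
opts := metabase.FinishCopyObject{
	NewStreamID:             newStreamID,
	NewSegmentKeys:          newSegmentKeys,
	NewEncryptedMetadataKey: newMetadataKey,
	DuplicateMetadata:       true, // duplicate metadata instead of writing segment_copies
}
copyObj, err := db.FinishCopyObject(ctx, opts)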

File diff suppressed because it is too large

View File

@ -65,11 +65,6 @@ type deletedRemoteSegmentInfo struct {
RepairedAt *time.Time
}
// DeleteObjectAnyStatusAllVersions contains arguments necessary for deleting all object versions.
type DeleteObjectAnyStatusAllVersions struct {
ObjectLocation
}
// DeleteObjectsAllVersions contains arguments necessary for deleting all versions of multiple objects from the same bucket.
type DeleteObjectsAllVersions struct {
Locations []ObjectLocation
@ -566,66 +561,6 @@ func (db *DB) DeletePendingObject(ctx context.Context, opts DeletePendingObject)
return result, nil
}
// DeleteObjectAnyStatusAllVersions deletes all object versions.
func (db *DB) DeleteObjectAnyStatusAllVersions(ctx context.Context, opts DeleteObjectAnyStatusAllVersions) (result DeleteObjectResult, err error) {
defer mon.Task()(&ctx)(&err)
if db.config.ServerSideCopy {
return DeleteObjectResult{}, errs.New("method cannot be used when server-side copy is enabled")
}
if err := opts.Verify(); err != nil {
return DeleteObjectResult{}, err
}
err = withRows(db.db.QueryContext(ctx, `
WITH deleted_objects AS (
DELETE FROM objects
WHERE
project_id = $1 AND
bucket_name = $2 AND
object_key = $3
RETURNING
version, stream_id,
created_at, expires_at,
status, segment_count,
encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key,
total_plain_size, total_encrypted_size, fixed_segment_size,
encryption
), deleted_segments AS (
DELETE FROM segments
WHERE segments.stream_id IN (SELECT deleted_objects.stream_id FROM deleted_objects)
RETURNING segments.stream_id,segments.root_piece_id, segments.remote_alias_pieces
)
SELECT
deleted_objects.version, deleted_objects.stream_id,
deleted_objects.created_at, deleted_objects.expires_at,
deleted_objects.status, deleted_objects.segment_count,
deleted_objects.encrypted_metadata_nonce, deleted_objects.encrypted_metadata, deleted_objects.encrypted_metadata_encrypted_key,
deleted_objects.total_plain_size, deleted_objects.total_encrypted_size, deleted_objects.fixed_segment_size,
deleted_objects.encryption,
deleted_segments.root_piece_id, deleted_segments.remote_alias_pieces
FROM deleted_objects
LEFT JOIN deleted_segments ON deleted_objects.stream_id = deleted_segments.stream_id
`, opts.ProjectID, []byte(opts.BucketName), opts.ObjectKey))(func(rows tagsql.Rows) error {
result.Objects, result.Segments, err = db.scanObjectDeletion(ctx, opts.ObjectLocation, rows)
return err
})
if err != nil {
return DeleteObjectResult{}, err
}
if len(result.Objects) == 0 {
return DeleteObjectResult{}, ErrObjectNotFound.Wrap(Error.New("no rows deleted"))
}
mon.Meter("object_delete").Mark(len(result.Objects))
mon.Meter("segment_delete").Mark(len(result.Segments))
return result, nil
}
// DeleteObjectsAllVersions deletes all versions of multiple objects from the same bucket.
func (db *DB) DeleteObjectsAllVersions(ctx context.Context, opts DeleteObjectsAllVersions) (result DeleteObjectResult, err error) {
defer mon.Task()(&ctx)(&err)

View File

@ -321,7 +321,7 @@ func TestDeleteBucketWithCopies(t *testing.T) {
metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
CopyObjectStream: &copyObjectStream,
}.Run(ctx, t, db, false)
_, err := db.DeleteBucketObjects(ctx, metabase.DeleteBucketObjects{
Bucket: metabase.BucketLocation{
@ -362,7 +362,7 @@ func TestDeleteBucketWithCopies(t *testing.T) {
copyObj, _, copySegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
CopyObjectStream: &copyObjectStream,
}.Run(ctx, t, db, false)
_, err := db.DeleteBucketObjects(ctx, metabase.DeleteBucketObjects{
Bucket: metabase.BucketLocation{
@ -420,12 +420,78 @@ func TestDeleteBucketWithCopies(t *testing.T) {
metabasetest.CreateObjectCopy{
OriginalObject: originalObj1,
CopyObjectStream: &copyObjectStream1,
}.Run(ctx, t, db, false)
copyObj2, _, copySegments2 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj2,
CopyObjectStream: &copyObjectStream2,
}.Run(ctx, t, db, false)
// done preparing, delete bucket 2
_, err := db.DeleteBucketObjects(ctx, metabase.DeleteBucketObjects{
Bucket: metabase.BucketLocation{
ProjectID: projectID,
BucketName: "bucket2",
},
BatchSize: 2,
})
require.NoError(t, err)
// Prepare for check.
// obj1 is the same as before, copyObj2 should now be the original
for i := range copySegments2 {
copySegments2[i].Pieces = originalSegments2[i].Pieces
}
metabasetest.Verify{
Objects: []metabase.RawObject{
metabase.RawObject(originalObj1),
metabase.RawObject(copyObj2),
},
Segments: append(copySegments2, metabasetest.SegmentsToRaw(originalSegments1)...),
}.Check(ctx, t, db)
})
t.Run("delete bucket which has one ancestor and one copy with duplicate metadata", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
originalObjStream1 := metabasetest.RandObjectStream()
originalObjStream1.BucketName = "bucket1"
projectID := originalObjStream1.ProjectID
originalObjStream2 := metabasetest.RandObjectStream()
originalObjStream2.ProjectID = projectID
originalObjStream2.BucketName = "bucket2"
originalObj1, originalSegments1 := metabasetest.CreateTestObject{
CommitObject: &metabase.CommitObject{
ObjectStream: originalObjStream1,
},
}.Run(ctx, t, db, originalObjStream1, byte(numberOfSegments))
originalObj2, originalSegments2 := metabasetest.CreateTestObject{
CommitObject: &metabase.CommitObject{
ObjectStream: originalObjStream2,
},
}.Run(ctx, t, db, originalObjStream2, byte(numberOfSegments))
copyObjectStream1 := metabasetest.RandObjectStream()
copyObjectStream1.ProjectID = projectID
copyObjectStream1.BucketName = "bucket2" // copy from bucket 1 to bucket 2
copyObjectStream2 := metabasetest.RandObjectStream()
copyObjectStream2.ProjectID = projectID
copyObjectStream2.BucketName = "bucket1" // copy from bucket 2 to bucket 1
metabasetest.CreateObjectCopy{
OriginalObject: originalObj1,
CopyObjectStream: &copyObjectStream1,
}.Run(ctx, t, db, true)
copyObj2, _, copySegments2 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj2,
CopyObjectStream: &copyObjectStream2,
}.Run(ctx, t, db, true)
// done preparing, delete bucket 1
_, err := db.DeleteBucketObjects(ctx, metabase.DeleteBucketObjects{

View File

@ -466,205 +466,6 @@ func TestDeleteObjectExactVersion(t *testing.T) {
})
}
func TestDeleteObjectAnyStatusAllVersions(t *testing.T) {
metabasetest.RunWithConfig(t, noServerSideCopyConfig, func(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
obj := metabasetest.RandObjectStream()
location := obj.Location()
now := time.Now()
for _, test := range metabasetest.InvalidObjectLocations(location) {
test := test
t.Run(test.Name, func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
metabasetest.DeleteObjectAnyStatusAllVersions{
Opts: metabase.DeleteObjectAnyStatusAllVersions{ObjectLocation: test.ObjectLocation},
ErrClass: test.ErrClass,
ErrText: test.ErrText,
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
}
t.Run("Object missing", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
metabasetest.DeleteObjectAnyStatusAllVersions{
Opts: metabase.DeleteObjectAnyStatusAllVersions{ObjectLocation: obj.Location()},
ErrClass: &metabase.ErrObjectNotFound,
ErrText: "metabase: no rows deleted",
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
t.Run("Delete non existing object version", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
metabasetest.DeleteObjectAnyStatusAllVersions{
Opts: metabase.DeleteObjectAnyStatusAllVersions{ObjectLocation: obj.Location()},
ErrClass: &metabase.ErrObjectNotFound,
ErrText: "metabase: no rows deleted",
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
t.Run("Delete partial object", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
metabasetest.BeginObjectExactVersion{
Opts: metabase.BeginObjectExactVersion{
ObjectStream: obj,
Encryption: metabasetest.DefaultEncryption,
},
Version: 1,
}.Check(ctx, t, db)
metabasetest.DeleteObjectAnyStatusAllVersions{
Opts: metabase.DeleteObjectAnyStatusAllVersions{ObjectLocation: obj.Location()},
Result: metabase.DeleteObjectResult{
Objects: []metabase.Object{{
ObjectStream: obj,
CreatedAt: now,
Status: metabase.Pending,
Encryption: metabasetest.DefaultEncryption,
}},
},
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
t.Run("Delete object without segments", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
encryptedMetadata := testrand.Bytes(1024)
encryptedMetadataNonce := testrand.Nonce()
encryptedMetadataKey := testrand.Bytes(265)
object, _ := metabasetest.CreateTestObject{
CommitObject: &metabase.CommitObject{
ObjectStream: obj,
EncryptedMetadataNonce: encryptedMetadataNonce[:],
EncryptedMetadata: encryptedMetadata,
EncryptedMetadataEncryptedKey: encryptedMetadataKey,
},
}.Run(ctx, t, db, obj, 0)
metabasetest.DeleteObjectAnyStatusAllVersions{
Opts: metabase.DeleteObjectAnyStatusAllVersions{ObjectLocation: obj.Location()},
Result: metabase.DeleteObjectResult{
Objects: []metabase.Object{object},
},
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
t.Run("Delete object with segments", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
object := metabasetest.CreateObject(ctx, t, db, obj, 2)
expectedSegmentInfo := metabase.DeletedSegmentInfo{
RootPieceID: storj.PieceID{1},
Pieces: metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
}
metabasetest.DeleteObjectAnyStatusAllVersions{
Opts: metabase.DeleteObjectAnyStatusAllVersions{
ObjectLocation: location,
},
Result: metabase.DeleteObjectResult{
Objects: []metabase.Object{object},
Segments: []metabase.DeletedSegmentInfo{expectedSegmentInfo, expectedSegmentInfo},
},
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
t.Run("Delete object with inline segment", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
metabasetest.BeginObjectExactVersion{
Opts: metabase.BeginObjectExactVersion{
ObjectStream: obj,
Encryption: metabasetest.DefaultEncryption,
},
Version: obj.Version,
}.Check(ctx, t, db)
metabasetest.CommitInlineSegment{
Opts: metabase.CommitInlineSegment{
ObjectStream: obj,
Position: metabase.SegmentPosition{Part: 0, Index: 0},
EncryptedKey: testrand.Bytes(32),
EncryptedKeyNonce: testrand.Bytes(32),
InlineData: testrand.Bytes(1024),
PlainSize: 512,
PlainOffset: 0,
},
}.Check(ctx, t, db)
object := metabasetest.CommitObject{
Opts: metabase.CommitObject{
ObjectStream: obj,
},
}.Check(ctx, t, db)
metabasetest.DeleteObjectAnyStatusAllVersions{
Opts: metabase.DeleteObjectAnyStatusAllVersions{ObjectLocation: obj.Location()},
Result: metabase.DeleteObjectResult{
Objects: []metabase.Object{object},
},
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
t.Run("Delete multiple versions of the same object at once", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
expected := metabase.DeleteObjectResult{}
// committed object
obj := metabasetest.RandObjectStream()
expected.Objects = append(expected.Objects, metabasetest.CreateObject(ctx, t, db, obj, 1))
expected.Segments = append(expected.Segments, metabase.DeletedSegmentInfo{
RootPieceID: storj.PieceID{1},
Pieces: metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
})
// pending objects
for i := 1; i <= 10; i++ {
obj.StreamID = testrand.UUID()
obj.Version = metabase.NextVersion
pendingObject, err := db.BeginObjectNextVersion(ctx, metabase.BeginObjectNextVersion{
ObjectStream: obj,
})
require.NoError(t, err)
// nil ZombieDeletionDeadline because while deleting we are not returning this value with object metadata
pendingObject.ZombieDeletionDeadline = nil
expected.Objects = append(expected.Objects, pendingObject)
}
metabasetest.DeleteObjectAnyStatusAllVersions{
Opts: metabase.DeleteObjectAnyStatusAllVersions{ObjectLocation: obj.Location()},
Result: expected,
}.Check(ctx, t, db)
metabasetest.Verify{}.Check(ctx, t, db)
})
})
}
func TestDeleteObjectsAllVersions(t *testing.T) {
metabasetest.RunWithConfig(t, noServerSideCopyConfig, func(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
obj := metabasetest.RandObjectStream()
@ -987,7 +788,7 @@ func TestDeleteCopy(t *testing.T) {
copyObj, _, copySegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, false)
var copies []metabase.RawCopy
if numberOfSegments > 0 {
@ -1042,10 +843,10 @@ func TestDeleteCopy(t *testing.T) {
copyObject1, _, _ := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, false)
copyObject2, _, copySegments2 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, false)
metabasetest.DeleteObjectExactVersion{
Opts: metabase.DeleteObjectExactVersion{
@ -1092,7 +893,7 @@ func TestDeleteCopy(t *testing.T) {
copyObject, _, copySegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, false)
metabasetest.DeleteObjectExactVersion{
Opts: metabase.DeleteObjectExactVersion{
@ -1134,10 +935,10 @@ func TestDeleteCopy(t *testing.T) {
copyObject1, _, copySegments1 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, false)
copyObject2, _, copySegments2 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, false)
_, err := db.DeleteObjectExactVersion(ctx, metabase.DeleteObjectExactVersion{
Version: originalObj.Version,
@ -1206,6 +1007,201 @@ func TestDeleteCopy(t *testing.T) {
})
}
func TestDeleteCopyWithDuplicateMetadata(t *testing.T) {
metabasetest.Run(t, func(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
for _, numberOfSegments := range []int{0, 1, 3} {
t.Run(fmt.Sprintf("%d segments", numberOfSegments), func(t *testing.T) {
t.Run("delete copy", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
originalObjStream := metabasetest.RandObjectStream()
originalObj, originalSegments := metabasetest.CreateTestObject{
CommitObject: &metabase.CommitObject{
ObjectStream: originalObjStream,
EncryptedMetadata: testrand.Bytes(64),
EncryptedMetadataNonce: testrand.Nonce().Bytes(),
EncryptedMetadataEncryptedKey: testrand.Bytes(265),
},
}.Run(ctx, t, db, originalObjStream, byte(numberOfSegments))
copyObj, _, copySegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
// check that copy went OK
metabasetest.Verify{
Objects: []metabase.RawObject{
metabase.RawObject(originalObj),
metabase.RawObject(copyObj),
},
Segments: append(metabasetest.SegmentsToRaw(originalSegments), copySegments...),
}.Check(ctx, t, db)
metabasetest.DeleteObjectExactVersion{
Opts: metabase.DeleteObjectExactVersion{
ObjectLocation: copyObj.Location(),
Version: copyObj.Version,
},
Result: metabase.DeleteObjectResult{
Objects: []metabase.Object{copyObj},
Segments: rawSegmentsToDeletedSegmentInfo(copySegments),
},
}.Check(ctx, t, db)
// Verify that we are back at the original single object
metabasetest.Verify{
Objects: []metabase.RawObject{
metabase.RawObject(originalObj),
},
Segments: metabasetest.SegmentsToRaw(originalSegments),
}.Check(ctx, t, db)
})
t.Run("delete one of two copies", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
originalObjectStream := metabasetest.RandObjectStream()
originalObj, originalSegments := metabasetest.CreateTestObject{
CommitObject: &metabase.CommitObject{
ObjectStream: originalObjectStream,
EncryptedMetadata: testrand.Bytes(64),
EncryptedMetadataNonce: testrand.Nonce().Bytes(),
EncryptedMetadataEncryptedKey: testrand.Bytes(265),
},
}.Run(ctx, t, db, originalObjectStream, byte(numberOfSegments))
copyObject1, _, copySegments1 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
copyObject2, _, copySegments2 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
metabasetest.DeleteObjectExactVersion{
Opts: metabase.DeleteObjectExactVersion{
ObjectLocation: copyObject1.Location(),
Version: copyObject1.Version,
},
Result: metabase.DeleteObjectResult{
Objects: []metabase.Object{copyObject1},
Segments: rawSegmentsToDeletedSegmentInfo(copySegments1),
},
}.Check(ctx, t, db)
// Verify that only one of the copies is deleted
metabasetest.Verify{
Objects: []metabase.RawObject{
metabase.RawObject(originalObj),
metabase.RawObject(copyObject2),
},
Segments: append(metabasetest.SegmentsToRaw(originalSegments), copySegments2...),
}.Check(ctx, t, db)
})
t.Run("delete original", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
originalObjectStream := metabasetest.RandObjectStream()
originalObj, originalSegments := metabasetest.CreateTestObject{
CommitObject: &metabase.CommitObject{
ObjectStream: originalObjectStream,
EncryptedMetadata: testrand.Bytes(64),
EncryptedMetadataNonce: testrand.Nonce().Bytes(),
EncryptedMetadataEncryptedKey: testrand.Bytes(265),
},
}.Run(ctx, t, db, originalObjectStream, byte(numberOfSegments))
copyObject, _, copySegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
metabasetest.DeleteObjectExactVersion{
Opts: metabase.DeleteObjectExactVersion{
ObjectLocation: originalObj.Location(),
Version: originalObj.Version,
},
Result: metabase.DeleteObjectResult{
Objects: []metabase.Object{originalObj},
Segments: rawSegmentsToDeletedSegmentInfo(copySegments),
},
}.Check(ctx, t, db)
for i := range copySegments {
copySegments[i].Pieces = originalSegments[i].Pieces
}
// verify that the copy is left
metabasetest.Verify{
Objects: []metabase.RawObject{
metabase.RawObject(copyObject),
},
Segments: copySegments,
}.Check(ctx, t, db)
})
t.Run("delete original and leave two copies", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
originalObjectStream := metabasetest.RandObjectStream()
originalObj, originalSegments := metabasetest.CreateTestObject{
CommitObject: &metabase.CommitObject{
ObjectStream: originalObjectStream,
EncryptedMetadata: testrand.Bytes(64),
EncryptedMetadataNonce: testrand.Nonce().Bytes(),
EncryptedMetadataEncryptedKey: testrand.Bytes(265),
},
}.Run(ctx, t, db, originalObjectStream, byte(numberOfSegments))
copyObject1, _, copySegments1 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
copyObject2, _, copySegments2 := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
_, err := db.DeleteObjectExactVersion(ctx, metabase.DeleteObjectExactVersion{
Version: originalObj.Version,
ObjectLocation: originalObj.Location(),
})
require.NoError(t, err)
var expectedAncestorStreamID uuid.UUID
if numberOfSegments > 0 {
segments, err := db.TestingAllSegments(ctx)
require.NoError(t, err)
require.NotEmpty(t, segments)
if segments[0].StreamID == copyObject1.StreamID {
expectedAncestorStreamID = copyObject1.StreamID
} else {
expectedAncestorStreamID = copyObject2.StreamID
}
}
// set pieces in the expected ancestor for verification
for _, segments := range [][]metabase.RawSegment{copySegments1, copySegments2} {
for i := range segments {
if segments[i].StreamID == expectedAncestorStreamID {
segments[i].Pieces = originalSegments[i].Pieces
}
}
}
// verify that two functioning copies are left and the original object is gone
metabasetest.Verify{
Objects: []metabase.RawObject{
metabase.RawObject(copyObject1),
metabase.RawObject(copyObject2),
},
Segments: append(copySegments1, copySegments2...),
}.Check(ctx, t, db)
})
})
}
})
}
func TestDeleteObjectLastCommitted(t *testing.T) {
metabasetest.Run(t, func(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
obj := metabasetest.RandObjectStream()
@ -1402,3 +1398,12 @@ func TestDeleteObjectLastCommitted(t *testing.T) {
})
})
}
func rawSegmentsToDeletedSegmentInfo(segments []metabase.RawSegment) []metabase.DeletedSegmentInfo {
result := make([]metabase.DeletedSegmentInfo, len(segments))
for i := range segments {
result[i].RootPieceID = segments[i].RootPieceID
result[i].Pieces = segments[i].Pieces
}
return result
}

View File

@ -345,7 +345,53 @@ func TestGetObjectLastCommitted(t *testing.T) {
copiedObj, _, _ := metabasetest.CreateObjectCopy{
OriginalObject: originalObject,
CopyObjectStream: &copyObjStream,
}.Run(ctx, t, db, false)
metabasetest.DeleteObjectExactVersion{
Opts: metabase.DeleteObjectExactVersion{
Version: 1,
ObjectLocation: obj.Location(),
},
Result: metabase.DeleteObjectResult{
Objects: []metabase.Object{originalObject},
},
}.Check(ctx, t, db)
metabasetest.GetObjectLastCommitted{
Opts: metabase.GetObjectLastCommitted{
ObjectLocation: copiedObj.Location(),
},
Result: copiedObj,
}.Check(ctx, t, db)
metabasetest.Verify{Objects: []metabase.RawObject{
{
ObjectStream: metabase.ObjectStream{
ProjectID: copiedObj.ProjectID,
BucketName: copiedObj.BucketName,
ObjectKey: copiedObj.ObjectKey,
Version: copiedObj.Version,
StreamID: copiedObj.StreamID,
},
CreatedAt: now,
Status: metabase.Committed,
Encryption: metabasetest.DefaultEncryption,
EncryptedMetadata: copiedObj.EncryptedMetadata,
EncryptedMetadataNonce: copiedObj.EncryptedMetadataNonce,
EncryptedMetadataEncryptedKey: copiedObj.EncryptedMetadataEncryptedKey,
},
}}.Check(ctx, t, db)
})
t.Run("Get latest copied object version with duplicate metadata", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
copyObjStream := metabasetest.RandObjectStream()
originalObject := metabasetest.CreateObject(ctx, t, db, obj, 0)
copiedObj, _, _ := metabasetest.CreateObjectCopy{
OriginalObject: originalObject,
CopyObjectStream: &copyObjStream,
}.Run(ctx, t, db, true)
metabasetest.DeleteObjectExactVersion{
Opts: metabase.DeleteObjectExactVersion{
@ -1114,7 +1160,7 @@ func TestGetLatestObjectLastSegment(t *testing.T) {
copyObj, _, newSegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, false)
metabasetest.GetLatestObjectLastSegment{
Opts: metabase.GetLatestObjectLastSegment{
@ -1150,6 +1196,54 @@ func TestGetLatestObjectLastSegment(t *testing.T) {
}.Check(ctx, t, db)
})
t.Run("Get segment copy with duplicate metadata", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
objStream := metabasetest.RandObjectStream()
originalObj, originalSegments := metabasetest.CreateTestObject{
CommitObject: &metabase.CommitObject{
ObjectStream: objStream,
EncryptedMetadata: testrand.Bytes(64),
EncryptedMetadataNonce: testrand.Nonce().Bytes(),
EncryptedMetadataEncryptedKey: testrand.Bytes(265),
},
}.Run(ctx, t, db, objStream, 1)
copyObj, _, newSegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
}.Run(ctx, t, db, true)
metabasetest.GetLatestObjectLastSegment{
Opts: metabase.GetLatestObjectLastSegment{
ObjectLocation: originalObj.Location(),
},
Result: originalSegments[0],
}.Check(ctx, t, db)
copySegmentGet := originalSegments[0]
copySegmentGet.StreamID = copyObj.StreamID
copySegmentGet.EncryptedETag = nil
copySegmentGet.InlineData = []byte{}
copySegmentGet.EncryptedKey = newSegments[0].EncryptedKey
copySegmentGet.EncryptedKeyNonce = newSegments[0].EncryptedKeyNonce
metabasetest.GetLatestObjectLastSegment{
Opts: metabase.GetLatestObjectLastSegment{
ObjectLocation: copyObj.Location(),
},
Result: copySegmentGet,
}.Check(ctx, t, db)
metabasetest.Verify{
Objects: []metabase.RawObject{
metabase.RawObject(originalObj),
metabase.RawObject(copyObj),
},
Segments: append(metabasetest.SegmentsToRaw(originalSegments), newSegments...),
}.Check(ctx, t, db)
})
t.Run("Get empty inline segment copy", func(t *testing.T) { t.Run("Get empty inline segment copy", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db) defer metabasetest.DeleteAll{}.Check(ctx, t, db)

View File

@ -282,7 +282,51 @@ func TestListSegments(t *testing.T) {
_, _, copySegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObject,
CopyObjectStream: &copyStream,
}.Run(ctx, t, db, false)
expectedSegments := []metabase.Segment{}
for _, segment := range copySegments {
expectedSegments = append(expectedSegments, metabase.Segment(segment))
}
metabasetest.ListSegments{
Opts: metabase.ListSegments{
StreamID: copyStream.StreamID,
},
Result: metabase.ListSegmentsResult{
Segments: expectedSegments,
},
}.Check(ctx, t, db)
if numberOfSegments > 0 {
expectedSegments[0].Pieces = originalSegments[0].Pieces
}
metabasetest.ListSegments{
Opts: metabase.ListSegments{
StreamID: copyStream.StreamID,
UpdateFirstWithAncestor: true,
},
Result: metabase.ListSegmentsResult{
Segments: expectedSegments,
},
}.Check(ctx, t, db)
}
})
t.Run("segments from copy with duplicate metadata", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)
for _, numberOfSegments := range []byte{0, 1, 2, 10} {
originalObjectStream := metabasetest.RandObjectStream()
originalObject, originalSegments := metabasetest.CreateTestObject{}.
Run(ctx, t, db, originalObjectStream, numberOfSegments)
copyStream := metabasetest.RandObjectStream()
_, _, copySegments := metabasetest.CreateObjectCopy{
OriginalObject: originalObject,
CopyObjectStream: &copyStream,
}.Run(ctx, t, db, true)
expectedSegments := []metabase.Segment{}
for _, segment := range copySegments {

View File

@ -255,12 +255,15 @@ func (db *DB) IterateLoopSegments(ctx context.Context, opts IterateLoopSegments,
return err
}
loopIteratorBatchSizeLimit.Ensure(&opts.BatchSize)
it := &loopSegmentIterator{ it := &loopSegmentIterator{
db: db, db: db,
asOfSystemTime: opts.AsOfSystemTime, asOfSystemTime: opts.AsOfSystemTime,
asOfSystemInterval: opts.AsOfSystemInterval, asOfSystemInterval: opts.AsOfSystemInterval,
batchSize: opts.BatchSize, batchSize: opts.BatchSize,
batchPieces: make([]Pieces, opts.BatchSize),
curIndex: 0,
cursor: loopSegmentIteratorCursor{
@ -277,8 +280,6 @@ func (db *DB) IterateLoopSegments(ctx context.Context, opts IterateLoopSegments,
it.cursor.EndStreamID = uuid.Max()
}
it.curRows, err = it.doNextQuery(ctx)
if err != nil {
return err
@ -298,7 +299,10 @@ func (db *DB) IterateLoopSegments(ctx context.Context, opts IterateLoopSegments,
type loopSegmentIterator struct {
db *DB
batchSize int
// batchPieces are reused between result pages to reduce memory consumption
batchPieces []Pieces
asOfSystemTime time.Time
asOfSystemInterval time.Duration
@ -399,7 +403,14 @@ func (it *loopSegmentIterator) scanItem(ctx context.Context, item *LoopSegmentEn
return Error.New("failed to scan segments: %w", err) return Error.New("failed to scan segments: %w", err)
} }
// allocate new Pieces only if the existing ones don't have enough capacity
if cap(it.batchPieces[it.curIndex]) < len(item.AliasPieces) {
it.batchPieces[it.curIndex] = make(Pieces, len(item.AliasPieces))
} else {
it.batchPieces[it.curIndex] = it.batchPieces[it.curIndex][:len(item.AliasPieces)]
}
item.Pieces, err = it.db.aliasCache.convertAliasesToPieces(ctx, item.AliasPieces, it.batchPieces[it.curIndex])
if err != nil {
return Error.New("failed to convert aliases to pieces: %w", err)
}

View File

@ -321,7 +321,10 @@ type CreateObjectCopy struct {
}
// Run creates the copy.
//
// The duplicateMetadata argument is a hack; it should be removed once
// duplicateMetadata is no longer optional.
func (cc CreateObjectCopy) Run(ctx *testcontext.Context, t testing.TB, db *metabase.DB, duplicateMetadata bool) (copyObj metabase.Object, expectedOriginalSegments []metabase.RawSegment, expectedCopySegments []metabase.RawSegment) {
var copyStream metabase.ObjectStream
if cc.CopyObjectStream != nil {
copyStream = *cc.CopyObjectStream
@@ -360,6 +363,11 @@ func (cc CreateObjectCopy) Run(ctx *testcontext.Context, t testing.TB, db *metab
} else {
expectedCopySegments[i].InlineData = []byte{}
}
+if duplicateMetadata {
+expectedCopySegments[i].Pieces = make(metabase.Pieces, len(expectedOriginalSegments[i].Pieces))
+copy(expectedCopySegments[i].Pieces, expectedOriginalSegments[i].Pieces)
+}
}
opts := cc.FinishObject
@@ -374,6 +382,7 @@ func (cc CreateObjectCopy) Run(ctx *testcontext.Context, t testing.TB, db *metab
NewEncryptedMetadataKey: testrand.Bytes(32),
}
}
+opts.DuplicateMetadata = duplicateMetadata
copyObj, err := db.FinishCopyObject(ctx, *opts)
require.NoError(t, err)
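The make-plus-copy pair above is what makes the expected copy segments independent of the originals; a plain slice assignment would alias the same backing array. A tiny standalone illustration:

package main

import "fmt"

func main() {
	original := []int{1, 2, 3}
	alias := original // shares the backing array with original
	dup := make([]int, len(original))
	copy(dup, original) // independent copy, as in the test helper above
	original[0] = 99
	fmt.Println(alias[0], dup[0]) // 99 1
}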


@@ -454,29 +454,6 @@ func (step DeletePendingObject) Check(ctx *testcontext.Context, t testing.TB, db
require.Zero(t, diff)
}
// DeleteObjectAnyStatusAllVersions is for testing metabase.DeleteObjectAnyStatusAllVersions.
type DeleteObjectAnyStatusAllVersions struct {
Opts metabase.DeleteObjectAnyStatusAllVersions
Result metabase.DeleteObjectResult
ErrClass *errs.Class
ErrText string
}
// Check runs the test.
func (step DeleteObjectAnyStatusAllVersions) Check(ctx *testcontext.Context, t testing.TB, db *metabase.DB) {
result, err := db.DeleteObjectAnyStatusAllVersions(ctx, step.Opts)
checkError(t, err, step.ErrClass, step.ErrText)
sortObjects(result.Objects)
sortObjects(step.Result.Objects)
sortDeletedSegments(result.Segments)
sortDeletedSegments(step.Result.Segments)
diff := cmp.Diff(step.Result, result, DefaultTimeDiff())
require.Zero(t, diff)
}
// DeleteObjectsAllVersions is for testing metabase.DeleteObjectsAllVersions.
type DeleteObjectsAllVersions struct {
Opts metabase.DeleteObjectsAllVersions


@@ -31,6 +31,7 @@ import (
"storj.io/storj/satellite/metabase/rangedloop"
"storj.io/storj/satellite/metabase/rangedloop/rangedlooptest"
"storj.io/storj/satellite/metrics"
+"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/repair/checker"
)
@@ -425,6 +426,7 @@ func TestAllInOne(t *testing.T) {
log.Named("repair:checker"),
satellite.DB.RepairQueue(),
satellite.Overlay.Service,
+overlay.NewPlacementRules().CreateFilters,
satellite.Config.Checker,
),
})


@@ -193,7 +193,7 @@ func TestGetStreamPieceCountByNodeID(t *testing.T) {
_, _, _ = metabasetest.CreateObjectCopy{
OriginalObject: originalObj,
CopyObjectStream: &copyStream,
-}.Run(ctx, t, db)
+}.Run(ctx, t, db, false)
metabasetest.GetStreamPieceCountByNodeID{
Opts: metabase.GetStreamPieceCountByNodeID{


@@ -4,6 +4,7 @@
package metainfo
import (
+"bytes"
"context"
"sync"
@@ -24,9 +25,11 @@ import (
const MaxUserAgentLength = 500
// ensureAttribution ensures that the bucketName has the partner information specified by project-level user agent, or header user agent.
+// If `forceBucketUpdate` is true, then the buckets table will be updated if necessary (needed for bucket creation). Otherwise, it is sufficient
+// to only ensure the attribution exists in the value attributions db.
//
// Assumes that the user has permissions sufficient for authenticating.
-func (endpoint *Endpoint) ensureAttribution(ctx context.Context, header *pb.RequestHeader, keyInfo *console.APIKeyInfo, bucketName, projectUserAgent []byte) (err error) {
+func (endpoint *Endpoint) ensureAttribution(ctx context.Context, header *pb.RequestHeader, keyInfo *console.APIKeyInfo, bucketName, projectUserAgent []byte, forceBucketUpdate bool) (err error) {
defer mon.Task()(&ctx)(&err)
if header == nil {
@@ -36,13 +39,15 @@ func (endpoint *Endpoint) ensureAttribution(ctx context.Context, header *pb.Requ
return nil
}
+if !forceBucketUpdate {
if conncache := drpccache.FromContext(ctx); conncache != nil {
cache := conncache.LoadOrCreate(attributionCheckCacheKey{},
func() interface{} {
return &attributionCheckCache{}
}).(*attributionCheckCache)
if !cache.needsCheck(string(bucketName)) {
return nil
}
}
+}
@@ -62,7 +67,7 @@ func (endpoint *Endpoint) ensureAttribution(ctx context.Context, header *pb.Requ
return err
}
-err = endpoint.tryUpdateBucketAttribution(ctx, header, keyInfo.ProjectID, bucketName, userAgent)
+err = endpoint.tryUpdateBucketAttribution(ctx, header, keyInfo.ProjectID, bucketName, userAgent, forceBucketUpdate)
if errs2.IsRPC(err, rpcstatus.NotFound) || errs2.IsRPC(err, rpcstatus.AlreadyExists) {
return nil
}
@@ -110,7 +115,7 @@ func TrimUserAgent(userAgent []byte) ([]byte, error) {
return userAgent, nil
}
-func (endpoint *Endpoint) tryUpdateBucketAttribution(ctx context.Context, header *pb.RequestHeader, projectID uuid.UUID, bucketName []byte, userAgent []byte) (err error) {
+func (endpoint *Endpoint) tryUpdateBucketAttribution(ctx context.Context, header *pb.RequestHeader, projectID uuid.UUID, bucketName []byte, userAgent []byte, forceBucketUpdate bool) (err error) {
defer mon.Task()(&ctx)(&err)
if header == nil {
@@ -118,26 +123,17 @@ func (endpoint *Endpoint) tryUpdateBucketAttribution(ctx context.Context, header
}
// check if attribution is set for given bucket
-_, err = endpoint.attributions.Get(ctx, projectID, bucketName)
+attrInfo, err := endpoint.attributions.Get(ctx, projectID, bucketName)
if err == nil {
+if !forceBucketUpdate {
// bucket has already an attribution, no need to update
return nil
}
-if !attribution.ErrBucketNotAttributed.Has(err) {
-// try only to set the attribution, when it's missing
+} else if !attribution.ErrBucketNotAttributed.Has(err) {
endpoint.log.Error("error while getting attribution from DB", zap.Error(err))
return rpcstatus.Error(rpcstatus.Internal, err.Error())
}
-empty, err := endpoint.isBucketEmpty(ctx, projectID, bucketName)
-if err != nil {
-endpoint.log.Error("internal", zap.Error(err))
-return rpcstatus.Error(rpcstatus.Internal, Error.Wrap(err).Error())
-}
-if !empty {
-return rpcstatus.Errorf(rpcstatus.AlreadyExists, "bucket %q is not empty, Partner %q cannot be attributed", bucketName, userAgent)
-}
// checks if bucket exists before updates it or makes a new entry
bucket, err := endpoint.buckets.GetBucket(ctx, bucketName, projectID)
if err != nil {
@@ -147,8 +143,36 @@ func (endpoint *Endpoint) tryUpdateBucketAttribution(ctx context.Context, header
endpoint.log.Error("error while getting bucket", zap.ByteString("bucketName", bucketName), zap.Error(err))
return rpcstatus.Error(rpcstatus.Internal, "unable to set bucket attribution")
}
-if bucket.UserAgent != nil {
-return rpcstatus.Errorf(rpcstatus.AlreadyExists, "bucket %q already has attribution, Partner %q cannot be attributed", bucketName, userAgent)
+if attrInfo != nil {
+// bucket user agent and value attributions user agent already set
+if bytes.Equal(bucket.UserAgent, attrInfo.UserAgent) {
+return nil
+}
+// make sure bucket user_agent matches value_attribution
+userAgent = attrInfo.UserAgent
+}
+empty, err := endpoint.isBucketEmpty(ctx, projectID, bucketName)
+if err != nil {
+endpoint.log.Error("internal", zap.Error(err))
+return rpcstatus.Error(rpcstatus.Internal, Error.Wrap(err).Error())
+}
+if !empty {
+return rpcstatus.Errorf(rpcstatus.AlreadyExists, "bucket %q is not empty, Partner %q cannot be attributed", bucketName, userAgent)
+}
+if attrInfo == nil {
+// update attribution table
+_, err = endpoint.attributions.Insert(ctx, &attribution.Info{
+ProjectID: projectID,
+BucketName: bucketName,
+UserAgent: userAgent,
+})
+if err != nil {
+endpoint.log.Error("error while inserting attribution to DB", zap.Error(err))
+return rpcstatus.Error(rpcstatus.Internal, err.Error())
+}
}
// update bucket information
@@ -159,17 +183,6 @@ func (endpoint *Endpoint) tryUpdateBucketAttribution(ctx context.Context, header
return rpcstatus.Error(rpcstatus.Internal, "unable to set bucket attribution")
}
-// update attribution table
-_, err = endpoint.attributions.Insert(ctx, &attribution.Info{
-ProjectID: projectID,
-BucketName: bucketName,
-UserAgent: userAgent,
-})
-if err != nil {
-endpoint.log.Error("error while inserting attribution to DB", zap.Error(err))
-return rpcstatus.Error(rpcstatus.Internal, err.Error())
-}
return nil
}
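Condensed, the reworked flow reads: short-circuit when attribution exists and no bucket update is forced; otherwise reconcile the bucket's user_agent with value_attributions, refuse non-empty buckets, and insert the attribution row only when it was missing. A simplified, illustrative sketch of that decision order (no DB or RPC plumbing; all names are stand-ins):

package main

import (
	"bytes"
	"errors"
	"fmt"
)

// ensure condenses tryUpdateBucketAttribution above into a pure function.
// attrUA is the value_attributions entry (nil when absent), bucketUA the
// bucket's current user agent, reqUA the request's user agent.
func ensure(attrUA, bucketUA, reqUA []byte, force, bucketEmpty bool) ([]byte, error) {
	if attrUA != nil {
		if !force {
			return bucketUA, nil // attribution already recorded, nothing to do
		}
		if bytes.Equal(bucketUA, attrUA) {
			return bucketUA, nil // both tables already agree
		}
		reqUA = attrUA // value_attributions is the source of truth
	}
	if !bucketEmpty {
		return nil, errors.New("bucket is not empty, cannot be attributed")
	}
	// the real code inserts into value_attributions here when attrUA == nil,
	// then updates the bucket's user_agent
	return reqUA, nil
}

func main() {
	ua, err := ensure([]byte("minio"), nil, []byte("other"), true, true)
	fmt.Println(string(ua), err) // minio <nil>: bucket realigned to value_attributions
}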


@@ -381,3 +381,146 @@ func TestBucketAttributionConcurrentUpload(t *testing.T) {
require.Equal(t, []byte(config.UserAgent), attributionInfo.UserAgent)
})
}
func TestAttributionDeletedBucketRecreated(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
upl := planet.Uplinks[0]
proj := upl.Projects[0].ID
bucket := "testbucket"
ua1 := []byte("minio")
ua2 := []byte("not minio")
require.NoError(t, satellite.DB.Console().Projects().UpdateUserAgent(ctx, proj, ua1))
require.NoError(t, upl.CreateBucket(ctx, satellite, bucket))
b, err := satellite.DB.Buckets().GetBucket(ctx, []byte(bucket), proj)
require.NoError(t, err)
require.Equal(t, ua1, b.UserAgent)
// test recreate with same user agent
require.NoError(t, upl.DeleteBucket(ctx, satellite, bucket))
require.NoError(t, upl.CreateBucket(ctx, satellite, bucket))
b, err = satellite.DB.Buckets().GetBucket(ctx, []byte(bucket), proj)
require.NoError(t, err)
require.Equal(t, ua1, b.UserAgent)
// test recreate with different user agent
// should still have original user agent
require.NoError(t, upl.DeleteBucket(ctx, satellite, bucket))
upl.Config.UserAgent = string(ua2)
require.NoError(t, upl.CreateBucket(ctx, satellite, bucket))
b, err = satellite.DB.Buckets().GetBucket(ctx, []byte(bucket), proj)
require.NoError(t, err)
require.Equal(t, ua1, b.UserAgent)
})
}
func TestAttributionBeginObject(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
upl := planet.Uplinks[0]
proj := upl.Projects[0].ID
ua := []byte("minio")
tests := []struct {
name string
vaAttrBefore, bktAttrBefore, bktAttrAfter bool
}{
// test for existence of user_agent in buckets table given the different possibilities of preconditions of user_agent
// in value_attributions and bucket_metainfos to make sure nothing breaks and outcome is expected.
{
name: "attribution exists in VA and bucket",
vaAttrBefore: true,
bktAttrBefore: true,
bktAttrAfter: true,
},
{
name: "attribution exists in VA and NOT bucket",
vaAttrBefore: true,
bktAttrBefore: false,
bktAttrAfter: false,
},
{
name: "attribution exists in bucket and NOT VA",
vaAttrBefore: false,
bktAttrBefore: true,
bktAttrAfter: true,
},
{
name: "attribution exists in neither VA nor buckets",
vaAttrBefore: false,
bktAttrBefore: false,
bktAttrAfter: true,
},
}
for i, tt := range tests {
t.Run(tt.name, func(*testing.T) {
bucketName := fmt.Sprintf("bucket-%d", i)
var expectedBktUA []byte
var config uplink.Config
if tt.bktAttrBefore || tt.vaAttrBefore {
config.UserAgent = string(ua)
}
if tt.bktAttrAfter {
expectedBktUA = ua
}
p, err := config.OpenProject(ctx, upl.Access[satellite.ID()])
require.NoError(t, err)
_, err = p.CreateBucket(ctx, bucketName)
require.NoError(t, err)
require.NoError(t, p.Close())
if !tt.bktAttrBefore && tt.vaAttrBefore {
// remove user agent from bucket
err = satellite.API.DB.Buckets().UpdateUserAgent(ctx, proj, bucketName, nil)
require.NoError(t, err)
}
_, err = satellite.API.DB.Attribution().Get(ctx, proj, []byte(bucketName))
if !tt.bktAttrBefore && !tt.vaAttrBefore {
require.Error(t, err)
} else {
require.NoError(t, err)
}
b, err := satellite.API.DB.Buckets().GetBucket(ctx, []byte(bucketName), proj)
require.NoError(t, err)
if !tt.bktAttrBefore {
require.Nil(t, b.UserAgent)
} else {
require.Equal(t, expectedBktUA, b.UserAgent)
}
config.UserAgent = string(ua)
p, err = config.OpenProject(ctx, upl.Access[satellite.ID()])
require.NoError(t, err)
upload, err := p.UploadObject(ctx, bucketName, fmt.Sprintf("foobar-%d", i), nil)
require.NoError(t, err)
_, err = upload.Write([]byte("content"))
require.NoError(t, err)
err = upload.Commit()
require.NoError(t, err)
attr, err := satellite.API.DB.Attribution().Get(ctx, proj, []byte(bucketName))
require.NoError(t, err)
require.Equal(t, ua, attr.UserAgent)
b, err = satellite.API.DB.Buckets().GetBucket(ctx, []byte(bucketName), proj)
require.NoError(t, err)
require.Equal(t, expectedBktUA, b.UserAgent)
})
}
})
}


@@ -141,9 +141,12 @@ type Config struct {
RateLimiter RateLimiterConfig `help:"rate limiter configuration"`
UploadLimiter UploadLimiterConfig `help:"object upload limiter configuration"`
ProjectLimits ProjectLimitConfig `help:"project limit configuration"`
// TODO remove this flag when server-side copy implementation will be finished
ServerSideCopy bool `help:"enable code for server-side copy, deprecated. please leave this to true." default:"true"`
ServerSideCopyDisabled bool `help:"disable already enabled server-side copy. this is because once server side copy is enabled, delete code should stay changed, even if you want to disable server side copy" default:"false"`
+ServerSideCopyDuplicateMetadata bool `help:"perform server-side copy by duplicating metadata, instead of using segment_copies" default:"false"`
// TODO remove when we benchmarking are done and decision is made.
TestListingQuery bool `default:"false" help:"test the new query for non-recursive listing"`
}
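In tests, the new toggle can presumably be flipped through testplanet's Reconfigure hook, mirroring the pattern TestPlacements uses later in this changeset (a sketch; the test name and body are placeholders):

func TestServerSideCopyDuplicateMetadata(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				// opt this satellite into metadata duplication for server-side copies
				config.Metainfo.ServerSideCopyDuplicateMetadata = true
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// copies made via this satellite now carry their own pieces instead of
		// referencing the original through segment_copies
	})
}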


@@ -71,7 +71,7 @@ func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreate
}
endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))
-err = endpoint.validateBucket(ctx, req.Name)
+err = endpoint.validateBucketName(req.Name)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
@@ -83,7 +83,7 @@ func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreate
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
} else if exists {
// When the bucket exists, try to set the attribution.
-if err := endpoint.ensureAttribution(ctx, req.Header, keyInfo, req.GetName(), nil); err != nil {
+if err := endpoint.ensureAttribution(ctx, req.Header, keyInfo, req.GetName(), nil, true); err != nil {
return nil, err
}
return nil, rpcstatus.Error(rpcstatus.AlreadyExists, "bucket already exists")
@@ -119,7 +119,7 @@ func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreate
}
// Once we have created the bucket, we can try setting the attribution.
-if err := endpoint.ensureAttribution(ctx, req.Header, keyInfo, req.GetName(), project.UserAgent); err != nil {
+if err := endpoint.ensureAttribution(ctx, req.Header, keyInfo, req.GetName(), project.UserAgent, true); err != nil {
return nil, err
}
@@ -180,7 +180,7 @@ func (endpoint *Endpoint) DeleteBucket(ctx context.Context, req *pb.BucketDelete
}
endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))
-err = endpoint.validateBucket(ctx, req.Name)
+err = endpoint.validateBucketNameLength(req.Name)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}


@@ -115,18 +115,29 @@ func TestBucketNameValidation(t *testing.T) {
"192.168.1.234", "testBUCKET",
"test/bucket",
"testbucket-64-0123456789012345678901234567890123456789012345abcd",
-"test\\",
+"test%",
}
for _, name := range invalidNames {
-_, err = metainfoClient.BeginObject(ctx, metaclient.BeginObjectParams{
-Bucket: []byte(name),
-EncryptedObjectKey: []byte("123"),
-})
-require.Error(t, err, "bucket name: %v", name)
_, err = metainfoClient.CreateBucket(ctx, metaclient.CreateBucketParams{
Name: []byte(name),
})
require.Error(t, err, "bucket name: %v", name)
+require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
+}
+invalidNames = []string{
+"", "t", "te",
+"testbucket-64-0123456789012345678901234567890123456789012345abcd",
+}
+for _, name := range invalidNames {
+// BeginObject validates only bucket name length
+_, err = metainfoClient.BeginObject(ctx, metaclient.BeginObjectParams{
+Bucket: []byte(name),
+EncryptedObjectKey: []byte("123"),
+})
+require.Error(t, err, "bucket name: %v", name)
+require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
}
})
} }


@@ -66,7 +66,8 @@ func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRe
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "Invalid expiration time")
}
-err = endpoint.validateBucket(ctx, req.Bucket)
+// we can do just basic name validation because later we are checking bucket in DB
+err = endpoint.validateBucketNameLength(req.Bucket)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
@@ -95,7 +96,7 @@ func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRe
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
-if err := endpoint.ensureAttribution(ctx, req.Header, keyInfo, req.Bucket, nil); err != nil {
+if err := endpoint.ensureAttribution(ctx, req.Header, keyInfo, req.Bucket, nil, false); err != nil {
return nil, err
}
@@ -310,7 +311,7 @@ func (endpoint *Endpoint) GetObject(ctx context.Context, req *pb.ObjectGetReques
}
endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))
-err = endpoint.validateBucket(ctx, req.Bucket)
+err = endpoint.validateBucketNameLength(req.Bucket)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
@@ -409,7 +410,7 @@ func (endpoint *Endpoint) DownloadObject(ctx context.Context, req *pb.ObjectDown
return nil, err
}
-err = endpoint.validateBucket(ctx, req.Bucket)
+err = endpoint.validateBucketNameLength(req.Bucket)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
@@ -804,7 +805,7 @@ func (endpoint *Endpoint) ListObjects(ctx context.Context, req *pb.ObjectListReq
}
endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))
-err = endpoint.validateBucket(ctx, req.Bucket)
+err = endpoint.validateBucketNameLength(req.Bucket)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
@@ -944,7 +945,7 @@ func (endpoint *Endpoint) ListPendingObjectStreams(ctx context.Context, req *pb.
}
endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))
-err = endpoint.validateBucket(ctx, req.Bucket)
+err = endpoint.validateBucketNameLength(req.Bucket)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
@@ -1057,7 +1058,7 @@ func (endpoint *Endpoint) BeginDeleteObject(ctx context.Context, req *pb.ObjectB
}
endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))
-err = endpoint.validateBucket(ctx, req.Bucket)
+err = endpoint.validateBucketNameLength(req.Bucket)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
@@ -1146,7 +1147,7 @@ func (endpoint *Endpoint) GetObjectIPs(ctx context.Context, req *pb.ObjectGetIPs
}
endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))
-err = endpoint.validateBucket(ctx, req.Bucket)
+err = endpoint.validateBucketNameLength(req.Bucket)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
@@ -1237,7 +1238,7 @@ func (endpoint *Endpoint) UpdateObjectMetadata(ctx context.Context, req *pb.Obje
}
endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))
-err = endpoint.validateBucket(ctx, req.Bucket)
+err = endpoint.validateBucketNameLength(req.Bucket)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
@@ -1499,50 +1500,6 @@ func (endpoint *Endpoint) DeleteCommittedObject(
return deletedObjects, nil
}
// DeleteObjectAnyStatus deletes all the pieces of the storage nodes that belongs
// to the specified object.
//
// NOTE: this method is exported for being able to individually test it without
// having import cycles.
// TODO regarding the above note: exporting for testing is fine, but we should name
// it something that will definitely never ever be added to the rpc set in DRPC
// definitions. If we ever decide to add an RPC method called "DeleteObjectAnyStatus",
// DRPC interface definitions is all that is standing in the way from someone
// remotely calling this. We should name this InternalDeleteObjectAnyStatus or
// something.
func (endpoint *Endpoint) DeleteObjectAnyStatus(ctx context.Context, location metabase.ObjectLocation,
) (deletedObjects []*pb.Object, err error) {
defer mon.Task()(&ctx, location.ProjectID.String(), location.BucketName, location.ObjectKey)(&err)
var result metabase.DeleteObjectResult
if endpoint.config.ServerSideCopy {
result, err = endpoint.metabase.DeleteObjectExactVersion(ctx, metabase.DeleteObjectExactVersion{
ObjectLocation: location,
Version: metabase.DefaultVersion,
})
} else {
result, err = endpoint.metabase.DeleteObjectAnyStatusAllVersions(ctx, metabase.DeleteObjectAnyStatusAllVersions{
ObjectLocation: location,
})
}
if err != nil {
return nil, Error.Wrap(err)
}
deletedObjects, err = endpoint.deleteObjectResultToProto(ctx, result)
if err != nil {
endpoint.log.Error("failed to convert delete object result",
zap.Stringer("project", location.ProjectID),
zap.String("bucket", location.BucketName),
zap.Binary("object", []byte(location.ObjectKey)),
zap.Error(err),
)
return nil, err
}
return deletedObjects, nil
}
// DeletePendingObject deletes all the pieces of the storage nodes that belongs
// to the specified pending object.
//
@@ -1615,7 +1572,7 @@ func (endpoint *Endpoint) BeginMoveObject(ctx context.Context, req *pb.ObjectBeg
endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))
for _, bucket := range [][]byte{req.Bucket, req.NewBucket} {
-err = endpoint.validateBucket(ctx, bucket)
+err = endpoint.validateBucketNameLength(bucket)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
@@ -1761,7 +1718,7 @@ func (endpoint *Endpoint) FinishMoveObject(ctx context.Context, req *pb.ObjectFi
}
endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))
-err = endpoint.validateBucket(ctx, req.NewBucket)
+err = endpoint.validateBucketNameLength(req.NewBucket)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
@@ -1840,7 +1797,7 @@ func (endpoint *Endpoint) BeginCopyObject(ctx context.Context, req *pb.ObjectBeg
endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))
for _, bucket := range [][]byte{req.Bucket, req.NewBucket} {
-err = endpoint.validateBucket(ctx, bucket)
+err = endpoint.validateBucketNameLength(bucket)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
@@ -1952,7 +1909,7 @@ func (endpoint *Endpoint) FinishCopyObject(ctx context.Context, req *pb.ObjectFi
}
endpoint.usageTracking(keyInfo, req.Header, fmt.Sprintf("%T", req))
-err = endpoint.validateBucket(ctx, req.NewBucket)
+err = endpoint.validateBucketNameLength(req.NewBucket)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
@@ -1995,6 +1952,7 @@ func (endpoint *Endpoint) FinishCopyObject(ctx context.Context, req *pb.ObjectFi
NewEncryptedMetadata: req.NewEncryptedMetadata,
NewEncryptedMetadataKeyNonce: req.NewEncryptedMetadataKeyNonce,
NewEncryptedMetadataKey: req.NewEncryptedMetadataKey,
+DuplicateMetadata: endpoint.config.ServerSideCopyDuplicateMetadata,
VerifyLimits: func(encryptedObjectSize int64, nSegments int64) error {
return endpoint.addStorageUsageUpToLimit(ctx, keyInfo.ProjectID, encryptedObjectSize, nSegments)
},


@@ -22,7 +22,9 @@ import (
"storj.io/common/errs2"
"storj.io/common/identity"
+"storj.io/common/identity/testidentity"
"storj.io/common/memory"
+"storj.io/common/nodetag"
"storj.io/common/pb"
"storj.io/common/rpc/rpcstatus"
"storj.io/common/signing"
@@ -37,6 +39,9 @@ import (
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/metainfo"
+"storj.io/storj/satellite/overlay"
+"storj.io/storj/storagenode"
+"storj.io/storj/storagenode/contact"
"storj.io/uplink"
"storj.io/uplink/private/metaclient"
"storj.io/uplink/private/object"
@@ -1585,59 +1590,6 @@ func TestEndpoint_DeletePendingObject(t *testing.T) {
testDeleteObject(t, createPendingObject, deletePendingObject)
}
func TestEndpoint_DeleteObjectAnyStatus(t *testing.T) {
createCommittedObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet, bucket, key string, data []byte) {
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucket, key, data)
require.NoError(t, err)
}
deleteCommittedObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet, bucket, encryptedKey string, streamID uuid.UUID) {
projectID := planet.Uplinks[0].Projects[0].ID
deletedObjects, err := planet.Satellites[0].Metainfo.Endpoint.DeleteObjectAnyStatus(ctx, metabase.ObjectLocation{
ProjectID: projectID,
BucketName: bucket,
ObjectKey: metabase.ObjectKey(encryptedKey),
})
require.NoError(t, err)
require.Len(t, deletedObjects, 1)
}
testDeleteObject(t, createCommittedObject, deleteCommittedObject)
createPendingObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet, bucket, key string, data []byte) {
// TODO This should be replaced by a call to testplanet.Uplink.MultipartUpload when available.
project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
require.NoError(t, err, "failed to retrieve project")
defer func() { require.NoError(t, project.Close()) }()
_, err = project.EnsureBucket(ctx, bucket)
require.NoError(t, err, "failed to create bucket")
info, err := project.BeginUpload(ctx, bucket, key, &uplink.UploadOptions{})
require.NoError(t, err, "failed to start multipart upload")
upload, err := project.UploadPart(ctx, bucket, key, info.UploadID, 1)
require.NoError(t, err, "failed to put object part")
_, err = upload.Write(data)
require.NoError(t, err, "failed to start multipart upload")
require.NoError(t, upload.Commit(), "failed to start multipart upload")
}
deletePendingObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet, bucket, encryptedKey string, streamID uuid.UUID) {
projectID := planet.Uplinks[0].Projects[0].ID
deletedObjects, err := planet.Satellites[0].Metainfo.Endpoint.DeleteObjectAnyStatus(ctx, metabase.ObjectLocation{
ProjectID: projectID,
BucketName: bucket,
ObjectKey: metabase.ObjectKey(encryptedKey),
})
require.NoError(t, err)
require.Len(t, deletedObjects, 1)
}
testDeleteObject(t, createPendingObject, deletePendingObject)
}
func testDeleteObject(t *testing.T,
createObject func(ctx context.Context, t *testing.T, planet *testplanet.Planet, bucket, key string, data []byte),
deleteObject func(ctx context.Context, t *testing.T, planet *testplanet.Planet, bucket, encryptedKey string, streamID uuid.UUID),
@@ -2450,3 +2402,100 @@ func TestListUploads(t *testing.T) {
require.Equal(t, 1000, items)
})
}
func TestPlacements(t *testing.T) {
ctx := testcontext.New(t)
satelliteIdentity := signing.SignerFromFullIdentity(testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion()))
placementRules := overlay.ConfigurablePlacementRule{}
err := placementRules.Set(fmt.Sprintf(`16:tag("%s", "certified","true")`, satelliteIdentity.ID()))
require.NoError(t, err)
testplanet.Run(t,
testplanet.Config{
SatelliteCount: 1,
StorageNodeCount: 12,
UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.RS.Min = 3
config.Metainfo.RS.Repair = 4
config.Metainfo.RS.Success = 5
config.Metainfo.RS.Total = 6
config.Metainfo.MaxInlineSegmentSize = 1
config.Placement = placementRules
},
StorageNode: func(index int, config *storagenode.Config) {
if index%2 == 0 {
tags := &pb.NodeTagSet{
NodeId: testidentity.MustPregeneratedSignedIdentity(index+1, storj.LatestIDVersion()).ID.Bytes(),
Timestamp: time.Now().Unix(),
Tags: []*pb.Tag{
{
Name: "certified",
Value: []byte("true"),
},
},
}
signed, err := nodetag.Sign(ctx, tags, satelliteIdentity)
require.NoError(t, err)
config.Contact.Tags = contact.SignedTags(pb.SignedNodeTagSets{
Tags: []*pb.SignedNodeTagSet{
signed,
},
})
}
},
},
},
func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
buckets := satellite.API.Buckets.Service
uplink := planet.Uplinks[0]
projectID := uplink.Projects[0].ID
// create buckets with different placement (placement 16 is configured above)
createGeofencedBucket(t, ctx, buckets, projectID, "constrained", 16)
objectNo := 10
for i := 0; i < objectNo; i++ {
// upload an object to one of the global buckets
err := uplink.Upload(ctx, satellite, "constrained", "testobject"+strconv.Itoa(i), make([]byte, 10240))
require.NoError(t, err)
}
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
metainfoClient, err := uplink.DialMetainfo(ctx, satellite, apiKey)
require.NoError(t, err)
defer func() {
_ = metainfoClient.Close()
}()
objects, _, err := metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
Bucket: []byte("constrained"),
})
require.NoError(t, err)
require.Len(t, objects, objectNo)
for _, listedObject := range objects {
o, err := metainfoClient.DownloadObject(ctx, metaclient.DownloadObjectParams{
Bucket: []byte("constrained"),
EncryptedObjectKey: listedObject.EncryptedObjectKey,
})
require.NoError(t, err)
for _, limit := range o.DownloadedSegments[0].Limits {
if limit != nil {
// starting from 2 (first identity used for satellite, SN with even index are fine)
for i := 2; i < 11; i += 2 {
require.NotEqual(t, testidentity.MustPregeneratedSignedIdentity(i, storj.LatestIDVersion()).ID, limit.Limit.StorageNodeId)
}
}
}
}
},
)
}


@@ -247,9 +247,7 @@ func (endpoint *Endpoint) checkRate(ctx context.Context, projectID uuid.UUID) (e
return nil
}
-func (endpoint *Endpoint) validateBucket(ctx context.Context, bucket []byte) (err error) {
-defer mon.Task()(&ctx)(&err)
+func (endpoint *Endpoint) validateBucketNameLength(bucket []byte) (err error) {
if len(bucket) == 0 {
return Error.Wrap(buckets.ErrNoBucket.New(""))
}
@@ -258,11 +256,19 @@ func (endpoint *Endpoint) validateBucket(ctx context.Context, bucket []byte) (er
return Error.New("bucket name must be at least 3 and no more than 63 characters long")
}
+return nil
+}
+
+func (endpoint *Endpoint) validateBucketName(bucket []byte) error {
+if err := endpoint.validateBucketNameLength(bucket); err != nil {
+return err
+}
// Regexp not used because benchmark shows it will be slower for valid bucket names
// https://gist.github.com/mniewrzal/49de3af95f36e63e88fac24f565e444c
labels := bytes.Split(bucket, []byte("."))
for _, label := range labels {
-err = validateBucketLabel(label)
+err := validateBucketLabel(label)
if err != nil {
return err
}
@@ -284,8 +290,8 @@ func validateBucketLabel(label []byte) error {
return Error.New("bucket label must start with a lowercase letter or number")
}
-if label[0] == '-' || label[len(label)-1] == '-' {
-return Error.New("bucket label cannot start or end with a hyphen")
+if !isLowerLetter(label[len(label)-1]) && !isDigit(label[len(label)-1]) {
+return Error.New("bucket label must end with a lowercase letter or number")
}
for i := 1; i < len(label)-1; i++ {
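The end-of-label rule now mirrors the start-of-label rule instead of only rejecting trailing hyphens, which is what makes a name like "test%" invalid. A standalone sketch, assuming isLowerLetter and isDigit are the obvious byte-range checks:

package main

import (
	"errors"
	"fmt"
)

func isLowerLetter(b byte) bool { return b >= 'a' && b <= 'z' }
func isDigit(b byte) bool       { return b >= '0' && b <= '9' }

// validateLabel mirrors the new rule: a label must start and end with a
// lowercase letter or digit, so "-abc", "abc-" and "test%" are all rejected.
func validateLabel(label []byte) error {
	if len(label) == 0 {
		return errors.New("empty label")
	}
	if !isLowerLetter(label[0]) && !isDigit(label[0]) {
		return errors.New("bucket label must start with a lowercase letter or number")
	}
	if !isLowerLetter(label[len(label)-1]) && !isDigit(label[len(label)-1]) {
		return errors.New("bucket label must end with a lowercase letter or number")
	}
	return nil
}

func main() {
	for _, l := range []string{"abc", "abc-", "test%"} {
		fmt.Println(l, validateLabel([]byte(l)))
	}
}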


@@ -2,7 +2,7 @@
// See LICENSE for copying information.
// Package uploadselection implements node selection logic for uploads.
-package uploadselection
+package nodeselection
import (
"github.com/spacemonkeygo/monkit/v3"


@@ -0,0 +1,181 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package nodeselection
import (
"bytes"
"storj.io/common/storj"
"storj.io/common/storj/location"
)
// NodeFilter can decide if a Node should be part of the selection or not.
type NodeFilter interface {
MatchInclude(node *SelectedNode) bool
}
// NodeFilters is a collection of multiple node filters (all should vote with true).
type NodeFilters []NodeFilter
// NodeFilterFunc is helper to use func as NodeFilter.
type NodeFilterFunc func(node *SelectedNode) bool
// MatchInclude implements NodeFilter interface.
func (n NodeFilterFunc) MatchInclude(node *SelectedNode) bool {
return n(node)
}
// ExcludeAllFilter will never select any node.
type ExcludeAllFilter struct{}
// MatchInclude implements NodeFilter interface.
func (ExcludeAllFilter) MatchInclude(node *SelectedNode) bool { return false }
// MatchInclude implements NodeFilter interface.
func (n NodeFilters) MatchInclude(node *SelectedNode) bool {
for _, filter := range n {
if !filter.MatchInclude(node) {
return false
}
}
return true
}
// WithCountryFilter is a helper to create a new filter with additional CountryFilter.
func (n NodeFilters) WithCountryFilter(permit location.Set) NodeFilters {
return append(n, NewCountryFilter(permit))
}
// WithAutoExcludeSubnets is a helper to create a new filter with additional AutoExcludeSubnets.
func (n NodeFilters) WithAutoExcludeSubnets() NodeFilters {
return append(n, NewAutoExcludeSubnets())
}
// WithExcludedIDs is a helper to create a new filter with additional WithExcludedIDs.
func (n NodeFilters) WithExcludedIDs(ds []storj.NodeID) NodeFilters {
return append(n, ExcludedIDs(ds))
}
var _ NodeFilter = NodeFilters{}
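Taken together, the combinators compose into a single NodeFilter. A hedged usage sketch (it would live in this same package; EuCountries comes from the country sets added later in this changeset):

// exampleFilters builds a filter set: geofence to the EU, drop blacklisted
// IDs, then keep at most one node per subnet (stateful, so built per request).
func exampleFilters(excluded []storj.NodeID) NodeFilter {
	return NodeFilters{}.
		WithCountryFilter(EuCountries).
		WithExcludedIDs(excluded).
		WithAutoExcludeSubnets()
}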
// CountryCodeExclude is a specific CountryFilter which excludes all nodes with the given country code.
type CountryCodeExclude []location.CountryCode
// MatchInclude implements NodeFilter interface.
func (c CountryCodeExclude) MatchInclude(node *SelectedNode) bool {
for _, code := range c {
if code == location.None {
continue
}
if node.CountryCode == code {
return false
}
}
return true
}
var _ NodeFilter = CountryCodeExclude{}
// CountryFilter can select nodes based on the condition of the country code.
type CountryFilter struct {
permit location.Set
}
// NewCountryFilter creates a new CountryFilter.
func NewCountryFilter(permit location.Set) NodeFilter {
return &CountryFilter{
permit: permit,
}
}
// MatchInclude implements NodeFilter interface.
func (p *CountryFilter) MatchInclude(node *SelectedNode) bool {
return p.permit.Contains(node.CountryCode)
}
var _ NodeFilter = &CountryFilter{}
// AutoExcludeSubnets pick at most one node from network.
//
// Stateful!!! should be re-created for each new selection request.
// It should only be used as the last filter.
type AutoExcludeSubnets struct {
seenSubnets map[string]struct{}
}
// NewAutoExcludeSubnets creates an initialized AutoExcludeSubnets.
func NewAutoExcludeSubnets() *AutoExcludeSubnets {
return &AutoExcludeSubnets{
seenSubnets: map[string]struct{}{},
}
}
// MatchInclude implements NodeFilter interface.
func (a *AutoExcludeSubnets) MatchInclude(node *SelectedNode) bool {
if _, found := a.seenSubnets[node.LastNet]; found {
return false
}
a.seenSubnets[node.LastNet] = struct{}{}
return true
}
var _ NodeFilter = &AutoExcludeSubnets{}
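Because it remembers every subnet it has admitted, an AutoExcludeSubnets must be built fresh for each selection request; the first node seen in a subnet wins and all later ones lose. An illustrative fragment:

filter := NewAutoExcludeSubnets()
_ = filter.MatchInclude(&SelectedNode{LastNet: "10.0.0"}) // true: subnet not seen yet
_ = filter.MatchInclude(&SelectedNode{LastNet: "10.0.0"}) // false: subnet already taken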
// ExcludedNetworks will exclude nodes with specified networks.
type ExcludedNetworks []string
// MatchInclude implements NodeFilter interface.
func (e ExcludedNetworks) MatchInclude(node *SelectedNode) bool {
for _, id := range e {
if id == node.LastNet {
return false
}
}
return true
}
var _ NodeFilter = ExcludedNetworks{}
// ExcludedIDs can blacklist NodeIDs.
type ExcludedIDs []storj.NodeID
// MatchInclude implements NodeFilter interface.
func (e ExcludedIDs) MatchInclude(node *SelectedNode) bool {
for _, id := range e {
if id == node.ID {
return false
}
}
return true
}
var _ NodeFilter = ExcludedIDs{}
// TagFilter matches nodes with specific tags.
type TagFilter struct {
signer storj.NodeID
name string
value []byte
}
// NewTagFilter creates a new tag filter.
func NewTagFilter(id storj.NodeID, name string, value []byte) TagFilter {
return TagFilter{
signer: id,
name: name,
value: value,
}
}
// MatchInclude implements NodeFilter interface.
func (t TagFilter) MatchInclude(node *SelectedNode) bool {
for _, tag := range node.Tags {
if tag.Name == t.name && bytes.Equal(tag.Value, t.value) && tag.Signer == t.signer {
return true
}
}
return false
}
var _ NodeFilter = TagFilter{}
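This is the filter behind the `16:tag(...)` placement rule exercised by TestPlacements earlier in this changeset; built directly it would read roughly (the helper name is illustrative):

// certifiedOnly admits only nodes carrying a "certified=true" tag signed by signer.
func certifiedOnly(signer storj.NodeID) NodeFilter {
	return NewTagFilter(signer, "certified", []byte("true"))
}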


@@ -0,0 +1,175 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package nodeselection
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/identity/testidentity"
"storj.io/common/storj"
"storj.io/common/storj/location"
"storj.io/common/testcontext"
"storj.io/common/testrand"
)
func TestNodeFilter_AutoExcludeSubnet(t *testing.T) {
criteria := NodeFilters{}.WithAutoExcludeSubnets()
assert.True(t, criteria.MatchInclude(&SelectedNode{
LastNet: "192.168.0.1",
}))
assert.False(t, criteria.MatchInclude(&SelectedNode{
LastNet: "192.168.0.1",
}))
assert.True(t, criteria.MatchInclude(&SelectedNode{
LastNet: "192.168.1.1",
}))
}
func TestCriteria_ExcludeNodeID(t *testing.T) {
included := testrand.NodeID()
excluded := testrand.NodeID()
criteria := NodeFilters{}.WithExcludedIDs([]storj.NodeID{excluded})
assert.False(t, criteria.MatchInclude(&SelectedNode{
ID: excluded,
}))
assert.True(t, criteria.MatchInclude(&SelectedNode{
ID: included,
}))
}
func TestCriteria_NodeIDAndSubnet(t *testing.T) {
excluded := testrand.NodeID()
criteria := NodeFilters{}.
WithExcludedIDs([]storj.NodeID{excluded}).
WithAutoExcludeSubnets()
// due to node id criteria
assert.False(t, criteria.MatchInclude(&SelectedNode{
ID: excluded,
LastNet: "192.168.0.1",
}))
// should be included as previous one excluded and
// not stored for subnet exclusion
assert.True(t, criteria.MatchInclude(&SelectedNode{
ID: testrand.NodeID(),
LastNet: "192.168.0.2",
}))
}
func TestCriteria_Geofencing(t *testing.T) {
eu := NodeFilters{}.WithCountryFilter(EuCountries)
us := NodeFilters{}.WithCountryFilter(location.NewSet(location.UnitedStates))
cases := []struct {
name string
country location.CountryCode
criteria NodeFilters
expected bool
}{
{
name: "US matches US selector",
country: location.UnitedStates,
criteria: us,
expected: true,
},
{
name: "Germany is EU",
country: location.Germany,
criteria: eu,
expected: true,
},
{
name: "US is not eu",
country: location.UnitedStates,
criteria: eu,
expected: false,
},
{
name: "Empty country doesn't match region",
country: location.CountryCode(0),
criteria: eu,
expected: false,
},
{
name: "Empty country doesn't match country",
country: location.CountryCode(0),
criteria: us,
expected: false,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
assert.Equal(t, c.expected, c.criteria.MatchInclude(&SelectedNode{
CountryCode: c.country,
}))
})
}
}
// BenchmarkNodeFilterFullTable checks performances of rule evaluation on ALL storage nodes.
func BenchmarkNodeFilterFullTable(b *testing.B) {
filters := NodeFilters{}
filters = append(filters, NodeFilterFunc(func(node *SelectedNode) bool {
return true
}))
filters = append(filters, NodeFilterFunc(func(node *SelectedNode) bool {
return true
}))
filters = append(filters, NodeFilterFunc(func(node *SelectedNode) bool {
return true
}))
filters = filters.WithAutoExcludeSubnets()
benchmarkFilter(b, filters)
}
func benchmarkFilter(b *testing.B, filters NodeFilters) {
nodeNo := 25000
if testing.Short() {
nodeNo = 20
}
nodes := generatedSelectedNodes(b, nodeNo)
b.ResetTimer()
c := 0
for j := 0; j < b.N; j++ {
for n := 0; n < len(nodes); n++ {
if filters.MatchInclude(nodes[n]) {
c++
}
}
}
}
func generatedSelectedNodes(b *testing.B, nodeNo int) []*SelectedNode {
nodes := make([]*SelectedNode, nodeNo)
ctx := testcontext.New(b)
for i := 0; i < nodeNo; i++ {
node := SelectedNode{}
identity, err := testidentity.NewTestIdentity(ctx)
require.NoError(b, err)
node.ID = identity.ID
node.LastNet = fmt.Sprintf("192.168.%d.0", i%256)
node.LastIPPort = fmt.Sprintf("192.168.%d.%d:%d", i%256, i%65536, i%1000+1000)
node.CountryCode = []location.CountryCode{location.None, location.UnitedStates, location.Germany, location.Hungary, location.Austria}[i%5]
nodes[i] = &node
}
return nodes
}


@@ -0,0 +1,69 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package nodeselection
import (
"time"
"github.com/zeebo/errs"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/storj/location"
)
// NodeTag is a tag associated with a node (approved by signer).
type NodeTag struct {
NodeID storj.NodeID
SignedAt time.Time
Signer storj.NodeID
Name string
Value []byte
}
// NodeTags is a collection of multiple NodeTag.
type NodeTags []NodeTag
// FindBySignerAndName selects first tag with same name / NodeID.
func (n NodeTags) FindBySignerAndName(signer storj.NodeID, name string) (NodeTag, error) {
for _, tag := range n {
if tag.Name == name && signer == tag.Signer {
return tag, nil
}
}
return NodeTag{}, errs.New("tags not found")
}
// SelectedNode is used as a result for creating orders limits.
type SelectedNode struct {
ID storj.NodeID
Address *pb.NodeAddress
LastNet string
LastIPPort string
CountryCode location.CountryCode
Tags NodeTags
}
// Clone returns a deep clone of the selected node.
func (node *SelectedNode) Clone() *SelectedNode {
copy := pb.CopyNode(&pb.Node{Id: node.ID, Address: node.Address})
tags := make([]NodeTag, len(node.Tags))
for ix, tag := range node.Tags {
tags[ix] = NodeTag{
NodeID: tag.NodeID,
SignedAt: tag.SignedAt,
Signer: tag.Signer,
Name: tag.Name,
Value: tag.Value,
}
}
return &SelectedNode{
ID: copy.Id,
Address: copy.Address,
LastNet: node.LastNet,
LastIPPort: node.LastIPPort,
CountryCode: node.CountryCode,
Tags: tags,
}
}


@@ -0,0 +1,44 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package nodeselection
import "storj.io/common/storj/location"
// EuCountries defines the member countries of European Union.
var EuCountries = location.NewSet(
location.Austria,
location.Belgium,
location.Bulgaria,
location.Croatia,
location.Cyprus,
location.Czechia,
location.Denmark,
location.Estonia,
location.Finland,
location.France,
location.Germany,
location.Greece,
location.Hungary,
location.Ireland,
location.Italy,
location.Lithuania,
location.Latvia,
location.Luxembourg,
location.Malta,
location.Netherlands,
location.Poland,
location.Portugal,
location.Romania,
location.Slovenia,
location.Slovakia,
location.Spain,
location.Sweden,
)
// EeaCountries defined the EEA countries.
var EeaCountries = EuCountries.With(
location.Iceland,
location.Liechtenstein,
location.Norway,
)
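These sets plug straight into CountryFilter; an EEA geofence, for example, admits a Norwegian node but not a US one. An illustrative fragment:

eea := NewCountryFilter(EeaCountries)
_ = eea.MatchInclude(&SelectedNode{CountryCode: location.Norway})       // true
_ = eea.MatchInclude(&SelectedNode{CountryCode: location.UnitedStates}) // false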


@@ -1,14 +1,14 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
-package uploadselection
+package nodeselection
import (
mathrand "math/rand" // Using mathrand here because crypto-graphic randomness is not required and simplifies code.
)
// SelectByID implements selection from nodes with every node having equal probability.
-type SelectByID []*Node
+type SelectByID []*SelectedNode
var _ Selector = (SelectByID)(nil)
@@ -16,16 +16,16 @@ var _ Selector = (SelectByID)(nil)
func (nodes SelectByID) Count() int { return len(nodes) }
// Select selects upto n nodes.
-func (nodes SelectByID) Select(n int, criteria Criteria) []*Node {
+func (nodes SelectByID) Select(n int, nodeFilter NodeFilter) []*SelectedNode {
if n <= 0 {
return nil
}
-selected := []*Node{}
+selected := []*SelectedNode{}
for _, idx := range mathrand.Perm(len(nodes)) {
node := nodes[idx]
-if !criteria.MatchInclude(node) {
+if !nodeFilter.MatchInclude(node) {
continue
}
@@ -46,12 +46,12 @@ var _ Selector = (SelectBySubnet)(nil)
// Subnet groups together nodes with the same subnet.
type Subnet struct {
Net string
-Nodes []*Node
+Nodes []*SelectedNode
}
// SelectBySubnetFromNodes creates SelectBySubnet selector from nodes.
-func SelectBySubnetFromNodes(nodes []*Node) SelectBySubnet {
-bynet := map[string][]*Node{}
+func SelectBySubnetFromNodes(nodes []*SelectedNode) SelectBySubnet {
+bynet := map[string][]*SelectedNode{}
for _, node := range nodes {
bynet[node.LastNet] = append(bynet[node.LastNet], node)
}
@@ -71,17 +71,17 @@ func SelectBySubnetFromNodes(nodes []*SelectedNode) SelectBySubnet {
func (subnets SelectBySubnet) Count() int { return len(subnets) }
// Select selects upto n nodes.
-func (subnets SelectBySubnet) Select(n int, criteria Criteria) []*Node {
+func (subnets SelectBySubnet) Select(n int, filter NodeFilter) []*SelectedNode {
if n <= 0 {
return nil
}
-selected := []*Node{}
+selected := []*SelectedNode{}
for _, idx := range mathrand.Perm(len(subnets)) {
subnet := subnets[idx]
node := subnet.Nodes[mathrand.Intn(len(subnet.Nodes))]
-if !criteria.MatchInclude(node) {
+if !filter.MatchInclude(node) {
continue
}
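End to end, a caller builds a selector once from the cached nodes and hands each request its own filter set; roughly (a sketch; in production the nodes come from the overlay cache):

selector := SelectBySubnetFromNodes(nodes)
// pick up to 5 nodes, each from a different subnet, all inside the EU
picked := selector.Select(5, NodeFilters{}.WithCountryFilter(EuCountries))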


@@ -1,7 +1,7 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
-package uploadselection_test
+package nodeselection_test
import (
"testing"
@@ -12,7 +12,7 @@ import (
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/common/testrand"
-"storj.io/storj/satellite/nodeselection/uploadselection"
+"storj.io/storj/satellite/nodeselection"
)
func TestSelectByID(t *testing.T) {
@@ -24,35 +24,26 @@ func TestSelectByID(t *testing.T) {
// create 3 nodes, 2 with same subnet
lastNetDuplicate := "1.0.1"
-subnetA1 := &uploadselection.Node{
-NodeURL: storj.NodeURL{
-ID: testrand.NodeID(),
-Address: lastNetDuplicate + ".4:8080",
-},
+subnetA1 := &nodeselection.SelectedNode{
+ID: testrand.NodeID(),
LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".4:8080",
}
-subnetA2 := &uploadselection.Node{
-NodeURL: storj.NodeURL{
-ID: testrand.NodeID(),
-Address: lastNetDuplicate + ".5:8080",
-},
+subnetA2 := &nodeselection.SelectedNode{
+ID: testrand.NodeID(),
LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".5:8080",
}
lastNetSingle := "1.0.2"
-subnetB1 := &uploadselection.Node{
-NodeURL: storj.NodeURL{
-ID: testrand.NodeID(),
-Address: lastNetSingle + ".5:8080",
-},
+subnetB1 := &nodeselection.SelectedNode{
+ID: testrand.NodeID(),
LastNet: lastNetSingle,
LastIPPort: lastNetSingle + ".5:8080",
}
-nodes := []*uploadselection.Node{subnetA1, subnetA2, subnetB1}
+nodes := []*nodeselection.SelectedNode{subnetA1, subnetA2, subnetB1}
-selector := uploadselection.SelectByID(nodes)
+selector := nodeselection.SelectByID(nodes)
const (
reqCount = 2
@@ -63,7 +54,7 @@ func TestSelectByID(t *testing.T) {
// perform many node selections that selects 2 nodes
for i := 0; i < executionCount; i++ {
-selectedNodes := selector.Select(reqCount, uploadselection.Criteria{})
+selectedNodes := selector.Select(reqCount, nodeselection.NodeFilters{})
require.Len(t, selectedNodes, reqCount)
for _, node := range selectedNodes {
selectedNodeCount[node.ID]++
@@ -93,35 +84,26 @@ func TestSelectBySubnet(t *testing.T) {
// create 3 nodes, 2 with same subnet
lastNetDuplicate := "1.0.1"
-subnetA1 := &uploadselection.Node{
-NodeURL: storj.NodeURL{
-ID: testrand.NodeID(),
-Address: lastNetDuplicate + ".4:8080",
-},
+subnetA1 := &nodeselection.SelectedNode{
+ID: testrand.NodeID(),
LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".4:8080",
}
-subnetA2 := &uploadselection.Node{
-NodeURL: storj.NodeURL{
-ID: testrand.NodeID(),
-Address: lastNetDuplicate + ".5:8080",
-},
+subnetA2 := &nodeselection.SelectedNode{
+ID: testrand.NodeID(),
LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".5:8080",
}
lastNetSingle := "1.0.2"
-subnetB1 := &uploadselection.Node{
-NodeURL: storj.NodeURL{
-ID: testrand.NodeID(),
-Address: lastNetSingle + ".5:8080",
-},
+subnetB1 := &nodeselection.SelectedNode{
+ID: testrand.NodeID(),
LastNet: lastNetSingle,
LastIPPort: lastNetSingle + ".5:8080",
}
-nodes := []*uploadselection.Node{subnetA1, subnetA2, subnetB1}
+nodes := []*nodeselection.SelectedNode{subnetA1, subnetA2, subnetB1}
-selector := uploadselection.SelectBySubnetFromNodes(nodes)
+selector := nodeselection.SelectBySubnetFromNodes(nodes)
const (
reqCount = 2
@ -132,7 +114,7 @@ func TestSelectBySubnet(t *testing.T) {
// perform many node selections that selects 2 nodes // perform many node selections that selects 2 nodes
for i := 0; i < executionCount; i++ { for i := 0; i < executionCount; i++ {
selectedNodes := selector.Select(reqCount, uploadselection.Criteria{}) selectedNodes := selector.Select(reqCount, nodeselection.NodeFilters{})
require.Len(t, selectedNodes, reqCount) require.Len(t, selectedNodes, reqCount)
for _, node := range selectedNodes { for _, node := range selectedNodes {
selectedNodeCount[node.ID]++ selectedNodeCount[node.ID]++
@ -174,35 +156,26 @@ func TestSelectBySubnetOneAtATime(t *testing.T) {
// create 3 nodes, 2 with same subnet // create 3 nodes, 2 with same subnet
lastNetDuplicate := "1.0.1" lastNetDuplicate := "1.0.1"
subnetA1 := &uploadselection.Node{ subnetA1 := &nodeselection.SelectedNode{
NodeURL: storj.NodeURL{ ID: testrand.NodeID(),
ID: testrand.NodeID(),
Address: lastNetDuplicate + ".4:8080",
},
LastNet: lastNetDuplicate, LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".4:8080", LastIPPort: lastNetDuplicate + ".4:8080",
} }
subnetA2 := &uploadselection.Node{ subnetA2 := &nodeselection.SelectedNode{
NodeURL: storj.NodeURL{ ID: testrand.NodeID(),
ID: testrand.NodeID(),
Address: lastNetDuplicate + ".5:8080",
},
LastNet: lastNetDuplicate, LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".5:8080", LastIPPort: lastNetDuplicate + ".5:8080",
} }
lastNetSingle := "1.0.2" lastNetSingle := "1.0.2"
subnetB1 := &uploadselection.Node{ subnetB1 := &nodeselection.SelectedNode{
NodeURL: storj.NodeURL{ ID: testrand.NodeID(),
ID: testrand.NodeID(),
Address: lastNetSingle + ".5:8080",
},
LastNet: lastNetSingle, LastNet: lastNetSingle,
LastIPPort: lastNetSingle + ".5:8080", LastIPPort: lastNetSingle + ".5:8080",
} }
nodes := []*uploadselection.Node{subnetA1, subnetA2, subnetB1} nodes := []*nodeselection.SelectedNode{subnetA1, subnetA2, subnetB1}
selector := uploadselection.SelectBySubnetFromNodes(nodes) selector := nodeselection.SelectBySubnetFromNodes(nodes)
const ( const (
reqCount = 1 reqCount = 1
@ -213,7 +186,7 @@ func TestSelectBySubnetOneAtATime(t *testing.T) {
// perform many node selections that selects 1 node // perform many node selections that selects 1 node
for i := 0; i < executionCount; i++ { for i := 0; i < executionCount; i++ {
selectedNodes := selector.Select(reqCount, uploadselection.Criteria{}) selectedNodes := selector.Select(reqCount, nodeselection.NodeFilters{})
require.Len(t, selectedNodes, reqCount) require.Len(t, selectedNodes, reqCount)
for _, node := range selectedNodes { for _, node := range selectedNodes {
selectedNodeCount[node.ID]++ selectedNodeCount[node.ID]++
@ -247,49 +220,35 @@ func TestSelectFiltered(t *testing.T) {
// create 3 nodes, 2 with same subnet // create 3 nodes, 2 with same subnet
lastNetDuplicate := "1.0.1" lastNetDuplicate := "1.0.1"
firstID := testrand.NodeID() firstID := testrand.NodeID()
subnetA1 := &uploadselection.Node{ subnetA1 := &nodeselection.SelectedNode{
NodeURL: storj.NodeURL{ ID: firstID,
ID: firstID,
Address: lastNetDuplicate + ".4:8080",
},
LastNet: lastNetDuplicate, LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".4:8080", LastIPPort: lastNetDuplicate + ".4:8080",
} }
secondID := testrand.NodeID() secondID := testrand.NodeID()
subnetA2 := &uploadselection.Node{ subnetA2 := &nodeselection.SelectedNode{
NodeURL: storj.NodeURL{ ID: secondID,
ID: secondID,
Address: lastNetDuplicate + ".5:8080",
},
LastNet: lastNetDuplicate, LastNet: lastNetDuplicate,
LastIPPort: lastNetDuplicate + ".5:8080", LastIPPort: lastNetDuplicate + ".5:8080",
} }
thirdID := testrand.NodeID() thirdID := testrand.NodeID()
lastNetSingle := "1.0.2" lastNetSingle := "1.0.2"
subnetB1 := &uploadselection.Node{ subnetB1 := &nodeselection.SelectedNode{
NodeURL: storj.NodeURL{ ID: thirdID,
ID: thirdID,
Address: lastNetSingle + ".5:8080",
},
LastNet: lastNetSingle, LastNet: lastNetSingle,
LastIPPort: lastNetSingle + ".5:8080", LastIPPort: lastNetSingle + ".5:8080",
} }
nodes := []*uploadselection.Node{subnetA1, subnetA2, subnetB1} nodes := []*nodeselection.SelectedNode{subnetA1, subnetA2, subnetB1}
selector := uploadselection.SelectByID(nodes) selector := nodeselection.SelectByID(nodes)
assert.Len(t, selector.Select(3, uploadselection.Criteria{}), 3) assert.Len(t, selector.Select(3, nodeselection.NodeFilters{}), 3)
assert.Len(t, selector.Select(3, uploadselection.Criteria{ExcludeNodeIDs: []storj.NodeID{firstID}}), 2) assert.Len(t, selector.Select(3, nodeselection.NodeFilters{}.WithAutoExcludeSubnets()), 2)
assert.Len(t, selector.Select(3, uploadselection.Criteria{}), 3) assert.Len(t, selector.Select(3, nodeselection.NodeFilters{}), 3)
assert.Len(t, selector.Select(3, uploadselection.Criteria{ExcludeNodeIDs: []storj.NodeID{firstID, secondID}}), 1) assert.Len(t, selector.Select(3, nodeselection.NodeFilters{}.WithExcludedIDs([]storj.NodeID{firstID, secondID})), 1)
assert.Len(t, selector.Select(3, uploadselection.Criteria{ assert.Len(t, selector.Select(3, nodeselection.NodeFilters{}.WithAutoExcludeSubnets()), 2)
AutoExcludeSubnets: map[string]struct{}{}, assert.Len(t, selector.Select(3, nodeselection.NodeFilters{}.WithExcludedIDs([]storj.NodeID{thirdID}).WithAutoExcludeSubnets()), 1)
}), 2)
assert.Len(t, selector.Select(3, uploadselection.Criteria{
ExcludeNodeIDs: []storj.NodeID{thirdID},
AutoExcludeSubnets: map[string]struct{}{},
}), 1)
} }
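Taken together, these test changes show the shape of the new API: the free-form `Criteria` struct is gone, and selectors now take a composable `nodeselection.NodeFilters` value. A minimal usage sketch, assuming `SelectByID`, `WithExcludedIDs`, and `WithAutoExcludeSubnets` keep the signatures exercised above:

    package main

    import (
        "fmt"

        "storj.io/common/storj"
        "storj.io/common/testrand"
        "storj.io/storj/satellite/nodeselection"
    )

    func main() {
        // Two nodes share the subnet "1.0.1"; the third is alone in "1.0.2".
        nodes := []*nodeselection.SelectedNode{
            {ID: testrand.NodeID(), LastNet: "1.0.1", LastIPPort: "1.0.1.4:8080"},
            {ID: testrand.NodeID(), LastNet: "1.0.1", LastIPPort: "1.0.1.5:8080"},
            {ID: testrand.NodeID(), LastNet: "1.0.2", LastIPPort: "1.0.2.5:8080"},
        }
        selector := nodeselection.SelectByID(nodes)

        // Exclusions are expressed as chained filters instead of Criteria fields.
        filters := nodeselection.NodeFilters{}.
            WithExcludedIDs([]storj.NodeID{nodes[0].ID}).
            WithAutoExcludeSubnets()

        // At most one node per subnet remains, minus the explicit exclusion.
        fmt.Println(len(selector.Select(3, filters)))
    }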


@@ -1,7 +1,7 @@
 // Copyright (C) 2020 Storj Labs, Inc.
 // See LICENSE for copying information.
-package uploadselection
+package nodeselection
 import (
     "context"
@@ -10,7 +10,6 @@ import (
     "github.com/zeebo/errs"
     "storj.io/common/storj"
-    "storj.io/common/storj/location"
 )
 // ErrNotEnoughNodes is when selecting nodes failed with the given parameters.
@@ -42,11 +41,11 @@ type Selector interface {
     Count() int
     // Select selects up-to n nodes which are included by the criteria.
     // empty criteria includes all the nodes
-    Select(n int, criteria Criteria) []*Node
+    Select(n int, nodeFilter NodeFilter) []*SelectedNode
 }
 // NewState returns a state based on the input.
-func NewState(reputableNodes, newNodes []*Node) *State {
+func NewState(reputableNodes, newNodes []*SelectedNode) *State {
     state := &State{}
     state.netByID = map[storj.NodeID]string{}
@@ -70,15 +69,13 @@ func NewState(reputableNodes, newNodes []*Node) *State {
 // Request contains arguments for State.Request.
 type Request struct {
     Count       int
     NewFraction float64
-    ExcludedIDs []storj.NodeID
-    Placement   storj.PlacementConstraint
-    ExcludedCountryCodes []string
+    NodeFilters NodeFilters
 }
 // Select selects requestedCount nodes where there will be newFraction nodes.
-func (state *State) Select(ctx context.Context, request Request) (_ []*Node, err error) {
+func (state *State) Select(ctx context.Context, request Request) (_ []*SelectedNode, err error) {
     defer mon.Task()(&ctx)(&err)
     state.mu.RLock()
@@ -87,41 +84,23 @@ func (state *State) Select(ctx context.Context, request Request) (_ []*Node, err
     totalCount := request.Count
     newCount := int(float64(totalCount) * request.NewFraction)
-    var selected []*Node
+    var selected []*SelectedNode
     var reputableNodes Selector
     var newNodes Selector
-    var criteria Criteria
-    if request.ExcludedIDs != nil {
-        criteria.ExcludeNodeIDs = request.ExcludedIDs
-    }
-    for _, code := range request.ExcludedCountryCodes {
-        criteria.ExcludedCountryCodes = append(criteria.ExcludedCountryCodes, location.ToCountryCode(code))
-    }
-    criteria.Placement = request.Placement
-    criteria.AutoExcludeSubnets = make(map[string]struct{})
-    for _, id := range request.ExcludedIDs {
-        if net, ok := state.netByID[id]; ok {
-            criteria.AutoExcludeSubnets[net] = struct{}{}
-        }
-    }
     reputableNodes = state.distinct.Reputable
     newNodes = state.distinct.New
     // Get a random selection of new nodes out of the cache first so that if there aren't
     // enough new nodes on the network, we can fall back to using reputable nodes instead.
     selected = append(selected,
-        newNodes.Select(newCount, criteria)...)
+        newNodes.Select(newCount, request.NodeFilters)...)
     // Get all the remaining reputable nodes.
     reputableCount := totalCount - len(selected)
     selected = append(selected,
-        reputableNodes.Select(reputableCount, criteria)...)
+        reputableNodes.Select(reputableCount, request.NodeFilters)...)
     if len(selected) < totalCount {
         return selected, ErrNotEnoughNodes.New("requested from cache %d, found %d", totalCount, len(selected))
@@ -136,3 +115,19 @@ func (state *State) Stats() Stats {
     return state.stats
 }
+// ExcludeNetworksBasedOnNodes will create a NodeFilter which excludes all nodes that share a subnet with the specified ones.
+func (state *State) ExcludeNetworksBasedOnNodes(ds []storj.NodeID) NodeFilter {
+    uniqueExcludedNet := make(map[string]struct{}, len(ds))
+    for _, id := range ds {
+        net := state.netByID[id]
+        uniqueExcludedNet[net] = struct{}{}
+    }
+    excludedNet := make([]string, len(uniqueExcludedNet))
+    i := 0
+    for net := range uniqueExcludedNet {
+        excludedNet[i] = net
+        i++
+    }
+    return ExcludedNetworks(excludedNet)
+}
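Callers that used to pass `ExcludedIDs` through the `Request` now express the same intent as filters before calling `Select`. A sketch of that translation, assuming `NodeFilters` is a slice of `NodeFilter` (as its use in placement.go below suggests) and `WithExcludedIDs` behaves as in the selector tests; `selectAvoiding` and the 5% new-node fraction are invented for illustration:

    // selectAvoiding is a hypothetical helper reproducing the old
    // ExcludedIDs + AutoExcludeSubnets behavior with the new filter API.
    func selectAvoiding(ctx context.Context, state *nodeselection.State, count int, alreadyUsed []storj.NodeID) ([]*nodeselection.SelectedNode, error) {
        filters := nodeselection.NodeFilters{}.WithExcludedIDs(alreadyUsed)
        // Skip the subnets of the excluded nodes too, as the old
        // criteria.AutoExcludeSubnets pre-population used to.
        filters = append(filters, state.ExcludeNetworksBasedOnNodes(alreadyUsed))
        return state.Select(ctx, nodeselection.Request{
            Count:       count,
            NewFraction: 0.05,
            NodeFilters: filters,
        })
    }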


@@ -1,7 +1,7 @@
 // Copyright (C) 2020 Storj Labs, Inc.
 // See LICENSE for copying information.
-package uploadselection_test
+package nodeselection_test
 import (
     "strconv"
@@ -10,10 +10,9 @@ import (
     "github.com/stretchr/testify/require"
     "golang.org/x/sync/errgroup"
-    "storj.io/common/storj"
     "storj.io/common/testcontext"
     "storj.io/common/testrand"
-    "storj.io/storj/satellite/nodeselection/uploadselection"
+    "storj.io/storj/satellite/nodeselection"
 )
 func TestState_SelectNonDistinct(t *testing.T) {
@@ -29,18 +28,17 @@ func TestState_SelectNonDistinct(t *testing.T) {
         createRandomNodes(3, "1.0.4", false),
     )
-    state := uploadselection.NewState(reputableNodes, newNodes)
-    require.Equal(t, uploadselection.Stats{
+    state := nodeselection.NewState(reputableNodes, newNodes)
+    require.Equal(t, nodeselection.Stats{
         New:       5,
         Reputable: 5,
     }, state.Stats())
     { // select 5 non-distinct subnet reputable nodes
         const selectCount = 5
-        selected, err := state.Select(ctx, uploadselection.Request{
+        selected, err := state.Select(ctx, nodeselection.Request{
             Count:       selectCount,
             NewFraction: 0,
-            ExcludedIDs: nil,
         })
         require.NoError(t, err)
         require.Len(t, selected, selectCount)
@@ -49,10 +47,9 @@ func TestState_SelectNonDistinct(t *testing.T) {
     { // select 6 non-distinct subnet reputable and new nodes (50%)
         const selectCount = 6
         const newFraction = 0.5
-        selected, err := state.Select(ctx, uploadselection.Request{
+        selected, err := state.Select(ctx, nodeselection.Request{
             Count:       selectCount,
             NewFraction: newFraction,
-            ExcludedIDs: nil,
         })
         require.NoError(t, err)
         require.Len(t, selected, selectCount)
@@ -63,10 +60,9 @@ func TestState_SelectNonDistinct(t *testing.T) {
     { // select 10 distinct subnet reputable and new nodes (100%), falling back to 5 reputable
         const selectCount = 10
         const newFraction = 1.0
-        selected, err := state.Select(ctx, uploadselection.Request{
+        selected, err := state.Select(ctx, nodeselection.Request{
             Count:       selectCount,
             NewFraction: newFraction,
-            ExcludedIDs: nil,
         })
         require.NoError(t, err)
         require.Len(t, selected, selectCount)
@@ -88,18 +84,17 @@ func TestState_SelectDistinct(t *testing.T) {
         createRandomNodes(3, "1.0.4", true),
     )
-    state := uploadselection.NewState(reputableNodes, newNodes)
-    require.Equal(t, uploadselection.Stats{
+    state := nodeselection.NewState(reputableNodes, newNodes)
+    require.Equal(t, nodeselection.Stats{
         New:       2,
         Reputable: 2,
     }, state.Stats())
     { // select 2 distinct subnet reputable nodes
         const selectCount = 2
-        selected, err := state.Select(ctx, uploadselection.Request{
+        selected, err := state.Select(ctx, nodeselection.Request{
             Count:       selectCount,
             NewFraction: 0,
-            ExcludedIDs: nil,
         })
         require.NoError(t, err)
         require.Len(t, selected, selectCount)
@@ -107,10 +102,9 @@ func TestState_SelectDistinct(t *testing.T) {
     { // try to select 5 distinct subnet reputable nodes, but there are only 2 in the state
         const selectCount = 5
-        selected, err := state.Select(ctx, uploadselection.Request{
+        selected, err := state.Select(ctx, nodeselection.Request{
             Count:       selectCount,
             NewFraction: 0,
-            ExcludedIDs: nil,
         })
         require.Error(t, err)
         require.Len(t, selected, 2)
@@ -119,10 +113,9 @@ func TestState_SelectDistinct(t *testing.T) {
     { // select 4 distinct subnet reputable and new nodes (50%)
         const selectCount = 4
         const newFraction = 0.5
-        selected, err := state.Select(ctx, uploadselection.Request{
+        selected, err := state.Select(ctx, nodeselection.Request{
             Count:       selectCount,
             NewFraction: newFraction,
-            ExcludedIDs: nil,
         })
         require.NoError(t, err)
         require.Len(t, selected, selectCount)
@@ -144,15 +137,14 @@ func TestState_Select_Concurrent(t *testing.T) {
         createRandomNodes(3, "1.0.4", false),
     )
-    state := uploadselection.NewState(reputableNodes, newNodes)
+    state := nodeselection.NewState(reputableNodes, newNodes)
     var group errgroup.Group
     group.Go(func() error {
         const selectCount = 5
-        nodes, err := state.Select(ctx, uploadselection.Request{
+        nodes, err := state.Select(ctx, nodeselection.Request{
             Count:       selectCount,
             NewFraction: 0.5,
-            ExcludedIDs: nil,
         })
         require.Len(t, nodes, selectCount)
         return err
@@ -160,10 +152,9 @@ func TestState_Select_Concurrent(t *testing.T) {
     group.Go(func() error {
         const selectCount = 4
-        nodes, err := state.Select(ctx, uploadselection.Request{
+        nodes, err := state.Select(ctx, nodeselection.Request{
             Count:       selectCount,
             NewFraction: 0.5,
-            ExcludedIDs: nil,
         })
         require.Len(t, nodes, selectCount)
         return err
@@ -172,15 +163,13 @@ func TestState_Select_Concurrent(t *testing.T) {
 }
 // createRandomNodes creates n random nodes all in the subnet.
-func createRandomNodes(n int, subnet string, shareNets bool) []*uploadselection.Node {
-    xs := make([]*uploadselection.Node, n)
+func createRandomNodes(n int, subnet string, shareNets bool) []*nodeselection.SelectedNode {
+    xs := make([]*nodeselection.SelectedNode, n)
     for i := range xs {
         addr := subnet + "." + strconv.Itoa(i) + ":8080"
-        xs[i] = &uploadselection.Node{
-            NodeURL: storj.NodeURL{
-                ID:      testrand.NodeID(),
-                Address: addr,
-            },
+        xs[i] = &nodeselection.SelectedNode{
+            ID:         testrand.NodeID(),
+            LastNet:    addr,
             LastIPPort: addr,
         }
         if shareNets {
@@ -193,8 +182,8 @@ func createRandomNodes(n int, subnet string, shareNets bool) []*uploadselection.
 }
 // joinNodes appends all slices into a single slice.
-func joinNodes(lists ...[]*uploadselection.Node) []*uploadselection.Node {
-    xs := []*uploadselection.Node{}
+func joinNodes(lists ...[]*nodeselection.SelectedNode) []*nodeselection.SelectedNode {
+    xs := []*nodeselection.SelectedNode{}
     for _, list := range lists {
         xs = append(xs, list...)
     }
@@ -202,8 +191,8 @@ func joinNodes(lists ...[]*uploadselection.Node) []*uploadselection.Node {
 }
 // intersectLists returns nodes that exist in both lists compared by ID.
-func intersectLists(as, bs []*uploadselection.Node) []*uploadselection.Node {
-    var xs []*uploadselection.Node
+func intersectLists(as, bs []*nodeselection.SelectedNode) []*nodeselection.SelectedNode {
+    var xs []*nodeselection.SelectedNode
 next:
     for _, a := range as {


@@ -1,56 +0,0 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information

package uploadselection

import (
	"storj.io/common/storj"
	"storj.io/common/storj/location"
)

// Criteria to filter nodes.
type Criteria struct {
	ExcludeNodeIDs       []storj.NodeID
	AutoExcludeSubnets   map[string]struct{} // initialize it with empty map to keep only one node per subnet.
	Placement            storj.PlacementConstraint
	ExcludedCountryCodes []location.CountryCode
}

// MatchInclude returns with true if node is selected.
func (c *Criteria) MatchInclude(node *Node) bool {
	if ContainsID(c.ExcludeNodeIDs, node.ID) {
		return false
	}
	if !c.Placement.AllowedCountry(node.CountryCode) {
		return false
	}
	if c.AutoExcludeSubnets != nil {
		if _, excluded := c.AutoExcludeSubnets[node.LastNet]; excluded {
			return false
		}
		c.AutoExcludeSubnets[node.LastNet] = struct{}{}
	}
	for _, code := range c.ExcludedCountryCodes {
		if code.String() == "" {
			continue
		}
		if node.CountryCode == code {
			return false
		}
	}
	return true
}

// ContainsID returns whether ids contain id.
func ContainsID(ids []storj.NodeID, id storj.NodeID) bool {
	for _, k := range ids {
		if k == id {
			return true
		}
	}
	return false
}
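Every responsibility of the deleted `Criteria` reappears as a small filter (`ExcludedNetworks`, `NewCountryFilter`, `NewTagFilter`, and so on). For intuition, a custom filter under the apparent contract (a single `MatchInclude(*SelectedNode) bool` method, which is what `NodeFilters.MatchInclude` fans out to) could look like the sketch below; `hasIPFilter` and `euWithIP` are invented for illustration:

    // hasIPFilter keeps only nodes whose last known ip:port is set.
    type hasIPFilter struct{}

    func (hasIPFilter) MatchInclude(node *nodeselection.SelectedNode) bool {
        return node.LastIPPort != ""
    }

    // euWithIP composes the custom filter with a built-in country filter.
    func euWithIP() nodeselection.NodeFilters {
        return append(
            nodeselection.NodeFilters{nodeselection.NewCountryFilter(nodeselection.EuCountries)},
            hasIPFilter{},
        )
    }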


@@ -1,140 +0,0 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information

package uploadselection

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"storj.io/common/storj"
	"storj.io/common/storj/location"
	"storj.io/common/testrand"
)

func TestCriteria_AutoExcludeSubnet(t *testing.T) {
	criteria := Criteria{
		AutoExcludeSubnets: map[string]struct{}{},
	}
	assert.True(t, criteria.MatchInclude(&Node{
		LastNet: "192.168.0.1",
	}))
	assert.False(t, criteria.MatchInclude(&Node{
		LastNet: "192.168.0.1",
	}))
	assert.True(t, criteria.MatchInclude(&Node{
		LastNet: "192.168.1.1",
	}))
}

func TestCriteria_ExcludeNodeID(t *testing.T) {
	included := testrand.NodeID()
	excluded := testrand.NodeID()
	criteria := Criteria{
		ExcludeNodeIDs: []storj.NodeID{excluded},
	}
	assert.False(t, criteria.MatchInclude(&Node{
		NodeURL: storj.NodeURL{
			ID:      excluded,
			Address: "localhost",
		},
	}))
	assert.True(t, criteria.MatchInclude(&Node{
		NodeURL: storj.NodeURL{
			ID:      included,
			Address: "localhost",
		},
	}))
}

func TestCriteria_NodeIDAndSubnet(t *testing.T) {
	excluded := testrand.NodeID()
	criteria := Criteria{
		ExcludeNodeIDs:     []storj.NodeID{excluded},
		AutoExcludeSubnets: map[string]struct{}{},
	}
	// due to node id criteria
	assert.False(t, criteria.MatchInclude(&Node{
		NodeURL: storj.NodeURL{
			ID:      excluded,
			Address: "192.168.0.1",
		},
	}))
	// should be included as previous one excluded and
	// not stored for subnet exclusion
	assert.True(t, criteria.MatchInclude(&Node{
		NodeURL: storj.NodeURL{
			ID:      testrand.NodeID(),
			Address: "192.168.0.2",
		},
	}))
}

func TestCriteria_Geofencing(t *testing.T) {
	eu := Criteria{
		Placement: storj.EU,
	}
	us := Criteria{
		Placement: storj.US,
	}

	cases := []struct {
		name     string
		country  location.CountryCode
		criteria Criteria
		expected bool
	}{
		{
			name:     "US matches US selector",
			country:  location.UnitedStates,
			criteria: us,
			expected: true,
		},
		{
			name:     "Germany is EU",
			country:  location.Germany,
			criteria: eu,
			expected: true,
		},
		{
			name:     "US is not eu",
			country:  location.UnitedStates,
			criteria: eu,
			expected: false,
		},
		{
			name:     "Empty country doesn't match region",
			country:  location.CountryCode(0),
			criteria: eu,
			expected: false,
		},
		{
			name:     "Empty country doesn't match country",
			country:  location.CountryCode(0),
			criteria: us,
			expected: false,
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			assert.Equal(t, c.expected, c.criteria.MatchInclude(&Node{
				CountryCode: c.country,
			}))
		})
	}
}


@@ -1,27 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

package uploadselection

import (
	"storj.io/common/storj"
	"storj.io/common/storj/location"
)

// Node defines necessary information for node-selection.
type Node struct {
	storj.NodeURL
	LastNet     string
	LastIPPort  string
	CountryCode location.CountryCode
}

// Clone returns a deep clone of the selected node.
func (node *Node) Clone() *Node {
	return &Node{
		NodeURL:     node.NodeURL,
		LastNet:     node.LastNet,
		LastIPPort:  node.LastIPPort,
		CountryCode: node.CountryCode,
	}
}
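The deleted `uploadselection.Node` (and the near-identical `overlay.SelectedNode` removed further down) are unified into `nodeselection.SelectedNode`, which carries the address directly instead of embedding `storj.NodeURL`. Its definition is not part of this diff; reconstructed from how it is used throughout this changeset, it looks roughly like:

    // Shape inferred from usage in this changeset; the real definition
    // lives in satellite/nodeselection and may differ in detail.
    type SelectedNode struct {
        ID          storj.NodeID
        Address     *pb.NodeAddress
        LastNet     string
        LastIPPort  string
        CountryCode location.CountryCode
        Tags        NodeTags // signed key/value attributes, matched by tag() placement rules
    }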


@@ -11,6 +11,7 @@ import (
     gomock "github.com/golang/mock/gomock"
     storj "storj.io/common/storj"
+    "storj.io/storj/satellite/nodeselection"
     overlay "storj.io/storj/satellite/overlay"
 )
@@ -38,10 +39,10 @@ func (m *MockOverlayForOrders) EXPECT() *MockOverlayForOrdersMockRecorder {
 }
 // CachedGetOnlineNodesForGet mocks base method.
-func (m *MockOverlayForOrders) CachedGetOnlineNodesForGet(arg0 context.Context, arg1 []storj.NodeID) (map[storj.NodeID]*overlay.SelectedNode, error) {
+func (m *MockOverlayForOrders) CachedGetOnlineNodesForGet(arg0 context.Context, arg1 []storj.NodeID) (map[storj.NodeID]*nodeselection.SelectedNode, error) {
     m.ctrl.T.Helper()
     ret := m.ctrl.Call(m, "CachedGetOnlineNodesForGet", arg0, arg1)
-    ret0, _ := ret[0].(map[storj.NodeID]*overlay.SelectedNode)
+    ret0, _ := ret[0].(map[storj.NodeID]*nodeselection.SelectedNode)
     ret1, _ := ret[1].(error)
     return ret0, ret1
 }


@@ -18,6 +18,7 @@ import (
     "storj.io/common/storj"
     "storj.io/storj/satellite/internalpb"
     "storj.io/storj/satellite/metabase"
+    "storj.io/storj/satellite/nodeselection"
     "storj.io/storj/satellite/overlay"
 )
@@ -43,7 +44,7 @@ type Config struct {
 //
 //go:generate mockgen -destination mock_test.go -package orders . OverlayForOrders
 type Overlay interface {
-    CachedGetOnlineNodesForGet(context.Context, []storj.NodeID) (map[storj.NodeID]*overlay.SelectedNode, error)
+    CachedGetOnlineNodesForGet(context.Context, []storj.NodeID) (map[storj.NodeID]*nodeselection.SelectedNode, error)
     GetOnlineNodesForAuditRepair(context.Context, []storj.NodeID) (map[storj.NodeID]*overlay.NodeReputation, error)
     Get(ctx context.Context, nodeID storj.NodeID) (*overlay.NodeDossier, error)
     IsOnline(node *overlay.NodeDossier) bool
@@ -53,10 +54,11 @@ type Overlay interface {
 //
 // architecture: Service
 type Service struct {
     log       *zap.Logger
     satellite signing.Signer
     overlay   Overlay
     orders    DB
+    placementRules overlay.PlacementRules
     encryptionKeys EncryptionKeys
@@ -69,17 +71,18 @@ type Service struct {
 // NewService creates new service for creating order limits.
 func NewService(
     log *zap.Logger, satellite signing.Signer, overlay Overlay,
-    orders DB, config Config,
+    orders DB, placementRules overlay.PlacementRules, config Config,
 ) (*Service, error) {
     if config.EncryptionKeys.Default.IsZero() {
         return nil, Error.New("encryption keys must be specified to include encrypted metadata")
     }
     return &Service{
         log:       log,
         satellite: satellite,
         overlay:   overlay,
         orders:    orders,
+        placementRules: placementRules,
         encryptionKeys: config.EncryptionKeys,
@@ -145,8 +148,9 @@ func (service *Service) CreateGetOrderLimits(ctx context.Context, bucket metabas
     }
     if segment.Placement != storj.EveryCountry {
+        filter := service.placementRules(segment.Placement)
         for id, node := range nodes {
-            if !segment.Placement.AllowedCountry(node.CountryCode) {
+            if !filter.MatchInclude(node) {
                 delete(nodes, id)
             }
         }
@@ -235,7 +239,7 @@ func getLimitByStorageNodeID(limits []*pb.AddressedOrderLimit, storageNodeID sto
 }
 // CreatePutOrderLimits creates the order limits for uploading pieces to nodes.
-func (service *Service) CreatePutOrderLimits(ctx context.Context, bucket metabase.BucketLocation, nodes []*overlay.SelectedNode, pieceExpiration time.Time, maxPieceSize int64) (_ storj.PieceID, _ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
+func (service *Service) CreatePutOrderLimits(ctx context.Context, bucket metabase.BucketLocation, nodes []*nodeselection.SelectedNode, pieceExpiration time.Time, maxPieceSize int64) (_ storj.PieceID, _ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
     defer mon.Task()(&ctx)(&err)
     signer, err := NewSignerPut(service, pieceExpiration, time.Now(), maxPieceSize, bucket)
@@ -254,7 +258,7 @@ func (service *Service) CreatePutOrderLimits(ctx context.Context, bucket metabas
 }
 // ReplacePutOrderLimits replaces order limits for uploading pieces to nodes.
-func (service *Service) ReplacePutOrderLimits(ctx context.Context, rootPieceID storj.PieceID, addressedLimits []*pb.AddressedOrderLimit, nodes []*overlay.SelectedNode, pieceNumbers []int32) (_ []*pb.AddressedOrderLimit, err error) {
+func (service *Service) ReplacePutOrderLimits(ctx context.Context, rootPieceID storj.PieceID, addressedLimits []*pb.AddressedOrderLimit, nodes []*nodeselection.SelectedNode, pieceNumbers []int32) (_ []*pb.AddressedOrderLimit, err error) {
     defer mon.Task()(&ctx)(&err)
     pieceIDDeriver := rootPieceID.Deriver()
@@ -457,7 +461,7 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, segment
 }
 // CreatePutRepairOrderLimits creates the order limits for uploading the repaired pieces of segment to newNodes.
-func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, segment metabase.Segment, getOrderLimits []*pb.AddressedOrderLimit, healthySet map[int32]struct{}, newNodes []*overlay.SelectedNode, optimalThresholdMultiplier float64, numPiecesInExcludedCountries int) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
+func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, segment metabase.Segment, getOrderLimits []*pb.AddressedOrderLimit, healthySet map[int32]struct{}, newNodes []*nodeselection.SelectedNode, optimalThresholdMultiplier float64, numPiecesInExcludedCountries int) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
     defer mon.Task()(&ctx)(&err)
     // Create the order limits for being used to upload the repaired pieces
@@ -590,7 +594,7 @@ func (service *Service) DecryptOrderMetadata(ctx context.Context, order *pb.Orde
     return key.DecryptMetadata(order.SerialNumber, order.EncryptedMetadata)
 }
-func resolveStorageNode_Selected(node *overlay.SelectedNode, resolveDNS bool) *pb.Node {
+func resolveStorageNode_Selected(node *nodeselection.SelectedNode, resolveDNS bool) *pb.Node {
     return resolveStorageNode(&pb.Node{
         Id:      node.ID,
         Address: node.Address,
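Anyone constructing the orders service now has to hand it the placement-rules lookup alongside the DB, mirroring the test below. A wiring sketch, assuming the satellite peer builds a `ConfigurablePlacementRule` during setup (the helper name is illustrative; imports elided):

    func newOrdersService(log *zap.Logger, signer signing.Signer, ov orders.Overlay, db orders.DB, config orders.Config) (*orders.Service, error) {
        placement := overlay.NewPlacementRules()
        placement.AddLegacyStaticRules() // built-in EU/EEA/US/DE/NR placements
        // placement.CreateFilters maps a placement ID to NodeFilters.
        return orders.NewService(log, signer, ov, db, placement.CreateFilters, config)
    }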


@@ -19,6 +19,7 @@ import (
     "storj.io/common/testcontext"
     "storj.io/common/testrand"
     "storj.io/storj/satellite/metabase"
+    "storj.io/storj/satellite/nodeselection"
     "storj.io/storj/satellite/orders"
     "storj.io/storj/satellite/overlay"
 )
@@ -30,10 +31,10 @@ func TestGetOrderLimits(t *testing.T) {
     bucket := metabase.BucketLocation{ProjectID: testrand.UUID(), BucketName: "bucket1"}
     pieces := metabase.Pieces{}
-    nodes := map[storj.NodeID]*overlay.SelectedNode{}
+    nodes := map[storj.NodeID]*nodeselection.SelectedNode{}
     for i := 0; i < 8; i++ {
         nodeID := testrand.NodeID()
-        nodes[nodeID] = &overlay.SelectedNode{
+        nodes[nodeID] = &nodeselection.SelectedNode{
             ID: nodeID,
             Address: &pb.NodeAddress{
                 Address: fmt.Sprintf("host%d.com", i),
@@ -55,14 +56,16 @@ func TestGetOrderLimits(t *testing.T) {
         CachedGetOnlineNodesForGet(gomock.Any(), gomock.Any()).
         Return(nodes, nil).AnyTimes()
-    service, err := orders.NewService(zaptest.NewLogger(t), k, overlayService, orders.NewNoopDB(), orders.Config{
-        EncryptionKeys: orders.EncryptionKeys{
-            Default: orders.EncryptionKey{
-                ID:  orders.EncryptionKeyID{1, 2, 3, 4, 5, 6, 7, 8},
-                Key: testrand.Key(),
-            },
-        },
-    })
+    service, err := orders.NewService(zaptest.NewLogger(t), k, overlayService, orders.NewNoopDB(),
+        overlay.NewPlacementRules().CreateFilters,
+        orders.Config{
+            EncryptionKeys: orders.EncryptionKeys{
+                Default: orders.EncryptionKey{
+                    ID:  orders.EncryptionKeyID{1, 2, 3, 4, 5, 6, 7, 8},
+                    Key: testrand.Key(),
+                },
+            },
+        })
     require.NoError(t, err)
     segment := metabase.Segment{


@@ -355,7 +355,7 @@ func BenchmarkNodeSelection(b *testing.B) {
         }
     })
-    service, err := overlay.NewService(zap.NewNop(), overlaydb, db.NodeEvents(), "", "", overlay.Config{
+    service, err := overlay.NewService(zap.NewNop(), overlaydb, db.NodeEvents(), overlay.NewPlacementRules().CreateFilters, "", "", overlay.Config{
         Node: nodeSelectionConfig,
         NodeSelectionCache: overlay.UploadSelectionCacheConfig{
             Staleness: time.Hour,


@@ -11,6 +11,7 @@ import (
     "storj.io/common/storj"
     "storj.io/common/sync2"
+    "storj.io/storj/satellite/nodeselection"
 )
 // DownloadSelectionDB implements the database for download selection cache.
@@ -18,7 +19,7 @@ import (
 // architecture: Database
 type DownloadSelectionDB interface {
     // SelectAllStorageNodesDownload returns nodes that are ready for downloading
-    SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) ([]*SelectedNode, error)
+    SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) ([]*nodeselection.SelectedNode, error)
 }
 // DownloadSelectionCacheConfig contains configuration for the selection cache.
@@ -35,15 +36,17 @@ type DownloadSelectionCache struct {
     db     DownloadSelectionDB
     config DownloadSelectionCacheConfig
     cache sync2.ReadCacheOf[*DownloadSelectionCacheState]
+    placementRules PlacementRules
 }
 // NewDownloadSelectionCache creates a new cache that keeps a list of all the storage nodes that are qualified to download data from.
-func NewDownloadSelectionCache(log *zap.Logger, db DownloadSelectionDB, config DownloadSelectionCacheConfig) (*DownloadSelectionCache, error) {
+func NewDownloadSelectionCache(log *zap.Logger, db DownloadSelectionDB, placementRules PlacementRules, config DownloadSelectionCacheConfig) (*DownloadSelectionCache, error) {
     cache := &DownloadSelectionCache{
         log:    log,
         db:     db,
-        config: config,
+        placementRules: placementRules,
+        config:         config,
     }
     return cache, cache.cache.Init(config.Staleness/2, config.Staleness, cache.read)
 }
@@ -84,11 +87,11 @@ func (cache *DownloadSelectionCache) GetNodeIPsFromPlacement(ctx context.Context
         return nil, Error.Wrap(err)
     }
-    return state.IPsFromPlacement(nodes, placement), nil
+    return state.FilteredIPs(nodes, cache.placementRules(placement)), nil
 }
 // GetNodes gets nodes by ID from the cache, and refreshes the cache if it is stale.
-func (cache *DownloadSelectionCache) GetNodes(ctx context.Context, nodes []storj.NodeID) (_ map[storj.NodeID]*SelectedNode, err error) {
+func (cache *DownloadSelectionCache) GetNodes(ctx context.Context, nodes []storj.NodeID) (_ map[storj.NodeID]*nodeselection.SelectedNode, err error) {
     defer mon.Task()(&ctx)(&err)
     state, err := cache.cache.Get(ctx, time.Now())
@@ -110,12 +113,12 @@ func (cache *DownloadSelectionCache) Size(ctx context.Context) (int, error) {
 // DownloadSelectionCacheState contains state of download selection cache.
 type DownloadSelectionCacheState struct {
     // byID returns IP based on storj.NodeID
-    byID map[storj.NodeID]*SelectedNode // TODO: optimize, avoid pointery structures for performance
+    byID map[storj.NodeID]*nodeselection.SelectedNode // TODO: optimize, avoid pointery structures for performance
 }
 // NewDownloadSelectionCacheState creates a new state from the nodes.
-func NewDownloadSelectionCacheState(nodes []*SelectedNode) *DownloadSelectionCacheState {
-    byID := map[storj.NodeID]*SelectedNode{}
+func NewDownloadSelectionCacheState(nodes []*nodeselection.SelectedNode) *DownloadSelectionCacheState {
+    byID := map[storj.NodeID]*nodeselection.SelectedNode{}
     for _, n := range nodes {
         byID[n.ID] = n
     }
@@ -140,11 +143,11 @@ func (state *DownloadSelectionCacheState) IPs(nodes []storj.NodeID) map[storj.No
     return xs
 }
-// IPsFromPlacement returns node ip:port for nodes that are in state. Results are filtered out by placement.
-func (state *DownloadSelectionCacheState) IPsFromPlacement(nodes []storj.NodeID, placement storj.PlacementConstraint) map[storj.NodeID]string {
+// FilteredIPs returns node ip:port for nodes that are in state. Results are filtered out by the node filter.
+func (state *DownloadSelectionCacheState) FilteredIPs(nodes []storj.NodeID, filter nodeselection.NodeFilters) map[storj.NodeID]string {
     xs := make(map[storj.NodeID]string, len(nodes))
     for _, nodeID := range nodes {
-        if n, exists := state.byID[nodeID]; exists && placement.AllowedCountry(n.CountryCode) {
+        if n, exists := state.byID[nodeID]; exists && filter.MatchInclude(n) {
             xs[nodeID] = n.LastIPPort
         }
     }
@@ -152,8 +155,8 @@ func (state *DownloadSelectionCacheState) IPsFromPlacement(nodes []storj.NodeID,
 }
 // Nodes returns node ip:port for nodes that are in state.
-func (state *DownloadSelectionCacheState) Nodes(nodes []storj.NodeID) map[storj.NodeID]*SelectedNode {
-    xs := make(map[storj.NodeID]*SelectedNode, len(nodes))
+func (state *DownloadSelectionCacheState) Nodes(nodes []storj.NodeID) map[storj.NodeID]*nodeselection.SelectedNode {
+    xs := make(map[storj.NodeID]*nodeselection.SelectedNode, len(nodes))
     for _, nodeID := range nodes {
         if n, exists := state.byID[nodeID]; exists {
             xs[nodeID] = n.Clone() // TODO: optimize the clones
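With the cache owning a `PlacementRules` lookup, the country check moves out of `IPsFromPlacement` and into whatever filter the placement ID maps to. A hedged sketch of the new call path (the signature of `GetNodeIPsFromPlacement` is inferred from the hunk above; the helper name is invented and imports are elided):

    func euNodeIPs(ctx context.Context, db overlay.DownloadSelectionDB, ids []storj.NodeID) (map[storj.NodeID]string, error) {
        cache, err := overlay.NewDownloadSelectionCache(zap.NewNop(), db,
            overlay.NewPlacementRules().CreateFilters, // placement ID -> NodeFilters
            overlay.DownloadSelectionCacheConfig{
                Staleness:    time.Hour,
                OnlineWindow: time.Hour,
            })
        if err != nil {
            return nil, err
        }
        // The storj.EU filter now comes from the configured rules rather than
        // a hard-coded AllowedCountry check.
        return cache.GetNodeIPsFromPlacement(ctx, ids, storj.EU)
    }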


@@ -16,6 +16,7 @@ import (
     "storj.io/common/testcontext"
     "storj.io/common/testrand"
     "storj.io/storj/satellite"
+    "storj.io/storj/satellite/nodeselection"
     "storj.io/storj/satellite/overlay"
     "storj.io/storj/satellite/satellitedb/satellitedbtest"
 )
@@ -30,6 +31,7 @@ func TestDownloadSelectionCacheState_Refresh(t *testing.T) {
     satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
         cache, err := overlay.NewDownloadSelectionCache(zap.NewNop(),
             db.OverlayCache(),
+            overlay.NewPlacementRules().CreateFilters,
             downloadSelectionCacheConfig,
         )
         require.NoError(t, err)
@@ -62,6 +64,7 @@ func TestDownloadSelectionCacheState_GetNodeIPs(t *testing.T) {
     satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
         cache, err := overlay.NewDownloadSelectionCache(zap.NewNop(),
             db.OverlayCache(),
+            overlay.NewPlacementRules().CreateFilters,
             downloadSelectionCacheConfig,
         )
         require.NoError(t, err)
@@ -87,7 +90,7 @@ func TestDownloadSelectionCacheState_IPs(t *testing.T) {
     ctx := testcontext.New(t)
     defer ctx.Cleanup()
-    node := &overlay.SelectedNode{
+    node := &nodeselection.SelectedNode{
         ID: testrand.NodeID(),
         Address: &pb.NodeAddress{
             Address: "1.0.1.1:8080",
@@ -96,7 +99,7 @@ func TestDownloadSelectionCacheState_IPs(t *testing.T) {
         LastIPPort: "1.0.1.1:8080",
     }
-    state := overlay.NewDownloadSelectionCacheState([]*overlay.SelectedNode{node})
+    state := overlay.NewDownloadSelectionCacheState([]*nodeselection.SelectedNode{node})
     require.Equal(t, state.Size(), 1)
     ips := state.IPs([]storj.NodeID{testrand.NodeID(), node.ID})
@@ -113,6 +116,7 @@ func TestDownloadSelectionCache_GetNodes(t *testing.T) {
         // create new cache and select nodes
         cache, err := overlay.NewDownloadSelectionCache(zap.NewNop(),
             db.OverlayCache(),
+            overlay.NewPlacementRules().CreateFilters,
             overlay.DownloadSelectionCacheConfig{
                 Staleness:    time.Hour,
                 OnlineWindow: time.Hour,


@@ -0,0 +1,151 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package overlay

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/jtolio/mito"
	"github.com/spf13/pflag"
	"github.com/zeebo/errs"
	"golang.org/x/exp/slices"

	"storj.io/common/storj"
	"storj.io/common/storj/location"
	"storj.io/storj/satellite/nodeselection"
)

// PlacementRules can create a filter based on the placement identifier.
type PlacementRules func(constraint storj.PlacementConstraint) (filter nodeselection.NodeFilters)

// ConfigurablePlacementRule can include the placement definitions for each known identifier.
type ConfigurablePlacementRule struct {
	placements map[storj.PlacementConstraint]nodeselection.NodeFilters
}

// String implements pflag.Value.
func (d *ConfigurablePlacementRule) String() string {
	parts := []string{}
	for id, filter := range d.placements {
		// we can hide the internal rules...
		if id > 9 {
			// TODO: we need proper String implementation for all the used filters
			parts = append(parts, fmt.Sprintf("%d:%s", id, filter))
		}
	}
	return strings.Join(parts, ";")
}

// Set implements pflag.Value.
func (d *ConfigurablePlacementRule) Set(s string) error {
	if d.placements == nil {
		d.placements = make(map[storj.PlacementConstraint]nodeselection.NodeFilters)
	}
	d.AddLegacyStaticRules()
	return d.AddPlacementFromString(s)
}

// Type implements pflag.Value.
func (d *ConfigurablePlacementRule) Type() string {
	return "placement-rule"
}

var _ pflag.Value = &ConfigurablePlacementRule{}

// NewPlacementRules creates a fully initialized NewPlacementRules.
func NewPlacementRules() *ConfigurablePlacementRule {
	return &ConfigurablePlacementRule{
		placements: map[storj.PlacementConstraint]nodeselection.NodeFilters{},
	}
}

// AddLegacyStaticRules initializes all the placement rules defined earlier in static golang code.
func (d *ConfigurablePlacementRule) AddLegacyStaticRules() {
	d.placements[storj.EEA] = nodeselection.NodeFilters{nodeselection.NewCountryFilter(nodeselection.EeaCountries)}
	d.placements[storj.EU] = nodeselection.NodeFilters{nodeselection.NewCountryFilter(nodeselection.EuCountries)}
	d.placements[storj.US] = nodeselection.NodeFilters{nodeselection.NewCountryFilter(location.NewSet(location.UnitedStates))}
	d.placements[storj.DE] = nodeselection.NodeFilters{nodeselection.NewCountryFilter(location.NewSet(location.Germany))}
	d.placements[storj.NR] = nodeselection.NodeFilters{nodeselection.NewCountryFilter(location.NewFullSet().Without(location.Russia, location.Belarus, location.None))}
}

// AddPlacementRule registers a new placement.
func (d *ConfigurablePlacementRule) AddPlacementRule(id storj.PlacementConstraint, filters nodeselection.NodeFilters) {
	d.placements[id] = filters
}

// AddPlacementFromString parses a placement definition from its string representation: id:definition;id:definition;...
func (d *ConfigurablePlacementRule) AddPlacementFromString(definitions string) error {
	env := map[any]any{
		"country": func(countries ...string) (nodeselection.NodeFilters, error) {
			var set location.Set
			for _, country := range countries {
				code := location.ToCountryCode(country)
				if code == location.None {
					return nil, errs.New("invalid country code %q", code)
				}
				set.Include(code)
			}
			return nodeselection.NodeFilters{nodeselection.NewCountryFilter(set)}, nil
		},
		"all": func(filters ...nodeselection.NodeFilters) (nodeselection.NodeFilters, error) {
			res := nodeselection.NodeFilters{}
			for _, filter := range filters {
				res = append(res, filter...)
			}
			return res, nil
		},
		"tag": func(nodeIDstr string, key string, value any) (nodeselection.NodeFilters, error) {
			nodeID, err := storj.NodeIDFromString(nodeIDstr)
			if err != nil {
				return nil, err
			}
			var rawValue []byte
			switch v := value.(type) {
			case string:
				rawValue = []byte(v)
			case []byte:
				rawValue = v
			default:
				return nil, errs.New("3rd argument of tag() should be string or []byte")
			}
			res := nodeselection.NodeFilters{
				nodeselection.NewTagFilter(nodeID, key, rawValue),
			}
			return res, nil
		},
	}
	for _, definition := range strings.Split(definitions, ";") {
		definition = strings.TrimSpace(definition)
		if definition == "" {
			continue
		}
		idDef := strings.SplitN(definition, ":", 2)
		val, err := mito.Eval(idDef[1], env)
		if err != nil {
			return errs.Wrap(err)
		}
		id, err := strconv.Atoi(idDef[0])
		if err != nil {
			return errs.Wrap(err)
		}
		d.placements[storj.PlacementConstraint(id)] = val.(nodeselection.NodeFilters)
	}
	return nil
}

// CreateFilters implements PlacementCondition.
func (d *ConfigurablePlacementRule) CreateFilters(constraint storj.PlacementConstraint) (filter nodeselection.NodeFilters) {
	if constraint == storj.EveryCountry {
		return nodeselection.NodeFilters{}
	}
	if filters, found := d.placements[constraint]; found {
		return slices.Clone(filters)
	}
	return nodeselection.NodeFilters{
		nodeselection.ExcludeAllFilter{},
	}
}
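The rule language is a semicolon-separated list of `id:definition` pairs, where a definition composes `country(...)`, `tag(...)`, and `all(...)`; unknown placement IDs resolve to `ExcludeAllFilter`, i.e. they match nothing. A configuration sketch (placement IDs 10 and 11 and the "datacenter" tag are made up for illustration; the signer ID is the one used in the tests below):

    func configureRules() (overlay.PlacementRules, error) {
        rules := overlay.NewPlacementRules()
        rules.AddLegacyStaticRules() // keep the built-in EU/EEA/US/DE/NR placements

        // 10: UK nodes only; 11: German nodes carrying a signed "datacenter" tag.
        err := rules.AddPlacementFromString(
            `10:country("GB");11:all(country("DE"),tag("12whfK1EDvHJtajBiAUeajQLYcWqxcQmdYQU5zX5cCf6bAxfgu4","datacenter","true"))`)
        if err != nil {
            return nil, err
        }
        return rules.CreateFilters, nil
    }

Since `ConfigurablePlacementRule` implements `pflag.Value`, the same string can also be supplied as a flag value and parsed via `Set`.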


@@ -0,0 +1,147 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package overlay

import (
	"testing"

	"github.com/stretchr/testify/require"

	"storj.io/common/storj"
	"storj.io/common/storj/location"
	"storj.io/storj/satellite/nodeselection"
)

func TestPlacementFromString(t *testing.T) {
	signer, err := storj.NodeIDFromString("12whfK1EDvHJtajBiAUeajQLYcWqxcQmdYQU5zX5cCf6bAxfgu4")
	require.NoError(t, err)

	t.Run("invalid country-code", func(t *testing.T) {
		p := NewPlacementRules()
		err := p.AddPlacementFromString(`1:country("ZZZZ")`)
		require.Error(t, err)
	})

	t.Run("single country", func(t *testing.T) {
		p := NewPlacementRules()
		err := p.AddPlacementFromString(`11:country("GB")`)
		require.NoError(t, err)
		filters := p.placements[storj.PlacementConstraint(11)]
		require.NotNil(t, filters)
		require.True(t, filters.MatchInclude(&nodeselection.SelectedNode{
			CountryCode: location.UnitedKingdom,
		}))
		require.False(t, filters.MatchInclude(&nodeselection.SelectedNode{
			CountryCode: location.Germany,
		}))
	})

	t.Run("tag rule", func(t *testing.T) {
		p := NewPlacementRules()
		err := p.AddPlacementFromString(`11:tag("12whfK1EDvHJtajBiAUeajQLYcWqxcQmdYQU5zX5cCf6bAxfgu4","foo","bar")`)
		require.NoError(t, err)
		filters := p.placements[storj.PlacementConstraint(11)]
		require.NotNil(t, filters)
		require.True(t, filters.MatchInclude(&nodeselection.SelectedNode{
			Tags: nodeselection.NodeTags{
				{
					Signer: signer,
					Name:   "foo",
					Value:  []byte("bar"),
				},
			},
		}))
		require.False(t, filters.MatchInclude(&nodeselection.SelectedNode{
			CountryCode: location.Germany,
		}))
	})

	t.Run("all rules", func(t *testing.T) {
		p := NewPlacementRules()
		err := p.AddPlacementFromString(`11:all(country("GB"),tag("12whfK1EDvHJtajBiAUeajQLYcWqxcQmdYQU5zX5cCf6bAxfgu4","foo","bar"))`)
		require.NoError(t, err)
		filters := p.placements[storj.PlacementConstraint(11)]
		require.NotNil(t, filters)
		require.True(t, filters.MatchInclude(&nodeselection.SelectedNode{
			CountryCode: location.UnitedKingdom,
			Tags: nodeselection.NodeTags{
				{
					Signer: signer,
					Name:   "foo",
					Value:  []byte("bar"),
				},
			},
		}))
		require.False(t, filters.MatchInclude(&nodeselection.SelectedNode{
			CountryCode: location.UnitedKingdom,
		}))
		require.False(t, filters.MatchInclude(&nodeselection.SelectedNode{
			CountryCode: location.Germany,
			Tags: nodeselection.NodeTags{
				{
					Signer: signer,
					Name:   "foo",
					Value:  []byte("bar"),
				},
			},
		}))
	})

	t.Run("multi rule", func(t *testing.T) {
		p := NewPlacementRules()
		err := p.AddPlacementFromString(`11:country("GB");12:country("DE")`)
		require.NoError(t, err)

		filters := p.placements[storj.PlacementConstraint(11)]
		require.NotNil(t, filters)
		require.True(t, filters.MatchInclude(&nodeselection.SelectedNode{
			CountryCode: location.UnitedKingdom,
		}))
		require.False(t, filters.MatchInclude(&nodeselection.SelectedNode{
			CountryCode: location.Germany,
		}))

		filters = p.placements[storj.PlacementConstraint(12)]
		require.NotNil(t, filters)
		require.False(t, filters.MatchInclude(&nodeselection.SelectedNode{
			CountryCode: location.UnitedKingdom,
		}))
		require.True(t, filters.MatchInclude(&nodeselection.SelectedNode{
			CountryCode: location.Germany,
		}))
	})

	t.Run("legacy geofencing rules", func(t *testing.T) {
		p := NewPlacementRules()
		p.AddLegacyStaticRules()

		t.Run("nr", func(t *testing.T) {
			nr := p.placements[storj.NR]
			require.True(t, nr.MatchInclude(&nodeselection.SelectedNode{
				CountryCode: location.UnitedKingdom,
			}))
			require.False(t, nr.MatchInclude(&nodeselection.SelectedNode{
				CountryCode: location.Russia,
			}))
			require.False(t, nr.MatchInclude(&nodeselection.SelectedNode{
				CountryCode: 0,
			}))
		})

		t.Run("us", func(t *testing.T) {
			us := p.placements[storj.US]
			require.True(t, us.MatchInclude(&nodeselection.SelectedNode{
				CountryCode: location.UnitedStates,
			}))
			require.False(t, us.MatchInclude(&nodeselection.SelectedNode{
				CountryCode: location.Germany,
			}))
			require.False(t, us.MatchInclude(&nodeselection.SelectedNode{
				CountryCode: 0,
			}))
		})
	})
}


@@ -26,6 +26,7 @@ import (
     "storj.io/common/testcontext"
     "storj.io/storj/private/testplanet"
     "storj.io/storj/satellite"
+    "storj.io/storj/satellite/nodeselection"
     "storj.io/storj/satellite/overlay"
     "storj.io/storj/satellite/reputation"
 )
@@ -147,10 +148,10 @@ func TestOnlineOffline(t *testing.T) {
         require.Empty(t, offline)
         require.Len(t, online, 2)
-        require.False(t, slices.ContainsFunc(online, func(node overlay.SelectedNode) bool {
+        require.False(t, slices.ContainsFunc(online, func(node nodeselection.SelectedNode) bool {
             return node.ID == unreliableNodeID
         }))
-        require.False(t, slices.ContainsFunc(offline, func(node overlay.SelectedNode) bool {
+        require.False(t, slices.ContainsFunc(offline, func(node nodeselection.SelectedNode) bool {
             return node.ID == unreliableNodeID
         }))
     })
@@ -192,7 +193,7 @@ func TestEnsureMinimumRequested(t *testing.T) {
         reputable := map[storj.NodeID]bool{}
-        countReputable := func(selected []*overlay.SelectedNode) (count int) {
+        countReputable := func(selected []*nodeselection.SelectedNode) (count int) {
             for _, n := range selected {
                 if reputable[n.ID] {
                     count++


@ -21,6 +21,7 @@ import (
"storj.io/storj/satellite/geoip" "storj.io/storj/satellite/geoip"
"storj.io/storj/satellite/metabase" "storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/nodeevents" "storj.io/storj/satellite/nodeevents"
"storj.io/storj/satellite/nodeselection"
) )
// ErrEmptyNode is returned when the nodeID is empty. // ErrEmptyNode is returned when the nodeID is empty.
@ -53,20 +54,18 @@ type DB interface {
 	// current reputation status.
 	GetOnlineNodesForAuditRepair(ctx context.Context, nodeIDs []storj.NodeID, onlineWindow time.Duration) (map[storj.NodeID]*NodeReputation, error)
 	// SelectStorageNodes looks up nodes based on criteria
-	SelectStorageNodes(ctx context.Context, totalNeededNodes, newNodeCount int, criteria *NodeCriteria) ([]*SelectedNode, error)
+	SelectStorageNodes(ctx context.Context, totalNeededNodes, newNodeCount int, criteria *NodeCriteria) ([]*nodeselection.SelectedNode, error)
 	// SelectAllStorageNodesUpload returns all nodes that qualify to store data, organized as reputable nodes and new nodes
-	SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*SelectedNode, err error)
+	SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*nodeselection.SelectedNode, err error)
 	// SelectAllStorageNodesDownload returns nodes that are ready for downloading
-	SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) ([]*SelectedNode, error)
+	SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) ([]*nodeselection.SelectedNode, error)
 	// Get looks up the node by nodeID
 	Get(ctx context.Context, nodeID storj.NodeID) (*NodeDossier, error)
-	// KnownReliableInExcludedCountries filters healthy nodes that are in excluded countries.
-	KnownReliableInExcludedCountries(context.Context, *NodeCriteria, storj.NodeIDList) (storj.NodeIDList, error)
 	// KnownReliable filters a set of nodes to reliable (online and qualified) nodes.
-	KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []SelectedNode, offline []SelectedNode, err error)
+	KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error)
-	// Reliable returns all nodes that are reliable
-	Reliable(context.Context, *NodeCriteria) (storj.NodeIDList, error)
+	// Reliable returns all nodes that are reliable (separated by whether they are currently online or offline).
+	Reliable(ctx context.Context, onlineWindow, asOfSystemInterval time.Duration) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error)
 	// UpdateReputation updates the DB columns for all reputation fields in ReputationStatus.
 	UpdateReputation(ctx context.Context, id storj.NodeID, request ReputationUpdate) error
 	// UpdateNodeInfo updates node dossier with info requested from the node itself like node type, email, wallet, capacity, and version.

@@ -131,9 +130,15 @@ type DB interface {
 	OneTimeFixLastNets(ctx context.Context) error
 	// IterateAllContactedNodes will call cb on all known nodes (used in restore trash contexts).
-	IterateAllContactedNodes(context.Context, func(context.Context, *SelectedNode) error) error
+	IterateAllContactedNodes(context.Context, func(context.Context, *nodeselection.SelectedNode) error) error
 	// IterateAllNodeDossiers will call cb on all known nodes (used for invoice generation).
 	IterateAllNodeDossiers(context.Context, func(context.Context, *NodeDossier) error) error
+	// UpdateNodeTags inserts (or refreshes) node tags.
+	UpdateNodeTags(ctx context.Context, tags nodeselection.NodeTags) error
+	// GetNodeTags returns all tags for a specific node.
+	GetNodeTags(ctx context.Context, id storj.NodeID) (nodeselection.NodeTags, error)
 }
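Taken together, the DB interface now hands back full node records instead of bare ID lists, and country exclusion is gone from the reliability queries. A minimal hypothetical caller, not part of this diff (function name and printing invented; only the signatures above come from the change):

package example

import (
	"context"
	"fmt"
	"time"

	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/overlay"
)

// listReliable is a hypothetical caller sketch. It shows the reworked
// contract: Reliable no longer takes *NodeCriteria and no longer filters by
// country; it returns full SelectedNode values split into online and offline.
func listReliable(ctx context.Context, db overlay.DB) ([]nodeselection.SelectedNode, error) {
	online, offline, err := db.Reliable(ctx, time.Hour, 0)
	if err != nil {
		return nil, err
	}
	fmt.Printf("reliable: %d online, %d offline\n", len(online), len(offline))
	return online, nil
}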
 // DisqualificationReason is disqualification reason enum type.

@@ -192,7 +197,6 @@ type NodeCriteria struct {
 	MinimumVersion     string // semver or empty
 	OnlineWindow       time.Duration
 	AsOfSystemInterval time.Duration // only used for CRDB queries
-	ExcludedCountries  []string
 }
 // ReputationStatus indicates current reputation status for a node.

@@ -273,15 +277,6 @@ type NodeLastContact struct {
 	LastContactFailure time.Time
 }

-// SelectedNode is used as a result for creating orders limits.
-type SelectedNode struct {
-	ID          storj.NodeID
-	Address     *pb.NodeAddress
-	LastNet     string
-	LastIPPort  string
-	CountryCode location.CountryCode
-}
 // NodeReputation is used as a result for creating orders limits for audits.
 type NodeReputation struct {
 	ID storj.NodeID

@@ -291,18 +286,6 @@ type NodeReputation struct {
 	Reputation ReputationStatus
 }

-// Clone returns a deep clone of the selected node.
-func (node *SelectedNode) Clone() *SelectedNode {
-	copy := pb.CopyNode(&pb.Node{Id: node.ID, Address: node.Address})
-	return &SelectedNode{
-		ID:          copy.Id,
-		Address:     copy.Address,
-		LastNet:     node.LastNet,
-		LastIPPort:  node.LastIPPort,
-		CountryCode: node.CountryCode,
-	}
-}

 // Service is used to store and handle node information.
 //
 // architecture: Service

@@ -318,13 +301,14 @@ type Service struct {
 	UploadSelectionCache   *UploadSelectionCache
 	DownloadSelectionCache *DownloadSelectionCache
 	LastNetFunc            LastNetFunc
+	placementRules         PlacementRules
 }
 // LastNetFunc is the type of a function that will be used to derive a network from an ip and port.
 type LastNetFunc func(config NodeSelectionConfig, ip net.IP, port string) (string, error)

 // NewService returns a new Service.
-func NewService(log *zap.Logger, db DB, nodeEvents nodeevents.DB, satelliteAddr, satelliteName string, config Config) (*Service, error) {
+func NewService(log *zap.Logger, db DB, nodeEvents nodeevents.DB, placementRules PlacementRules, satelliteAddr, satelliteName string, config Config) (*Service, error) {
 	err := config.Node.AsOfSystemTime.isValid()
 	if err != nil {
 		return nil, errs.Wrap(err)

@@ -338,17 +322,34 @@ func NewService(log *zap.Logger, db DB, nodeEvents nodeevents.DB, satelliteAddr,
 		}
 	}

+	defaultSelection := nodeselection.NodeFilters{}
+	if len(config.Node.UploadExcludedCountryCodes) > 0 {
+		set := location.NewFullSet()
+		for _, country := range config.Node.UploadExcludedCountryCodes {
+			countryCode := location.ToCountryCode(country)
+			if countryCode == location.None {
+				return nil, Error.New("invalid country %q", country)
+			}
+			set.Remove(countryCode)
+		}
+		defaultSelection = defaultSelection.WithCountryFilter(set)
+	}

 	uploadSelectionCache, err := NewUploadSelectionCache(log, db,
 		config.NodeSelectionCache.Staleness, config.Node,
+		defaultSelection, placementRules,
 	)
 	if err != nil {
 		return nil, errs.Wrap(err)
 	}

-	downloadSelectionCache, err := NewDownloadSelectionCache(log, db, DownloadSelectionCacheConfig{
-		Staleness:      config.NodeSelectionCache.Staleness,
-		OnlineWindow:   config.Node.OnlineWindow,
-		AsOfSystemTime: config.Node.AsOfSystemTime,
-	})
+	downloadSelectionCache, err := NewDownloadSelectionCache(log, db,
+		placementRules,
+		DownloadSelectionCacheConfig{
+			Staleness:      config.NodeSelectionCache.Staleness,
+			OnlineWindow:   config.Node.OnlineWindow,
+			AsOfSystemTime: config.Node.AsOfSystemTime,
+		})
 	if err != nil {
 		return nil, errs.Wrap(err)
 	}

@@ -366,6 +367,8 @@ func NewService(log *zap.Logger, db DB, nodeEvents nodeevents.DB, satelliteAddr,
 		UploadSelectionCache:   uploadSelectionCache,
 		DownloadSelectionCache: downloadSelectionCache,
 		LastNetFunc:            MaskOffLastNet,
+		placementRules:         placementRules,
 	}, nil
 }
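The new defaultSelection block replaces the old UploadExcludedCountryCodes handling that used to live inside the selection request. The same logic as a standalone sketch (helper name invented; every type and call here appears in this diff):

package example

import (
	"fmt"

	"storj.io/common/storj/location"
	"storj.io/storj/satellite/nodeselection"
)

// countryExclusionFilter is a hypothetical helper mirroring the block added to
// NewService: start from the full country set, remove every excluded code, and
// wrap what remains in a country filter.
func countryExclusionFilter(excluded []string) (nodeselection.NodeFilters, error) {
	filters := nodeselection.NodeFilters{}
	if len(excluded) == 0 {
		return filters, nil
	}
	set := location.NewFullSet()
	for _, country := range excluded {
		code := location.ToCountryCode(country)
		if code == location.None {
			return nil, fmt.Errorf("invalid country %q", country)
		}
		set.Remove(code)
	}
	return filters.WithCountryFilter(set), nil
}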
@@ -392,7 +395,7 @@ func (service *Service) Get(ctx context.Context, nodeID storj.NodeID) (_ *NodeDo
 }

 // CachedGetOnlineNodesForGet returns a map of nodes from the download selection cache from the suppliedIDs.
-func (service *Service) CachedGetOnlineNodesForGet(ctx context.Context, nodeIDs []storj.NodeID) (_ map[storj.NodeID]*SelectedNode, err error) {
+func (service *Service) CachedGetOnlineNodesForGet(ctx context.Context, nodeIDs []storj.NodeID) (_ map[storj.NodeID]*nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)
 	return service.DownloadSelectionCache.GetNodes(ctx, nodeIDs)
 }
@@ -415,45 +418,8 @@ func (service *Service) IsOnline(node *NodeDossier) bool {
 	return time.Since(node.Reputation.LastContactSuccess) < service.config.Node.OnlineWindow
 }

-// GetNodesNetworkInOrder returns the /24 subnet for each storage node, in order. If a
-// requested node is not in the database, an empty string will be returned corresponding
-// to that node's last_net.
-func (service *Service) GetNodesNetworkInOrder(ctx context.Context, nodeIDs []storj.NodeID) (lastNets []string, err error) {
-	defer mon.Task()(&ctx)(nil)
-
-	nodes, err := service.DownloadSelectionCache.GetNodes(ctx, nodeIDs)
-	if err != nil {
-		return nil, err
-	}
-	lastNets = make([]string, len(nodeIDs))
-	for i, nodeID := range nodeIDs {
-		if selectedNode, ok := nodes[nodeID]; ok {
-			lastNets[i] = selectedNode.LastNet
-		}
-	}
-	return lastNets, nil
-}
-
-// GetNodesOutOfPlacement checks if nodes from nodeIDs list are in allowed country according to specified geo placement
-// and returns list of node ids which are not.
-func (service *Service) GetNodesOutOfPlacement(ctx context.Context, nodeIDs []storj.NodeID, placement storj.PlacementConstraint) (offNodes []storj.NodeID, err error) {
-	defer mon.Task()(&ctx)(nil)
-
-	nodes, err := service.DownloadSelectionCache.GetNodes(ctx, nodeIDs)
-	if err != nil {
-		return nil, err
-	}
-	offNodes = make([]storj.NodeID, 0, len(nodeIDs))
-	for _, nodeID := range nodeIDs {
-		if selectedNode, ok := nodes[nodeID]; ok && !placement.AllowedCountry(selectedNode.CountryCode) {
-			offNodes = append(offNodes, selectedNode.ID)
-		}
-	}
-	return offNodes, nil
-}

 // FindStorageNodesForGracefulExit searches the overlay network for nodes that meet the provided requirements for graceful-exit requests.
-func (service *Service) FindStorageNodesForGracefulExit(ctx context.Context, req FindStorageNodesRequest) (_ []*SelectedNode, err error) {
+func (service *Service) FindStorageNodesForGracefulExit(ctx context.Context, req FindStorageNodesRequest) (_ []*nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)
 	return service.UploadSelectionCache.GetNodes(ctx, req)
 }
@@ -462,7 +428,7 @@ func (service *Service) FindStorageNodesForGracefulExit(ctx context.Context, req
 //
 // When enabled it uses the cache to select nodes.
 // When the node selection from the cache fails, it falls back to the old implementation.
-func (service *Service) FindStorageNodesForUpload(ctx context.Context, req FindStorageNodesRequest) (_ []*SelectedNode, err error) {
+func (service *Service) FindStorageNodesForUpload(ctx context.Context, req FindStorageNodesRequest) (_ []*nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	if service.config.Node.AsOfSystemTime.Enabled && service.config.Node.AsOfSystemTime.DefaultInterval < 0 {
 		req.AsOfSystemInterval = service.config.Node.AsOfSystemTime.DefaultInterval

@@ -498,7 +464,7 @@ func (service *Service) FindStorageNodesForUpload(ctx context.Context, req FindS
 // FindStorageNodesWithPreferences searches the overlay network for nodes that meet the provided criteria.
 //
 // This does not use a cache.
-func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req FindStorageNodesRequest, preferences *NodeSelectionConfig) (nodes []*SelectedNode, err error) {
+func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req FindStorageNodesRequest, preferences *NodeSelectionConfig) (nodes []*nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)
 	// TODO: add sanity limits to requested node count
 	// TODO: add sanity limits to excluded nodes
@@ -572,34 +538,20 @@ func (service *Service) InsertOfflineNodeEvents(ctx context.Context, cooldown ti
 	return count, err
 }

-// KnownReliableInExcludedCountries filters healthy nodes that are in excluded countries.
-func (service *Service) KnownReliableInExcludedCountries(ctx context.Context, nodeIds storj.NodeIDList) (reliableInExcluded storj.NodeIDList, err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	criteria := &NodeCriteria{
-		OnlineWindow:      service.config.Node.OnlineWindow,
-		ExcludedCountries: service.config.RepairExcludedCountryCodes,
-	}
-	return service.db.KnownReliableInExcludedCountries(ctx, criteria, nodeIds)
-}

 // KnownReliable filters a set of nodes to reliable (online and qualified) nodes.
-func (service *Service) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList) (onlineNodes []SelectedNode, offlineNodes []SelectedNode, err error) {
+func (service *Service) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList) (onlineNodes []nodeselection.SelectedNode, offlineNodes []nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	// TODO add as of system time
 	return service.db.KnownReliable(ctx, nodeIDs, service.config.Node.OnlineWindow, 0)
 }

-// Reliable filters a set of nodes that are reliable, independent of new.
-func (service *Service) Reliable(ctx context.Context) (nodes storj.NodeIDList, err error) {
+// Reliable returns all nodes that are reliable (separated by whether they are currently online or offline).
+func (service *Service) Reliable(ctx context.Context) (online []nodeselection.SelectedNode, offline []nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

-	criteria := &NodeCriteria{
-		OnlineWindow: service.config.Node.OnlineWindow,
-	}
-	criteria.ExcludedCountries = service.config.RepairExcludedCountryCodes
-	return service.db.Reliable(ctx, criteria)
+	// TODO add as of system time.
+	return service.db.Reliable(ctx, service.config.Node.OnlineWindow, 0)
 }
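At the service layer the shape change is the same as in the DB interface. A hypothetical consumer of the new return values (function name and printing invented; the two method signatures are taken from the diff):

package example

import (
	"context"
	"fmt"

	"storj.io/common/storj"
	"storj.io/storj/satellite/overlay"
)

// auditReliability is a hypothetical sketch: both KnownReliable and Reliable
// now return online and offline SelectedNode slices instead of bare node ID
// lists, and neither excludes nodes by country anymore.
func auditReliability(ctx context.Context, service *overlay.Service, ids storj.NodeIDList) error {
	online, offline, err := service.KnownReliable(ctx, ids)
	if err != nil {
		return err
	}
	fmt.Printf("of %d requested: %d online, %d offline\n", len(ids), len(online), len(offline))

	allOnline, allOffline, err := service.Reliable(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("all reliable: %d online, %d offline\n", len(allOnline), len(allOffline))
	return nil
}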
 // UpdateReputation updates the DB columns for any of the reputation fields.

@@ -782,28 +734,6 @@ func (service *Service) GetMissingPieces(ctx context.Context, pieces metabase.Pi
 	return maps.Values(missingPiecesMap), nil
 }

-// GetReliablePiecesInExcludedCountries returns the list of pieces held by nodes located in excluded countries.
-func (service *Service) GetReliablePiecesInExcludedCountries(ctx context.Context, pieces metabase.Pieces) (piecesInExcluded []uint16, err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	var nodeIDs storj.NodeIDList
-	for _, p := range pieces {
-		nodeIDs = append(nodeIDs, p.StorageNode)
-	}
-	inExcluded, err := service.KnownReliableInExcludedCountries(ctx, nodeIDs)
-	if err != nil {
-		return nil, Error.New("error getting nodes %s", err)
-	}
-
-	for _, p := range pieces {
-		for _, nodeID := range inExcluded {
-			if nodeID == p.StorageNode {
-				piecesInExcluded = append(piecesInExcluded, p.Number)
-			}
-		}
-	}
-	return piecesInExcluded, nil
-}
 // DQNodesLastSeenBefore disqualifies nodes who have not been contacted since the cutoff time.
 func (service *Service) DQNodesLastSeenBefore(ctx context.Context, cutoff time.Time, limit int) (count int, err error) {
 	defer mon.Task()(&ctx)(&err)

@@ -840,7 +770,7 @@ func (service *Service) DisqualifyNode(ctx context.Context, nodeID storj.NodeID,
 }

 // SelectAllStorageNodesDownload returns nodes that are ready for downloading.
-func (service *Service) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) (_ []*SelectedNode, err error) {
+func (service *Service) SelectAllStorageNodesDownload(ctx context.Context, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) (_ []*nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)
 	return service.db.SelectAllStorageNodesDownload(ctx, onlineWindow, asOf)
 }

@@ -851,6 +781,16 @@ func (service *Service) ResolveIPAndNetwork(ctx context.Context, target string)
 	return ResolveIPAndNetwork(ctx, target, service.config.Node, service.LastNetFunc)
 }
+// UpdateNodeTags persists all new and old node tags.
+func (service *Service) UpdateNodeTags(ctx context.Context, tags []nodeselection.NodeTag) error {
+	return service.db.UpdateNodeTags(ctx, tags)
+}
+
+// GetNodeTags returns the node tags of a node.
+func (service *Service) GetNodeTags(ctx context.Context, id storj.NodeID) (nodeselection.NodeTags, error) {
+	return service.db.GetNodeTags(ctx, id)
+}
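These pass-through bodies only typecheck if nodeselection.NodeTags has []NodeTag as its underlying type, so that inference seems safe to lean on. A hypothetical round-trip exercising both new accessors (function name invented):

package example

import (
	"context"

	"storj.io/common/storj"
	"storj.io/storj/satellite/overlay"
)

// refreshTags is a hypothetical sketch, not part of this diff: it reads a
// node's tags and persists them again. It relies on nodeselection.NodeTags
// being a slice of NodeTag, which the pass-through method bodies above imply.
func refreshTags(ctx context.Context, service *overlay.Service, id storj.NodeID) error {
	tags, err := service.GetNodeTags(ctx, id)
	if err != nil {
		return err
	}
	return service.UpdateNodeTags(ctx, tags)
}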
 // ResolveIPAndNetwork resolves the target address and determines its IP and appropriate last_net, as indicated.
 func ResolveIPAndNetwork(ctx context.Context, target string, config NodeSelectionConfig, lastNetFunc LastNetFunc) (ip net.IP, port, network string, err error) {
 	defer mon.Task()(&ctx)(&err)


@@ -18,12 +18,12 @@ import (
 	"storj.io/common/memory"
 	"storj.io/common/pb"
 	"storj.io/common/storj"
-	"storj.io/common/storj/location"
 	"storj.io/common/testcontext"
 	"storj.io/common/testrand"
 	"storj.io/storj/private/testplanet"
 	"storj.io/storj/satellite"
 	"storj.io/storj/satellite/nodeevents"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/overlay"
 	"storj.io/storj/satellite/reputation"
 	"storj.io/storj/satellite/satellitedb/satellitedbtest"
@@ -74,7 +74,7 @@ func testCache(ctx *testcontext.Context, t *testing.T, store overlay.DB, nodeEve
 	serviceCtx, serviceCancel := context.WithCancel(ctx)
 	defer serviceCancel()
-	service, err := overlay.NewService(zaptest.NewLogger(t), store, nodeEvents, "", "", serviceConfig)
+	service, err := overlay.NewService(zaptest.NewLogger(t), store, nodeEvents, overlay.NewPlacementRules().CreateFilters, "", "", serviceConfig)
 	require.NoError(t, err)
 	ctx.Go(func() error { return service.Run(serviceCtx) })
 	defer ctx.Check(service.Close)
@@ -205,7 +205,7 @@ func TestRandomizedSelection(t *testing.T) {
 	// select numNodesToSelect nodes selectIterations times
 	for i := 0; i < selectIterations; i++ {
-		var nodes []*overlay.SelectedNode
+		var nodes []*nodeselection.SelectedNode
 		var err error

 		if i%2 == 0 {
@@ -326,7 +326,7 @@ func TestRandomizedSelectionCache(t *testing.T) {
 	// select numNodesToSelect nodes selectIterations times
 	for i := 0; i < selectIterations; i++ {
-		var nodes []*overlay.SelectedNode
+		var nodes []*nodeselection.SelectedNode
 		var err error
 		req := overlay.FindStorageNodesRequest{
 			RequestedCount: numNodesToSelect,
@@ -670,7 +670,7 @@ func TestSuspendedSelection(t *testing.T) {
 		}
 	}

-	var nodes []*overlay.SelectedNode
+	var nodes []*nodeselection.SelectedNode
 	var err error

 	numNodesToSelect := 10
@@ -816,50 +816,6 @@ func TestVetAndUnvetNode(t *testing.T) {
 	})
 }

-func TestReliable(t *testing.T) {
-	testplanet.Run(t, testplanet.Config{
-		SatelliteCount: 1, StorageNodeCount: 2, UplinkCount: 0,
-	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
-		service := planet.Satellites[0].Overlay.Service
-		node := planet.StorageNodes[0]
-
-		nodes, err := service.Reliable(ctx)
-		require.NoError(t, err)
-		require.Len(t, nodes, 2)
-
-		err = planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), "FR")
-		require.NoError(t, err)
-
-		// first node should be excluded from Reliable result because of country code
-		nodes, err = service.Reliable(ctx)
-		require.NoError(t, err)
-		require.Len(t, nodes, 1)
-		require.NotEqual(t, node.ID(), nodes[0])
-	})
-}
-
-func TestKnownReliableInExcludedCountries(t *testing.T) {
-	testplanet.Run(t, testplanet.Config{
-		SatelliteCount: 1, StorageNodeCount: 2, UplinkCount: 0,
-	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
-		service := planet.Satellites[0].Overlay.Service
-		node := planet.StorageNodes[0]
-
-		nodes, err := service.Reliable(ctx)
-		require.NoError(t, err)
-		require.Len(t, nodes, 2)
-
-		err = planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), "FR")
-		require.NoError(t, err)
-
-		// first node should be excluded from Reliable result because of country code
-		nodes, err = service.KnownReliableInExcludedCountries(ctx, nodes)
-		require.NoError(t, err)
-		require.Len(t, nodes, 1)
-		require.Equal(t, node.ID(), nodes[0])
-	})
-}
 func TestUpdateReputationNodeEvents(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 2, UplinkCount: 0,

@@ -1049,47 +1005,3 @@ func TestUpdateCheckInBelowMinVersionEvent(t *testing.T) {
 		require.True(t, ne2.CreatedAt.After(ne1.CreatedAt))
 	})
 }
-func TestService_GetNodesOutOfPlacement(t *testing.T) {
-	testplanet.Run(t, testplanet.Config{
-		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
-		Reconfigure: testplanet.Reconfigure{
-			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
-				config.Overlay.Node.AsOfSystemTime.Enabled = false
-				config.Overlay.Node.AsOfSystemTime.DefaultInterval = 0
-			},
-		},
-	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
-		service := planet.Satellites[0].Overlay.Service
-		placement := storj.EU
-
-		nodeIDs := []storj.NodeID{}
-		for _, node := range planet.StorageNodes {
-			nodeIDs = append(nodeIDs, node.ID())
-
-			err := service.TestNodeCountryCode(ctx, node.ID(), location.Poland.String())
-			require.NoError(t, err)
-		}
-		require.NoError(t, service.DownloadSelectionCache.Refresh(ctx))
-
-		offNodes, err := service.GetNodesOutOfPlacement(ctx, nodeIDs, placement)
-		require.NoError(t, err)
-		require.Empty(t, offNodes)
-
-		expectedNodeIDs := []storj.NodeID{}
-		for _, node := range planet.StorageNodes {
-			expectedNodeIDs = append(expectedNodeIDs, node.ID())
-
-			err := service.TestNodeCountryCode(ctx, node.ID(), location.Brazil.String())
-			require.NoError(t, err)
-
-			// we need to refresh cache because node country code was changed
-			require.NoError(t, service.DownloadSelectionCache.Refresh(ctx))
-
-			offNodes, err := service.GetNodesOutOfPlacement(ctx, nodeIDs, placement)
-			require.NoError(t, err)
-			require.ElementsMatch(t, expectedNodeIDs, offNodes)
-		}
-	})
-}


@@ -18,6 +18,7 @@ import (
 	"storj.io/common/storj/location"
 	"storj.io/common/testcontext"
 	"storj.io/storj/satellite"
+	"storj.io/storj/satellite/nodeselection"
 	"storj.io/storj/satellite/overlay"
 	"storj.io/storj/satellite/satellitedb/satellitedbtest"
 )
@@ -101,18 +102,13 @@ func testDatabase(ctx context.Context, t *testing.T, cache overlay.DB) {
 		storj.NodeID{7}, storj.NodeID{8},
 		storj.NodeID{9},
 	}

-	criteria := &overlay.NodeCriteria{
-		OnlineWindow:      time.Hour,
-		ExcludedCountries: []string{"FR", "BE"},
-	}
-	contains := func(nodeID storj.NodeID) func(node overlay.SelectedNode) bool {
-		return func(node overlay.SelectedNode) bool {
+	contains := func(nodeID storj.NodeID) func(node nodeselection.SelectedNode) bool {
+		return func(node nodeselection.SelectedNode) bool {
 			return node.ID == nodeID
 		}
 	}

-	online, offline, err := cache.KnownReliable(ctx, nodeIds, criteria.OnlineWindow, criteria.AsOfSystemInterval)
+	online, offline, err := cache.KnownReliable(ctx, nodeIds, time.Hour, 0)
 	require.NoError(t, err)

 	// unreliable nodes shouldn't be in results

@@ -123,19 +119,26 @@
 	require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{9}))) // not in db
 	require.True(t, slices.ContainsFunc(offline, contains(storj.NodeID{4})))                     // offline
+	// KnownReliable is not excluding by country anymore
+	require.True(t, slices.ContainsFunc(online, contains(storj.NodeID{7}))) // excluded country
 	require.Len(t, append(online, offline...), 4)

-	valid, err := cache.Reliable(ctx, criteria)
+	online, offline, err = cache.Reliable(ctx, time.Hour, 0)
 	require.NoError(t, err)

-	require.NotContains(t, valid, storj.NodeID{2}) // disqualified
-	require.NotContains(t, valid, storj.NodeID{3}) // unknown audit suspended
-	require.NotContains(t, valid, storj.NodeID{4}) // offline
-	require.NotContains(t, valid, storj.NodeID{5}) // gracefully exited
-	require.NotContains(t, valid, storj.NodeID{6}) // offline suspended
-	require.NotContains(t, valid, storj.NodeID{7}) // excluded country
-	require.NotContains(t, valid, storj.NodeID{9}) // not in db
-	require.Len(t, valid, 2)
+	require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{2}))) // disqualified
+	require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{3}))) // unknown audit suspended
+	require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{5}))) // gracefully exited
+	require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{6}))) // offline suspended
+	require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{9}))) // not in db
+
+	require.True(t, slices.ContainsFunc(offline, contains(storj.NodeID{4}))) // offline
+	// Reliable is not excluding by country anymore
+	require.True(t, slices.ContainsFunc(online, contains(storj.NodeID{7}))) // excluded country
+	require.Len(t, append(online, offline...), 4)
 }
 	{ // TestUpdateOperator


@@ -9,9 +9,8 @@ import (
 	"go.uber.org/zap"

-	"storj.io/common/pb"
 	"storj.io/common/sync2"
-	"storj.io/storj/satellite/nodeselection/uploadselection"
+	"storj.io/storj/satellite/nodeselection"
 )

 // UploadSelectionDB implements the database for upload selection cache.

@@ -19,7 +18,7 @@ import (
 // architecture: Database
 type UploadSelectionDB interface {
 	// SelectAllStorageNodesUpload returns all nodes that qualify to store data, organized as reputable nodes and new nodes
-	SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*SelectedNode, err error)
+	SelectAllStorageNodesUpload(ctx context.Context, selectionCfg NodeSelectionConfig) (reputable, new []*nodeselection.SelectedNode, err error)
 }
 // UploadSelectionCacheConfig is a configuration for upload selection cache.

@@ -36,15 +35,20 @@ type UploadSelectionCache struct {
 	db              UploadSelectionDB
 	selectionConfig NodeSelectionConfig

-	cache sync2.ReadCacheOf[*uploadselection.State]
+	cache sync2.ReadCacheOf[*nodeselection.State]
+
+	defaultFilters nodeselection.NodeFilters
+	placementRules PlacementRules
 }

 // NewUploadSelectionCache creates a new cache that keeps a list of all the storage nodes that are qualified to store data.
-func NewUploadSelectionCache(log *zap.Logger, db UploadSelectionDB, staleness time.Duration, config NodeSelectionConfig) (*UploadSelectionCache, error) {
+func NewUploadSelectionCache(log *zap.Logger, db UploadSelectionDB, staleness time.Duration, config NodeSelectionConfig, defaultFilter nodeselection.NodeFilters, placementRules PlacementRules) (*UploadSelectionCache, error) {
 	cache := &UploadSelectionCache{
 		log:             log,
 		db:              db,
 		selectionConfig: config,
+		defaultFilters:  defaultFilter,
+		placementRules:  placementRules,
 	}
 	return cache, cache.cache.Init(staleness/2, staleness, cache.read)
 }
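A hypothetical wiring sketch for the new constructor (staleness value and function name invented; the parameter list matches the signature above, mirroring what NewService now does):

package example

import (
	"time"

	"go.uber.org/zap"

	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/overlay"
)

// buildCache is a hypothetical sketch: the cache now receives the default
// filters and the placement rules at construction time, instead of deriving
// country exclusions from NodeSelectionConfig on every request.
func buildCache(log *zap.Logger, db overlay.UploadSelectionDB, config overlay.NodeSelectionConfig, rules overlay.PlacementRules) (*overlay.UploadSelectionCache, error) {
	return overlay.NewUploadSelectionCache(log, db,
		5*time.Minute, config, // staleness is an invented example value
		nodeselection.NodeFilters{}, rules,
	)
}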
@@ -65,7 +69,7 @@ func (cache *UploadSelectionCache) Refresh(ctx context.Context) (err error) {
 // refresh calls out to the database and refreshes the cache with the most up-to-date
 // data from the nodes table, then sets time that the last refresh occurred so we know when
 // to refresh again in the future.
-func (cache *UploadSelectionCache) read(ctx context.Context) (_ *uploadselection.State, err error) {
+func (cache *UploadSelectionCache) read(ctx context.Context) (_ *nodeselection.State, err error) {
 	defer mon.Task()(&ctx)(&err)

 	reputableNodes, newNodes, err := cache.db.SelectAllStorageNodesUpload(ctx, cache.selectionConfig)

@@ -73,7 +77,7 @@ func (cache *UploadSelectionCache) read(ctx context.Context) (_ *uploadselection
 		return nil, Error.Wrap(err)
 	}

-	state := uploadselection.NewState(convSelectedNodesToNodes(reputableNodes), convSelectedNodesToNodes(newNodes))
+	state := nodeselection.NewState(reputableNodes, newNodes)

 	mon.IntVal("refresh_cache_size_reputable").Observe(int64(len(reputableNodes)))
 	mon.IntVal("refresh_cache_size_new").Observe(int64(len(newNodes)))
@@ -84,7 +88,7 @@ func (cache *UploadSelectionCache) read(ctx context.Context) (_ *uploadselection
 // GetNodes selects nodes from the cache that will be used to upload a file.
 // Every node selected will be from a distinct network.
 // If the cache hasn't been refreshed recently it will do so first.
-func (cache *UploadSelectionCache) GetNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*SelectedNode, err error) {
+func (cache *UploadSelectionCache) GetNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*nodeselection.SelectedNode, err error) {
 	defer mon.Task()(&ctx)(&err)

 	state, err := cache.cache.Get(ctx, time.Now())

@@ -92,18 +96,23 @@ func (cache *UploadSelectionCache) GetNodes(ctx context.Context, req FindStorage
 		return nil, Error.Wrap(err)
 	}

-	selected, err := state.Select(ctx, uploadselection.Request{
-		Count:                req.RequestedCount,
-		NewFraction:          cache.selectionConfig.NewNodeFraction,
-		ExcludedIDs:          req.ExcludedIDs,
-		Placement:            req.Placement,
-		ExcludedCountryCodes: cache.selectionConfig.UploadExcludedCountryCodes,
-	})
-	if uploadselection.ErrNotEnoughNodes.Has(err) {
-		err = ErrNotEnoughNodes.Wrap(err)
-	}
-	return convNodesToSelectedNodes(selected), err
+	filters := cache.placementRules(req.Placement)
+	if len(req.ExcludedIDs) > 0 {
+		filters = append(filters, state.ExcludeNetworksBasedOnNodes(req.ExcludedIDs))
+	}
+	filters = append(filters, cache.defaultFilters)
+	filters = filters.WithAutoExcludeSubnets()
+	selected, err := state.Select(ctx, nodeselection.Request{
+		Count:       req.RequestedCount,
+		NewFraction: cache.selectionConfig.NewNodeFraction,
+		NodeFilters: filters,
+	})
+	if nodeselection.ErrNotEnoughNodes.Has(err) {
+		err = ErrNotEnoughNodes.Wrap(err)
+	}
+	return selected, err
 }
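From the caller's side, all of the exclusion logic now lives behind GetNodes: placement rules, ID and subnet exclusions, and the default filters are composed inside the cache. A hypothetical call site (count, placement value, and function name invented; the request fields appear in this diff):

package example

import (
	"context"

	"storj.io/common/storj"
	"storj.io/storj/satellite/nodeselection"
	"storj.io/storj/satellite/overlay"
)

// pickNodes is a hypothetical sketch of the reworked call path: the caller
// only fills in the request; filter composition happens inside GetNodes.
func pickNodes(ctx context.Context, cache *overlay.UploadSelectionCache, excluded []storj.NodeID) ([]*nodeselection.SelectedNode, error) {
	return cache.GetNodes(ctx, overlay.FindStorageNodesRequest{
		RequestedCount: 10,       // how many nodes to select (example value)
		ExcludedIDs:    excluded, // their /24 networks are excluded as well
		Placement:      storj.EU, // mapped to node filters via PlacementRules
	})
}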
 // Size returns how many reputable nodes and new nodes are in the cache.

@@ -115,31 +124,3 @@ func (cache *UploadSelectionCache) Size(ctx context.Context) (reputableNodeCount
 	stats := state.Stats()
 	return stats.Reputable, stats.New, nil
 }

-func convNodesToSelectedNodes(nodes []*uploadselection.Node) (xs []*SelectedNode) {
-	for _, n := range nodes {
-		xs = append(xs, &SelectedNode{
-			ID:          n.ID,
-			Address:     pb.NodeFromNodeURL(n.NodeURL).Address,
-			LastNet:     n.LastNet,
-			LastIPPort:  n.LastIPPort,
-			CountryCode: n.CountryCode,
-		})
-	}
-	return xs
-}
-
-func convSelectedNodesToNodes(nodes []*SelectedNode) (xs []*uploadselection.Node) {
-	for _, n := range nodes {
-		xs = append(xs, &uploadselection.Node{
-			NodeURL: (&pb.Node{
-				Id:      n.ID,
-				Address: n.Address,
-			}).NodeURL(),
-			LastNet:     n.LastNet,
-			LastIPPort:  n.LastIPPort,
-			CountryCode: n.CountryCode,
-		})
-	}
-	return xs
-}
