Compare commits

...

50 Commits

Author SHA1 Message Date
Wilfred Asomani
cd9feb6d09 web/satellite: split projects by whether user owns them
This change groups the list of projects in ProjectSelection.vue into
those owned by the current user and those that are not.

Issue: https://github.com/storj/storj/issues/5972

Change-Id: I20f4e68f84e4ece230afa53e7e7eed507da625af
2023-06-29 21:10:00 +00:00
Egon Elbre
2463b881c6 storagenode/piecestore: fix TestUpload
The test needs to wait for the upload information to be saved to the
database.
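A minimal sketch of this kind of synchronization, assuming a testify-based
test and a hypothetical uploadCount helper that queries the saved rows:

    // Poll until the upload information reaches the database instead of
    // asserting immediately; the test fails only if the condition never
    // becomes true within the deadline.
    require.Eventually(t, func() bool {
        n, err := uploadCount(ctx, db) // hypothetical helper
        return err == nil && n == expectedUploads
    }, 5*time.Second, 100*time.Millisecond)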

Fixes https://github.com/storj/storj/issues/6008

Change-Id: I1f258c923a4b33cbc571f97bad046cec70642a0b
2023-06-29 22:14:22 +03:00
Jeremy Wharton
0ca7d19fc7 web/satellite: show cursor as pointer when selecting session timeout
The cursor now appears as a pointer when interacting with the session
timeout modal's dropdown menu.

Change-Id: I309463b5f3af2df198a8bba5a9122e8a8f39de89
2023-06-29 14:42:21 +00:00
Jeremy Wharton
bd3aaf4f34 web/satellite: adjust mobile header padding and logo
The mobile header's horizontal padding has been adjusted to match the
padding of the page content. Additionally, the size of the logo in the
mobile header has been decreased.

Change-Id: I7559813455fb186f1402783db6022ebdaa59c7ae
2023-06-29 12:18:04 +00:00
Jeremy Wharton
f1fab9edfb web/satellite: update styling of session timeout selection modal
The styling of the session timeout selection modal has been altered to
more closely align with our designs.

Change-Id: Icaf2d16170908af4962075732c6df0f08d7dba2d
2023-06-29 11:43:56 +00:00
Michal Niewrzal
96d3c41c14 satellite/metabase: convert bucket name to bytes for queries
If a bucket name contains invalid characters, we need to cast the
bucket name to a byte array for the query argument. This change does
that for some cases that were previously missed.
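As an illustration of the pattern (the table and column names below are
assumptions, not the actual metabase schema), the fix is to hand the driver
a []byte instead of a string so bucket names with invalid UTF-8 survive
parameter encoding:

    // Cast the bucket name to bytes for the query argument; a string
    // argument containing invalid UTF-8 can be rejected by the driver.
    row := db.QueryRowContext(ctx,
        `SELECT object_count FROM bucket_storage_tallies
         WHERE project_id = $1 AND bucket_name = $2`,
        projectID[:], []byte(bucketName))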

Change-Id: I47d0d8e3c85a69bdf63de1137adcd533dcfe50a8
2023-06-29 10:43:35 +00:00
Jeremy Wharton
faf5b960ff satellite/console/dbcleanup: make chore clean up webapp sessions
The console DB cleanup chore has been extended to remove expired webapp
session records.

Resolves #5893

Change-Id: I455b4933552cfde86817a2ef8f9879dd7b0a121d
2023-06-29 09:53:18 +00:00
Vitalii
0d0e8cc8cf web/satellite: fix for too many objects banner in object browser
Fixed the 'Too many objects' banner being displayed in the wrong place.
I made it simpler and more correct: it's impossible to know the exact object count for each path/prefix, because the S3 client can list only 1000 objects (for now) while our API calculates the object count for the whole bucket (not for a particular passphrase).
Added a message telling the user they can list all objects using the Uplink CLI.

Also removed an unused legacy CSS file.
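For context, the 1000-key page is standard S3 listing behavior; this sketch
(aws-sdk-go-v2, with placeholder bucket and prefix) shows why an exact
per-prefix count requires walking every page, which a browser UI cannot do
cheaply:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        // Each ListObjectsV2 page returns at most 1000 keys.
        paginator := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
            Bucket: aws.String("example-bucket"), // placeholder
            Prefix: aws.String("photos/"),        // placeholder
        })
        total := 0
        for paginator.HasMorePages() {
            page, err := paginator.NextPage(ctx)
            if err != nil {
                log.Fatal(err)
            }
            total += len(page.Contents)
        }
        fmt.Println("objects under prefix:", total)
    }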

Issue:
https://github.com/storj/storj/issues/5955

Change-Id: I4b3cff47763ebdb631119b690de876ecf6a22e9d
2023-06-29 11:05:35 +03:00
Jeremy Wharton
706cd0b9fb satellite/console: allow for adding unregistered project members
This change allows members without an account to be invited to a
project. The link in the invitation email will redirect these users to
the registration page containing custom text describing the invitation.

Resolves #5353

Change-Id: I6cba91e57c551ca13c7a9ae49150fc1d374cd6b5
2023-06-28 22:03:03 +00:00
dlamarmorgan
a010459520 satellite/payments/stripe/{client,server}: add set invoice status cmd
Change-Id: I6d9b4fe2ed4b17d63d858f7ceefd7c14adb062bb
2023-06-28 21:04:34 +00:00
Wilfred Asomani
14beb9ad85 web/satellite: update button hover state to match the figma
This change updates buttons like the "New Folder" button to have a
hover state consistent with the design in the Figma.

It also fixes an issue where the table view would be shown by default
even if the user has fewer than 8 projects.

Issue: https://github.com/storj/storj/issues/5971

Change-Id: Ic8b99496e1990550a9ea1550c3c2bd80bf997aa9
2023-06-28 20:13:30 +00:00
Vitalii
6f078acb8d web/satellite: add gallery view caching
Add caching for gallery view previews and map.

Issue:
https://github.com/storj/storj/issues/5969

Change-Id: I6c9755aec6e1d4143005835adad212cafd46f649
2023-06-28 17:29:05 +00:00
Michal Niewrzal
578724e9b1 satellite/repair/repairer: use KnownReliable to check segment pieces
At the moment, the segment repairer skips offline nodes in checks such
as clumped pieces and out-of-placement pieces. This change fixes the
problem using the new version of the KnownReliable method, which
returns both online and offline nodes. The returned data can be used
to find clumped and out-of-placement pieces.

We no longer use DownloadSelectionCache with the segment repairer.

https://github.com/storj/storj/issues/5998

Change-Id: I236a1926e21f13df4cdedc91130352d37ff97e18
2023-06-28 16:53:51 +00:00
Jeremy Wharton
ec780003f0 web/satellite: add responsiveness to upgrade notification
The upgrade notification has been updated to adapt to mobile screens
in accordance with our designs.

Additionally, an issue where the notification would display "0B free
included" when displayed in the All Projects Dashboard has been fixed.

Change-Id: Ic13b9426ab5d6529c9d7b2ad8446a17da74905b1
2023-06-28 09:34:53 -05:00
Michal Niewrzal
e129841130 satellite/metabase: remove AOST from deleteInactiveObjectsAndSegments
AOST was added to the query in deleteInactiveObjectsAndSegments in
DeleteZombieObjects by mistake; DELETE statements do not support it.
Unfortunately, unit tests didn't cover this case. This change removes
AOST from the mentioned method and adds AOST cases to the unit tests.

Change-Id: Ib7f65134290df08c490c96b7e367d12f497a3373
2023-06-28 13:24:14 +00:00
Márton Elek
683f0c493f earthfile: build satellite-admin for nightly
Change-Id: Iafe7fe1867178517f359ca61d2178a68d1430894
2023-06-28 12:06:41 +00:00
Sean Harvey
b1523f82c8 satellite/admin: add owner full name on /api/apikeys/{apikey} endpoint
Updates storj/gateway-mt#321

Change-Id: I6759ec5dbba49261bb183e42d8cb333c326cb9e8
2023-06-28 16:01:15 +12:00
paul cannon
d06e4589ae bump storj.io/private
Change-Id: I0b773fe140bad485fc323701928eaf246638032c
2023-06-27 13:29:54 -05:00
Jeremy Wharton
7d44f99ce6 web/satellite: add My Projects button to All Projects Dashboard
This change adds a button to the header of the All Projects Dashboard
that navigates the user to the My Projects page.

Resolves #5996

Change-Id: Ie467e22c9039cf30fda1b8d815c1d6269f5ddf4f
2023-06-27 16:29:34 +00:00
Wilfred Asomani
30d0094c43 satellite/console: prevent unauthorized project mutation
This change further restricts project members from modifying project
details by restricting the project edit GraphQL mutation, making it
check whether the user performing the operation is the project owner.

Change-Id: Iaf10d16269ddc29437d3d5629db06e20cea3004e
2023-06-27 15:57:09 +00:00
Jeremy Wharton
bcce6023c3 web/satellite: show custom invitation text in registration page
This change makes the registration page display custom text for
users who have been invited to a project.

References #5353

Change-Id: Ib20760f79ef29327b66316817010ca1dc00ff2ce
2023-06-27 15:15:43 +00:00
Vitalii
9374edfac9 web/satellite: added dropzone styling for object browser
Added greyed-out dropzone styling for the object browser.

Issue:
https://github.com/storj/storj/issues/5970

Change-Id: I9770a9d3fb90f6aaf659885f3c3cafed7af89e1d
2023-06-27 17:17:15 +03:00
Michal Niewrzal
98f4f249b2 satellite/overlay: refactor KnownReliable to be used with repairer
Currently we are using KnownUnreliableOrOffline to get missing pieces
for the segment repairer (GetMissingPieces). The issue is that the
repairer now looks at more than just missing pieces (clumped and
out-of-placement pieces).

KnownReliable was refactored to return data (e.g. country, lastNet)
about all reliable nodes from the provided list, split into online and
offline. This way we can use the results of this method for all checks:
missing pieces, clumped pieces, and out-of-placement pieces.

This is the first part of the changes to handle different kinds of
pieces in the segment repairer.
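A rough sketch of the online/offline split described above, under the
assumption of a simplified node record (the real KnownReliable lives in
satellite/overlay and applies more criteria than a last-contact window):

    // nodeRecord stands in for the overlay's per-node info; the real type
    // also carries lastNet, country code, disqualification state, etc.
    type nodeRecord struct {
        ID                 storj.NodeID
        LastContactSuccess time.Time
    }

    // splitByOnline partitions reliable nodes by recency of contact.
    // Offline-but-reliable nodes still matter for the clumped and
    // out-of-placement checks, which is the point of the refactor.
    func splitByOnline(nodes []nodeRecord, now time.Time, onlineWindow time.Duration) (online, offline []nodeRecord) {
        for _, n := range nodes {
            if now.Sub(n.LastContactSuccess) <= onlineWindow {
                online = append(online, n)
            } else {
                offline = append(offline, n)
            }
        }
        return online, offline
    }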

https://github.com/storj/storj/issues/5998

Change-Id: I6cbaf59cff9d6c4346ace75bb814ccd985c0e43e
2023-06-27 13:27:23 +02:00
Egon Elbre
049953a7ce go.mod: bump storj.io/uplink
Change-Id: Ib535cb50195c5a0e6197e83e2e05865ba95bcaf8
2023-06-26 13:51:13 +00:00
Wilfred Asomani
79eb71841d satellite/{web,analytics}: add row actions to project members
This change adds row actions (delete, reinvite, copy) to the project
members table. It also adds analytics events for the actions.

Issue: #5762
Also fixes: #5941

Change-Id: I7fb7f88c7bd5ac2ce3e3d00530af4708ff220bd7
2023-06-26 12:45:02 +00:00
andriikotko
0421ef2fa1 docs/testplan: add testplan for inviting project members (#5811)
2023-06-26 13:20:16 +02:00
Jeremy Wharton
99f4a34a1d web/satellite: add button for resending expired project invitations
A button has been added to the Team page for resending expired project
member invitations. It appears when one or more such invitations have
been selected.

Additionally, styling for certain search fields and the Team page's
header has been updated to align more closely with our designs.

Resolves #5752

Change-Id: I623fed5f50e60beca2f82136f8771dde5aa684f4
2023-06-23 23:46:15 -05:00
Jeremy Wharton
2ae75bcf4e satellite/console: prevent invite replies from deleting expired invites
This change prevents project member invitation responses from deleting
expired project invitations. Previously, accepting or declining an
expired invitation caused it to be destroyed.

References #5752

Change-Id: Id3917fb825bffc3e8a262d5b541b907678db1809
2023-06-23 19:05:52 -05:00
Jeremy Wharton
80c5a628cb satellite/console/dbcleanup: remove project invite cleanup
This reverts 9c75316 which allowed the satellite console DB cleanup
chore to delete expired project member invitations. We now want such
invitations to be accessible indefinitely.

References #5752

Change-Id: I489a7e19df825dd14376d3d260b70b3eef643e03
2023-06-23 21:15:36 +00:00
Jeremy Wharton
22f8b029b9 satellite/console: fix transaction error when inviting project members
The SQL transaction that inserted project invitations relied on the
error result of one of its statements to determine whether an
invitation should be updated. This was inappropriate, since any error
returned from a transaction statement should end the transaction
immediately. This change resolves that issue.
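The general shape of the fix, sketched with an illustrative schema (the real
console tables and columns differ): make the statement itself conflict-safe
so the transaction never has to interpret a statement error.

    // An atomic upsert never fails "on purpose", so the transaction no
    // longer branches on the error result of an INSERT.
    _, err := tx.ExecContext(ctx, `
        INSERT INTO project_invitations (project_id, email, created_at)
        VALUES ($1, $2, now())
        ON CONFLICT (project_id, email)
        DO UPDATE SET created_at = now()`,
        projectID[:], normalizedEmail)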

Change-Id: I354e430df293054d8583fb4faa5dc1bcf9053836
2023-06-23 20:17:37 +00:00
Wilfred Asomani
1b912ec167 satellite/{web,analytics}: add segment events for passphrase modals
This change sends the new "passphrase created" event when a passphrase
is created, along with the method by which it was created
(entered/generated).

Issue: #5918

Change-Id: Ib485b6ff7a968d4c84bf124e14c14c91478f0dfb
2023-06-23 18:30:46 +00:00
Moby von Briesen
7530a3a83d satellite/console: add CORS middleware to satellite UI and API
Add some basic handling to set cross-origin resource sharing headers for
the satellite UI app handler as well as API endpoints used by the
satellite UI.

This change also removes some no-longer-necessary CORS functionality on
the account registration endpoint. Previously, these CORS headers were
used to enable account registration cross-origin from www.storj.io.
However, we have since removed the ability to sign up via www.storj.io.

With these changes, browsers will prevent any requests to the affected
endpoints, unless the browser is making the request from the same host
as the satellite.
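A minimal sketch of such a middleware, assuming the allowed origin is
derived from the satellite's configured external address (the real handler
wiring differs):

    // corsMiddleware sets CORS headers only for the satellite's own origin,
    // so browsers reject cross-origin requests to these endpoints.
    func corsMiddleware(allowedOrigin string, next http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            if origin := r.Header.Get("Origin"); origin == allowedOrigin {
                w.Header().Set("Access-Control-Allow-Origin", origin)
                w.Header().Set("Access-Control-Allow-Credentials", "true")
                w.Header().Set("Vary", "Origin")
            }
            if r.Method == http.MethodOptions { // preflight
                w.WriteHeader(http.StatusNoContent)
                return
            }
            next.ServeHTTP(w, r)
        })
    }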

see https://github.com/storj/storj-private/issues/242

Change-Id: Ifd98be4a142a2e61e26392d97242d911e051fe8a
2023-06-23 17:46:44 +00:00
Wilfred Asomani
361f9fdba5 web/satellite: prevent unauthorized access to project settings page
This change further restricts project members from accessing the
project settings page by navigating to the (all) projects dashboard
when /edit-project-details is visited or the project is switched.
It also applies a white background to the project ownership tag to
improve contrast and visibility.

Change-Id: Ib855c4e3aa4be7ec9ec1e9b312041118442358ad
2023-06-23 17:02:07 +00:00
Wilfred Asomani
9d4787f5e7 web/satellite: use correct color for projects table icons
This change uses the correct color corresponding to the role of a user
on a project.

Change-Id: Ibd8f9ccae4486a8039f77bae5c2533b060e73be9
2023-06-23 15:40:21 +00:00
Jeremy Wharton
265ac599c7 satellite/analytics: register gallery view click event
This change registers the "Gallery View Clicked" analytics event on the
satellite's side. Previously, the event reported from the satellite
frontend would be ignored because it was not present in the satellite's
event whitelist.

References #5824

Change-Id: I636ace6a21b2738431d0c1f5adb9a16c4e177188
2023-06-23 13:58:29 +00:00
Michal Niewrzal
eb407b2ae3 satellite/overlay: delete unused KnownOffline method
Change-Id: Ief9288fee83f9c381dd7840f48333babcd3d6bf7
2023-06-23 13:24:30 +00:00
Wilfred Asomani
40ca3d3609 web/satellite: hide project settings option for members
This change follows up on 8f7c59d to hide the project settings option
on the all projects dashboard table for members.

Change-Id: I0ac246e0f6018d7b3028b68439049df3081fce29
2023-06-23 12:02:46 +00:00
Wilfred Asomani
74b3617813 Revert "satellite/db: fix long loadtime for charges endpoint"
This reverts commit 676178299f.

Reason for revert:
The new query used by this commit performs a full table scan.
It's been reverted pending a fix for that.

Change-Id: Idc53954459aa6f5a692056232b8674b11d1928ce
2023-06-23 09:32:33 +00:00
Paul Willoughby
3180e09750 satellite/metainfo: increase default MaxEncryptedObjectKeyLength
Allow a longer encrypted key length to reduce 'key length is too big'
errors in gateway-mt.  Gateway is enforcing an unencrypted key length
of 1024 bytes but when encrypted some keys are exceeding the current
limit.

Updates https://github.com/storj/gateway-mt/issues/335

Change-Id: Ib02e2064c42e96b9d59936905832d8dd6068d2c7
2023-06-22 22:59:52 +00:00
Cameron
37e7eeb0e9 satellite/payments/accountfreeze: set grace period default to 15 days
Change-Id: Ied8f3758b579b83ebf04cba0fde9715c689bac4f
2023-06-22 18:53:03 +00:00
Clement Sam
1166fdfbab satellite/gc: add piece tracker ranged loop observer
Resolves https://github.com/storj/storj/issues/5798

Change-Id: I6fe2c57b3a247b085026feb8bee60c2d002db71b
2023-06-22 18:17:39 +00:00
Michal Niewrzal
2b2bca8e81 satellite/accounting/tally: save tallies in a batches
Because we were saving all tallies with a single SQL statement, we
finally reached the maximum message size. With this change we call
SaveTallies multiple times, in batches.

https://github.com/storj/storj/issues/5977

Change-Id: I0c7dd27779b1743ede66448fb891e65c361aa3b0
2023-06-22 17:02:26 +00:00
paul cannon
355ea2133b satellite/audit: remove pieces when audits fail
When pieces fail an audit (hard fail, meaning the node acknowledged it
did not have the piece or the piece was corrupted), we will now remove
those pieces from the segment.

Previously, we did not do this, and some node operators were seeing the
same missing piece audited over and over again and losing reputation
every time.

This change will include both verification and reverification audits. It
will also apply to pieces found to be bad during repair, if
repair-to-reputation reporting is enabled.

Change-Id: I0ca7af7e3fecdc0aebbd34fee4be3a0eab53f4f7
2023-06-22 14:19:00 +00:00
Michal Niewrzal
203c6be25f satellite/repair/repairer: test repairing geofenced segment
An additional test case to cover the situation where we try to repair
a segment that has a specific placement set. We need to be sure that
the segment won't be repaired onto nodes outside the segment placement,
even if that means the repair will fail.

Change-Id: I99d238aa9d9b9606eaf89cd1cf587a2585faee91
2023-06-22 13:21:05 +00:00
Michal Niewrzal
9e3fd4d514 satellite/overlay: delete unused method
Change-Id: I87828fcac4f4a9fb08c86af188aa6ea28c5c64af
2023-06-22 12:45:59 +00:00
Sean Harvey
ad9b0dfc40 satellite/admin: fix paid tier field on /api/apikeys/{apikey} endpoint
The field was not being set, so it would always show false in the
response regardless of whether the user was in the paid tier.

Updates storj/gateway-mt#321

Change-Id: I00ae751ac9118fc05b3c22bc6d2cb9600d0faaaf
2023-06-22 11:12:56 +00:00
Jeremy Wharton
25c21f03c3 web/satellite: add project role for expired invitations
Pending project members whose invitations have expired now appear with
the "Invite Expired" role in the Team page.

References #5752

Change-Id: Ic91289618ee02e65de29e986fa3205eccf39b267
2023-06-22 06:27:37 +00:00
Vitalii
adbd4fbab2 web/satellite: applied styling updates for gallery view
Add "geographic distribution" item into three-dot menu.
On small screens, instead of showing three-dot menu + geographic distribution, show three-dot menu + download.
Allow the user to use left and right arrow keys to navigate through previews in gallery view.
Update "Do not share download link with other people" notification according to new designs.
Add hover styles and tooltips to icons according to designs.
In the "View Details" modal, change "Saved in" to "Bucket".
(not related to gallery view) - the three-dot-menu in the file list has a border radius, but when you hover over the last component ("Delete"), the border radius goes away.

Issue:
https://github.com/storj/storj/issues/5968

Change-Id: I39bec2e630327d136cb2550dbbce4fcbf77399f9
2023-06-21 21:25:48 +00:00
Vitalii
1712e69f0c web/satellite: add data dimension label to charts on project dashboard
Added a data dimension label to the chart y-axis on the project dashboard.

Issue:
https://github.com/storj/storj/issues/5926

Change-Id: I03a3a12211b1a20f76e3a8291f349675956ff598
2023-06-21 20:30:02 +00:00
Lizzy Thomson
05f92fed11 web/satellite: show full access credentials when not hidden
Show full AG (access grant), API key, or S3 credentials when not hidden.

Issue https://github.com/storj/storj/issues/5848

Change-Id: I9e0903ed34b6f0068e9cef7f048553441ed98fc3
2023-06-21 19:41:26 +00:00
135 changed files with 3502 additions and 12909 deletions

View File

@@ -37,6 +37,13 @@ satellite-web:
     SAVE ARTIFACT dist AS LOCAL web/satellite/dist
     SAVE ARTIFACT static AS LOCAL web/satellite/static
 
+satellite-admin:
+    FROM node:16
+    WORKDIR /build
+    COPY satellite/admin/ui .
+    RUN ./build.sh
+    SAVE ARTIFACT build AS LOCAL satellite/admin/ui/build
+
 storagenode-bin:
     COPY go.mod go.mod
     COPY go.sum go.sum
@@ -112,6 +119,7 @@ build-tagged-image:
     FROM img.dev.storj.io/storjup/base:20230208-1
     COPY +multinode-web/dist /var/lib/storj/storj/web/multinode/dist
     COPY +satellite-web/dist /var/lib/storj/storj/web/satellite/dist
+    COPY +satellite-admin/build /app/satellite-admin/
     COPY +satellite-web/static /var/lib/storj/storj/web/satellite/static
     COPY +storagenode-web/dist /var/lib/storj/storj/web/storagenode/dist
     COPY +storagenode-web/static /var/lib/storj/storj/web/storagenode/static

View File

@@ -208,7 +208,14 @@ var (
         Long:  "Applies free tier coupon to Stripe customers without a coupon",
         RunE:  cmdApplyFreeTierCoupons,
     }
-    createCustomerBalanceInvoiceItems = &cobra.Command{
+    setInvoiceStatusCmd = &cobra.Command{
+        Use:   "set-invoice-status [start-period] [end-period] [status]",
+        Short: "set all open invoices status",
+        Long:  "set all open invoices in the specified date ranges to the provided status. Period is a UTC date formatted like YYYY-MM.",
+        Args:  cobra.ExactArgs(3),
+        RunE:  cmdSetInvoiceStatus,
+    }
+    createCustomerBalanceInvoiceItemsCmd = &cobra.Command{
         Use:   "create-balance-invoice-items",
         Short: "Creates stripe invoice line items for stripe customer balance",
         Long:  "Creates stripe invoice line items for stripe customer balances obtained from past invoices and other miscellaneous charges.",
@@ -342,6 +349,9 @@ var (
         Database string `help:"satellite database connection string" releaseDefault:"postgres://" devDefault:"postgres://"`
         Before   string `help:"select only exited nodes before this UTC date formatted like YYYY-MM. Date cannot be newer than the current time (required)"`
     }
+    setInvoiceStatusCfg struct {
+        DryRun bool `help:"do not update stripe" default:"false"`
+    }
 
     confDir     string
     identityDir string
@@ -381,7 +391,8 @@ func init() {
     compensationCmd.AddCommand(recordPeriodCmd)
     compensationCmd.AddCommand(recordOneOffPaymentsCmd)
     billingCmd.AddCommand(applyFreeTierCouponsCmd)
-    billingCmd.AddCommand(createCustomerBalanceInvoiceItems)
+    billingCmd.AddCommand(setInvoiceStatusCmd)
+    billingCmd.AddCommand(createCustomerBalanceInvoiceItemsCmd)
     billingCmd.AddCommand(prepareCustomerInvoiceRecordsCmd)
     billingCmd.AddCommand(createCustomerProjectInvoiceItemsCmd)
     billingCmd.AddCommand(createCustomerInvoicesCmd)
@@ -413,7 +424,9 @@ func init() {
     process.Bind(reportsVerifyGEReceiptCmd, &reportsVerifyGracefulExitReceiptCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
     process.Bind(partnerAttributionCmd, &partnerAttribtionCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
     process.Bind(applyFreeTierCouponsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
-    process.Bind(createCustomerBalanceInvoiceItems, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+    process.Bind(setInvoiceStatusCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+    process.Bind(setInvoiceStatusCmd, &setInvoiceStatusCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
+    process.Bind(createCustomerBalanceInvoiceItemsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
     process.Bind(prepareCustomerInvoiceRecordsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
     process.Bind(createCustomerProjectInvoiceItemsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
     process.Bind(createCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
@@ -754,6 +767,30 @@ func cmdValueAttribution(cmd *cobra.Command, args []string) (err error) {
     return reports.GenerateAttributionCSV(ctx, partnerAttribtionCfg.Database, start, end, userAgents, file)
 }
 
+// cmdSetInvoiceStatus sets the status of all open invoices within the provided period to the provided status.
+// args[0] is the start of the period in YYYY-MM format.
+// args[1] is the end of the period in YYYY-MM format.
+// args[2] is the status to set the invoices to.
+func cmdSetInvoiceStatus(cmd *cobra.Command, args []string) (err error) {
+    ctx, _ := process.Ctx(cmd)
+
+    periodStart, err := parseYearMonth(args[0])
+    if err != nil {
+        return err
+    }
+    periodEnd, err := parseYearMonth(args[1])
+    if err != nil {
+        return err
+    }
+    // parseYearMonth returns the first day of the month, but we want the period end to be the last day of the month
+    periodEnd = periodEnd.AddDate(0, 1, -1)
+
+    return runBillingCmd(ctx, func(ctx context.Context, payments *stripe.Service, _ satellite.DB) error {
+        return payments.SetInvoiceStatus(ctx, periodStart, periodEnd, args[2], setInvoiceStatusCfg.DryRun)
+    })
+}
+
 func cmdCreateCustomerBalanceInvoiceItems(cmd *cobra.Command, _ []string) (err error) {
     ctx, _ := process.Ctx(cmd)
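Based on the command definition above, an invocation would look like
"satellite billing set-invoice-status 2023-05 2023-06 uncollectible", with a
dry-run flag (spelling assumed from the DryRun config field) to preview the
affected invoices without updating Stripe. Note that the period end is
expanded to the last day of its month via AddDate(0, 1, -1).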

View File

@@ -0,0 +1,38 @@
# Billing Page Testplan
 
## Background
This testplan is going to cover the new Billing Page. It will go over the figma design seen here - [Billing Page](https://www.figma.com/file/HlmasFJNHxs2lzGerq3WYH/Satellite-GUI-Public?node-id=11080%3A68109)
 
 
| Test Scenario | Test Cases | Description | Comments |
|--------------------------------------|--------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|
| | | | |
| Roles behaviour | 1. Owner role. | Can invite and remove members, perform all project operations (upload/list/download/delete/generate/accesses), and update project info and project limits. | |
| | 2. Member role. | Can perform all project operations (upload/list/download/delete/generate/accesses). A project member shouldn't see the project on the billing screen. | |
| | 3. Invited role. | This role signifies that the member has not accepted their invitation and cannot interact with the project in any way. | |
| Adding and removing Project Members. | 4. Adding a member who has an account | If an invited member already has an activated user account on the project's satellite, the invitation email will contain a link that directs them to the satellite's login page. | |
| | 5. Adding a member who doesn't have an account | If the member does not have an account, the invitation email's link will direct them to the registration page. | |
| | 6. Adding a member who has not activated their account | If the invited member's account has not been activated, the invitation email will contain an account activation link and a message informing them that they will need to activate their account before they can accept the invitation. | |
| | 7. Security: existing vs. nonexistent user invitation | Invite an existing user vs. a nonexistent user. For security reasons the behavior should be the same: if a user exists, the invite should look the same. That also means that at this point we can't display the user's name and have to stick with the email name. | |
| | 8. User's name display: user who accepts the invite vs. user who doesn't | For a user who hasn't accepted the invite, we can't display the user's name and have to stick with the email name. After the invite is accepted, the list should show the customer's full name. | |
| | 9. Invite a person who is already a member | Invite a person twice after the first invite was already accepted. -> Show an error message that the user is already a member. | |
| | 10. Resend invitation | Invite a person twice without the first invite being accepted. -> Show an info message about the duplicate. | |
| | 11. Invitation token expiration | Invite token should have an expiration date. It should be as low as account activation. Maybe a full week would be a good balance. | |
| | 12. Token inactivation after resending | What should happen if Bob accepts the first invite but rejects the second invite (reminder email)? | |
| | 13. Token inactivation after removing | Alice invites Bob, Bob has not accepted the invite yet, Alice deletes the invite, Bob tries to accept the invite. Which error message do we show Bob? Should Alice deleting a project member also send out a notification email? | |
| | 14. Invite after removing | Alice removes Bob from her project and after that sends out a new invite. | |
| | 15. Invitation email - special scenario | Bob creates a user with the normal signup process, Bob doesn't confirm the activation email, and Alice sends an invite. Which email do we send? According to the Google doc we would send the account creation email, but that shouldn't work here because there is already an account in our DB, just not activated yet. Maybe just log the user in and show them the invite instead of the signup process. | |
| | 16. Invite rejection after creating acc | User creates an account but rejects the invite. Should they see an empty All Projects Dashboard? | |
| Billing | 17. Billing estimation | Only the Owner can see the billing estimation; a member can't. Security -> try sending the estimation API request https://satellite.qa.storj.io/api/v0/payments/account/charges?from=1680307200&to=168207756 with a Member's token. | |
| | 18. Invoices | The project is added to the invoice only for the Owner, not for a member. | |
| Functional | 19. Search | Search by name & email fields | |
| | 20. Sorting | Sort by name, date added (email?) | |
| | 21. Paginator | The number of pages should be calculated correctly | |
| | 22. Drop-down list for choosing the number of rows | Check that changing the row count changes the number of pages | |
| | 23. Remove user 2 ways | Should be the same behaviour with user email confirmation | |
| | 24. Resend invite 2 ways | The same endpoints for inviting users should be called | |

go.mod (4 changes)
View File

@@ -63,8 +63,8 @@ require (
     storj.io/common v0.0.0-20230602145716-d6ea82d58b3d
     storj.io/drpc v0.0.33
     storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41
-    storj.io/private v0.0.0-20230614131149-2ffd1635adea
-    storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33
+    storj.io/private v0.0.0-20230627140631-807a2f00d0e1
+    storj.io/uplink v1.10.1-0.20230626081029-035890d408c2
 )
 
 require (

go.sum (8 changes)
View File

@@ -1022,7 +1022,7 @@ storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41 h1:SVuEocEhZfFc13J1Aml
 storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41/go.mod h1:iK+dmHZZXQlW7ahKdNSOo+raMk5BDL2wbD62FIeXLWs=
 storj.io/picobuf v0.0.1 h1:ekEvxSQCbEjTVIi/qxj2za13SJyfRE37yE30IBkZeT0=
 storj.io/picobuf v0.0.1/go.mod h1:7ZTAMs6VesgTHbbhFU79oQ9hDaJ+MD4uoFQZ1P4SEz0=
-storj.io/private v0.0.0-20230614131149-2ffd1635adea h1:/dv0bYRPgCFvoXF0S14Ien41i12sj9+s4aKhCrFzXHg=
-storj.io/private v0.0.0-20230614131149-2ffd1635adea/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
-storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33 h1:A6z1FOmqqh44BI/UOPwTi0qaM+/Hdpiwk3QAuvWf03g=
-storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=
+storj.io/private v0.0.0-20230627140631-807a2f00d0e1 h1:O2+Xjq8H4TKad2cnhvjitK3BtwkGtJ2TfRCHOIN8e7w=
+storj.io/private v0.0.0-20230627140631-807a2f00d0e1/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
+storj.io/uplink v1.10.1-0.20230626081029-035890d408c2 h1:XnJR9egrqvAqx5oCRu2b13ubK0iu0qTX12EAa6lAPhg=
+storj.io/uplink v1.10.1-0.20230626081029-035890d408c2/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=

View File

@@ -6,6 +6,7 @@ storj.io/storj/satellite/accounting."bucket_segments" IntVal
 storj.io/storj/satellite/accounting."total_bytes" IntVal
 storj.io/storj/satellite/accounting."total_objects" IntVal
 storj.io/storj/satellite/accounting."total_segments" IntVal
+storj.io/storj/satellite/accounting/tally."bucket_tally_error" Event
 storj.io/storj/satellite/accounting/tally."nodetallies.totalsum" IntVal
 storj.io/storj/satellite/audit."audit_contained_nodes" IntVal
 storj.io/storj/satellite/audit."audit_contained_nodes_global" Meter

View File

@@ -1098,8 +1098,8 @@ func TestProjectUsage_BandwidthDeadAllocation(t *testing.T) {
     total, err := io.ReadFull(reader, p)
     require.NoError(t, err)
     require.Equal(t, total, len(p))
-    require.NoError(t, cleanFn())
     require.NoError(t, reader.Close())
+    require.NoError(t, cleanFn())
 
     planet.Satellites[0].Orders.Chore.Loop.TriggerWait()

View File

@@ -26,10 +26,11 @@ var (
 // Config contains configurable values for the tally service.
 type Config struct {
     Interval             time.Duration `help:"how frequently the tally service should run" releaseDefault:"1h" devDefault:"30s" testDefault:"$TESTINTERVAL"`
     SaveRollupBatchSize  int           `help:"how large of batches SaveRollup should process at a time" default:"1000"`
     ReadRollupBatchSize  int           `help:"how large of batches GetBandwidthSince should process at a time" default:"10000"`
     UseRangedLoop        bool          `help:"whether to enable node tally with ranged loop" default:"true"`
+    SaveTalliesBatchSize int           `help:"how large should be insert into tallies" default:"10000"`
 
     ListLimit          int           `help:"how many buckets to query in a batch" default:"2500"`
     AsOfSystemInterval time.Duration `help:"as of system interval" releaseDefault:"-5m" devDefault:"-1us" testDefault:"-1us"`
@@ -75,6 +76,8 @@ func (service *Service) Run(ctx context.Context) (err error) {
         err := service.Tally(ctx)
         if err != nil {
             service.log.Error("tally failed", zap.Error(err))
+
+            mon.Event("bucket_tally_error") //mon:locked
         }
         return nil
     })
@@ -198,45 +201,65 @@ func (service *Service) Tally(ctx context.Context) (err error) {
     if err != nil {
         return Error.Wrap(err)
     }
-    finishTime := service.nowFn()
+    if len(collector.Bucket) == 0 {
+        return nil
+    }
 
     // save the new results
-    var errAtRest error
-    if len(collector.Bucket) > 0 {
-        // record bucket tallies to DB
-        err = service.projectAccountingDB.SaveTallies(ctx, finishTime, collector.Bucket)
-        if err != nil {
-            errAtRest = Error.New("ProjectAccounting.SaveTallies failed: %v", err)
-        }
+    var errAtRest errs.Group
 
-        updateLiveAccountingTotals(projectTotalsFromBuckets(collector.Bucket))
+    // record bucket tallies to DB
+    // TODO we should be able replace map with just slice
+    intervalStart := service.nowFn()
+    buffer := map[metabase.BucketLocation]*accounting.BucketTally{}
+    for location, tally := range collector.Bucket {
+        buffer[location] = tally
+        if len(buffer) >= service.config.SaveTalliesBatchSize {
+            // don't stop on error, we would like to store as much as possible
+            errAtRest.Add(service.flushTallies(ctx, intervalStart, buffer))
+            for key := range buffer {
+                delete(buffer, key)
+            }
+        }
     }
+    errAtRest.Add(service.flushTallies(ctx, intervalStart, buffer))
 
-    if len(collector.Bucket) > 0 {
-        var total accounting.BucketTally
-        // TODO for now we don't have access to inline/remote stats per bucket
-        // but that may change in the future. To get back those stats we would
-        // most probably need to add inline/remote information to object in
-        // metabase. We didn't decide yet if that is really needed right now.
-        for _, bucket := range collector.Bucket {
-            monAccounting.IntVal("bucket_objects").Observe(bucket.ObjectCount) //mon:locked
-            monAccounting.IntVal("bucket_segments").Observe(bucket.Segments()) //mon:locked
-            // monAccounting.IntVal("bucket_inline_segments").Observe(bucket.InlineSegments) //mon:locked
-            // monAccounting.IntVal("bucket_remote_segments").Observe(bucket.RemoteSegments) //mon:locked
-            monAccounting.IntVal("bucket_bytes").Observe(bucket.Bytes()) //mon:locked
-            // monAccounting.IntVal("bucket_inline_bytes").Observe(bucket.InlineBytes) //mon:locked
-            // monAccounting.IntVal("bucket_remote_bytes").Observe(bucket.RemoteBytes) //mon:locked
-            total.Combine(bucket)
-        }
-        monAccounting.IntVal("total_objects").Observe(total.ObjectCount) //mon:locked
-        monAccounting.IntVal("total_segments").Observe(total.Segments()) //mon:locked
-        monAccounting.IntVal("total_bytes").Observe(total.Bytes()) //mon:locked
-        monAccounting.IntVal("total_pending_objects").Observe(total.PendingObjectCount)
+    updateLiveAccountingTotals(projectTotalsFromBuckets(collector.Bucket))
+
+    var total accounting.BucketTally
+    // TODO for now we don't have access to inline/remote stats per bucket
+    // but that may change in the future. To get back those stats we would
+    // most probably need to add inline/remote information to object in
+    // metabase. We didn't decide yet if that is really needed right now.
+    for _, bucket := range collector.Bucket {
+        monAccounting.IntVal("bucket_objects").Observe(bucket.ObjectCount) //mon:locked
+        monAccounting.IntVal("bucket_segments").Observe(bucket.Segments()) //mon:locked
+        // monAccounting.IntVal("bucket_inline_segments").Observe(bucket.InlineSegments) //mon:locked
+        // monAccounting.IntVal("bucket_remote_segments").Observe(bucket.RemoteSegments) //mon:locked
+        monAccounting.IntVal("bucket_bytes").Observe(bucket.Bytes()) //mon:locked
+        // monAccounting.IntVal("bucket_inline_bytes").Observe(bucket.InlineBytes) //mon:locked
+        // monAccounting.IntVal("bucket_remote_bytes").Observe(bucket.RemoteBytes) //mon:locked
+        total.Combine(bucket)
     }
+    monAccounting.IntVal("total_objects").Observe(total.ObjectCount) //mon:locked
+    monAccounting.IntVal("total_segments").Observe(total.Segments()) //mon:locked
+    monAccounting.IntVal("total_bytes").Observe(total.Bytes()) //mon:locked
+    monAccounting.IntVal("total_pending_objects").Observe(total.PendingObjectCount)
 
-    // return errors if something went wrong.
-    return errAtRest
+    return errAtRest.Err()
 }
+
+func (service *Service) flushTallies(ctx context.Context, intervalStart time.Time, tallies map[metabase.BucketLocation]*accounting.BucketTally) error {
+    err := service.projectAccountingDB.SaveTallies(ctx, intervalStart, tallies)
+    if err != nil {
+        return Error.New("ProjectAccounting.SaveTallies failed: %v", err)
+    }
+    return nil
+}
 
 // BucketTallyCollector collects and adds up tallies for buckets.
// BucketTallyCollector collects and adds up tallies for buckets. // BucketTallyCollector collects and adds up tallies for buckets.

View File

@@ -346,7 +346,7 @@ func TestTallyOnCopiedObject(t *testing.T) {
         },
     }
 
-    findTally := func(bucket string, tallies []accounting.BucketTally) accounting.BucketTally {
+    findTally := func(t *testing.T, bucket string, tallies []accounting.BucketTally) accounting.BucketTally {
         for _, v := range tallies {
             if v.BucketName == bucket {
                 return v
@@ -378,7 +378,7 @@ func TestTallyOnCopiedObject(t *testing.T) {
         tallies, err := planet.Satellites[0].DB.ProjectAccounting().GetTallies(ctx)
         require.NoError(t, err)
-        lastTally := findTally(tc.name, tallies)
+        lastTally := findTally(t, tc.name, tallies)
         require.Equal(t, tc.name, lastTally.BucketName)
         require.Equal(t, tc.expectedTallyAfterCopy.ObjectCount, lastTally.ObjectCount)
         require.Equal(t, tc.expectedTallyAfterCopy.TotalBytes, lastTally.TotalBytes)
@@ -392,7 +392,7 @@ func TestTallyOnCopiedObject(t *testing.T) {
         tallies, err = planet.Satellites[0].DB.ProjectAccounting().GetTallies(ctx)
         require.NoError(t, err)
-        lastTally = findTally(tc.name, tallies)
+        lastTally = findTally(t, tc.name, tallies)
         require.Equal(t, tc.name, lastTally.BucketName)
         require.Equal(t, tc.expectedTallyAfterDelete.ObjectCount, lastTally.ObjectCount)
         require.Equal(t, tc.expectedTallyAfterDelete.TotalBytes, lastTally.TotalBytes)
@@ -402,7 +402,7 @@ func TestTallyOnCopiedObject(t *testing.T) {
     })
 }
 
-func TestTallyBatchSize(t *testing.T) {
+func TestBucketTallyCollectorListLimit(t *testing.T) {
     testplanet.Run(t, testplanet.Config{
         SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
         Reconfigure: testplanet.Reconfigure{
@@ -454,3 +454,58 @@ func TestTallyBatchSize(t *testing.T) {
         }
     })
 }
+
+func TestTallySaveTalliesBatchSize(t *testing.T) {
+    testplanet.Run(t, testplanet.Config{
+        SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
+        Reconfigure: testplanet.Reconfigure{
+            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+                config.Metainfo.ProjectLimits.MaxBuckets = 23
+            },
+        },
+    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+        planet.Satellites[0].Accounting.Tally.Loop.Pause()
+
+        projectID := planet.Uplinks[0].Projects[0].ID
+
+        numberOfBuckets := 23
+        expectedBucketLocations := []metabase.BucketLocation{}
+        for i := 0; i < numberOfBuckets; i++ {
+            data := testrand.Bytes(1*memory.KiB + memory.Size(i))
+            err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "bucket"+strconv.Itoa(i), "test", data)
+            require.NoError(t, err)
+
+            expectedBucketLocations = append(expectedBucketLocations, metabase.BucketLocation{
+                ProjectID:  projectID,
+                BucketName: "bucket" + strconv.Itoa(i),
+            })
+        }
+
+        satellite := planet.Satellites[0]
+        for _, batchSize := range []int{1, 2, 3, numberOfBuckets, 29, planet.Satellites[0].Config.Tally.SaveTalliesBatchSize} {
+            config := satellite.Config.Tally
+            config.SaveTalliesBatchSize = batchSize
+
+            tally := tally.New(zaptest.NewLogger(t), satellite.DB.StoragenodeAccounting(), satellite.DB.ProjectAccounting(),
+                satellite.LiveAccounting.Cache, satellite.Metabase.DB, satellite.DB.Buckets(), config)
+
+            // collect and store tallies in DB
+            err := tally.Tally(ctx)
+            require.NoError(t, err)
+
+            // verify we have in DB expected list of tallies
+            tallies, err := satellite.DB.ProjectAccounting().GetTallies(ctx)
+            require.NoError(t, err)
+
+            _, err = satellite.DB.Testing().RawDB().ExecContext(ctx, "DELETE FROM bucket_storage_tallies")
+            require.NoError(t, err)
+
+            bucketLocations := []metabase.BucketLocation{}
+            for _, tally := range tallies {
+                bucketLocations = append(bucketLocations, tally.BucketLocation)
+            }
+
+            require.ElementsMatch(t, expectedBucketLocations, bucketLocations)
+        }
+    })
+}

View File

@@ -422,6 +422,7 @@ A successful response body:
     },
     "owner": {
         "id": "12345678-1234-1234-1234-123456789abc",
+        "fullName": "test user",
         "email": "bob@example.test",
         "paidTier": true
     }
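A hypothetical client call against this endpoint (host and authorization
value are placeholders; the admin API expects its configured auth token):

    // Fetch API key details; the response now includes the owner's fullName.
    req, err := http.NewRequestWithContext(ctx, http.MethodGet,
        "http://localhost:8080/api/apikeys/"+serializedAPIKey, nil)
    if err != nil {
        return err
    }
    req.Header.Set("Authorization", adminAuthToken) // placeholder
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    defer func() { _ = resp.Body.Close() }()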

View File

@@ -164,6 +164,7 @@ func (server *Server) getAPIKey(w http.ResponseWriter, r *http.Request) {
     }
     type ownerData struct {
         ID       uuid.UUID `json:"id"`
+        FullName string    `json:"fullName"`
         Email    string    `json:"email"`
         PaidTier bool      `json:"paidTier"`
     }
@@ -183,8 +184,10 @@ func (server *Server) getAPIKey(w http.ResponseWriter, r *http.Request) {
             Name: project.Name,
         },
         Owner: ownerData{
-            ID:    user.ID,
-            Email: user.Email,
+            ID:       user.ID,
+            FullName: user.FullName,
+            Email:    user.Email,
+            PaidTier: user.PaidTier,
         },
     })
     if err != nil {

View File

@@ -264,9 +264,36 @@ func TestAPIKeyManagementGet(t *testing.T) {
         },
     },
 }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+    user, err := planet.Satellites[0].AddUser(ctx, console.CreateUser{
+        FullName: "testuser123",
+        Email:    "test@email.com",
+    }, 1)
+    require.NoError(t, err)
+
+    project, err := planet.Satellites[0].AddProject(ctx, user.ID, "testproject")
+    require.NoError(t, err)
+
+    secret, err := macaroon.NewSecret()
+    require.NoError(t, err)
+
+    apiKey, err := macaroon.NewAPIKey(secret)
+    require.NoError(t, err)
+
+    apiKeyInfo, err := planet.Satellites[0].DB.Console().APIKeys().Create(ctx, apiKey.Head(), console.APIKeyInfo{
+        Name:      "testkey",
+        ProjectID: project.ID,
+        Secret:    secret,
+    })
+    require.NoError(t, err)
+
+    userCtx, err := planet.Satellites[0].UserContext(ctx, user.ID)
+    require.NoError(t, err)
+
+    _, err = planet.Satellites[0].API.Console.Service.Payments().AddCreditCard(userCtx, "test")
+    require.NoError(t, err)
+
     address := planet.Satellites[0].Admin.Admin.Listener.Addr()
-    apikey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
-    link := fmt.Sprintf("http://"+address.String()+"/api/apikeys/%s", apikey.Serialize())
+    link := fmt.Sprintf("http://"+address.String()+"/api/apikeys/%s", apiKey.Serialize())
 
     req, err := http.NewRequestWithContext(ctx, http.MethodGet, link, nil)
     require.NoError(t, err)
@@ -288,6 +315,7 @@ func TestAPIKeyManagementGet(t *testing.T) {
     }
     type ownerData struct {
         ID       uuid.UUID `json:"id"`
+        FullName string    `json:"fullName"`
         Email    string    `json:"email"`
         PaidTier bool      `json:"paidTier"`
     }
@@ -300,29 +328,21 @@ func TestAPIKeyManagementGet(t *testing.T) {
     var apiResp response
     require.NoError(t, json.NewDecoder(resp.Body).Decode(&apiResp))
 
-    apiKeyInfo, err := planet.Satellites[0].DB.Console().APIKeys().GetByHead(ctx, apikey.Head())
-    require.NoError(t, err)
-    project, err := planet.Satellites[0].DB.Console().Projects().Get(ctx, apiKeyInfo.ProjectID)
-    require.NoError(t, err)
-    owner, err := planet.Satellites[0].DB.Console().Users().Get(ctx, project.OwnerID)
-    require.NoError(t, err)
-
     require.Equal(t, response{
         APIKey: apiKeyData{
             ID:        apiKeyInfo.ID,
-            Name:      apiKeyInfo.Name,
+            Name:      "testkey",
             CreatedAt: apiKeyInfo.CreatedAt.UTC(),
         },
         Project: projectData{
             ID:   project.ID,
-            Name: project.Name,
+            Name: "testproject",
         },
         Owner: ownerData{
-            ID:       owner.ID,
-            Email:    owner.Email,
-            PaidTier: owner.PaidTier,
+            ID:       user.ID,
+            FullName: "testuser123",
+            Email:    "test@email.com",
+            PaidTier: true,
         },
     }, apiResp)
 })

View File

@@ -88,6 +88,10 @@ const (
     eventExpiredCreditRemoved       = "Expired Credit Removed"
     eventProjectInvitationAccepted  = "Project Invitation Accepted"
     eventProjectInvitationDeclined  = "Project Invitation Declined"
+    eventGalleryViewClicked         = "Gallery View Clicked"
+    eventResendInviteClicked        = "Resend Invite Clicked"
+    eventCopyInviteLinkClicked      = "Copy Invite Link Clicked"
+    eventRemoveProjectMemberCLicked = "Remove Member Clicked"
 )
 
 var (
@@ -156,7 +160,8 @@ func NewService(log *zap.Logger, config Config, satelliteName string) *Service {
         eventSeePaymentsClicked, eventEditPaymentMethodClicked, eventUsageDetailedInfoClicked, eventAddNewPaymentMethodClicked,
         eventApplyNewCouponClicked, eventCreditCardRemoved, eventCouponCodeApplied, eventInvoiceDownloaded, eventCreditCardAddedFromBilling,
         eventStorjTokenAddedFromBilling, eventAddFundsClicked, eventProjectMembersInviteSent, eventError, eventProjectNameUpdated, eventProjectDescriptionUpdated,
-        eventProjectStorageLimitUpdated, eventProjectBandwidthLimitUpdated, eventProjectInvitationAccepted, eventProjectInvitationDeclined} {
+        eventProjectStorageLimitUpdated, eventProjectBandwidthLimitUpdated, eventProjectInvitationAccepted, eventProjectInvitationDeclined,
+        eventGalleryViewClicked, eventResendInviteClicked, eventRemoveProjectMemberCLicked, eventCopyInviteLinkClicked} {
         service.clientEvents[name] = true
     }
@@ -463,7 +468,7 @@ func (service *Service) TrackAccountVerified(userID uuid.UUID, email string) {
 // TrackEvent sends an arbitrary event associated with user ID to Segment.
 // It is used for tracking occurrences of client-side events.
-func (service *Service) TrackEvent(eventName string, userID uuid.UUID, email string) {
+func (service *Service) TrackEvent(eventName string, userID uuid.UUID, email string, customProps map[string]string) {
     if !service.config.Enabled {
         return
     }
@@ -477,6 +482,10 @@ func (service *Service) TrackEvent(eventName string, userID uuid.UUID, email str
     props := segment.NewProperties()
     props.Set("email", email)
 
+    for key, value := range customProps {
+        props.Set(key, value)
+    }
+
     service.enqueueMessage(segment.Track{
         UserId: userID.String(),
         Event:  service.satelliteName + " " + eventName,
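A hypothetical call site for the new signature (the property name is
invented for illustration; existing callers can pass nil for customProps):

    service.TrackEvent(eventGalleryViewClicked, userID, email,
        map[string]string{"source": "object browser"})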

View File

@@ -48,7 +48,7 @@ func TestDisqualificationTooManyFailedAudits(t *testing.T) {
     satellitePeer = planet.Satellites[0]
     nodeID        = planet.StorageNodes[0].ID()
     report        = audit.Report{
-        Fails: storj.NodeIDList{nodeID},
+        Fails: metabase.Pieces{{StorageNode: nodeID}},
     }
 )
 satellitePeer.Audit.Worker.Loop.Pause()

View File

@@ -11,6 +11,7 @@ import (
     "go.uber.org/zap"
 
     "storj.io/common/storj"
+    "storj.io/storj/satellite/metabase"
     "storj.io/storj/satellite/overlay"
     "storj.io/storj/satellite/reputation"
 )
@@ -22,6 +23,7 @@ type reporter struct {
     log              *zap.Logger
     reputations      *reputation.Service
     overlay          *overlay.Service
+    metabase         *metabase.DB
     containment      Containment
     maxRetries       int
     maxReverifyCount int32
@@ -40,8 +42,10 @@ type Reporter interface {
 // succeeded, failed, were offline, have pending audits, or failed for unknown
 // reasons and their current reputation status.
 type Report struct {
+    Segment *metabase.Segment
+
     Successes     storj.NodeIDList
-    Fails         storj.NodeIDList
+    Fails         metabase.Pieces
     Offlines      storj.NodeIDList
     PendingAudits []*ReverificationJob
     Unknown       storj.NodeIDList
@@ -49,11 +53,12 @@ type Report struct {
 }
 
 // NewReporter instantiates a reporter.
-func NewReporter(log *zap.Logger, reputations *reputation.Service, overlay *overlay.Service, containment Containment, maxRetries int, maxReverifyCount int32) Reporter {
+func NewReporter(log *zap.Logger, reputations *reputation.Service, overlay *overlay.Service, metabase *metabase.DB, containment Containment, maxRetries int, maxReverifyCount int32) Reporter {
     return &reporter{
         log:              log,
         reputations:      reputations,
         overlay:          overlay,
+        metabase:         metabase,
         containment:      containment,
         maxRetries:       maxRetries,
         maxReverifyCount: maxReverifyCount,
@@ -72,7 +77,11 @@ func (reporter *reporter) RecordAudits(ctx context.Context, req Report) {
     offlines := req.Offlines
     pendingAudits := req.PendingAudits
 
-    reporter.log.Debug("Reporting audits",
+    logger := reporter.log
+    if req.Segment != nil {
+        logger = logger.With(zap.Stringer("stream ID", req.Segment.StreamID), zap.Uint64("position", req.Segment.Position.Encode()))
+    }
+    logger.Debug("Reporting audits",
         zap.Int("successes", len(successes)),
         zap.Int("failures", len(fails)),
         zap.Int("unknowns", len(unknowns)),
@@ -102,8 +111,8 @@ func (reporter *reporter) RecordAudits(ctx context.Context, req Report) {
         successes, err = reporter.recordAuditStatus(ctx, successes, nodesReputation, reputation.AuditSuccess)
         reportFailures(tries, "successful", err, successes, nil)
-        fails, err = reporter.recordAuditStatus(ctx, fails, nodesReputation, reputation.AuditFailure)
-        reportFailures(tries, "failed", err, fails, nil)
+        fails, err = reporter.recordFailedAudits(ctx, req.Segment, fails, nodesReputation)
+        reportFailures(tries, "failed", err, nil, nil)
         unknowns, err = reporter.recordAuditStatus(ctx, unknowns, nodesReputation, reputation.AuditUnknown)
         reportFailures(tries, "unknown", err, unknowns, nil)
         offlines, err = reporter.recordAuditStatus(ctx, offlines, nodesReputation, reputation.AuditOffline)
@@ -124,7 +133,7 @@ func (reporter *reporter) recordAuditStatus(ctx context.Context, nodeIDs storj.N
         err = reporter.reputations.ApplyAudit(ctx, nodeID, nodesReputation[nodeID], auditOutcome)
         if err != nil {
             failed = append(failed, nodeID)
-            errors.Add(Error.New("failed to record audit status %s in overlay for node %s: %w", auditOutcome.String(), nodeID.String(), err))
+            errors.Add(Error.New("failed to record audit status %s in overlay for node %s: %w", auditOutcome.String(), nodeID, err))
         }
     }
     return failed, errors.Err()
@@ -182,6 +191,50 @@ func (reporter *reporter) recordPendingAudits(ctx context.Context, pendingAudits
     return nil, nil
 }
 
+const maxPiecesToRemoveAtOnce = 6
+
+// recordFailedAudits performs reporting and response to hard-failed audits. Failed audits generally
+// mean the piece is gone. Remove the pieces from the relevant pointers so that the segment can be
+// repaired if appropriate, and so that we don't continually dock reputation for the same missing
+// piece(s).
+func (reporter *reporter) recordFailedAudits(ctx context.Context, segment *metabase.Segment, failures []metabase.Piece, nodesReputation map[storj.NodeID]overlay.ReputationStatus) (failedToRecord []metabase.Piece, err error) {
+    defer mon.Task()(&ctx)(&err)
+
+    piecesToRemove := make(metabase.Pieces, 0, len(failures))
+    var errors errs.Group
+    for _, f := range failures {
+        err = reporter.reputations.ApplyAudit(ctx, f.StorageNode, nodesReputation[f.StorageNode], reputation.AuditFailure)
+        if err != nil {
+            failedToRecord = append(failedToRecord, f)
+            errors.Add(Error.New("failed to record audit failure in overlay for node %s: %w", f.StorageNode, err))
+        }
+        piecesToRemove = append(piecesToRemove, f)
+    }
+    if segment != nil {
+        // Safety check. If, say, 30 pieces all started having audit failures at the same time, the
+        // problem is more likely with the audit system itself and not with the pieces.
+        if len(piecesToRemove) > maxPiecesToRemoveAtOnce {
+            reporter.log.Error("cowardly refusing to remove large number of pieces for failed audit",
+                zap.Int("piecesToRemove", len(piecesToRemove)),
+                zap.Int("threshold", maxPiecesToRemoveAtOnce))
+            return failedToRecord, errors.Err()
+        }
+        pieces, err := segment.Pieces.Remove(piecesToRemove)
+        if err != nil {
+            errors.Add(err)
+            return failedToRecord, errors.Err()
+        }
+        errors.Add(reporter.metabase.UpdateSegmentPieces(ctx, metabase.UpdateSegmentPieces{
+            StreamID:      segment.StreamID,
+            Position:      segment.Position,
+            OldPieces:     segment.Pieces,
+            NewRedundancy: segment.Redundancy,
+            NewPieces:     pieces,
+        }))
+    }
+    return failedToRecord, errors.Err()
+}
+
 func (reporter *reporter) ReportReverificationNeeded(ctx context.Context, piece *PieceLocator) (err error) {
     defer mon.Task()(&ctx)(&err)
@@ -214,7 +267,26 @@ func (reporter *reporter) RecordReverificationResult(ctx context.Context, pendin
     report.Successes = append(report.Successes, pendingJob.Locator.NodeID)
     keepInQueue = false
 case OutcomeFailure:
-    report.Fails = append(report.Fails, pendingJob.Locator.NodeID)
+    // We have to look up the segment metainfo and pass it on to RecordAudits so that
+    // the segment can be modified (removing this piece). We don't persist this
+    // information through the reverification queue.
+    segmentInfo, err := reporter.metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
+        StreamID: pendingJob.Locator.StreamID,
+        Position: pendingJob.Locator.Position,
+    })
+    if err != nil {
+        reporter.log.Error("could not look up segment after audit reverification",
+            zap.Stringer("stream ID", pendingJob.Locator.StreamID),
+            zap.Uint64("position", pendingJob.Locator.Position.Encode()),
+            zap.Error(err),
+        )
+    } else {
+        report.Segment = &segmentInfo
+    }
+    report.Fails = append(report.Fails, metabase.Piece{
+        StorageNode: pendingJob.Locator.NodeID,
+        Number:      uint16(pendingJob.Locator.PieceNum),
+    })
    keepInQueue = false
case OutcomeTimedOut: case OutcomeTimedOut:
// This will get re-added to the reverification queue, but that is idempotent // This will get re-added to the reverification queue, but that is idempotent
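Note: the removal flow above refuses to strip more than maxPiecesToRemoveAtOnce pieces in one pass, on the theory that a burst of simultaneous audit failures points at the audit system rather than at the pieces. Below is a minimal, self-contained sketch of that safety check, using a local Piece type and illustrative names in place of the satellite's metabase package:

```go
package main

import "fmt"

// Piece stands in for metabase.Piece in this sketch.
type Piece struct {
	Number      uint16
	StorageNode string
}

const maxPiecesToRemoveAtOnce = 6

// removeFailed filters failed pieces out of a segment's piece list, refusing to act
// when an implausibly large number failed at once (more likely an audit-system bug).
func removeFailed(pieces, failed []Piece) ([]Piece, bool) {
	if len(failed) > maxPiecesToRemoveAtOnce {
		return pieces, false // cowardly refuse, keep the segment untouched
	}
	bad := make(map[Piece]bool, len(failed))
	for _, f := range failed {
		bad[f] = true
	}
	kept := make([]Piece, 0, len(pieces))
	for _, p := range pieces {
		if !bad[p] {
			kept = append(kept, p)
		}
	}
	return kept, true
}

func main() {
	pieces := []Piece{{0, "node-a"}, {1, "node-b"}, {2, "node-c"}}
	kept, ok := removeFailed(pieces, []Piece{{1, "node-b"}})
	fmt.Println(ok, kept) // true [{0 node-a} {2 node-c}]
}
```

Keeping the cap as a named constant makes the refusal threshold easy to tune without touching the filtering logic.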

View File

@@ -11,11 +11,14 @@ import (
     "github.com/stretchr/testify/require"
     "go.uber.org/zap"

+    "storj.io/common/memory"
     "storj.io/common/storj"
     "storj.io/common/testcontext"
+    "storj.io/common/testrand"
     "storj.io/storj/private/testplanet"
     "storj.io/storj/satellite"
     "storj.io/storj/satellite/audit"
+    "storj.io/storj/satellite/metabase"
     "storj.io/storj/satellite/overlay"
 )
@@ -98,7 +101,7 @@ func TestRecordAuditsCorrectOutcome(t *testing.T) {
         report := audit.Report{
             Successes: []storj.NodeID{goodNode},
-            Fails:     []storj.NodeID{dqNode},
+            Fails:     metabase.Pieces{{StorageNode: dqNode}},
             Unknown:   []storj.NodeID{suspendedNode},
             PendingAudits: []*audit.ReverificationJob{
                 {
@@ -213,7 +216,7 @@ func TestGracefullyExitedNotUpdated(t *testing.T) {
         }
         report = audit.Report{
             Successes:     storj.NodeIDList{successNode.ID()},
-            Fails:         storj.NodeIDList{failedNode.ID()},
+            Fails:         metabase.Pieces{{StorageNode: failedNode.ID()}},
             Offlines:      storj.NodeIDList{offlineNode.ID()},
             PendingAudits: []*audit.ReverificationJob{&pending},
             Unknown:       storj.NodeIDList{unknownNode.ID()},
@@ -261,3 +264,52 @@ func TestReportOfflineAudits(t *testing.T) {
         require.EqualValues(t, 0, info.UnknownAuditReputationBeta)
     })
 }
+
+func TestReportingAuditFailureResultsInRemovalOfPiece(t *testing.T) {
+    testplanet.Run(t, testplanet.Config{
+        SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
+        Reconfigure: testplanet.Reconfigure{
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
+                    // disable reputation write cache so changes are immediate
+                    config.Reputation.FlushInterval = 0
+                },
+                testplanet.ReconfigureRS(4, 5, 6, 6),
+            ),
+        },
+    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+        satellite := planet.Satellites[0]
+        ul := planet.Uplinks[0]
+
+        testData := testrand.Bytes(1 * memory.MiB)
+        err := ul.Upload(ctx, satellite, "bucket-for-test", "path/of/testness", testData)
+        require.NoError(t, err)
+
+        segment, _ := getRemoteSegment(ctx, t, satellite, ul.Projects[0].ID, "bucket-for-test")
+
+        report := audit.Report{
+            Segment: &segment,
+            Fails: metabase.Pieces{
+                metabase.Piece{
+                    Number:      segment.Pieces[0].Number,
+                    StorageNode: segment.Pieces[0].StorageNode,
+                },
+            },
+        }
+
+        satellite.Audit.Reporter.RecordAudits(ctx, report)
+
+        // piece marked as failed is no longer in the segment
+        afterSegment, _ := getRemoteSegment(ctx, t, satellite, ul.Projects[0].ID, "bucket-for-test")
+        require.Len(t, afterSegment.Pieces, len(segment.Pieces)-1)
+        for i, p := range afterSegment.Pieces {
+            assert.NotEqual(t, segment.Pieces[0].Number, p.Number, i)
+            assert.NotEqual(t, segment.Pieces[0].StorageNode, p.StorageNode, i)
+        }
+
+        // segment is still retrievable
+        gotData, err := ul.Download(ctx, satellite, "bucket-for-test", "path/of/testness")
+        require.NoError(t, err)
+        require.Equal(t, testData, gotData)
+    })
+}

View File

@@ -130,7 +130,7 @@ func (verifier *Verifier) Verify(ctx context.Context, segment Segment, skip map[
     }

     var offlineNodes storj.NodeIDList
-    var failedNodes storj.NodeIDList
+    var failedNodes metabase.Pieces
     var unknownNodes storj.NodeIDList
     containedNodes := make(map[int]storj.NodeID)
     sharesToAudit := make(map[int]Share)
@@ -206,7 +206,10 @@ func (verifier *Verifier) Verify(ctx context.Context, segment Segment, skip map[
         case RequestFailure:
             if errs2.IsRPC(share.Error, rpcstatus.NotFound) {
                 // missing share
-                failedNodes = append(failedNodes, share.NodeID)
+                failedNodes = append(failedNodes, metabase.Piece{
+                    Number:      uint16(share.PieceNum),
+                    StorageNode: share.NodeID,
+                })
                 errLogger.Info("Verify: piece not found (audit failed)")
                 continue
             }
@@ -258,6 +261,7 @@ func (verifier *Verifier) Verify(ctx context.Context, segment Segment, skip map[
         mon.Counter("could_not_verify_audit_shares").Inc(1) //mon:locked
         verifier.log.Error("could not verify shares", zap.String("Segment", segmentInfoString(segment)), zap.Error(err))
         return Report{
+            Segment:  &segmentInfo,
             Fails:    failedNodes,
             Offlines: offlineNodes,
             Unknown:  unknownNodes,
@@ -268,7 +272,10 @@ func (verifier *Verifier) Verify(ctx context.Context, segment Segment, skip map[
             verifier.log.Info("Verify: share data altered (audit failed)",
                 zap.Stringer("Node ID", shares[pieceNum].NodeID),
                 zap.String("Segment", segmentInfoString(segment)))
-            failedNodes = append(failedNodes, shares[pieceNum].NodeID)
+            failedNodes = append(failedNodes, metabase.Piece{
+                StorageNode: shares[pieceNum].NodeID,
+                Number:      uint16(pieceNum),
+            })
         }

     successNodes := getSuccessNodes(ctx, shares, failedNodes, offlineNodes, unknownNodes, containedNodes)
@@ -276,6 +283,7 @@ func (verifier *Verifier) Verify(ctx context.Context, segment Segment, skip map[
     pendingAudits, err := createPendingAudits(ctx, containedNodes, segment)
     if err != nil {
         return Report{
+            Segment:   &segmentInfo,
             Successes: successNodes,
             Fails:     failedNodes,
             Offlines:  offlineNodes,
@@ -284,6 +292,7 @@ func (verifier *Verifier) Verify(ctx context.Context, segment Segment, skip map[
     }

     return Report{
+        Segment:   &segmentInfo,
         Successes: successNodes,
         Fails:     failedNodes,
         Offlines:  offlineNodes,
@@ -542,11 +551,11 @@ func getOfflineNodes(segment metabase.Segment, limits []*pb.AddressedOrderLimit,
 }

 // getSuccessNodes uses the failed nodes, offline nodes and contained nodes arrays to determine which nodes passed the audit.
-func getSuccessNodes(ctx context.Context, shares map[int]Share, failedNodes, offlineNodes, unknownNodes storj.NodeIDList, containedNodes map[int]storj.NodeID) (successNodes storj.NodeIDList) {
+func getSuccessNodes(ctx context.Context, shares map[int]Share, failedNodes metabase.Pieces, offlineNodes, unknownNodes storj.NodeIDList, containedNodes map[int]storj.NodeID) (successNodes storj.NodeIDList) {
     defer mon.Task()(&ctx)(nil)
     fails := make(map[storj.NodeID]bool)
     for _, fail := range failedNodes {
-        fails[fail] = true
+        fails[fail.StorageNode] = true
     }
     for _, offline := range offlineNodes {
         fails[offline] = true

View File

@@ -968,7 +968,15 @@ func TestVerifierModifiedSegmentFailsOnce(t *testing.T) {
         assert.Len(t, report.Successes, origNumPieces-1)
         require.Len(t, report.Fails, 1)
-        assert.Equal(t, report.Fails[0], piece.StorageNode)
+        assert.Equal(t, metabase.Piece{
+            StorageNode: piece.StorageNode,
+            Number:      piece.Number,
+        }, report.Fails[0])
+        require.NotNil(t, report.Segment)
+        assert.Equal(t, segment.StreamID, report.Segment.StreamID)
+        assert.Equal(t, segment.Position, report.Segment.Position)
+        assert.Equal(t, segment.Redundancy, report.Segment.Redundancy)
+        assert.Equal(t, segment.Pieces, report.Segment.Pieces)
         assert.Len(t, report.Offlines, 0)
         require.Len(t, report.PendingAudits, 0)
     })
@@ -1196,7 +1204,15 @@ func TestAuditRepairedSegmentInExcludedCountries(t *testing.T) {
         }, nil)
         require.NoError(t, err)
         require.Len(t, report.Fails, 1)
-        require.Equal(t, report.Fails[0], lastPiece.StorageNode)
+        require.Equal(t, metabase.Piece{
+            StorageNode: lastPiece.StorageNode,
+            Number:      lastPiece.Number,
+        }, report.Fails[0])
+        require.NotNil(t, report.Segment)
+        assert.Equal(t, segmentAfterRepair.StreamID, report.Segment.StreamID)
+        assert.Equal(t, segmentAfterRepair.Position, report.Segment.Position)
+        assert.Equal(t, segmentAfterRepair.Redundancy, report.Segment.Redundancy)
+        assert.Equal(t, segmentAfterRepair.Pieces, report.Segment.Pieces)
     })
 }

View File

@@ -219,6 +219,7 @@ func NewAuditor(log *zap.Logger, full *identity.FullIdentity,
         log.Named("reporter"),
         peer.Reputation,
         peer.Overlay,
+        metabaseDB,
         containmentDB,
         config.Audit.MaxRetriesStatDB,
         int32(config.Audit.MaxReverifyCount))

View File

@@ -23,7 +23,9 @@ type WebappSessions interface {
     // DeleteAllByUserID deletes all webapp sessions by user ID.
     DeleteAllByUserID(ctx context.Context, userID uuid.UUID) (int64, error)
     // UpdateExpiration updates the expiration time of the session.
-    UpdateExpiration(ctx context.Context, sessionID uuid.UUID, expiresAt time.Time) (err error)
+    UpdateExpiration(ctx context.Context, sessionID uuid.UUID, expiresAt time.Time) error
+    // DeleteExpired deletes all sessions that have expired before the provided timestamp.
+    DeleteExpired(ctx context.Context, now time.Time, asOfSystemTimeInterval time.Duration, pageSize int) error
 }

 // WebappSession represents a session on the satellite web app.
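Note: DeleteExpired takes a cutoff timestamp, an AS OF SYSTEM TIME interval, and a page size, which suggests batched deletes. As a rough sketch of how such paging can be written with plain database/sql; the satellite's actual query is dbx-generated, and the table and column names here are assumptions:

```go
package sessioncleanup

import (
	"context"
	"database/sql"
	"time"
)

// deleteExpired removes expired sessions in pages of pageSize so a single
// statement never touches an unbounded number of rows.
func deleteExpired(ctx context.Context, db *sql.DB, now time.Time, pageSize int) error {
	for {
		res, err := db.ExecContext(ctx,
			`DELETE FROM webapp_sessions WHERE id IN (
				SELECT id FROM webapp_sessions WHERE expires_at < $1 LIMIT $2
			)`, now, pageSize)
		if err != nil {
			return err
		}
		affected, err := res.RowsAffected()
		if err != nil {
			return err
		}
		if affected < int64(pageSize) {
			return nil // final (partial) page deleted
		}
	}
}
```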

View File

@@ -250,8 +250,10 @@ func TestSetPermission_Uplink(t *testing.T) {
         require.NoError(t, err)
         err = upload.Commit()
         require.True(t, errors.Is(err, uplink.ErrPermissionDenied))

-        _, err = project.DownloadObject(ctx, testbucket1, testfilename1, nil)
+        download, err := project.DownloadObject(ctx, testbucket1, testfilename1, nil)
         require.NoError(t, err)
+        require.NoError(t, download.Close())

         // Only one bucket should be visible
         buckets := getAllBuckets(ctx, project)
@@ -357,8 +359,9 @@ func TestSetPermission_Uplink(t *testing.T) {
         objects := getAllObjects(ctx, project, testbucket3)
         require.Equal(t, 1, len(objects))

-        _, err = project.DownloadObject(ctx, testbucket3, testfilename2, nil)
+        download, err := project.DownloadObject(ctx, testbucket3, testfilename2, nil)
         require.NoError(t, err)
+        require.NoError(t, download.Close())

         _, err = project.DeleteBucketWithObjects(ctx, testbucket3)
         require.NoError(t, err)
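Note: the fix above matters because project.DownloadObject returns a Download that holds an open stream, and the old test leaked it. A short sketch of the corrected pattern against the public storj.io/uplink API; the bucket and key are placeholders:

```go
package objectread

import (
	"context"
	"io"

	"storj.io/uplink"
)

// readObject downloads a whole object and always closes the Download,
// even when reading fails partway through.
func readObject(ctx context.Context, project *uplink.Project, bucket, key string) ([]byte, error) {
	download, err := project.DownloadObject(ctx, bucket, key, nil)
	if err != nil {
		return nil, err
	}
	defer func() { _ = download.Close() }()
	return io.ReadAll(download)
}
```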

View File

@@ -36,9 +36,10 @@ func NewAnalytics(log *zap.Logger, service *console.Service, a *analytics.Servic
 }

 type eventTriggeredBody struct {
     EventName        string            `json:"eventName"`
     Link             string            `json:"link"`
     ErrorEventSource string            `json:"errorEventSource"`
+    Props            map[string]string `json:"props"`
 }

 type pageVisitBody struct {
@@ -72,7 +73,7 @@ func (a *Analytics) EventTriggered(w http.ResponseWriter, r *http.Request) {
     } else if et.Link != "" {
         a.analytics.TrackLinkEvent(et.EventName, user.ID, user.Email, et.Link)
     } else {
-        a.analytics.TrackEvent(et.EventName, user.ID, user.Email)
+        a.analytics.TrackEvent(et.EventName, user.ID, user.Email, et.Props)
     }
     w.WriteHeader(http.StatusOK)
 }
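Note: with the new Props field, a caller can attach arbitrary string properties to a tracked event. A minimal sketch of the JSON body the handler now accepts; the event name and property values here are invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type eventTriggeredBody struct {
	EventName        string            `json:"eventName"`
	Link             string            `json:"link"`
	ErrorEventSource string            `json:"errorEventSource"`
	Props            map[string]string `json:"props"`
}

func main() {
	body := eventTriggeredBody{
		EventName: "ui_click", // hypothetical event name
		Props:     map[string]string{"source": "project dashboard"},
	}
	out, _ := json.Marshal(body)
	fmt.Println(string(out))
	// {"eventName":"ui_click","link":"","errorEventSource":"","props":{"source":"project dashboard"}}
}
```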

View File

@@ -31,12 +31,6 @@ var (
     // errNotImplemented is the error value used by handlers of this package to
     // response with status Not Implemented.
     errNotImplemented = errs.New("not implemented")
-
-    // supportedCORSOrigins allows us to support visitors who sign up from the website.
-    supportedCORSOrigins = map[string]bool{
-        "https://storj.io":     true,
-        "https://www.storj.io": true,
-    }
 )

 // Auth is an api controller that exposes all auth functionality.
@@ -210,19 +204,6 @@ func (a *Auth) Register(w http.ResponseWriter, r *http.Request) {
     var err error
     defer mon.Task()(&ctx)(&err)

-    origin := r.Header.Get("Origin")
-    if supportedCORSOrigins[origin] {
-        // we should send the exact origin back, rather than a wildcard
-        w.Header().Set("Access-Control-Allow-Origin", origin)
-        w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS")
-        w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
-    }
-
-    // OPTIONS is a pre-flight check for cross-origin (CORS) permissions
-    if r.Method == "OPTIONS" {
-        return
-    }
-
     var registerData struct {
         FullName  string `json:"fullName"`
         ShortName string `json:"shortName"`
@@ -352,7 +333,7 @@ func (a *Auth) Register(w http.ResponseWriter, r *http.Request) {
         FullName:     user.FullName,
         Email:        user.Email,
         Type:         analytics.Personal,
-        OriginHeader: origin,
+        OriginHeader: r.Header.Get("Origin"),
         Referrer:     referrer,
         HubspotUTK:   hubspotUTK,
         UserAgent:    string(user.UserAgent),
@@ -465,6 +446,7 @@ func (a *Auth) GetAccount(w http.ResponseWriter, r *http.Request) {
         Email               string `json:"email"`
         Partner             string `json:"partner"`
         ProjectLimit        int    `json:"projectLimit"`
+        ProjectStorageLimit int64  `json:"projectStorageLimit"`
         IsProfessional      bool   `json:"isProfessional"`
         Position            string `json:"position"`
         CompanyName         string `json:"companyName"`
@@ -490,6 +472,7 @@ func (a *Auth) GetAccount(w http.ResponseWriter, r *http.Request) {
         user.Partner = string(consoleUser.UserAgent)
     }
     user.ProjectLimit = consoleUser.ProjectLimit
+    user.ProjectStorageLimit = consoleUser.ProjectStorageLimit
     user.IsProfessional = consoleUser.IsProfessional
     user.CompanyName = consoleUser.CompanyName
     user.Position = consoleUser.Position

View File

@@ -107,103 +107,6 @@ func TestAuth_Register(t *testing.T) {
     })
 }

-func TestAuth_Register_CORS(t *testing.T) {
-    testplanet.Run(t, testplanet.Config{
-        SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
-        Reconfigure: testplanet.Reconfigure{
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
-                config.Console.OpenRegistrationEnabled = true
-                config.Console.RateLimit.Burst = 10
-                config.Mail.AuthType = "nomail"
-            },
-        },
-    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
-        email := "user@test.com"
-        fullName := "testuser"
-        jsonBody := []byte(fmt.Sprintf(`{"email":"%s","fullName":"%s","password":"abc123","shortName":"test"}`, email, fullName))
-
-        url := planet.Satellites[0].ConsoleURL() + "/api/v0/auth/register"
-        req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(jsonBody))
-        require.NoError(t, err)
-        req.Header.Set("Content-Type", "application/json")
-
-        // 1. OPTIONS request
-        //   1.1 CORS headers should not be set with origin other than storj.io or www.storj.io
-        req.Header.Set("Origin", "https://someexternalorigin.test")
-        req.Method = http.MethodOptions
-        resp, err := http.DefaultClient.Do(req)
-        require.NoError(t, err)
-        require.Equal(t, http.StatusOK, resp.StatusCode)
-        require.Equal(t, "", resp.Header.Get("Access-Control-Allow-Origin"))
-        require.Equal(t, "", resp.Header.Get("Access-Control-Allow-Methods"))
-        require.Equal(t, "", resp.Header.Get("Access-Control-Allow-Headers"))
-        require.NoError(t, resp.Body.Close())
-
-        //   1.2 CORS headers should be set with a domain of storj.io
-        req.Header.Set("Origin", "https://storj.io")
-        resp, err = http.DefaultClient.Do(req)
-        require.NoError(t, err)
-        require.Equal(t, http.StatusOK, resp.StatusCode)
-        require.Equal(t, "https://storj.io", resp.Header.Get("Access-Control-Allow-Origin"))
-        require.Equal(t, "POST, OPTIONS", resp.Header.Get("Access-Control-Allow-Methods"))
-        allowedHeaders := strings.Split(resp.Header.Get("Access-Control-Allow-Headers"), ", ")
-        require.ElementsMatch(t, allowedHeaders, []string{
-            "Content-Type",
-            "Content-Length",
-            "Accept",
-            "Accept-Encoding",
-            "X-CSRF-Token",
-            "Authorization",
-        })
-        require.NoError(t, resp.Body.Close())
-
-        //   1.3 CORS headers should be set with a domain of www.storj.io
-        req.Header.Set("Origin", "https://www.storj.io")
-        resp, err = http.DefaultClient.Do(req)
-        require.NoError(t, err)
-        require.Equal(t, http.StatusOK, resp.StatusCode)
-        require.Equal(t, "https://www.storj.io", resp.Header.Get("Access-Control-Allow-Origin"))
-        require.Equal(t, "POST, OPTIONS", resp.Header.Get("Access-Control-Allow-Methods"))
-        allowedHeaders = strings.Split(resp.Header.Get("Access-Control-Allow-Headers"), ", ")
-        require.ElementsMatch(t, allowedHeaders, []string{
-            "Content-Type",
-            "Content-Length",
-            "Accept",
-            "Accept-Encoding",
-            "X-CSRF-Token",
-            "Authorization",
-        })
-        require.NoError(t, resp.Body.Close())
-
-        // 2. POST request with origin www.storj.io
-        req.Method = http.MethodPost
-        resp, err = http.DefaultClient.Do(req)
-        require.NoError(t, err)
-        defer func() {
-            err = resp.Body.Close()
-            require.NoError(t, err)
-        }()
-        require.Equal(t, http.StatusOK, resp.StatusCode)
-        require.Equal(t, "https://www.storj.io", resp.Header.Get("Access-Control-Allow-Origin"))
-        require.Equal(t, "POST, OPTIONS", resp.Header.Get("Access-Control-Allow-Methods"))
-        allowedHeaders = strings.Split(resp.Header.Get("Access-Control-Allow-Headers"), ", ")
-        require.ElementsMatch(t, allowedHeaders, []string{
-            "Content-Type",
-            "Content-Length",
-            "Accept",
-            "Accept-Encoding",
-            "X-CSRF-Token",
-            "Authorization",
-        })
-
-        require.Len(t, planet.Satellites, 1)
-        // this works only because we configured 'nomail' above. Mail send simulator won't click to activation link.
-        _, users, err := planet.Satellites[0].API.Console.Service.GetUserByEmailWithUnverified(ctx, email)
-        require.NoError(t, err)
-        require.Len(t, users, 1)
-        require.Equal(t, fullName, users[0].FullName)
-    })
-}
-
 func TestDeleteAccount(t *testing.T) {
     ctx := testcontext.New(t)
     log := testplanet.NewLogger(t)

View File

@@ -870,7 +870,7 @@ func TestWrongUser(t *testing.T) {
         }`}))
         require.Contains(t, body, "not authorized")
         // TODO: wrong error code
-        require.Equal(t, http.StatusInternalServerError, resp.StatusCode)
+        require.Equal(t, http.StatusUnauthorized, resp.StatusCode)
     }

     { // get bucket usages

View File

@@ -132,6 +132,7 @@ type Server struct {
     listener net.Listener
     server   http.Server
+    router   *mux.Router
     cookieAuth        *consolewebauth.CookieAuth
     ipRateLimiter     *web.RateLimiter
     userIDRateLimiter *web.RateLimiter
@@ -239,6 +240,7 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, oidc
     }

     router := mux.NewRouter()
+    server.router = router
     // N.B. This middleware has to be the first one because it has to be called
     // the earliest in the HTTP chain.
     router.Use(newTraceRequestMiddleware(logger, router))
@@ -252,95 +254,104 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, oidc
         consoleapi.NewUserManagement(logger, mon, server.service, router, &apiAuth{&server})
     }

-    router.HandleFunc("/api/v0/config", server.frontendConfigHandler)
-    router.Handle("/api/v0/graphql", server.withAuth(http.HandlerFunc(server.graphqlHandler)))
+    router.Handle("/api/v0/config", server.withCORS(http.HandlerFunc(server.frontendConfigHandler)))
+    router.Handle("/api/v0/graphql", server.withCORS(server.withAuth(http.HandlerFunc(server.graphqlHandler))))
     router.HandleFunc("/registrationToken/", server.createRegistrationTokenHandler)
     router.HandleFunc("/robots.txt", server.seoHandler)

     projectsController := consoleapi.NewProjects(logger, service)
     projectsRouter := router.PathPrefix("/api/v0/projects").Subrouter()
-    projectsRouter.Handle("/{id}/salt", server.withAuth(http.HandlerFunc(projectsController.GetSalt))).Methods(http.MethodGet)
-    projectsRouter.Handle("/{id}/invite", server.withAuth(http.HandlerFunc(projectsController.InviteUsers))).Methods(http.MethodPost)
-    projectsRouter.Handle("/{id}/invite-link", server.withAuth(http.HandlerFunc(projectsController.GetInviteLink))).Methods(http.MethodGet)
-    projectsRouter.Handle("/invitations", server.withAuth(http.HandlerFunc(projectsController.GetUserInvitations))).Methods(http.MethodGet)
-    projectsRouter.Handle("/invitations/{id}/respond", server.withAuth(http.HandlerFunc(projectsController.RespondToInvitation))).Methods(http.MethodPost)
+    projectsRouter.Use(server.withCORS)
+    projectsRouter.Use(server.withAuth)
+    projectsRouter.Handle("/{id}/salt", http.HandlerFunc(projectsController.GetSalt)).Methods(http.MethodGet, http.MethodOptions)
+    projectsRouter.Handle("/{id}/invite", http.HandlerFunc(projectsController.InviteUsers)).Methods(http.MethodPost, http.MethodOptions)
+    projectsRouter.Handle("/{id}/invite-link", http.HandlerFunc(projectsController.GetInviteLink)).Methods(http.MethodGet, http.MethodOptions)
+    projectsRouter.Handle("/invitations", http.HandlerFunc(projectsController.GetUserInvitations)).Methods(http.MethodGet, http.MethodOptions)
+    projectsRouter.Handle("/invitations/{id}/respond", http.HandlerFunc(projectsController.RespondToInvitation)).Methods(http.MethodPost, http.MethodOptions)

     usageLimitsController := consoleapi.NewUsageLimits(logger, service)
-    projectsRouter.Handle("/{id}/usage-limits", server.withAuth(http.HandlerFunc(usageLimitsController.ProjectUsageLimits))).Methods(http.MethodGet)
-    projectsRouter.Handle("/usage-limits", server.withAuth(http.HandlerFunc(usageLimitsController.TotalUsageLimits))).Methods(http.MethodGet)
-    projectsRouter.Handle("/{id}/daily-usage", server.withAuth(http.HandlerFunc(usageLimitsController.DailyUsage))).Methods(http.MethodGet)
+    projectsRouter.Handle("/{id}/usage-limits", http.HandlerFunc(usageLimitsController.ProjectUsageLimits)).Methods(http.MethodGet, http.MethodOptions)
+    projectsRouter.Handle("/usage-limits", http.HandlerFunc(usageLimitsController.TotalUsageLimits)).Methods(http.MethodGet, http.MethodOptions)
+    projectsRouter.Handle("/{id}/daily-usage", http.HandlerFunc(usageLimitsController.DailyUsage)).Methods(http.MethodGet, http.MethodOptions)

     authController := consoleapi.NewAuth(logger, service, accountFreezeService, mailService, server.cookieAuth, server.analytics, config.SatelliteName, server.config.ExternalAddress, config.LetUsKnowURL, config.TermsAndConditionsURL, config.ContactInfoURL, config.GeneralRequestURL)
     authRouter := router.PathPrefix("/api/v0/auth").Subrouter()
-    authRouter.Handle("/account", server.withAuth(http.HandlerFunc(authController.GetAccount))).Methods(http.MethodGet)
-    authRouter.Handle("/account", server.withAuth(http.HandlerFunc(authController.UpdateAccount))).Methods(http.MethodPatch)
-    authRouter.Handle("/account/change-email", server.withAuth(http.HandlerFunc(authController.ChangeEmail))).Methods(http.MethodPost)
-    authRouter.Handle("/account/change-password", server.withAuth(server.userIDRateLimiter.Limit(http.HandlerFunc(authController.ChangePassword)))).Methods(http.MethodPost)
-    authRouter.Handle("/account/freezestatus", server.withAuth(http.HandlerFunc(authController.GetFreezeStatus))).Methods(http.MethodGet)
-    authRouter.Handle("/account/settings", server.withAuth(http.HandlerFunc(authController.GetUserSettings))).Methods(http.MethodGet)
-    authRouter.Handle("/account/settings", server.withAuth(http.HandlerFunc(authController.SetUserSettings))).Methods(http.MethodPatch)
-    authRouter.Handle("/account/onboarding", server.withAuth(http.HandlerFunc(authController.SetOnboardingStatus))).Methods(http.MethodPatch)
-    authRouter.Handle("/account/delete", server.withAuth(http.HandlerFunc(authController.DeleteAccount))).Methods(http.MethodPost)
-    authRouter.Handle("/mfa/enable", server.withAuth(http.HandlerFunc(authController.EnableUserMFA))).Methods(http.MethodPost)
-    authRouter.Handle("/mfa/disable", server.withAuth(http.HandlerFunc(authController.DisableUserMFA))).Methods(http.MethodPost)
-    authRouter.Handle("/mfa/generate-secret-key", server.withAuth(http.HandlerFunc(authController.GenerateMFASecretKey))).Methods(http.MethodPost)
-    authRouter.Handle("/mfa/generate-recovery-codes", server.withAuth(http.HandlerFunc(authController.GenerateMFARecoveryCodes))).Methods(http.MethodPost)
-    authRouter.Handle("/logout", server.withAuth(http.HandlerFunc(authController.Logout))).Methods(http.MethodPost)
-    authRouter.Handle("/token", server.ipRateLimiter.Limit(http.HandlerFunc(authController.Token))).Methods(http.MethodPost)
-    authRouter.Handle("/token-by-api-key", server.ipRateLimiter.Limit(http.HandlerFunc(authController.TokenByAPIKey))).Methods(http.MethodPost)
+    authRouter.Use(server.withCORS)
+    authRouter.Handle("/account", server.withAuth(http.HandlerFunc(authController.GetAccount))).Methods(http.MethodGet, http.MethodOptions)
+    authRouter.Handle("/account", server.withAuth(http.HandlerFunc(authController.UpdateAccount))).Methods(http.MethodPatch, http.MethodOptions)
+    authRouter.Handle("/account/change-email", server.withAuth(http.HandlerFunc(authController.ChangeEmail))).Methods(http.MethodPost, http.MethodOptions)
+    authRouter.Handle("/account/change-password", server.withAuth(server.userIDRateLimiter.Limit(http.HandlerFunc(authController.ChangePassword)))).Methods(http.MethodPost, http.MethodOptions)
+    authRouter.Handle("/account/freezestatus", server.withAuth(http.HandlerFunc(authController.GetFreezeStatus))).Methods(http.MethodGet, http.MethodOptions)
+    authRouter.Handle("/account/settings", server.withAuth(http.HandlerFunc(authController.GetUserSettings))).Methods(http.MethodGet, http.MethodOptions)
+    authRouter.Handle("/account/settings", server.withAuth(http.HandlerFunc(authController.SetUserSettings))).Methods(http.MethodPatch, http.MethodOptions)
+    authRouter.Handle("/account/onboarding", server.withAuth(http.HandlerFunc(authController.SetOnboardingStatus))).Methods(http.MethodPatch, http.MethodOptions)
+    authRouter.Handle("/account/delete", server.withAuth(http.HandlerFunc(authController.DeleteAccount))).Methods(http.MethodPost, http.MethodOptions)
+    authRouter.Handle("/mfa/enable", server.withAuth(http.HandlerFunc(authController.EnableUserMFA))).Methods(http.MethodPost, http.MethodOptions)
+    authRouter.Handle("/mfa/disable", server.withAuth(http.HandlerFunc(authController.DisableUserMFA))).Methods(http.MethodPost, http.MethodOptions)
+    authRouter.Handle("/mfa/generate-secret-key", server.withAuth(http.HandlerFunc(authController.GenerateMFASecretKey))).Methods(http.MethodPost, http.MethodOptions)
+    authRouter.Handle("/mfa/generate-recovery-codes", server.withAuth(http.HandlerFunc(authController.GenerateMFARecoveryCodes))).Methods(http.MethodPost, http.MethodOptions)
+    authRouter.Handle("/logout", server.withAuth(http.HandlerFunc(authController.Logout))).Methods(http.MethodPost, http.MethodOptions)
+    authRouter.Handle("/token", server.ipRateLimiter.Limit(http.HandlerFunc(authController.Token))).Methods(http.MethodPost, http.MethodOptions)
+    authRouter.Handle("/token-by-api-key", server.ipRateLimiter.Limit(http.HandlerFunc(authController.TokenByAPIKey))).Methods(http.MethodPost, http.MethodOptions)
     authRouter.Handle("/register", server.ipRateLimiter.Limit(http.HandlerFunc(authController.Register))).Methods(http.MethodPost, http.MethodOptions)
-    authRouter.Handle("/forgot-password", server.ipRateLimiter.Limit(http.HandlerFunc(authController.ForgotPassword))).Methods(http.MethodPost)
-    authRouter.Handle("/resend-email/{email}", server.ipRateLimiter.Limit(http.HandlerFunc(authController.ResendEmail))).Methods(http.MethodPost)
-    authRouter.Handle("/reset-password", server.ipRateLimiter.Limit(http.HandlerFunc(authController.ResetPassword))).Methods(http.MethodPost)
-    authRouter.Handle("/refresh-session", server.withAuth(http.HandlerFunc(authController.RefreshSession))).Methods(http.MethodPost)
+    authRouter.Handle("/forgot-password", server.ipRateLimiter.Limit(http.HandlerFunc(authController.ForgotPassword))).Methods(http.MethodPost, http.MethodOptions)
+    authRouter.Handle("/resend-email/{email}", server.ipRateLimiter.Limit(http.HandlerFunc(authController.ResendEmail))).Methods(http.MethodPost, http.MethodOptions)
+    authRouter.Handle("/reset-password", server.ipRateLimiter.Limit(http.HandlerFunc(authController.ResetPassword))).Methods(http.MethodPost, http.MethodOptions)
+    authRouter.Handle("/refresh-session", server.withAuth(http.HandlerFunc(authController.RefreshSession))).Methods(http.MethodPost, http.MethodOptions)

     if config.ABTesting.Enabled {
         abController := consoleapi.NewABTesting(logger, abTesting)
         abRouter := router.PathPrefix("/api/v0/ab").Subrouter()
-        abRouter.Handle("/values", server.withAuth(http.HandlerFunc(abController.GetABValues))).Methods(http.MethodGet)
-        abRouter.Handle("/hit/{action}", server.withAuth(http.HandlerFunc(abController.SendHit))).Methods(http.MethodPost)
+        abRouter.Use(server.withCORS)
+        abRouter.Use(server.withAuth)
+        abRouter.Handle("/values", http.HandlerFunc(abController.GetABValues)).Methods(http.MethodGet, http.MethodOptions)
+        abRouter.Handle("/hit/{action}", http.HandlerFunc(abController.SendHit)).Methods(http.MethodPost, http.MethodOptions)
     }

     paymentController := consoleapi.NewPayments(logger, service, accountFreezeService, packagePlans)
     paymentsRouter := router.PathPrefix("/api/v0/payments").Subrouter()
+    paymentsRouter.Use(server.withCORS)
     paymentsRouter.Use(server.withAuth)
-    paymentsRouter.Handle("/cards", server.userIDRateLimiter.Limit(http.HandlerFunc(paymentController.AddCreditCard))).Methods(http.MethodPost)
-    paymentsRouter.HandleFunc("/cards", paymentController.MakeCreditCardDefault).Methods(http.MethodPatch)
-    paymentsRouter.HandleFunc("/cards", paymentController.ListCreditCards).Methods(http.MethodGet)
-    paymentsRouter.HandleFunc("/cards/{cardId}", paymentController.RemoveCreditCard).Methods(http.MethodDelete)
-    paymentsRouter.HandleFunc("/account/charges", paymentController.ProjectsCharges).Methods(http.MethodGet)
-    paymentsRouter.HandleFunc("/account/balance", paymentController.AccountBalance).Methods(http.MethodGet)
-    paymentsRouter.HandleFunc("/account", paymentController.SetupAccount).Methods(http.MethodPost)
-    paymentsRouter.HandleFunc("/wallet", paymentController.GetWallet).Methods(http.MethodGet)
-    paymentsRouter.HandleFunc("/wallet", paymentController.ClaimWallet).Methods(http.MethodPost)
-    paymentsRouter.HandleFunc("/wallet/payments", paymentController.WalletPayments).Methods(http.MethodGet)
-    paymentsRouter.HandleFunc("/billing-history", paymentController.BillingHistory).Methods(http.MethodGet)
-    paymentsRouter.Handle("/coupon/apply", server.userIDRateLimiter.Limit(http.HandlerFunc(paymentController.ApplyCouponCode))).Methods(http.MethodPatch)
-    paymentsRouter.HandleFunc("/coupon", paymentController.GetCoupon).Methods(http.MethodGet)
-    paymentsRouter.HandleFunc("/pricing", paymentController.GetProjectUsagePriceModel).Methods(http.MethodGet)
+    paymentsRouter.Handle("/cards", server.userIDRateLimiter.Limit(http.HandlerFunc(paymentController.AddCreditCard))).Methods(http.MethodPost, http.MethodOptions)
+    paymentsRouter.HandleFunc("/cards", paymentController.MakeCreditCardDefault).Methods(http.MethodPatch, http.MethodOptions)
+    paymentsRouter.HandleFunc("/cards", paymentController.ListCreditCards).Methods(http.MethodGet, http.MethodOptions)
+    paymentsRouter.HandleFunc("/cards/{cardId}", paymentController.RemoveCreditCard).Methods(http.MethodDelete, http.MethodOptions)
+    paymentsRouter.HandleFunc("/account/charges", paymentController.ProjectsCharges).Methods(http.MethodGet, http.MethodOptions)
+    paymentsRouter.HandleFunc("/account/balance", paymentController.AccountBalance).Methods(http.MethodGet, http.MethodOptions)
+    paymentsRouter.HandleFunc("/account", paymentController.SetupAccount).Methods(http.MethodPost, http.MethodOptions)
+    paymentsRouter.HandleFunc("/wallet", paymentController.GetWallet).Methods(http.MethodGet, http.MethodOptions)
+    paymentsRouter.HandleFunc("/wallet", paymentController.ClaimWallet).Methods(http.MethodPost, http.MethodOptions)
+    paymentsRouter.HandleFunc("/wallet/payments", paymentController.WalletPayments).Methods(http.MethodGet, http.MethodOptions)
+    paymentsRouter.HandleFunc("/billing-history", paymentController.BillingHistory).Methods(http.MethodGet, http.MethodOptions)
+    paymentsRouter.Handle("/coupon/apply", server.userIDRateLimiter.Limit(http.HandlerFunc(paymentController.ApplyCouponCode))).Methods(http.MethodPatch, http.MethodOptions)
+    paymentsRouter.HandleFunc("/coupon", paymentController.GetCoupon).Methods(http.MethodGet, http.MethodOptions)
+    paymentsRouter.HandleFunc("/pricing", paymentController.GetProjectUsagePriceModel).Methods(http.MethodGet, http.MethodOptions)
     if config.PricingPackagesEnabled {
-        paymentsRouter.HandleFunc("/purchase-package", paymentController.PurchasePackage).Methods(http.MethodPost)
-        paymentsRouter.HandleFunc("/package-available", paymentController.PackageAvailable).Methods(http.MethodGet)
+        paymentsRouter.HandleFunc("/purchase-package", paymentController.PurchasePackage).Methods(http.MethodPost, http.MethodOptions)
+        paymentsRouter.HandleFunc("/package-available", paymentController.PackageAvailable).Methods(http.MethodGet, http.MethodOptions)
     }

     bucketsController := consoleapi.NewBuckets(logger, service)
     bucketsRouter := router.PathPrefix("/api/v0/buckets").Subrouter()
+    bucketsRouter.Use(server.withCORS)
     bucketsRouter.Use(server.withAuth)
-    bucketsRouter.HandleFunc("/bucket-names", bucketsController.AllBucketNames).Methods(http.MethodGet)
+    bucketsRouter.HandleFunc("/bucket-names", bucketsController.AllBucketNames).Methods(http.MethodGet, http.MethodOptions)

     apiKeysController := consoleapi.NewAPIKeys(logger, service)
     apiKeysRouter := router.PathPrefix("/api/v0/api-keys").Subrouter()
+    apiKeysRouter.Use(server.withCORS)
     apiKeysRouter.Use(server.withAuth)
-    apiKeysRouter.HandleFunc("/delete-by-name", apiKeysController.DeleteByNameAndProjectID).Methods(http.MethodDelete)
-    apiKeysRouter.HandleFunc("/api-key-names", apiKeysController.GetAllAPIKeyNames).Methods(http.MethodGet)
+    apiKeysRouter.HandleFunc("/delete-by-name", apiKeysController.DeleteByNameAndProjectID).Methods(http.MethodDelete, http.MethodOptions)
+    apiKeysRouter.HandleFunc("/api-key-names", apiKeysController.GetAllAPIKeyNames).Methods(http.MethodGet, http.MethodOptions)

     analyticsController := consoleapi.NewAnalytics(logger, service, server.analytics)
     analyticsRouter := router.PathPrefix("/api/v0/analytics").Subrouter()
+    analyticsRouter.Use(server.withCORS)
     analyticsRouter.Use(server.withAuth)
-    analyticsRouter.HandleFunc("/event", analyticsController.EventTriggered).Methods(http.MethodPost)
-    analyticsRouter.HandleFunc("/page", analyticsController.PageEventTriggered).Methods(http.MethodPost)
+    analyticsRouter.HandleFunc("/event", analyticsController.EventTriggered).Methods(http.MethodPost, http.MethodOptions)
+    analyticsRouter.HandleFunc("/page", analyticsController.PageEventTriggered).Methods(http.MethodPost, http.MethodOptions)

     if server.config.StaticDir != "" {
         oidc := oidc.NewEndpoint(
@@ -356,7 +367,7 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, oidc
         router.Handle("/oauth/v2/clients/{id}", server.withAuth(http.HandlerFunc(oidc.GetClient))).Methods(http.MethodGet)

         fs := http.FileServer(http.Dir(server.config.StaticDir))
-        router.PathPrefix("/static/").Handler(server.brotliMiddleware(http.StripPrefix("/static", fs)))
+        router.PathPrefix("/static/").Handler(server.withCORS(server.brotliMiddleware(http.StripPrefix("/static", fs))))

         router.HandleFunc("/invited", server.handleInvited)
@@ -367,9 +378,9 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, oidc
         slashRouter.HandleFunc("/cancel-password-recovery", server.cancelPasswordRecoveryHandler)

         if server.config.UseVuetifyProject {
-            router.PathPrefix("/vuetifypoc").Handler(http.HandlerFunc(server.vuetifyAppHandler))
+            router.PathPrefix("/vuetifypoc").Handler(server.withCORS(http.HandlerFunc(server.vuetifyAppHandler)))
         }
-        router.PathPrefix("/").Handler(http.HandlerFunc(server.appHandler))
+        router.PathPrefix("/").Handler(server.withCORS(http.HandlerFunc(server.appHandler)))
     }

     server.server = http.Server{
@@ -506,6 +517,29 @@ func (server *Server) vuetifyAppHandler(w http.ResponseWriter, r *http.Request)
     http.ServeContent(w, r, path, info.ModTime(), file)
 }

+// withCORS handles setting CORS-related headers on an http request.
+func (server *Server) withCORS(handler http.Handler) http.Handler {
+    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        w.Header().Set("Access-Control-Allow-Origin", strings.Trim(server.config.ExternalAddress, "/"))
+        w.Header().Set("Access-Control-Allow-Credentials", "true")
+        w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
+        w.Header().Set("Access-Control-Expose-Headers", "*, Authorization")
+
+        if r.Method == http.MethodOptions {
+            match := &mux.RouteMatch{}
+            if server.router.Match(r, match) {
+                methods, err := match.Route.GetMethods()
+                if err == nil && len(methods) > 0 {
+                    w.Header().Set("Access-Control-Allow-Methods", strings.Join(methods, ", "))
+                }
+            }
+            return
+        }
+
+        handler.ServeHTTP(w, r)
+    })
+}
+
 // withAuth performs initial authorization before every request.
 func (server *Server) withAuth(handler http.Handler) http.Handler {
     return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
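Note: one consequence of answering preflights in middleware is that every route must also be registered for http.MethodOptions, otherwise gorilla/mux rejects the OPTIONS request before the middleware's route match can succeed; that is why the route table above adds MethodOptions everywhere. A minimal httptest sketch of that interaction, using a simplified stand-in for the middleware; the origin and route here are placeholders:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"

	"github.com/gorilla/mux"
)

// corsFor mirrors the shape of the middleware above in simplified form:
// it stamps CORS headers and answers preflight requests from route metadata.
func corsFor(router *mux.Router, origin string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Access-Control-Allow-Origin", strings.Trim(origin, "/"))
		w.Header().Set("Access-Control-Allow-Credentials", "true")
		if r.Method == http.MethodOptions {
			match := &mux.RouteMatch{}
			if router.Match(r, match) {
				if methods, err := match.Route.GetMethods(); err == nil {
					w.Header().Set("Access-Control-Allow-Methods", strings.Join(methods, ", "))
				}
			}
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	router := mux.NewRouter()
	// Note MethodOptions: without it the route would not match the preflight.
	router.HandleFunc("/api/v0/auth/token", func(w http.ResponseWriter, r *http.Request) {}).
		Methods(http.MethodPost, http.MethodOptions)

	srv := httptest.NewServer(corsFor(router, "https://satellite.example/", router))
	defer srv.Close()

	req, _ := http.NewRequest(http.MethodOptions, srv.URL+"/api/v0/auth/token", nil)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Header.Get("Access-Control-Allow-Methods")) // POST, OPTIONS
}
```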
@@ -742,8 +776,43 @@ func (server *Server) handleInvited(w http.ResponseWriter, r *http.Request) {
         return
     }

-    email := strings.ToLower(invite.Email)
-    http.Redirect(w, r, loginLink+"?email="+email, http.StatusTemporaryRedirect)
+    user, _, err := server.service.GetUserByEmailWithUnverified(ctx, invite.Email)
+    if err != nil && !console.ErrEmailNotFound.Has(err) {
+        server.log.Error("error getting invitation recipient", zap.Error(err))
+        server.serveError(w, http.StatusInternalServerError)
+        return
+    }
+
+    if user != nil {
+        http.Redirect(w, r, loginLink+"?email="+user.Email, http.StatusTemporaryRedirect)
+        return
+    }
+
+    params := url.Values{"email": {strings.ToLower(invite.Email)}}
+
+    if invite.InviterID != nil {
+        inviter, err := server.service.GetUser(ctx, *invite.InviterID)
+        if err != nil {
+            server.log.Error("error getting invitation sender", zap.Error(err))
+            server.serveError(w, http.StatusInternalServerError)
+            return
+        }
+        name := inviter.ShortName
+        if name == "" {
+            name = inviter.FullName
+        }
+        params.Add("inviter", name)
+        params.Add("inviter_email", inviter.Email)
+    }
+
+    proj, err := server.service.GetProjectNoAuth(ctx, invite.ProjectID)
+    if err != nil {
+        server.log.Error("error getting invitation project", zap.Error(err))
+        server.serveError(w, http.StatusInternalServerError)
+        return
+    }
+    params.Add("project", proj.Name)
+
+    http.Redirect(w, r, server.config.ExternalAddress+"signup?"+params.Encode(), http.StatusTemporaryRedirect)
 }

 // graphqlHandler is graphql endpoint http handler function.

View File

@@ -85,23 +85,15 @@ func TestInvitedRouting(t *testing.T) {
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         sat := planet.Satellites[0]
         service := sat.API.Console.Service
+        invitedEmail := "invited@mail.test"

-        user, err := sat.AddUser(ctx, console.CreateUser{
-            FullName: "Test User",
-            Email:    "u@mail.test",
+        owner, err := sat.AddUser(ctx, console.CreateUser{
+            FullName: "Project Owner",
+            Email:    "owner@mail.test",
         }, 1)
         require.NoError(t, err)

-        user2, err := sat.AddUser(ctx, console.CreateUser{
-            FullName: "Test User2",
-            Email:    "u2@mail.test",
-        }, 1)
-        require.NoError(t, err)
-
-        ctx1, err := sat.UserContext(ctx, user.ID)
-        require.NoError(t, err)
-
-        project, err := sat.AddProject(ctx1, user.ID, "Test Project")
+        project, err := sat.AddProject(ctx, owner.ID, "Test Project")
         require.NoError(t, err)

         client := http.Client{}
@@ -128,24 +120,34 @@ func TestInvitedRouting(t *testing.T) {
         loginURL := baseURL + "login"
         invalidURL := loginURL + "?invite_invalid=true"

-        tokenInvalidProj, err := service.CreateInviteToken(ctx, project.ID, user2.Email, time.Now())
+        tokenInvalidProj, err := service.CreateInviteToken(ctx, project.ID, invitedEmail, time.Now())
         require.NoError(t, err)

-        token, err := service.CreateInviteToken(ctx, project.PublicID, user2.Email, time.Now())
+        token, err := service.CreateInviteToken(ctx, project.PublicID, invitedEmail, time.Now())
         require.NoError(t, err)

         checkInvitedRedirect("Invited - Invalid projectID", invalidURL, tokenInvalidProj)
         checkInvitedRedirect("Invited - User not invited", invalidURL, token)

-        _, err = service.InviteProjectMembers(ctx1, project.ID, []string{user2.Email})
+        ownerCtx, err := sat.UserContext(ctx, owner.ID)
+        require.NoError(t, err)
+
+        _, err = service.InviteProjectMembers(ownerCtx, project.ID, []string{invitedEmail})
         require.NoError(t, err)

-        token, err = service.CreateInviteToken(ctx, project.PublicID, user2.Email, time.Now())
+        // Valid invite for nonexistent user should redirect to registration page with
+        // query parameters containing invitation information.
+        params := "email=invited%40mail.test&inviter=Project+Owner&inviter_email=owner%40mail.test&project=Test+Project"
+        checkInvitedRedirect("Invited - Nonexistent user", baseURL+"signup?"+params, token)
+
+        invitedUser, err := sat.AddUser(ctx, console.CreateUser{
+            FullName: "Invited User",
+            Email:    invitedEmail,
+        }, 1)
         require.NoError(t, err)

         // valid invite should redirect to login page with email.
-        checkInvitedRedirect("Invited - User invited", loginURL+"?email="+user2.Email, token)
+        checkInvitedRedirect("Invited - User invited", loginURL+"?email="+invitedUser.Email, token)
     })
 }
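Note: the expected query string in this test is deterministic because url.Values.Encode sorts parameters by key (email, inviter, inviter_email, project). A small sketch reproducing the redirect URL construction from handleInvited with the same values the test expects; the host is a placeholder:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	params := url.Values{"email": {strings.ToLower("Invited@mail.test")}}
	params.Add("inviter", "Project Owner")
	params.Add("inviter_email", "owner@mail.test")
	params.Add("project", "Test Project")

	// Encode sorts keys, so the result matches the test's expectation:
	// email=invited%40mail.test&inviter=Project+Owner&inviter_email=owner%40mail.test&project=Test+Project
	fmt.Println("https://satellite.example/" + "signup?" + params.Encode())
}
```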

View File

@@ -24,8 +24,7 @@ type Config struct {
     AsOfSystemTimeInterval time.Duration `help:"interval for 'AS OF SYSTEM TIME' clause (CockroachDB specific) to read from the DB at a specific time in the past" default:"-5m" testDefault:"0"`
     PageSize               int           `help:"maximum number of database records to scan at once" default:"1000"`
     MaxUnverifiedUserAge   time.Duration `help:"maximum lifetime of unverified user account records" default:"168h"`
-    MaxProjectInvitationAge time.Duration `help:"maximum lifetime of project member invitation records" default:"168h"`
 }

 // Chore periodically removes unwanted records from the satellite console database.
@@ -56,10 +55,9 @@ func (chore *Chore) Run(ctx context.Context) (err error) {
         chore.log.Error("Error deleting unverified users", zap.Error(err))
     }

-    before = time.Now().Add(-chore.config.MaxProjectInvitationAge)
-    err = chore.db.ProjectInvitations().DeleteBefore(ctx, before, chore.config.AsOfSystemTimeInterval, chore.config.PageSize)
+    err = chore.db.WebappSessions().DeleteExpired(ctx, time.Now(), chore.config.AsOfSystemTimeInterval, chore.config.PageSize)
     if err != nil {
-        chore.log.Error("Error deleting project member invitations", zap.Error(err))
+        chore.log.Error("Error deleting expired webapp sessions", zap.Error(err))
     }

     return nil
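Note: for context, a cleanup chore of this shape boils down to a periodic loop that tolerates individual failures. A minimal sketch under that assumption; the satellite's real chore uses its own cycle helper and a config-driven interval rather than a bare time.Ticker:

```go
package cleanup

import (
	"context"
	"log"
	"time"
)

// run invokes clean on every tick until the context is canceled; a failed
// pass is logged rather than aborting the loop, matching the chore above.
func run(ctx context.Context, interval time.Duration, clean func(context.Context) error) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			if err := clean(ctx); err != nil {
				log.Println("cleanup failed:", err)
			}
		}
	}
}
```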

View File

@@ -14,20 +14,16 @@ import (
 //
 // architecture: Database
 type ProjectInvitations interface {
-    // Insert inserts a project member invitation into the database.
-    Insert(ctx context.Context, invite *ProjectInvitation) (*ProjectInvitation, error)
+    // Upsert updates a project member invitation if it exists and inserts it otherwise.
+    Upsert(ctx context.Context, invite *ProjectInvitation) (*ProjectInvitation, error)
     // Get returns a project member invitation from the database.
     Get(ctx context.Context, projectID uuid.UUID, email string) (*ProjectInvitation, error)
     // GetByProjectID returns all of the project member invitations for the project specified by the given ID.
     GetByProjectID(ctx context.Context, projectID uuid.UUID) ([]ProjectInvitation, error)
     // GetByEmail returns all of the project member invitations for the specified email address.
     GetByEmail(ctx context.Context, email string) ([]ProjectInvitation, error)
-    // Update updates the project member invitation specified by the given project ID and email address.
-    Update(ctx context.Context, projectID uuid.UUID, email string, request UpdateProjectInvitationRequest) (*ProjectInvitation, error)
     // Delete removes a project member invitation from the database.
     Delete(ctx context.Context, projectID uuid.UUID, email string) error
-    // DeleteBefore deletes project member invitations created prior to some time from the database.
-    DeleteBefore(ctx context.Context, before time.Time, asOfSystemTimeInterval time.Duration, pageSize int) error
 }

 // ProjectInvitation represents a pending project member invitation.
@@ -37,9 +33,3 @@ type ProjectInvitation struct {
     InviterID *uuid.UUID
     CreatedAt time.Time
 }
-
-// UpdateProjectInvitationRequest contains all fields which may be updated by ProjectInvitations.Update.
-type UpdateProjectInvitationRequest struct {
-    CreatedAt *time.Time
-    InviterID *uuid.UUID
-}
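Note: swapping Insert plus Update for a single Upsert moves the conflict handling into the database. A sketch of what such a statement can look like in SQL, assuming a hypothetical project_invitations table keyed on (project_id, email); the real query is dbx-generated and may differ:

```go
package invitations

import (
	"context"
	"database/sql"
)

const upsertInvitation = `
	INSERT INTO project_invitations (project_id, email, inviter_id, created_at)
	VALUES ($1, $2, $3, now())
	ON CONFLICT (project_id, email)
	DO UPDATE SET inviter_id = EXCLUDED.inviter_id, created_at = EXCLUDED.created_at`

// upsert re-issues an invitation, refreshing its creation time and inviter
// when one already exists for this project/email pair.
func upsert(ctx context.Context, db *sql.DB, projectID, email, inviterID string) error {
	_, err := db.ExecContext(ctx, upsertInvitation, projectID, email, inviterID)
	return err
}
```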

View File

@ -1813,12 +1813,11 @@ func (s *Service) UpdateProject(ctx context.Context, projectID uuid.UUID, update
return nil, Error.Wrap(err) return nil, Error.Wrap(err)
} }
isMember, err := s.isProjectMember(ctx, user.ID, projectID) _, project, err := s.isProjectOwner(ctx, user.ID, projectID)
if err != nil { if err != nil {
return nil, Error.Wrap(err) return nil, Error.Wrap(err)
} }
project := isMember.project
if updatedProject.Name != project.Name { if updatedProject.Name != project.Name {
passesNameCheck, err := s.checkProjectName(ctx, updatedProject, user.ID) passesNameCheck, err := s.checkProjectName(ctx, updatedProject, user.ID)
if err != nil || !passesNameCheck { if err != nil || !passesNameCheck {
@ -3545,7 +3544,6 @@ func (s *Service) RespondToProjectInvitation(ctx context.Context, projectID uuid
} }
if s.IsProjectInvitationExpired(invite) { if s.IsProjectInvitationExpired(invite) {
deleteWithLog()
return ErrProjectInviteInvalid.New(projInviteInvalidErrMsg) return ErrProjectInviteInvalid.New(projInviteInvalidErrMsg)
} }
@@ -3580,8 +3578,8 @@ func (s *Service) InviteProjectMembers(ctx context.Context, projectID uuid.UUID,
}
projectID = isMember.project.ID

- // collect user querying errors
- users := make([]*User, 0)
+ var users []*User
+ var newUserEmails []string
for _, email := range emails {
invitedUser, err := s.store.Users().GetByEmail(ctx, email)
if err == nil {

@@ -3600,7 +3598,9 @@ func (s *Service) InviteProjectMembers(ctx context.Context, projectID uuid.UUID,
return nil, ErrProjectInviteActive.New(projInviteActiveErrMsg, invitedUser.Email)
}
users = append(users, invitedUser)
- } else if !errs.Is(err, sql.ErrNoRows) {
+ } else if errs.Is(err, sql.ErrNoRows) {
+ newUserEmails = append(newUserEmails, email)
+ } else {
return nil, Error.Wrap(err)
}
}

@@ -3608,30 +3608,20 @@ func (s *Service) InviteProjectMembers(ctx context.Context, projectID uuid.UUID,
inviteTokens := make(map[string]string)
// add project invites in transaction scope
err = s.store.WithTx(ctx, func(ctx context.Context, tx DBTx) error {
- for _, invited := range users {
- invite, err := tx.ProjectInvitations().Insert(ctx, &ProjectInvitation{
+ for _, email := range emails {
+ invite, err := tx.ProjectInvitations().Upsert(ctx, &ProjectInvitation{
ProjectID: projectID,
- Email: invited.Email,
+ Email: email,
InviterID: &user.ID,
})
if err != nil {
- if !dbx.IsConstraintError(err) {
- return err
- }
- now := time.Now()
- invite, err = tx.ProjectInvitations().Update(ctx, projectID, invited.Email, UpdateProjectInvitationRequest{
- CreatedAt: &now,
- InviterID: &user.ID,
- })
- if err != nil {
- return err
- }
+ return err
}
- token, err := s.CreateInviteToken(ctx, isMember.project.PublicID, invited.Email, invite.CreatedAt)
+ token, err := s.CreateInviteToken(ctx, isMember.project.PublicID, email, invite.CreatedAt)
if err != nil {
return err
}
- inviteTokens[invited.Email] = token
+ inviteTokens[email] = token
invites = append(invites, *invite)
}
return nil

@@ -3658,6 +3648,18 @@ func (s *Service) InviteProjectMembers(ctx context.Context, projectID uuid.UUID,
},
)
}

+ for _, email := range newUserEmails {
+ inviteLink := fmt.Sprintf("%s?invite=%s", baseLink, inviteTokens[email])
+ s.mailService.SendRenderedAsync(
+ ctx,
+ []post.Address{{Address: email}},
+ &NewUserProjectInvitationEmail{
+ InviterEmail: user.Email,
+ Region: s.satelliteName,
+ SignUpLink: inviteLink,
+ },
+ )
+ }

return invites, nil
}
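s.CreateInviteToken itself is not part of this diff. As a hedged sketch of what such a token plausibly carries for the links above — binding the public project ID, invitee email, and invite creation time under an HMAC so the link cannot be forged and expiration can be checked on redemption; names and format here are hypothetical, not the actual implementation:

package sketch

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"time"

	"storj.io/common/uuid"
)

// createInviteToken is a hypothetical stand-in for s.CreateInviteToken.
func createInviteToken(secret []byte, publicProjectID uuid.UUID, email string, createdAt time.Time) string {
	// The payload carries everything needed to validate the invite later.
	payload := fmt.Sprintf("%s:%s:%d", publicProjectID, email, createdAt.Unix())
	mac := hmac.New(sha256.New, secret)
	mac.Write([]byte(payload))
	// Sign the payload so the token cannot be forged or tampered with.
	return base64.RawURLEncoding.EncodeToString(append([]byte(payload+":"), mac.Sum(nil)...))
}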


@@ -11,6 +11,7 @@ import (
"fmt"
"math/rand"
"sort"
+ "strings"
"testing"
"time"

@@ -269,6 +270,19 @@ func TestService(t *testing.T) {
})
require.Error(t, err)
require.Nil(t, updatedProject)

+ user2, userCtx2 := getOwnerAndCtx(ctx, up2Proj)
+ _, err = service.AddProjectMembers(userCtx1, up1Proj.ID, []string{user2.Email})
+ require.NoError(t, err)
+
+ // Members should not be able to update project.
+ _, err = service.UpdateProject(userCtx2, up1Proj.ID, console.ProjectInfo{
+ Name: updatedName,
+ })
+ require.Error(t, err)
+ require.True(t, console.ErrUnauthorized.Has(err))
+
+ // remove user2.
+ err = service.DeleteProjectMembersAndInvitations(userCtx1, up1Proj.ID, []string{user2.Email})
+ require.NoError(t, err)
})
t.Run("AddProjectMembers", func(t *testing.T) { t.Run("AddProjectMembers", func(t *testing.T) {
@ -314,7 +328,7 @@ func TestService(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
for _, id := range []uuid.UUID{up1Proj.ID, up2Proj.ID} { for _, id := range []uuid.UUID{up1Proj.ID, up2Proj.ID} {
_, err = sat.DB.Console().ProjectInvitations().Insert(ctx, &console.ProjectInvitation{ _, err = sat.DB.Console().ProjectInvitations().Upsert(ctx, &console.ProjectInvitation{
ProjectID: id, ProjectID: id,
Email: invitedUser.Email, Email: invitedUser.Email,
}) })
@ -1975,7 +1989,7 @@ func TestProjectInvitations(t *testing.T) {
} }
addInvite := func(t *testing.T, ctx context.Context, project *console.Project, email string) *console.ProjectInvitation { addInvite := func(t *testing.T, ctx context.Context, project *console.Project, email string) *console.ProjectInvitation {
invite, err := sat.DB.Console().ProjectInvitations().Insert(ctx, &console.ProjectInvitation{ invite, err := sat.DB.Console().ProjectInvitations().Upsert(ctx, &console.ProjectInvitation{
ProjectID: project.ID, ProjectID: project.ID,
Email: email, Email: email,
InviterID: &project.OwnerID, InviterID: &project.OwnerID,
@ -1985,11 +1999,18 @@ func TestProjectInvitations(t *testing.T) {
return invite return invite
} }
expireInvite := func(t *testing.T, ctx context.Context, invite *console.ProjectInvitation) { setInviteDate := func(t *testing.T, ctx context.Context, invite *console.ProjectInvitation, createdAt time.Time) {
createdAt := time.Now().Add(-sat.Config.Console.ProjectInvitationExpiration) result, err := sat.DB.Testing().RawDB().ExecContext(ctx,
newInvite, err := sat.DB.Console().ProjectInvitations().Update(ctx, invite.ProjectID, invite.Email, console.UpdateProjectInvitationRequest{ "UPDATE project_invitations SET created_at = $1 WHERE project_id = $2 AND email = $3",
CreatedAt: &createdAt, createdAt, invite.ProjectID, strings.ToUpper(invite.Email),
}) )
require.NoError(t, err)
count, err := result.RowsAffected()
require.NoError(t, err)
require.EqualValues(t, 1, count)
newInvite, err := sat.DB.Console().ProjectInvitations().Get(ctx, invite.ProjectID, invite.Email)
require.NoError(t, err) require.NoError(t, err)
*invite = *newInvite *invite = *newInvite
} }
@@ -2010,15 +2031,14 @@ func TestProjectInvitations(t *testing.T) {
require.NoError(t, err)
require.Len(t, invites, 1)

- // adding in a non-existent user should not fail the invitation.
+ // adding in a non-existent user should work.
invites, err = service.InviteProjectMembers(ctx, project.ID, []string{user3.Email, "notauser@mail.com"})
require.NoError(t, err)
- require.Len(t, invites, 1)
+ require.Len(t, invites, 2)

invites, err = service.GetUserProjectInvitations(ctx3)
require.NoError(t, err)
require.Len(t, invites, 1)
- user3Invite := invites[0]

// prevent unauthorized users from inviting others (user2 is not a member of the project yet).
_, err = service.InviteProjectMembers(ctx2, project.ID, []string{"other@mail.com"})

@@ -2033,10 +2053,12 @@ func TestProjectInvitations(t *testing.T) {
require.Empty(t, invites)

// expire the invitation.
- require.False(t, service.IsProjectInvitationExpired(&user3Invite))
+ user3Invite, err := sat.DB.Console().ProjectInvitations().Get(ctx, project.ID, user3.Email)
+ require.NoError(t, err)
+ require.False(t, service.IsProjectInvitationExpired(user3Invite))
oldCreatedAt := user3Invite.CreatedAt
- expireInvite(t, ctx, &user3Invite)
- require.True(t, service.IsProjectInvitationExpired(&user3Invite))
+ setInviteDate(t, ctx, user3Invite, time.Now().Add(-sat.Config.Console.ProjectInvitationExpiration))
+ require.True(t, service.IsProjectInvitationExpired(user3Invite))

// resending an expired invitation should succeed.
invites, err = service.InviteProjectMembers(ctx2, project.ID, []string{user3.Email})
@@ -2066,7 +2088,7 @@ func TestProjectInvitations(t *testing.T) {
require.Equal(t, invite.InviterID, invites[0].InviterID)
require.WithinDuration(t, invite.CreatedAt, invites[0].CreatedAt, time.Second)

- expireInvite(t, ctx, &invites[0])
+ setInviteDate(t, ctx, &invites[0], time.Now().Add(-sat.Config.Console.ProjectInvitationExpiration))
invites, err = service.GetUserProjectInvitations(ctx)
require.NoError(t, err)
require.Empty(t, invites)

@@ -2155,7 +2177,7 @@ func TestProjectInvitations(t *testing.T) {
require.NotNil(t, inviteFromToken)
require.Equal(t, invite, inviteFromToken)

- expireInvite(t, ctx, invite)
+ setInviteDate(t, ctx, invite, time.Now().Add(-sat.Config.Console.ProjectInvitationExpiration))
invites, err := service.GetUserProjectInvitations(ctx)
require.NoError(t, err)
require.Empty(t, invites)
@@ -2178,16 +2200,24 @@ func TestProjectInvitations(t *testing.T) {
proj := addProject(t, ctx)
invite := addInvite(t, ctx, proj, user.Email)

- expireInvite(t, ctx, invite)
+ // Expect an error when accepting an expired invitation.
+ // The invitation should remain in the database.
+ setInviteDate(t, ctx, invite, time.Now().Add(-sat.Config.Console.ProjectInvitationExpiration))
err := service.RespondToProjectInvitation(ctx, proj.ID, console.ProjectInvitationAccept)
require.True(t, console.ErrProjectInviteInvalid.Has(err))
- addInvite(t, ctx, proj, user.Email)
+ _, err = sat.DB.Console().ProjectInvitations().Get(ctx, proj.ID, user.Email)
+ require.NoError(t, err)
+
+ // Expect no error when accepting an active invitation.
+ // The invitation should be removed from the database, and the user should be added as a member.
+ setInviteDate(t, ctx, invite, time.Now())
+ require.NoError(t, err)
require.NoError(t, service.RespondToProjectInvitation(ctx, proj.ID, console.ProjectInvitationAccept))
- invites, err := service.GetUserProjectInvitations(ctx)
- require.NoError(t, err)
- require.Empty(t, invites)
+ _, err = sat.DB.Console().ProjectInvitations().Get(ctx, proj.ID, user.Email)
+ require.ErrorIs(t, err, sql.ErrNoRows)

memberships, err := sat.DB.Console().ProjectMembers().GetByMemberID(ctx, user.ID)
require.NoError(t, err)

@@ -2206,12 +2236,25 @@ func TestProjectInvitations(t *testing.T) {
user, ctx := getUserAndCtx(t)
proj := addProject(t, ctx)

- addInvite(t, ctx, proj, user.Email)
+ invite := addInvite(t, ctx, proj, user.Email)
+
+ // Expect an error when rejecting an expired invitation.
+ // The invitation should remain in the database.
+ setInviteDate(t, ctx, invite, time.Now().Add(-sat.Config.Console.ProjectInvitationExpiration))
+ err := service.RespondToProjectInvitation(ctx, proj.ID, console.ProjectInvitationDecline)
+ require.True(t, console.ErrProjectInviteInvalid.Has(err))
+ _, err = sat.DB.Console().ProjectInvitations().Get(ctx, proj.ID, user.Email)
+ require.NoError(t, err)
+
+ // Expect no error when rejecting an active invitation.
+ // The invitation should be removed from the database.
+ setInviteDate(t, ctx, invite, time.Now())
+ require.NoError(t, err)
require.NoError(t, service.RespondToProjectInvitation(ctx, proj.ID, console.ProjectInvitationDecline))
- invites, err := service.GetUserProjectInvitations(ctx)
- require.NoError(t, err)
- require.Empty(t, invites)
+ _, err = sat.DB.Console().ProjectInvitations().Get(ctx, proj.ID, user.Email)
+ require.ErrorIs(t, err, sql.ErrNoRows)

memberships, err := sat.DB.Console().ProjectMembers().GetByMemberID(ctx, user.ID)
require.NoError(t, err)


@@ -0,0 +1,135 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package piecetracker

import (
"context"
"time"
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/common/storj"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/metabase/rangedloop"
"storj.io/storj/satellite/overlay"
)
var (
// Error is a standard error class for this package.
Error = errs.Class("piecetracker")
mon = monkit.Package()
// check if Observer and Partial interfaces are satisfied.
_ rangedloop.Observer = (*Observer)(nil)
_ rangedloop.Partial = (*observerFork)(nil)
)
// Observer implements piecetracker ranged loop observer.
//
// The piecetracker counts the number of pieces currently expected to reside on each node,
// then passes the counts to the overlay with UpdatePieceCounts().
type Observer struct {
log *zap.Logger
config Config
overlay overlay.DB
metabaseDB *metabase.DB
pieceCounts map[metabase.NodeAlias]int64
}
// NewObserver creates new piecetracker ranged loop observer.
func NewObserver(log *zap.Logger, metabaseDB *metabase.DB, overlay overlay.DB, config Config) *Observer {
return &Observer{
log: log,
overlay: overlay,
metabaseDB: metabaseDB,
config: config,
pieceCounts: map[metabase.NodeAlias]int64{},
}
}
// Start implements ranged loop observer start method.
func (observer *Observer) Start(ctx context.Context, time time.Time) (err error) {
defer mon.Task()(&ctx)(&err)
observer.pieceCounts = map[metabase.NodeAlias]int64{}
return nil
}
// Fork implements ranged loop observer fork method.
func (observer *Observer) Fork(ctx context.Context) (_ rangedloop.Partial, err error) {
defer mon.Task()(&ctx)(&err)
return newObserverFork(), nil
}
// Join joins piecetracker ranged loop partial to main observer updating piece counts map.
func (observer *Observer) Join(ctx context.Context, partial rangedloop.Partial) (err error) {
defer mon.Task()(&ctx)(&err)
pieceTracker, ok := partial.(*observerFork)
if !ok {
return Error.New("expected %T but got %T", pieceTracker, partial)
}
// Merge piece counts for each node.
for nodeAlias, pieceCount := range pieceTracker.pieceCounts {
observer.pieceCounts[nodeAlias] += pieceCount
}
return nil
}
// Finish updates piece counts in the DB.
func (observer *Observer) Finish(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
observer.log.Info("piecetracker observer finished")
nodeAliasMap, err := observer.metabaseDB.LatestNodesAliasMap(ctx)
if err != nil {
return Error.Wrap(err)
}
pieceCounts := make(map[storj.NodeID]int64, len(observer.pieceCounts))
for nodeAlias, count := range observer.pieceCounts {
nodeID, ok := nodeAliasMap.Node(nodeAlias)
if !ok {
observer.log.Error("unrecognized node alias in piecetracker ranged-loop", zap.Int32("node-alias", int32(nodeAlias)))
continue
}
pieceCounts[nodeID] = count
}
err = observer.overlay.UpdatePieceCounts(ctx, pieceCounts)
if err != nil {
observer.log.Error("error updating piece counts", zap.Error(err))
return Error.Wrap(err)
}
return nil
}
type observerFork struct {
pieceCounts map[metabase.NodeAlias]int64
}
// newObserverFork creates new piecetracker ranged loop fork.
func newObserverFork() *observerFork {
return &observerFork{
pieceCounts: map[metabase.NodeAlias]int64{},
}
}
// Process iterates over segment range updating partial piece counts for each node.
func (fork *observerFork) Process(ctx context.Context, segments []rangedloop.Segment) error {
for _, segment := range segments {
if segment.Inline() {
continue
}
for _, piece := range segment.AliasPieces {
fork.pieceCounts[piece.Alias]++
}
}
return nil
}
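For orientation, the observer above follows the rangedloop contract: Start once per loop iteration, Fork once per range, Process repeatedly on each fork, Join to merge forks back, and Finish to flush results. A minimal driver sketch using the same imports as this file (hypothetical; the real scheduling lives in satellite/metabase/rangedloop and runs forks in parallel):

// runObserverOnce sketches one ranged-loop pass over pre-split segment ranges.
func runObserverOnce(ctx context.Context, obs rangedloop.Observer, ranges [][]rangedloop.Segment) error {
	// One loop iteration: reset observer state.
	if err := obs.Start(ctx, time.Now()); err != nil {
		return err
	}
	for _, segments := range ranges {
		// One fork per range; the real loop feeds each fork many batches.
		fork, err := obs.Fork(ctx)
		if err != nil {
			return err
		}
		if err := fork.Process(ctx, segments); err != nil {
			return err
		}
		// Merge the fork's partial piece counts into the observer.
		if err := obs.Join(ctx, fork); err != nil {
			return err
		}
	}
	// Flush: resolve node aliases and update overlay piece counts.
	return obs.Finish(ctx)
}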


@@ -0,0 +1,82 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package piecetracker_test

import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"storj.io/common/memory"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
)
func TestObserverPieceTracker(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.PieceTracker.UseRangedLoop = true
config.RangedLoop.Parallelism = 4
config.RangedLoop.BatchSize = 4
// configure RS
config.Metainfo.RS.Min = 2
config.Metainfo.RS.Repair = 3
config.Metainfo.RS.Success = 4
config.Metainfo.RS.Total = 4
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// ensure that the piece counts are empty
pieceCounts, err := planet.Satellites[0].Overlay.DB.AllPieceCounts(ctx)
require.NoError(t, err)
require.Equal(t, 0, len(pieceCounts))
// Setup: create 50KiB of data for the uplink to upload
testdata := testrand.Bytes(50 * memory.KiB)
testBucket := "testbucket"
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], testBucket, "test/path", testdata)
require.NoError(t, err)
// Run the ranged loop
_, err = planet.Satellites[0].RangedLoop.RangedLoop.Service.RunOnce(ctx)
require.NoError(t, err)
// Check that the piece counts are correct
pieceCounts, err = planet.Satellites[0].Overlay.DB.AllPieceCounts(ctx)
require.NoError(t, err)
require.True(t, len(pieceCounts) > 0)
for node, count := range pieceCounts {
require.Equal(t, int64(1), count, "node %s should have 1 piece", node)
}
// upload more objects
numOfObjects := 10
for i := 0; i < numOfObjects; i++ {
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], testBucket, fmt.Sprintf("test/path%d", i), testdata)
require.NoError(t, err)
}
// Run the ranged loop again
_, err = planet.Satellites[0].RangedLoop.RangedLoop.Service.RunOnce(ctx)
require.NoError(t, err)
// Check that the piece counts are correct
pieceCounts, err = planet.Satellites[0].Overlay.DB.AllPieceCounts(ctx)
require.NoError(t, err)
require.True(t, len(pieceCounts) > 0)
for node, count := range pieceCounts {
require.Equal(t, int64(numOfObjects+1), count, "node %s should have %d pieces", node, numOfObjects+1)
}
})
}


@@ -0,0 +1,9 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package piecetracker

// Config is the configuration for the piecetracker.
type Config struct {
UseRangedLoop bool `help:"whether to enable piece tracker observer with ranged loop" default:"true"`
}
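A sketch of how such a flag would typically gate the observer's registration (hypothetical wiring; the actual hookup happens in the satellite peer setup, outside this diff, and the imports are assumed from observer.go):

// observersFor returns the ranged loop observers to attach, honoring the
// UseRangedLoop flag. Illustrative only.
func observersFor(log *zap.Logger, metabaseDB *metabase.DB, overlayDB overlay.DB, config Config) []rangedloop.Observer {
	var observers []rangedloop.Observer
	if config.UseRangedLoop {
		observers = append(observers, NewObserver(log, metabaseDB, overlayDB, config))
	}
	return observers
}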


@@ -253,7 +253,7 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
)
RETURNING
created_at`,
- opts.ProjectID, opts.NewBucket, opts.NewEncryptedObjectKey, nextAvailableVersion, opts.NewStreamID,
+ opts.ProjectID, []byte(opts.NewBucket), opts.NewEncryptedObjectKey, nextAvailableVersion, opts.NewStreamID,
sourceObject.ExpiresAt, sourceObject.SegmentCount,
encryptionParameters{&sourceObject.Encryption},
copyMetadata, opts.NewEncryptedMetadataKeyNonce, opts.NewEncryptedMetadataKey,
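The only change above is binding the bucket name as []byte. A small sketch of the failure mode this avoids, assuming a lib/pq- or pgx-style driver where string parameters are bound as text (which must be valid UTF-8) while []byte binds as BYTEA (which accepts arbitrary bytes); the helper and query are hypothetical:

package sketch

import (
	"context"
	"database/sql"

	"storj.io/common/uuid"
)

// queryByBucket binds the bucket name as BYTEA so names containing
// invalid UTF-8 cannot be rejected by the text encoder.
func queryByBucket(ctx context.Context, db *sql.DB, projectID uuid.UUID, bucketName string) (*sql.Rows, error) {
	return db.QueryContext(ctx,
		`SELECT stream_id FROM objects WHERE project_id = $1 AND bucket_name = $2`,
		projectID, []byte(bucketName))
}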


@@ -249,7 +249,8 @@ func (db *DB) deleteInactiveObjectsAndSegments(ctx context.Context, objects []Ob
for _, obj := range objects {
batch.Queue(`
WITH check_segments AS (
- SELECT 1 FROM segments WHERE stream_id = $5::BYTEA AND created_at > $6
+ SELECT 1 FROM segments
+ WHERE stream_id = $5::BYTEA AND created_at > $6
), deleted_objects AS (
DELETE FROM objects
WHERE

@@ -258,9 +259,7 @@ func (db *DB) deleteInactiveObjectsAndSegments(ctx context.Context, objects []Ob
RETURNING stream_id
)
DELETE FROM segments
- `+db.impl.AsOfSystemInterval(opts.AsOfSystemInterval)+`
- WHERE
- segments.stream_id IN (SELECT stream_id FROM deleted_objects)
+ WHERE segments.stream_id IN (SELECT stream_id FROM deleted_objects)
`, obj.ProjectID, []byte(obj.BucketName), []byte(obj.ObjectKey), obj.Version, obj.StreamID, opts.InactiveDeadline)
}
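For context, the removed db.impl.AsOfSystemInterval helper renders a CockroachDB AS OF SYSTEM TIME clause, roughly as sketched below (not the exact dbutil code). Such historical reads cannot be combined with a mutating statement like the DELETE above, while the scan queries elsewhere keep using the interval:

// asOfSystemInterval sketches the helper's behavior: on CockroachDB a
// negative interval requests a slightly stale historical read; on
// Postgres it renders nothing.
func asOfSystemInterval(impl string, interval time.Duration) string {
	if impl == "cockroach" && interval < 0 {
		return fmt.Sprintf(" AS OF SYSTEM TIME '%s' ", interval)
	}
	return ""
}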


@@ -332,8 +332,9 @@ func TestDeleteZombieObjects(t *testing.T) {
// object will be checked if is inactive and will be deleted with segment
metabasetest.DeleteZombieObjects{
Opts: metabase.DeleteZombieObjects{
DeadlineBefore: now.Add(1 * time.Hour),
InactiveDeadline: now.Add(2 * time.Hour),
+ AsOfSystemInterval: -1 * time.Microsecond,
},
}.Check(ctx, t, db)


@@ -62,7 +62,7 @@ func (db *DB) ListObjects(ctx context.Context, opts ListObjects) (result ListObj
var entries []ObjectEntry
err = withRows(db.db.QueryContext(ctx, opts.getSQLQuery(),
- opts.ProjectID, opts.BucketName, opts.startKey(), opts.Cursor.Version,
+ opts.ProjectID, []byte(opts.BucketName), opts.startKey(), opts.Cursor.Version,
opts.stopKey(), opts.Status,
opts.Limit+1, len(opts.Prefix)+1))(func(rows tagsql.Rows) error {
entries, err = scanListObjectsResult(rows, opts)


@@ -196,7 +196,7 @@ func (db *DB) ListBucketsStreamIDs(ctx context.Context, opts ListBucketsStreamID
LIMIT $3
`, pgutil.UUIDArray(projectIDs), pgutil.ByteaArray(bucketNamesBytes),
opts.Limit,
- opts.CursorBucket.ProjectID, opts.CursorBucket.BucketName, opts.CursorStreamID,
+ opts.CursorBucket.ProjectID, []byte(opts.CursorBucket.BucketName), opts.CursorStreamID,
))(func(rows tagsql.Rows) error {
for rows.Next() {
var streamID uuid.UUID


@@ -822,14 +822,17 @@ func TestCollectBucketTallies(t *testing.T) {
t.Run("invalid bucket name", func(t *testing.T) {
defer metabasetest.DeleteAll{}.Check(ctx, t, db)

+ projectA := uuid.UUID{1}
+ projectB := uuid.UUID{2}
metabasetest.CollectBucketTallies{
Opts: metabase.CollectBucketTallies{
From: metabase.BucketLocation{
- ProjectID: testrand.UUID(),
+ ProjectID: projectA,
BucketName: "a\\",
},
To: metabase.BucketLocation{
- ProjectID: testrand.UUID(),
+ ProjectID: projectB,
BucketName: "b\\",
},
},
},

@@ -27,7 +27,7 @@ func TestZombieDeletion(t *testing.T) {
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.ZombieDeletion.Interval = 500 * time.Millisecond
- config.ZombieDeletion.AsOfSystemInterval = 0
+ config.ZombieDeletion.AsOfSystemInterval = -1 * time.Microsecond
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {


@@ -130,7 +130,7 @@ type Config struct {
MaxInlineSegmentSize memory.Size `default:"4KiB" help:"maximum inline segment size"`
// we have such default value because max value for ObjectKey is 1024(1 Kib) but EncryptedObjectKey
// has encryption overhead 16 bytes. So overall size is 1024 + 16 * 16.
- MaxEncryptedObjectKeyLength int `default:"1750" help:"maximum encrypted object key length"`
+ MaxEncryptedObjectKeyLength int `default:"2000" help:"maximum encrypted object key length"`
MaxSegmentSize memory.Size `default:"64MiB" help:"maximum segment size"`
MaxMetadataSize memory.Size `default:"2KiB" help:"maximum segment metadata size"`
MaxCommitInterval time.Duration `default:"48h" testDefault:"1h" help:"maximum time allowed to pass between creating and committing a segment"`


@@ -64,14 +64,12 @@ func BenchmarkOverlay(b *testing.B) {
check = append(check, testrand.NodeID())
}

- b.Run("KnownUnreliableOrOffline", func(b *testing.B) {
- criteria := &overlay.NodeCriteria{
- OnlineWindow: 1000 * time.Hour,
- }
+ b.Run("KnownReliable", func(b *testing.B) {
+ onlineWindow := 1000 * time.Hour
for i := 0; i < b.N; i++ {
- badNodes, err := overlaydb.KnownUnreliableOrOffline(ctx, criteria, check)
+ online, _, err := overlaydb.KnownReliable(ctx, check, onlineWindow, 0)
require.NoError(b, err)
- require.Len(b, badNodes, OfflineCount)
+ require.Len(b, online, OnlineCount)
}
})


@@ -17,6 +17,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/zeebo/errs"
"go.uber.org/zap"
+ "golang.org/x/exp/slices"

"storj.io/common/memory"
"storj.io/common/pb"

@@ -113,36 +114,45 @@ func TestMinimumDiskSpace(t *testing.T) {
})
}

- func TestOffline(t *testing.T) {
+ func TestOnlineOffline(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
service := satellite.Overlay.Service
- // TODO: handle cleanup

- result, err := service.KnownUnreliableOrOffline(ctx, []storj.NodeID{
+ online, offline, err := service.KnownReliable(ctx, []storj.NodeID{
planet.StorageNodes[0].ID(),
})
require.NoError(t, err)
- require.Empty(t, result)
+ require.Empty(t, offline)
+ require.Len(t, online, 1)

- result, err = service.KnownUnreliableOrOffline(ctx, []storj.NodeID{
+ online, offline, err = service.KnownReliable(ctx, []storj.NodeID{
planet.StorageNodes[0].ID(),
planet.StorageNodes[1].ID(),
planet.StorageNodes[2].ID(),
})
require.NoError(t, err)
- require.Empty(t, result)
+ require.Empty(t, offline)
+ require.Len(t, online, 3)

- result, err = service.KnownUnreliableOrOffline(ctx, []storj.NodeID{
+ unreliableNodeID := storj.NodeID{1, 2, 3, 4}
+ online, offline, err = service.KnownReliable(ctx, []storj.NodeID{
planet.StorageNodes[0].ID(),
- {1, 2, 3, 4}, // note that this succeeds by design
+ unreliableNodeID,
planet.StorageNodes[2].ID(),
})
require.NoError(t, err)
- require.Len(t, result, 1)
- require.Equal(t, result[0], storj.NodeID{1, 2, 3, 4})
+ require.Empty(t, offline)
+ require.Len(t, online, 2)
+
+ require.False(t, slices.ContainsFunc(online, func(node overlay.SelectedNode) bool {
+ return node.ID == unreliableNodeID
+ }))
+ require.False(t, slices.ContainsFunc(offline, func(node overlay.SelectedNode) bool {
+ return node.ID == unreliableNodeID
+ }))
})
}


@@ -11,6 +11,7 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
+ "golang.org/x/exp/maps"

"storj.io/common/pb"
"storj.io/common/storj"

@@ -47,8 +48,6 @@ var ErrLowDifficulty = errs.Class("node id difficulty too low")
//
// architecture: Database
type DB interface {
- // GetOnlineNodesForGetDelete returns a map of nodes for the supplied nodeIDs
- GetOnlineNodesForGetDelete(ctx context.Context, nodeIDs []storj.NodeID, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) (map[storj.NodeID]*SelectedNode, error)
// GetOnlineNodesForAuditRepair returns a map of nodes for the supplied nodeIDs.
// The return value contains necessary information to create orders as well as nodes'
// current reputation status.

@@ -62,14 +61,10 @@ type DB interface {
// Get looks up the node by nodeID
Get(ctx context.Context, nodeID storj.NodeID) (*NodeDossier, error)
- // KnownOffline filters a set of nodes to offline nodes
- KnownOffline(context.Context, *NodeCriteria, storj.NodeIDList) (storj.NodeIDList, error)
- // KnownUnreliableOrOffline filters a set of nodes to unhealth or offlines node, independent of new
- KnownUnreliableOrOffline(context.Context, *NodeCriteria, storj.NodeIDList) (storj.NodeIDList, error)
// KnownReliableInExcludedCountries filters healthy nodes that are in excluded countries.
KnownReliableInExcludedCountries(context.Context, *NodeCriteria, storj.NodeIDList) (storj.NodeIDList, error)
// KnownReliable filters a set of nodes to reliable (online and qualified) nodes.
- KnownReliable(ctx context.Context, onlineWindow time.Duration, nodeIDs storj.NodeIDList) ([]*pb.Node, error)
+ KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []SelectedNode, offline []SelectedNode, err error)
// Reliable returns all nodes that are reliable
Reliable(context.Context, *NodeCriteria) (storj.NodeIDList, error)
// UpdateReputation updates the DB columns for all reputation fields in ReputationStatus.
@@ -396,13 +391,6 @@ func (service *Service) Get(ctx context.Context, nodeID storj.NodeID) (_ *NodeDo
return service.db.Get(ctx, nodeID)
}

- // GetOnlineNodesForGetDelete returns a map of nodes for the supplied nodeIDs.
- func (service *Service) GetOnlineNodesForGetDelete(ctx context.Context, nodeIDs []storj.NodeID) (_ map[storj.NodeID]*SelectedNode, err error) {
- defer mon.Task()(&ctx)(&err)
- return service.db.GetOnlineNodesForGetDelete(ctx, nodeIDs, service.config.Node.OnlineWindow, service.config.Node.AsOfSystemTime)
- }

// CachedGetOnlineNodesForGet returns a map of nodes from the download selection cache from the suppliedIDs.
func (service *Service) CachedGetOnlineNodesForGet(ctx context.Context, nodeIDs []storj.NodeID) (_ map[storj.NodeID]*SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)

@@ -551,24 +539,6 @@ func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req
return nodes, nil
}

- // KnownOffline filters a set of nodes to offline nodes.
- func (service *Service) KnownOffline(ctx context.Context, nodeIds storj.NodeIDList) (offlineNodes storj.NodeIDList, err error) {
- defer mon.Task()(&ctx)(&err)
- criteria := &NodeCriteria{
- OnlineWindow: service.config.Node.OnlineWindow,
- }
- return service.db.KnownOffline(ctx, criteria, nodeIds)
- }
-
- // KnownUnreliableOrOffline filters a set of nodes to unhealth or offlines node, independent of new.
- func (service *Service) KnownUnreliableOrOffline(ctx context.Context, nodeIds storj.NodeIDList) (badNodes storj.NodeIDList, err error) {
- defer mon.Task()(&ctx)(&err)
- criteria := &NodeCriteria{
- OnlineWindow: service.config.Node.OnlineWindow,
- }
- return service.db.KnownUnreliableOrOffline(ctx, criteria, nodeIds)
- }

// InsertOfflineNodeEvents inserts offline events into node events.
func (service *Service) InsertOfflineNodeEvents(ctx context.Context, cooldown time.Duration, cutoff time.Duration, limit int) (count int, err error) {
defer mon.Task()(&ctx)(&err)
@@ -614,9 +584,11 @@ func (service *Service) KnownReliableInExcludedCountries(ctx context.Context, no
}

// KnownReliable filters a set of nodes to reliable (online and qualified) nodes.
- func (service *Service) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList) (nodes []*pb.Node, err error) {
+ func (service *Service) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList) (onlineNodes []SelectedNode, offlineNodes []SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)
- return service.db.KnownReliable(ctx, service.config.Node.OnlineWindow, nodeIDs)
+ // TODO add as of system time
+ return service.db.KnownReliable(ctx, nodeIDs, service.config.Node.OnlineWindow, 0)
}
// Reliable filters a set of nodes that are reliable, independent of new.

@@ -791,23 +763,23 @@ func (service *Service) UpdateCheckIn(ctx context.Context, node NodeCheckInInfo,
// GetMissingPieces returns the list of offline nodes and the corresponding pieces.
func (service *Service) GetMissingPieces(ctx context.Context, pieces metabase.Pieces) (missingPieces []uint16, err error) {
defer mon.Task()(&ctx)(&err)
+ // TODO this method will be removed completely in subsequent change
var nodeIDs storj.NodeIDList
+ missingPiecesMap := map[storj.NodeID]uint16{}
for _, p := range pieces {
nodeIDs = append(nodeIDs, p.StorageNode)
+ missingPiecesMap[p.StorageNode] = p.Number
}
- badNodeIDs, err := service.KnownUnreliableOrOffline(ctx, nodeIDs)
+ onlineNodes, _, err := service.KnownReliable(ctx, nodeIDs)
if err != nil {
return nil, Error.New("error getting nodes %s", err)
}

- for _, p := range pieces {
- for _, nodeID := range badNodeIDs {
- if nodeID == p.StorageNode {
- missingPieces = append(missingPieces, p.Number)
- }
- }
+ for _, node := range onlineNodes {
+ delete(missingPiecesMap, node.ID)
}
- return missingPieces, nil
+ return maps.Values(missingPiecesMap), nil
}
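A minimal usage sketch of the new KnownReliable signature, which partitions the requested IDs into online and offline reliable nodes (unreliable IDs are simply absent from both slices); the helper name is illustrative:

// reliableAddresses maps each online reliable node to its contact address.
func reliableAddresses(ctx context.Context, service *overlay.Service, nodeIDs storj.NodeIDList) (map[storj.NodeID]string, error) {
	online, _, err := service.KnownReliable(ctx, nodeIDs)
	if err != nil {
		return nil, err
	}
	addrs := make(map[storj.NodeID]string, len(online))
	for _, node := range online {
		addrs[node.ID] = node.Address.Address
	}
	return addrs, nil
}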
// GetReliablePiecesInExcludedCountries returns the list of pieces held by nodes located in excluded countries.


@@ -388,47 +388,6 @@ func TestNodeInfo(t *testing.T) {
})
}

- func TestGetOnlineNodesForGetDelete(t *testing.T) {
- testplanet.Run(t, testplanet.Config{
- SatelliteCount: 1, StorageNodeCount: 2, UplinkCount: 0,
- }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
- // pause chores that might update node data
- planet.Satellites[0].RangedLoop.RangedLoop.Service.Loop.Stop()
- planet.Satellites[0].Repair.Repairer.Loop.Pause()
- for _, node := range planet.StorageNodes {
- node.Contact.Chore.Pause(ctx)
- }
- // should not return anything if nodeIDs aren't in the nodes table
- actualNodes, err := planet.Satellites[0].Overlay.Service.GetOnlineNodesForGetDelete(ctx, []storj.NodeID{})
- require.NoError(t, err)
- require.Equal(t, 0, len(actualNodes))
- actualNodes, err = planet.Satellites[0].Overlay.Service.GetOnlineNodesForGetDelete(ctx, []storj.NodeID{testrand.NodeID()})
- require.NoError(t, err)
- require.Equal(t, 0, len(actualNodes))
- expectedNodes := make(map[storj.NodeID]*overlay.SelectedNode, len(planet.StorageNodes))
- nodeIDs := make([]storj.NodeID, len(planet.StorageNodes)+1)
- for i, node := range planet.StorageNodes {
- nodeIDs[i] = node.ID()
- dossier, err := planet.Satellites[0].Overlay.Service.Get(ctx, node.ID())
- require.NoError(t, err)
- expectedNodes[dossier.Id] = &overlay.SelectedNode{
- ID: dossier.Id,
- Address: dossier.Address,
- LastNet: dossier.LastNet,
- LastIPPort: dossier.LastIPPort,
- }
- }
- // add a fake node ID to make sure GetOnlineNodesForGetDelete doesn't error and still returns the expected nodes.
- nodeIDs[len(planet.StorageNodes)] = testrand.NodeID()
- actualNodes, err = planet.Satellites[0].Overlay.Service.GetOnlineNodesForGetDelete(ctx, nodeIDs)
- require.NoError(t, err)
- require.Equal(t, expectedNodes, actualNodes)
- })
- }

func TestKnownReliable(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,

@@ -475,7 +434,7 @@ func TestKnownReliable(t *testing.T) {
require.NoError(t, err)

// Check that only storage nodes #4 and #5 are reliable
- result, err := service.KnownReliable(ctx, []storj.NodeID{
+ online, _, err := service.KnownReliable(ctx, []storj.NodeID{
planet.StorageNodes[0].ID(),
planet.StorageNodes[1].ID(),
planet.StorageNodes[2].ID(),

@@ -484,7 +443,7 @@ func TestKnownReliable(t *testing.T) {
planet.StorageNodes[5].ID(),
})
require.NoError(t, err)
- require.Len(t, result, 2)
+ require.Len(t, online, 2)

// Sort the storage nodes for predictable checks
expectedReliable := []storj.NodeURL{

@@ -492,11 +451,11 @@ func TestKnownReliable(t *testing.T) {
planet.StorageNodes[5].NodeURL(),
}
sort.Slice(expectedReliable, func(i, j int) bool { return expectedReliable[i].ID.Less(expectedReliable[j].ID) })
- sort.Slice(result, func(i, j int) bool { return result[i].Id.Less(result[j].Id) })
+ sort.Slice(online, func(i, j int) bool { return online[i].ID.Less(online[j].ID) })

// Assert the reliable nodes are the expected ones
- for i, node := range result {
+ for i, node := range online {
- assert.Equal(t, expectedReliable[i].ID, node.Id)
+ assert.Equal(t, expectedReliable[i].ID, node.ID)
assert.Equal(t, expectedReliable[i].Address, node.Address.Address)
}
})


@@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"

"storj.io/common/pb"
"storj.io/common/storj"

@@ -25,13 +26,10 @@ func TestStatDB(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
testDatabase(ctx, t, db.OverlayCache())
})
- satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
- testDatabase(ctx, t, db.OverlayCache())
- })
}

func testDatabase(ctx context.Context, t *testing.T, cache overlay.DB) {
- { // TestKnownUnreliableOrOffline and TestReliable
+ { // Test KnownReliable and Reliable
for i, tt := range []struct {
nodeID storj.NodeID
unknownAuditSuspended bool

@@ -108,16 +106,24 @@ func testDatabase(ctx context.Context, t *testing.T, cache overlay.DB) {
ExcludedCountries: []string{"FR", "BE"},
}

- invalid, err := cache.KnownUnreliableOrOffline(ctx, criteria, nodeIds)
+ contains := func(nodeID storj.NodeID) func(node overlay.SelectedNode) bool {
+ return func(node overlay.SelectedNode) bool {
+ return node.ID == nodeID
+ }
+ }
+
+ online, offline, err := cache.KnownReliable(ctx, nodeIds, criteria.OnlineWindow, criteria.AsOfSystemInterval)
require.NoError(t, err)

- require.Contains(t, invalid, storj.NodeID{2}) // disqualified
- require.Contains(t, invalid, storj.NodeID{3}) // unknown audit suspended
- require.Contains(t, invalid, storj.NodeID{4}) // offline
- require.Contains(t, invalid, storj.NodeID{5}) // gracefully exited
- require.Contains(t, invalid, storj.NodeID{6}) // offline suspended
- require.Contains(t, invalid, storj.NodeID{9}) // not in db
- require.Len(t, invalid, 6)
+ // unreliable nodes shouldn't be in results
+ require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{2}))) // disqualified
+ require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{3}))) // unknown audit suspended
+ require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{5}))) // gracefully exited
+ require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{6}))) // offline suspended
+ require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{9}))) // not in db
+
+ require.True(t, slices.ContainsFunc(offline, contains(storj.NodeID{4}))) // offline
+ require.Len(t, append(online, offline...), 4)

valid, err := cache.Reliable(ctx, criteria)
require.NoError(t, err)

@@ -239,6 +245,5 @@ func testDatabase(ctx context.Context, t *testing.T, cache overlay.DB) {
require.NoError(t, err)
_, err = cache.Get(ctx, nodeID)
require.NoError(t, err)
- }
}


@@ -29,7 +29,7 @@ var (
type Config struct {
Enabled bool `help:"whether to run this chore." default:"false"`
Interval time.Duration `help:"How often to run this chore, which is how often unpaid invoices are checked." default:"24h"`
- GracePeriod time.Duration `help:"How long to wait between a warning event and freezing an account." default:"720h"`
+ GracePeriod time.Duration `help:"How long to wait between a warning event and freezing an account." default:"360h"`
PriceThreshold int64 `help:"The failed invoice amount (in cents) beyond which an account will not be frozen" default:"10000"`
}


@@ -64,6 +64,8 @@ type Invoices interface {
Pay(id string, params *stripe.InvoicePayParams) (*stripe.Invoice, error)
Del(id string, params *stripe.InvoiceParams) (*stripe.Invoice, error)
Get(id string, params *stripe.InvoiceParams) (*stripe.Invoice, error)
+ MarkUncollectible(id string, params *stripe.InvoiceMarkUncollectibleParams) (*stripe.Invoice, error)
+ VoidInvoice(id string, params *stripe.InvoiceVoidParams) (*stripe.Invoice, error)
}

// InvoiceItems Stripe InvoiceItems interface.


@@ -860,6 +860,86 @@ func (service *Service) createInvoices(ctx context.Context, customers []Customer
return scheduled, draft, errGrp.Err() return scheduled, draft, errGrp.Err()
} }
// SetInvoiceStatus will set all open invoices within the specified date range to the requested status.
func (service *Service) SetInvoiceStatus(ctx context.Context, startPeriod, endPeriod time.Time, status string, dryRun bool) (err error) {
defer mon.Task()(&ctx)(&err)
switch stripe.InvoiceStatus(strings.ToLower(status)) {
case stripe.InvoiceStatusUncollectible:
err = service.iterateInvoicesInTimeRange(ctx, startPeriod, endPeriod, func(invoiceId string) error {
service.log.Info("updating invoice status to uncollectible", zap.String("invoiceId", invoiceId))
if !dryRun {
_, err := service.stripeClient.Invoices().MarkUncollectible(invoiceId, &stripe.InvoiceMarkUncollectibleParams{})
if err != nil {
return Error.Wrap(err)
}
}
return nil
})
case stripe.InvoiceStatusVoid:
err = service.iterateInvoicesInTimeRange(ctx, startPeriod, endPeriod, func(invoiceId string) error {
service.log.Info("updating invoice status to void", zap.String("invoiceId", invoiceId))
if !dryRun {
_, err = service.stripeClient.Invoices().VoidInvoice(invoiceId, &stripe.InvoiceVoidParams{})
if err != nil {
return Error.Wrap(err)
}
}
return nil
})
case stripe.InvoiceStatusPaid:
err = service.iterateInvoicesInTimeRange(ctx, startPeriod, endPeriod, func(invoiceId string) error {
service.log.Info("updating invoice status to paid", zap.String("invoiceId", invoiceId))
if !dryRun {
payParams := &stripe.InvoicePayParams{
Params: stripe.Params{Context: ctx},
PaidOutOfBand: stripe.Bool(true),
}
_, err = service.stripeClient.Invoices().Pay(invoiceId, payParams)
if err != nil {
return Error.Wrap(err)
}
}
return nil
})
default:
// unknown
service.log.Error("Unknown status provided. Valid options are uncollectible, void, or paid.", zap.String("status", status))
return Error.New("unknown status provided")
}
return err
}
func (service *Service) iterateInvoicesInTimeRange(ctx context.Context, startPeriod, endPeriod time.Time, updateStatus func(string) error) (err error) {
defer mon.Task()(&ctx)(&err)
params := &stripe.InvoiceListParams{
ListParams: stripe.ListParams{
Context: ctx,
Limit: stripe.Int64(100),
},
Status: stripe.String("open"),
CreatedRange: &stripe.RangeQueryParams{
GreaterThanOrEqual: startPeriod.Unix(),
LesserThanOrEqual: endPeriod.Unix(),
},
}
numInvoices := 0
invoicesIterator := service.stripeClient.Invoices().List(params)
for invoicesIterator.Next() {
numInvoices++
stripeInvoice := invoicesIterator.Invoice()
err := updateStatus(stripeInvoice.ID)
if err != nil {
return Error.Wrap(err)
}
}
service.log.Info("found " + strconv.Itoa(numInvoices) + " total invoices")
return Error.Wrap(invoicesIterator.Err())
}
// CreateBalanceInvoiceItems will find users with a stripe balance, create an invoice
// item with the charges due, and zero out the stripe balance.
func (service *Service) CreateBalanceInvoiceItems(ctx context.Context) (err error) {

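A usage sketch for the new SetInvoiceStatus method above, using the stripe1 alias imported by the tests below; the helper name and the chosen month are illustrative. A dry run only logs the matching open invoices before the real pass mutates them:

// markMayUncollectible demonstrates the dry-run-then-apply pattern for
// one billing period (bounds are inclusive on invoice creation date).
func markMayUncollectible(ctx context.Context, service *stripe1.Service) error {
	start := time.Date(2023, time.May, 1, 0, 0, 0, 0, time.UTC)
	end := time.Date(2023, time.June, 1, 0, 0, 0, 0, time.UTC)
	// Dry run: only logs which open invoices would be affected.
	if err := service.SetInvoiceStatus(ctx, start, end, "uncollectible", true); err != nil {
		return err
	}
	// Apply for real.
	return service.SetInvoiceStatus(ctx, start, end, "uncollectible", false)
}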

@@ -36,6 +36,292 @@ import (
stripe1 "storj.io/storj/satellite/payments/stripe"
)
func TestService_SetInvoiceStatusUncollectible(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Payments.StripeCoinPayments.ListingLimit = 4
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
payments := satellite.API.Payments
invoiceBalance := currency.AmountFromBaseUnits(800, currency.USDollars)
usdCurrency := string(stripe.CurrencyUSD)
user, err := satellite.AddUser(ctx, console.CreateUser{
FullName: "testuser",
Email: "user@test",
}, 1)
require.NoError(t, err)
customer, err := satellite.DB.StripeCoinPayments().Customers().GetCustomerID(ctx, user.ID)
require.NoError(t, err)
// create invoice item
invItem, err := satellite.API.Payments.StripeClient.InvoiceItems().New(&stripe.InvoiceItemParams{
Params: stripe.Params{Context: ctx},
Amount: stripe.Int64(invoiceBalance.BaseUnits()),
Currency: stripe.String(usdCurrency),
Customer: &customer,
})
require.NoError(t, err)
InvItems := make([]*stripe.InvoiceUpcomingInvoiceItemParams, 0, 1)
InvItems = append(InvItems, &stripe.InvoiceUpcomingInvoiceItemParams{
InvoiceItem: &invItem.ID,
Amount: &invItem.Amount,
Currency: stripe.String(usdCurrency),
})
// create invoice
inv, err := satellite.API.Payments.StripeClient.Invoices().New(&stripe.InvoiceParams{
Params: stripe.Params{Context: ctx},
Customer: &customer,
InvoiceItems: InvItems,
})
require.NoError(t, err)
finalizeParams := &stripe.InvoiceFinalizeParams{Params: stripe.Params{Context: ctx}}
// finalize invoice
inv, err = satellite.API.Payments.StripeClient.Invoices().FinalizeInvoice(inv.ID, finalizeParams)
require.NoError(t, err)
require.Equal(t, stripe.InvoiceStatusOpen, inv.Status)
// run update invoice status to uncollectible
// beginning of last month
startPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, -1, 0)
// end of current month
endPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, 1, -1)
t.Run("update invoice status to uncollectible", func(t *testing.T) {
err = payments.StripeService.SetInvoiceStatus(ctx, startPeriod, endPeriod, "uncollectible", false)
require.NoError(t, err)
iter := satellite.API.Payments.StripeClient.Invoices().List(&stripe.InvoiceListParams{
ListParams: stripe.ListParams{Context: ctx},
})
iter.Next()
require.Equal(t, stripe.InvoiceStatusUncollectible, iter.Invoice().Status)
})
})
}
func TestService_SetInvoiceStatusVoid(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Payments.StripeCoinPayments.ListingLimit = 4
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
payments := satellite.API.Payments
invoiceBalance := currency.AmountFromBaseUnits(800, currency.USDollars)
usdCurrency := string(stripe.CurrencyUSD)
user, err := satellite.AddUser(ctx, console.CreateUser{
FullName: "testuser",
Email: "user@test",
}, 1)
require.NoError(t, err)
customer, err := satellite.DB.StripeCoinPayments().Customers().GetCustomerID(ctx, user.ID)
require.NoError(t, err)
// create invoice item
invItem, err := satellite.API.Payments.StripeClient.InvoiceItems().New(&stripe.InvoiceItemParams{
Params: stripe.Params{Context: ctx},
Amount: stripe.Int64(invoiceBalance.BaseUnits()),
Currency: stripe.String(usdCurrency),
Customer: &customer,
})
require.NoError(t, err)
InvItems := make([]*stripe.InvoiceUpcomingInvoiceItemParams, 0, 1)
InvItems = append(InvItems, &stripe.InvoiceUpcomingInvoiceItemParams{
InvoiceItem: &invItem.ID,
Amount: &invItem.Amount,
Currency: stripe.String(usdCurrency),
})
// create invoice
inv, err := satellite.API.Payments.StripeClient.Invoices().New(&stripe.InvoiceParams{
Params: stripe.Params{Context: ctx},
Customer: &customer,
InvoiceItems: InvItems,
})
require.NoError(t, err)
finalizeParams := &stripe.InvoiceFinalizeParams{Params: stripe.Params{Context: ctx}}
// finalize invoice
inv, err = satellite.API.Payments.StripeClient.Invoices().FinalizeInvoice(inv.ID, finalizeParams)
require.NoError(t, err)
require.Equal(t, stripe.InvoiceStatusOpen, inv.Status)
// run update invoice status to void
// beginning of last month
startPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, -1, 0)
// end of current month
endPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, 1, -1)
t.Run("update invoice status to void", func(t *testing.T) {
err = payments.StripeService.SetInvoiceStatus(ctx, startPeriod, endPeriod, "void", false)
require.NoError(t, err)
iter := satellite.API.Payments.StripeClient.Invoices().List(&stripe.InvoiceListParams{
ListParams: stripe.ListParams{Context: ctx},
})
iter.Next()
require.Equal(t, stripe.InvoiceStatusVoid, iter.Invoice().Status)
})
})
}
func TestService_SetInvoiceStatusPaid(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Payments.StripeCoinPayments.ListingLimit = 4
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
payments := satellite.API.Payments
invoiceBalance := currency.AmountFromBaseUnits(800, currency.USDollars)
usdCurrency := string(stripe.CurrencyUSD)
user, err := satellite.AddUser(ctx, console.CreateUser{
FullName: "testuser",
Email: "user@test",
}, 1)
require.NoError(t, err)
customer, err := satellite.DB.StripeCoinPayments().Customers().GetCustomerID(ctx, user.ID)
require.NoError(t, err)
// create invoice item
invItem, err := satellite.API.Payments.StripeClient.InvoiceItems().New(&stripe.InvoiceItemParams{
Params: stripe.Params{Context: ctx},
Amount: stripe.Int64(invoiceBalance.BaseUnits()),
Currency: stripe.String(usdCurrency),
Customer: &customer,
})
require.NoError(t, err)
InvItems := make([]*stripe.InvoiceUpcomingInvoiceItemParams, 0, 1)
InvItems = append(InvItems, &stripe.InvoiceUpcomingInvoiceItemParams{
InvoiceItem: &invItem.ID,
Amount: &invItem.Amount,
Currency: stripe.String(usdCurrency),
})
// create invoice
inv, err := satellite.API.Payments.StripeClient.Invoices().New(&stripe.InvoiceParams{
Params: stripe.Params{Context: ctx},
Customer: &customer,
InvoiceItems: InvItems,
})
require.NoError(t, err)
finalizeParams := &stripe.InvoiceFinalizeParams{Params: stripe.Params{Context: ctx}}
// finalize invoice
inv, err = satellite.API.Payments.StripeClient.Invoices().FinalizeInvoice(inv.ID, finalizeParams)
require.NoError(t, err)
require.Equal(t, stripe.InvoiceStatusOpen, inv.Status)
// run update invoice status to paid
// beginning of last month
startPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, -1, 0)
// end of current month
endPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, 1, -1)
t.Run("update invoice status to paid", func(t *testing.T) {
err = payments.StripeService.SetInvoiceStatus(ctx, startPeriod, endPeriod, "paid", false)
require.NoError(t, err)
iter := satellite.API.Payments.StripeClient.Invoices().List(&stripe.InvoiceListParams{
ListParams: stripe.ListParams{Context: ctx},
})
iter.Next()
require.Equal(t, stripe.InvoiceStatusPaid, iter.Invoice().Status)
})
})
}
func TestService_SetInvoiceStatusInvalid(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Payments.StripeCoinPayments.ListingLimit = 4
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
payments := satellite.API.Payments
invoiceBalance := currency.AmountFromBaseUnits(800, currency.USDollars)
usdCurrency := string(stripe.CurrencyUSD)
user, err := satellite.AddUser(ctx, console.CreateUser{
FullName: "testuser",
Email: "user@test",
}, 1)
require.NoError(t, err)
customer, err := satellite.DB.StripeCoinPayments().Customers().GetCustomerID(ctx, user.ID)
require.NoError(t, err)
// create invoice item
invItem, err := satellite.API.Payments.StripeClient.InvoiceItems().New(&stripe.InvoiceItemParams{
Params: stripe.Params{Context: ctx},
Amount: stripe.Int64(invoiceBalance.BaseUnits()),
Currency: stripe.String(usdCurrency),
Customer: &customer,
})
require.NoError(t, err)
InvItems := make([]*stripe.InvoiceUpcomingInvoiceItemParams, 0, 1)
InvItems = append(InvItems, &stripe.InvoiceUpcomingInvoiceItemParams{
InvoiceItem: &invItem.ID,
Amount: &invItem.Amount,
Currency: stripe.String(usdCurrency),
})
// create invoice
inv, err := satellite.API.Payments.StripeClient.Invoices().New(&stripe.InvoiceParams{
Params: stripe.Params{Context: ctx},
Customer: &customer,
InvoiceItems: InvItems,
})
require.NoError(t, err)
finalizeParams := &stripe.InvoiceFinalizeParams{Params: stripe.Params{Context: ctx}}
// finalize invoice
inv, err = satellite.API.Payments.StripeClient.Invoices().FinalizeInvoice(inv.ID, finalizeParams)
require.NoError(t, err)
require.Equal(t, stripe.InvoiceStatusOpen, inv.Status)
// attempt to update the invoice status to an invalid value
// beginning of last month
startPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, -1, 0)
// end of current month
endPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, 1, -1)
t.Run("update invoice status to invalid", func(t *testing.T) {
err = payments.StripeService.SetInvoiceStatus(ctx, startPeriod, endPeriod, "not a real status", false)
require.Error(t, err)
})
})
}
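Taken together, the three tests above pin down the new command's surface: "uncollectible", "void" and "paid" map to the matching Stripe invoice transitions, anything else is an error, and the final boolean selects dry-run mode. A minimal sketch of the dispatch this implies (illustration only: the helper name, the Invoices interface, the dry-run handling and the stripe-go import path are assumptions, not the satellite's actual code; only the three Stripe calls are taken from the mock client in this change):

package sketch

import (
	"strings"

	"github.com/stripe/stripe-go/v72"
	"github.com/zeebo/errs"
)

// Invoices mirrors the three methods the mock Stripe client in this change implements.
type Invoices interface {
	MarkUncollectible(id string, params *stripe.InvoiceMarkUncollectibleParams) (*stripe.Invoice, error)
	VoidInvoice(id string, params *stripe.InvoiceVoidParams) (*stripe.Invoice, error)
	Pay(id string, params *stripe.InvoicePayParams) (*stripe.Invoice, error)
}

// setInvoiceStatus is a hypothetical helper mapping a status string onto a
// Stripe invoice transition for a single invoice.
func setInvoiceStatus(invoices Invoices, id, status string, dryRun bool) error {
	if dryRun {
		return nil // assumed: a dry run only reports what would change
	}
	var err error
	switch strings.ToLower(status) {
	case "uncollectible":
		_, err = invoices.MarkUncollectible(id, &stripe.InvoiceMarkUncollectibleParams{})
	case "void":
		_, err = invoices.VoidInvoice(id, &stripe.InvoiceVoidParams{})
	case "paid":
		// paid out of band: settle the invoice without charging a payment method
		_, err = invoices.Pay(id, &stripe.InvoicePayParams{PaidOutOfBand: stripe.Bool(true)})
	default:
		err = errs.New("unknown invoice status %q", status)
	}
	return err
}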
func TestService_BalanceInvoiceItems(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,


@@ -497,6 +497,32 @@ type mockInvoices struct {
invoiceItems *mockInvoiceItems
}
func (m *mockInvoices) MarkUncollectible(id string, params *stripe.InvoiceMarkUncollectibleParams) (*stripe.Invoice, error) {
for _, invoices := range m.invoices {
for _, invoice := range invoices {
if invoice.ID == id {
invoice.Status = stripe.InvoiceStatusUncollectible
return invoice, nil
}
}
}
return nil, errors.New("invoice not found")
}
func (m *mockInvoices) VoidInvoice(id string, params *stripe.InvoiceVoidParams) (*stripe.Invoice, error) {
for _, invoices := range m.invoices {
for _, invoice := range invoices {
if invoice.ID == id {
invoice.Status = stripe.InvoiceStatusVoid
return invoice, nil
}
}
}
return nil, errors.New("invoice not found")
}
func newMockInvoices(root *mockStripeState, invoiceItems *mockInvoiceItems) *mockInvoices {
return &mockInvoices{
root: root,
@@ -639,8 +665,9 @@ func (m *mockInvoices) Pay(id string, params *stripe.InvoicePayParams) (*stripe.
invoice.AmountRemaining = 0
return invoice, nil
}
- } else if invoice.AmountRemaining == 0 {
+ } else if invoice.AmountRemaining == 0 || (params.PaidOutOfBand != nil && *params.PaidOutOfBand) {
invoice.Status = stripe.InvoiceStatusPaid
+ invoice.AmountRemaining = 0
}
return invoice, nil
}


@@ -43,6 +43,7 @@ import (
"storj.io/storj/satellite/console/userinfo"
"storj.io/storj/satellite/contact"
"storj.io/storj/satellite/gc/bloomfilter"
"storj.io/storj/satellite/gc/piecetracker"
"storj.io/storj/satellite/gc/sender" "storj.io/storj/satellite/gc/sender"
"storj.io/storj/satellite/gracefulexit" "storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/mailservice" "storj.io/storj/satellite/mailservice"
@ -215,6 +216,8 @@ type Config struct {
ProjectLimit accounting.ProjectLimitConfig ProjectLimit accounting.ProjectLimitConfig
Analytics analytics.Config Analytics analytics.Config
PieceTracker piecetracker.Config
}
func setupMailService(log *zap.Logger, config Config) (*mailservice.Service, error) {


@@ -18,6 +18,7 @@ import (
"storj.io/storj/private/lifecycle"
"storj.io/storj/satellite/accounting/nodetally"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/gc/piecetracker"
"storj.io/storj/satellite/gracefulexit" "storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/metabase" "storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/metabase/rangedloop" "storj.io/storj/satellite/metabase/rangedloop"
@ -65,6 +66,10 @@ type RangedLoop struct {
NodeTallyObserver *nodetally.Observer NodeTallyObserver *nodetally.Observer
} }
PieceTracker struct {
Observer *piecetracker.Observer
}
RangedLoop struct {
Service *rangedloop.Service
}
@@ -124,6 +129,15 @@ func NewRangedLoop(log *zap.Logger, db DB, metabaseDB *metabase.DB, config *Conf
metabaseDB)
}
{ // setup piece tracker observer
peer.PieceTracker.Observer = piecetracker.NewObserver(
log.Named("piecetracker"),
metabaseDB,
peer.DB.OverlayCache(),
config.PieceTracker,
)
}
{ // setup overlay
peer.Overlay.Service, err = overlay.NewService(peer.Log.Named("overlay"), peer.DB.OverlayCache(), peer.DB.NodeEvents(), config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
if err != nil {
@@ -167,6 +181,10 @@ func NewRangedLoop(log *zap.Logger, db DB, metabaseDB *metabase.DB, config *Conf
observers = append(observers, peer.Repair.Observer)
}
if config.PieceTracker.UseRangedLoop {
observers = append(observers, peer.PieceTracker.Observer)
}
segments := rangedloop.NewMetabaseRangeSplitter(metabaseDB, config.RangedLoop.AsOfSystemInterval, config.RangedLoop.BatchSize)
peer.RangedLoop.Service = rangedloop.NewService(log.Named("rangedloop"), config.RangedLoop, segments, observers)
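The piece tracker hooks into the ranged loop as one more observer, gated by config.PieceTracker.UseRangedLoop. For orientation, a ranged-loop observer follows roughly this shape (abbreviated from the rangedloop package as of this change; treat the exact signatures and the Segment type as approximate rather than authoritative):

// Approximate shape of a ranged-loop observer; see satellite/metabase/rangedloop
// for the authoritative interface definitions.
type Observer interface {
	Start(ctx context.Context, startTime time.Time) error // called once per loop iteration
	Fork(ctx context.Context) (Partial, error)            // one Partial per parallel segment range
	Join(ctx context.Context, partial Partial) error      // merge a finished range back in
	Finish(ctx context.Context) error                     // called after all ranges complete
}

type Partial interface {
	Process(ctx context.Context, segments []Segment) error
}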


@@ -15,7 +15,6 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
"storj.io/common/pb" "storj.io/common/pb"
"storj.io/common/storj" "storj.io/common/storj"
@@ -195,65 +194,15 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
mon.IntVal("repair_segment_size").Observe(int64(segment.EncryptedSize)) //mon:locked
stats.repairSegmentSize.Observe(int64(segment.EncryptedSize))
+ piecesCheck, err := repairer.classifySegmentPieces(ctx, segment)
+ if err != nil {
+ return false, err
+ }
pieces := segment.Pieces
- allNodeIDs := make([]storj.NodeID, len(pieces))
- for i, p := range pieces {
- allNodeIDs[i] = p.StorageNode
- }
- excludeNodeIDs := allNodeIDs
- missingPieces, err := repairer.overlay.GetMissingPieces(ctx, pieces)
- if err != nil {
- return false, overlayQueryError.New("error identifying missing pieces: %w", err)
- }
- var clumpedPieces metabase.Pieces
- var clumpedPiecesSet map[uint16]bool
- if repairer.doDeclumping {
- // if multiple pieces are on the same last_net, keep only the first one. The rest are
- // to be considered retrievable but unhealthy.
- lastNets, err := repairer.overlay.GetNodesNetworkInOrder(ctx, allNodeIDs)
- if err != nil {
- return false, metainfoGetError.Wrap(err)
- }
- clumpedPieces = repair.FindClumpedPieces(segment.Pieces, lastNets)
- clumpedPiecesSet = make(map[uint16]bool)
- for _, clumpedPiece := range clumpedPieces {
- clumpedPiecesSet[clumpedPiece.Number] = true
- }
- }
- var outOfPlacementPieces metabase.Pieces
- var outOfPlacementPiecesSet map[uint16]bool
- if repairer.doPlacementCheck && segment.Placement != storj.EveryCountry {
- var err error
- outOfPlacementNodes, err := repairer.overlay.GetNodesOutOfPlacement(ctx, allNodeIDs, segment.Placement)
- if err != nil {
- return false, metainfoGetError.Wrap(err)
- }
- outOfPlacementPiecesSet = make(map[uint16]bool)
- for _, piece := range pieces {
- if slices.Contains(outOfPlacementNodes, piece.StorageNode) {
- outOfPlacementPieces = append(outOfPlacementPieces, piece)
- outOfPlacementPiecesSet[piece.Number] = true
- }
- }
- }
- numUnhealthyRetrievable := len(clumpedPieces) + len(outOfPlacementPieces)
- if len(clumpedPieces) != 0 && len(outOfPlacementPieces) != 0 {
- // verify that some of clumped pieces and out of placement pieces are not the same
- unhealthyRetrievableSet := map[uint16]bool{}
- maps.Copy(unhealthyRetrievableSet, clumpedPiecesSet)
- maps.Copy(unhealthyRetrievableSet, outOfPlacementPiecesSet)
- numUnhealthyRetrievable = len(unhealthyRetrievableSet)
- }
- numRetrievable := len(pieces) - len(missingPieces)
- numHealthy := len(pieces) - len(missingPieces) - numUnhealthyRetrievable
+ numRetrievable := len(pieces) - len(piecesCheck.MissingPiecesSet)
+ numHealthy := len(pieces) - len(piecesCheck.MissingPiecesSet) - piecesCheck.NumUnhealthyRetrievable
// irreparable segment
if numRetrievable < int(segment.Redundancy.RequiredShares) {
mon.Counter("repairer_segments_below_min_req").Inc(1) //mon:locked
@@ -297,7 +246,15 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
// repair not needed
if numHealthy-numHealthyInExcludedCountries > int(repairThreshold) {
// remove pieces out of placement without repairing as we are above repair threshold
- if len(outOfPlacementPieces) > 0 {
+ if len(piecesCheck.OutOfPlacementPiecesSet) > 0 {
+ var outOfPlacementPieces metabase.Pieces
+ for _, piece := range pieces {
+ if _, ok := piecesCheck.OutOfPlacementPiecesSet[piece.Number]; ok {
+ outOfPlacementPieces = append(outOfPlacementPieces, piece)
+ }
+ }
newPieces, err := segment.Pieces.Update(nil, outOfPlacementPieces)
if err != nil {
return false, metainfoPutError.Wrap(err)
@@ -317,13 +274,13 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
return false, metainfoPutError.Wrap(err)
}
- mon.Meter("dropped_out_of_placement_pieces").Mark(len(outOfPlacementPieces))
+ mon.Meter("dropped_out_of_placement_pieces").Mark(len(piecesCheck.OutOfPlacementPiecesSet))
}
mon.Meter("repair_unnecessary").Mark(1) //mon:locked
stats.repairUnnecessary.Mark(1)
repairer.log.Debug("segment above repair threshold", zap.Int("numHealthy", numHealthy), zap.Int32("repairThreshold", repairThreshold),
- zap.Int("numClumped", len(clumpedPieces)), zap.Int("numOffPieces", len(outOfPlacementPieces)))
+ zap.Int("numClumped", len(piecesCheck.ClumpedPiecesSet)), zap.Int("numOffPieces", len(piecesCheck.OutOfPlacementPiecesSet)))
return true, nil
}
@@ -334,7 +291,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
mon.FloatVal("healthy_ratio_before_repair").Observe(healthyRatioBeforeRepair) //mon:locked
stats.healthyRatioBeforeRepair.Observe(healthyRatioBeforeRepair)
- lostPiecesSet := sliceToSet(missingPieces)
+ lostPiecesSet := piecesCheck.MissingPiecesSet
var retrievablePieces metabase.Pieces
unhealthyPieces := make(map[metabase.Piece]struct{})
@@ -342,12 +299,11 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
// Populate retrievablePieces with all pieces from the segment except those correlating to indices in lostPieces.
// Populate unhealthyPieces with all pieces in lostPieces, clumpedPieces or outOfPlacementPieces.
for _, piece := range pieces {
- excludeNodeIDs = append(excludeNodeIDs, piece.StorageNode)
if lostPiecesSet[piece.Number] {
unhealthyPieces[piece] = struct{}{}
} else {
retrievablePieces = append(retrievablePieces, piece)
- if clumpedPiecesSet[piece.Number] || outOfPlacementPiecesSet[piece.Number] {
+ if piecesCheck.ClumpedPiecesSet[piece.Number] || piecesCheck.OutOfPlacementPiecesSet[piece.Number] {
unhealthyPieces[piece] = struct{}{}
} else {
healthySet[int32(piece.Number)] = struct{}{}
@@ -399,7 +355,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
// Request Overlay for n-h new storage nodes
request := overlay.FindStorageNodesRequest{
RequestedCount: requestCount,
- ExcludedIDs: excludeNodeIDs,
+ ExcludedIDs: piecesCheck.ExcludeNodeIDs,
Placement: segment.Placement,
}
newNodes, err := repairer.overlay.FindStorageNodesForUpload(ctx, request)
@@ -530,6 +486,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
}
report := audit.Report{
+ Segment: &segment,
NodesReputation: cachedNodesReputation,
}
@@ -537,7 +494,10 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
report.Successes = append(report.Successes, outcome.Piece.StorageNode)
}
for _, outcome := range piecesReport.Failed {
- report.Fails = append(report.Fails, outcome.Piece.StorageNode)
+ report.Fails = append(report.Fails, metabase.Piece{
+ StorageNode: outcome.Piece.StorageNode,
+ Number: outcome.Piece.Number,
+ })
}
for _, outcome := range piecesReport.Offline {
report.Offlines = append(report.Offlines, outcome.Piece.StorageNode)
@@ -667,8 +627,8 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
repairer.log.Debug("repaired segment",
zap.Stringer("Stream ID", segment.StreamID),
zap.Uint64("Position", segment.Position.Encode()),
- zap.Int("clumped pieces", len(clumpedPieces)),
- zap.Int("out of placement pieces", len(outOfPlacementPieces)),
+ zap.Int("clumped pieces", len(piecesCheck.ClumpedPiecesSet)),
+ zap.Int("out of placement pieces", len(piecesCheck.OutOfPlacementPiecesSet)),
zap.Int("in excluded countries", numHealthyInExcludedCountries),
zap.Int("removed pieces", len(toRemove)),
zap.Int("repaired pieces", len(repairedPieces)),
@@ -677,6 +637,98 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
return true, nil
}
type piecesCheckResult struct {
ExcludeNodeIDs []storj.NodeID
MissingPiecesSet map[uint16]bool
ClumpedPiecesSet map[uint16]bool
OutOfPlacementPiecesSet map[uint16]bool
NumUnhealthyRetrievable int
}
func (repairer *SegmentRepairer) classifySegmentPieces(ctx context.Context, segment metabase.Segment) (result piecesCheckResult, err error) {
defer mon.Task()(&ctx)(&err)
pieces := segment.Pieces
allNodeIDs := make([]storj.NodeID, len(pieces))
nodeIDPieceMap := map[storj.NodeID]uint16{}
result.MissingPiecesSet = map[uint16]bool{}
for i, p := range pieces {
allNodeIDs[i] = p.StorageNode
nodeIDPieceMap[p.StorageNode] = p.Number
result.MissingPiecesSet[p.Number] = true
}
result.ExcludeNodeIDs = allNodeIDs
online, offline, err := repairer.overlay.KnownReliable(ctx, allNodeIDs)
if err != nil {
return piecesCheckResult{}, overlayQueryError.New("error identifying missing pieces: %w", err)
}
// remove online nodes from missing pieces
for _, onlineNode := range online {
pieceNum := nodeIDPieceMap[onlineNode.ID]
delete(result.MissingPiecesSet, pieceNum)
}
if repairer.doDeclumping {
// if multiple pieces are on the same last_net, keep only the first one. The rest are
// to be considered retrievable but unhealthy.
lastNets := make([]string, 0, len(allNodeIDs))
reliablePieces := metabase.Pieces{}
collectLastNets := func(reliable []overlay.SelectedNode) {
for _, node := range reliable {
pieceNum := nodeIDPieceMap[node.ID]
reliablePieces = append(reliablePieces, metabase.Piece{
Number: pieceNum,
StorageNode: node.ID,
})
lastNets = append(lastNets, node.LastNet)
}
}
collectLastNets(online)
collectLastNets(offline)
clumpedPieces := repair.FindClumpedPieces(reliablePieces, lastNets)
result.ClumpedPiecesSet = map[uint16]bool{}
for _, clumpedPiece := range clumpedPieces {
result.ClumpedPiecesSet[clumpedPiece.Number] = true
}
}
if repairer.doPlacementCheck && segment.Placement != storj.EveryCountry {
result.OutOfPlacementPiecesSet = map[uint16]bool{}
checkPlacement := func(reliable []overlay.SelectedNode) {
for _, node := range reliable {
if segment.Placement.AllowedCountry(node.CountryCode) {
continue
}
result.OutOfPlacementPiecesSet[nodeIDPieceMap[node.ID]] = true
}
}
checkPlacement(online)
checkPlacement(offline)
}
result.NumUnhealthyRetrievable = len(result.ClumpedPiecesSet) + len(result.OutOfPlacementPiecesSet)
if len(result.ClumpedPiecesSet) != 0 && len(result.OutOfPlacementPiecesSet) != 0 {
// clumped pieces and out-of-placement pieces may overlap, so count the union
unhealthyRetrievableSet := map[uint16]bool{}
maps.Copy(unhealthyRetrievableSet, result.ClumpedPiecesSet)
maps.Copy(unhealthyRetrievableSet, result.OutOfPlacementPiecesSet)
result.NumUnhealthyRetrievable = len(unhealthyRetrievableSet)
}
return result, nil
}
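To make the arithmetic that Repair performs on this result concrete, consider a hypothetical segment with 10 pieces where pieces {1, 2} are missing, {3, 4} are clumped, and {4, 7} are out of placement (invented numbers, purely for illustration):

// Hypothetical values, mirroring how Repair consumes piecesCheckResult.
totalPieces := 10
missing := 2              // len(MissingPiecesSet): pieces 1 and 2
unhealthyRetrievable := 3 // union of clumped {3, 4} and out-of-placement {4, 7} = {3, 4, 7}

numRetrievable := totalPieces - missing                    // 8: still downloadable
numHealthy := totalPieces - missing - unhealthyRetrievable // 5: downloadable and healthy

Piece 4 is counted once even though it is both clumped and out of placement, which is exactly what the union via maps.Copy above guarantees.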
// checkIfSegmentAltered checks if oldSegment has been altered since it was selected for audit.
func (repairer *SegmentRepairer) checkIfSegmentAltered(ctx context.Context, oldSegment metabase.Segment) (err error) {
defer mon.Task()(&ctx)(&err)
@@ -793,15 +845,6 @@ func (repairer *SegmentRepairer) AdminFetchPieces(ctx context.Context, seg *meta
return pieceInfos, nil
}
- // sliceToSet converts the given slice to a set.
- func sliceToSet(slice []uint16) map[uint16]bool {
- set := make(map[uint16]bool, len(slice))
- for _, value := range slice {
- set[value] = true
- }
- return set
- }
// commaSeparatedArray concatenates an array into a comma-separated string,
// lazily.
type commaSeparatedArray []string


@@ -5,6 +5,7 @@ package repairer_test
import (
"context"
"strconv"
"testing" "testing"
"time" "time"
@@ -14,6 +15,7 @@ import (
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/storj/location"
"storj.io/common/testcontext" "storj.io/common/testcontext"
"storj.io/common/testrand" "storj.io/common/testrand"
"storj.io/storj/private/testplanet" "storj.io/storj/private/testplanet"
@@ -27,13 +29,15 @@ import (
func TestSegmentRepairPlacement(t *testing.T) {
piecesCount := 4
testplanet.Run(t, testplanet.Config{
- SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 1,
+ SatelliteCount: 1, StorageNodeCount: 12, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
- Satellite: testplanet.ReconfigureRS(1, 2, piecesCount, piecesCount),
+ Satellite: testplanet.ReconfigureRS(1, 1, piecesCount, piecesCount),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
require.NoError(t, planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "testbucket"))
+ defaultLocation := location.Poland
_, err := planet.Satellites[0].API.Buckets.Service.UpdateBucket(ctx, buckets.Bucket{
ProjectID: planet.Uplinks[0].Projects[0].ID,
Name: "testbucket",
@@ -41,65 +45,85 @@ func TestSegmentRepairPlacement(t *testing.T) {
})
require.NoError(t, err)
- for _, node := range planet.StorageNodes {
- require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), "PL"))
- }
- err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "object", testrand.Bytes(5*memory.KiB))
- require.NoError(t, err)
type testCase struct {
piecesOutOfPlacement int
piecesAfterRepair int
+ // how many of the out of placement pieces should also be offline
+ piecesOutOfPlacementOffline int
}
- for _, tc := range []testCase{
+ for i, tc := range []testCase{
// all pieces/nodes are out of placement, repair download/upload should be triggered
{piecesOutOfPlacement: piecesCount, piecesAfterRepair: piecesCount},
+ // all pieces/nodes are out of placement, repair download/upload should be triggered, some pieces are offline
+ {piecesOutOfPlacement: piecesCount, piecesAfterRepair: piecesCount, piecesOutOfPlacementOffline: 1},
+ {piecesOutOfPlacement: piecesCount, piecesAfterRepair: piecesCount, piecesOutOfPlacementOffline: 2},
// few pieces/nodes are out of placement, repair download/upload should be triggered
- {piecesOutOfPlacement: piecesCount - 2, piecesAfterRepair: piecesCount},
+ {piecesOutOfPlacement: piecesCount - 1, piecesAfterRepair: piecesCount},
+ {piecesOutOfPlacement: piecesCount - 1, piecesAfterRepair: piecesCount, piecesOutOfPlacementOffline: 1},
// single piece/node is out of placement, NO download/upload repair, we are only removing piece from segment
// as segment is still above repair threshold
{piecesOutOfPlacement: 1, piecesAfterRepair: piecesCount - 1},
+ {piecesOutOfPlacement: 1, piecesAfterRepair: piecesCount - 1, piecesOutOfPlacementOffline: 1},
+ {piecesOutOfPlacement: 1, piecesAfterRepair: piecesCount - 1, piecesOutOfPlacementOffline: 1},
} {
- for _, node := range planet.StorageNodes {
- require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), "PL"))
- }
- require.NoError(t, planet.Satellites[0].Repairer.Overlay.DownloadSelectionCache.Refresh(ctx))
- segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
- require.NoError(t, err)
- require.Len(t, segments, 1)
- require.Len(t, segments[0].Pieces, piecesCount)
- for _, piece := range segments[0].Pieces[:tc.piecesOutOfPlacement] {
- require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, piece.StorageNode, "US"))
- }
- // confirm that some pieces are out of placement
- ok, err := allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement)
- require.NoError(t, err)
- require.False(t, ok)
- require.NoError(t, planet.Satellites[0].Repairer.Overlay.DownloadSelectionCache.Refresh(ctx))
- _, err = planet.Satellites[0].Repairer.SegmentRepairer.Repair(ctx, &queue.InjuredSegment{
- StreamID: segments[0].StreamID,
- Position: segments[0].Position,
- })
- require.NoError(t, err)
- // confirm that all pieces have correct placement
- segments, err = planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
- require.NoError(t, err)
- require.Len(t, segments, 1)
- require.NotNil(t, segments[0].RepairedAt)
- require.Len(t, segments[0].Pieces, tc.piecesAfterRepair)
- ok, err = allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement)
- require.NoError(t, err)
- require.True(t, ok)
+ t.Run("#"+strconv.Itoa(i), func(t *testing.T) {
+ for _, node := range planet.StorageNodes {
+ require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), defaultLocation.String()))
+ }
+ require.NoError(t, planet.Satellites[0].Repairer.Overlay.DownloadSelectionCache.Refresh(ctx))
+ expectedData := testrand.Bytes(5 * memory.KiB)
+ err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "object", expectedData)
+ require.NoError(t, err)
+ segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
+ require.NoError(t, err)
+ require.Len(t, segments, 1)
+ require.Len(t, segments[0].Pieces, piecesCount)
+ for index, piece := range segments[0].Pieces {
+ // make node offline if needed
+ require.NoError(t, updateNodeStatus(ctx, planet.Satellites[0], planet.FindNode(piece.StorageNode), index < tc.piecesOutOfPlacementOffline, defaultLocation))
+ if index < tc.piecesOutOfPlacement {
+ require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, piece.StorageNode, "US"))
+ }
+ }
+ // confirm that some pieces are out of placement
+ ok, err := allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement)
+ require.NoError(t, err)
+ require.False(t, ok)
+ require.NoError(t, planet.Satellites[0].Repairer.Overlay.DownloadSelectionCache.Refresh(ctx))
+ _, err = planet.Satellites[0].Repairer.SegmentRepairer.Repair(ctx, &queue.InjuredSegment{
+ StreamID: segments[0].StreamID,
+ Position: segments[0].Position,
+ })
+ require.NoError(t, err)
+ // confirm that all pieces have correct placement
+ segments, err = planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
+ require.NoError(t, err)
+ require.Len(t, segments, 1)
+ require.NotNil(t, segments[0].RepairedAt)
+ require.Len(t, segments[0].Pieces, tc.piecesAfterRepair)
+ ok, err = allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement)
+ require.NoError(t, err)
+ require.True(t, ok)
+ data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "testbucket", "object")
+ require.NoError(t, err)
+ require.Equal(t, expectedData, data)
+ })
}
})
}
@@ -190,6 +214,52 @@ func TestSegmentRepairPlacementAndClumped(t *testing.T) {
})
}
func TestSegmentRepairPlacementNotEnoughNodes(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 8, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.ReconfigureRS(1, 2, 4, 4),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
require.NoError(t, planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "testbucket"))
_, err := planet.Satellites[0].API.Buckets.Service.UpdateBucket(ctx, buckets.Bucket{
ProjectID: planet.Uplinks[0].Projects[0].ID,
Name: "testbucket",
Placement: storj.EU,
})
require.NoError(t, err)
for _, node := range planet.StorageNodes {
require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), "PL"))
}
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "object", testrand.Bytes(5*memory.KiB))
require.NoError(t, err)
// change all nodes' locations to US
for _, node := range planet.StorageNodes {
require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), "US"))
}
require.NoError(t, planet.Satellites[0].Repairer.Overlay.DownloadSelectionCache.Refresh(ctx))
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
require.NoError(t, err)
require.Len(t, segments, 1)
require.Len(t, segments[0].Pieces, 4)
// we have bucket geofenced to EU but now all nodes are in US, repairing should fail because
// not enough nodes are available but segment shouldn't be deleted from repair queue
shouldDelete, err := planet.Satellites[0].Repairer.SegmentRepairer.Repair(ctx, &queue.InjuredSegment{
StreamID: segments[0].StreamID,
Position: segments[0].Position,
})
require.Error(t, err)
require.False(t, shouldDelete)
})
}
func allPiecesInPlacement(ctx context.Context, overaly *overlay.Service, pieces metabase.Pieces, placement storj.PlacementConstraint) (bool, error) {
for _, piece := range pieces {
nodeDossier, err := overaly.Get(ctx, piece.StorageNode)
@@ -202,3 +272,26 @@ func allPiecesInPlacement(ctx context.Context, overaly *overlay.Service, pieces
}
return true, nil
}
func updateNodeStatus(ctx context.Context, satellite *testplanet.Satellite, node *testplanet.StorageNode, offline bool, countryCode location.CountryCode) error {
timestamp := time.Now()
if offline {
timestamp = time.Now().Add(-4 * time.Hour)
}
return satellite.DB.OverlayCache().UpdateCheckIn(ctx, overlay.NodeCheckInInfo{
NodeID: node.ID(),
Address: &pb.NodeAddress{Address: node.Addr()},
IsUp: true,
Version: &pb.NodeVersion{
Version: "v0.0.0",
CommitHash: "",
Timestamp: time.Time{},
Release: true,
},
Capacity: &pb.NodeCapacity{
FreeDisk: 1 * memory.GiB.Int64(),
},
CountryCode: countryCode,
}, timestamp, satellite.Config.Overlay.Node)
}
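A note on the helper above: the overlay treats a node as online only if its last successful check-in falls inside the configured online window, so back-dating the check-in by four hours is enough to make a test node register as offline (four hours being a value comfortably beyond the windows these tests use; the exact threshold comes from the satellite's overlay configuration).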


@@ -195,6 +195,7 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity,
log.Named("reporter"),
peer.Reputation,
peer.Overlay,
metabaseDB,
containmentDB,
config.Audit.MaxRetriesStatDB,
int32(config.Audit.MaxReverifyCount))


@@ -16,6 +16,7 @@ import (
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/overlay" "storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/reputation" "storj.io/storj/satellite/reputation"
) )
@@ -182,7 +183,7 @@ func TestAuditSuspendExceedGracePeriod(t *testing.T) {
// give one node a successful audit, one a failed audit, one an offline audit, and one an unknown audit
report := audit.Report{
Successes: storj.NodeIDList{successNodeID},
- Fails: storj.NodeIDList{failNodeID},
+ Fails: metabase.Pieces{{StorageNode: failNodeID}},
Offlines: storj.NodeIDList{offlineNodeID},
Unknown: storj.NodeIDList{unknownNodeID},
NodesReputation: nodesStatus,
@@ -248,7 +249,7 @@ func TestAuditSuspendDQDisabled(t *testing.T) {
// give one node a successful audit, one a failed audit, one an offline audit, and one an unknown audit
report := audit.Report{
Successes: storj.NodeIDList{successNodeID},
- Fails: storj.NodeIDList{failNodeID},
+ Fails: metabase.Pieces{{StorageNode: failNodeID}},
Offlines: storj.NodeIDList{offlineNodeID},
Unknown: storj.NodeIDList{unknownNodeID},
NodesReputation: nodesStatus,


@@ -48,7 +48,7 @@ func (db *ConsoleDB) ProjectMembers() console.ProjectMembers {
// ProjectInvitations is a getter for ProjectInvitations repository.
func (db *ConsoleDB) ProjectInvitations() console.ProjectInvitations {
- return &projectInvitations{db.db}
+ return &projectInvitations{db.methods}
}
// APIKeys is a getter for APIKeys repository.
@@ -78,7 +78,7 @@ func (db *ConsoleDB) ResetPasswordTokens() console.ResetPasswordTokens {
// WebappSessions is a getter for WebappSessions repository.
func (db *ConsoleDB) WebappSessions() consoleauth.WebappSessions {
- return &webappSessions{db.methods}
+ return &webappSessions{db.db}
}
// AccountFreezeEvents is a getter for AccountFreezeEvents repository.


@@ -169,7 +169,7 @@ model project_invitation (
field created_at timestamp ( autoinsert, updatable )
)
- create project_invitation ( )
+ create project_invitation ( replace )
read one (
select project_invitation


@@ -12869,7 +12869,7 @@ func (obj *pgxImpl) Create_ProjectMember(ctx context.Context,
}
- func (obj *pgxImpl) Create_ProjectInvitation(ctx context.Context,
+ func (obj *pgxImpl) Replace_ProjectInvitation(ctx context.Context,
project_invitation_project_id ProjectInvitation_ProjectId_Field,
project_invitation_email ProjectInvitation_Email_Field,
optional ProjectInvitation_Create_Fields) (
@@ -12882,7 +12882,7 @@ func (obj *pgxImpl) Create_ProjectInvitation(ctx context.Context,
__inviter_id_val := optional.InviterId.value()
__created_at_val := __now
- var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_invitations ( project_id, email, inviter_id, created_at ) VALUES ( ?, ?, ?, ? ) RETURNING project_invitations.project_id, project_invitations.email, project_invitations.inviter_id, project_invitations.created_at")
+ var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_invitations ( project_id, email, inviter_id, created_at ) VALUES ( ?, ?, ?, ? ) ON CONFLICT ( project_id, email ) DO UPDATE SET project_id = EXCLUDED.project_id, email = EXCLUDED.email, inviter_id = EXCLUDED.inviter_id, created_at = EXCLUDED.created_at RETURNING project_invitations.project_id, project_invitations.email, project_invitations.inviter_id, project_invitations.created_at")
var __values []interface{}
__values = append(__values, __project_id_val, __email_val, __inviter_id_val, __created_at_val)
@@ -20876,7 +20876,7 @@ func (obj *pgxcockroachImpl) Create_ProjectMember(ctx context.Context,
}
- func (obj *pgxcockroachImpl) Create_ProjectInvitation(ctx context.Context,
+ func (obj *pgxcockroachImpl) Replace_ProjectInvitation(ctx context.Context,
project_invitation_project_id ProjectInvitation_ProjectId_Field,
project_invitation_email ProjectInvitation_Email_Field,
optional ProjectInvitation_Create_Fields) (
@@ -20889,7 +20889,7 @@ func (obj *pgxcockroachImpl) Create_ProjectInvitation(ctx context.Context,
__inviter_id_val := optional.InviterId.value()
__created_at_val := __now
- var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_invitations ( project_id, email, inviter_id, created_at ) VALUES ( ?, ?, ?, ? ) RETURNING project_invitations.project_id, project_invitations.email, project_invitations.inviter_id, project_invitations.created_at")
+ var __embed_stmt = __sqlbundle_Literal("UPSERT INTO project_invitations ( project_id, email, inviter_id, created_at ) VALUES ( ?, ?, ?, ? ) RETURNING project_invitations.project_id, project_invitations.email, project_invitations.inviter_id, project_invitations.created_at")
var __values []interface{}
__values = append(__values, __project_id_val, __email_val, __inviter_id_val, __created_at_val)
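The two generated statements above follow directly from switching the dbx definition to create project_invitation ( replace ): for plain PostgreSQL, dbx renders the replace as INSERT ... ON CONFLICT ( project_id, email ) DO UPDATE, while for CockroachDB it can use the native UPSERT INTO form. Either way, re-inviting an email that already has a pending invitation now refreshes the row (including created_at) instead of failing on the unique key.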
@@ -28506,19 +28506,6 @@ func (rx *Rx) Create_Project(ctx context.Context,
}
- func (rx *Rx) Create_ProjectInvitation(ctx context.Context,
- project_invitation_project_id ProjectInvitation_ProjectId_Field,
- project_invitation_email ProjectInvitation_Email_Field,
- optional ProjectInvitation_Create_Fields) (
- project_invitation *ProjectInvitation, err error) {
- var tx *Tx
- if tx, err = rx.getTx(ctx); err != nil {
- return
- }
- return tx.Create_ProjectInvitation(ctx, project_invitation_project_id, project_invitation_email, optional)
- }
func (rx *Rx) Create_ProjectMember(ctx context.Context,
project_member_member_id ProjectMember_MemberId_Field,
project_member_project_id ProjectMember_ProjectId_Field) (
@@ -29707,6 +29694,19 @@ func (rx *Rx) Replace_AccountFreezeEvent(ctx context.Context,
}
func (rx *Rx) Replace_ProjectInvitation(ctx context.Context,
project_invitation_project_id ProjectInvitation_ProjectId_Field,
project_invitation_email ProjectInvitation_Email_Field,
optional ProjectInvitation_Create_Fields) (
project_invitation *ProjectInvitation, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Replace_ProjectInvitation(ctx, project_invitation_project_id, project_invitation_email, optional)
}
func (rx *Rx) UpdateNoReturn_AccountingTimestamps_By_Name(ctx context.Context,
accounting_timestamps_name AccountingTimestamps_Name_Field,
update AccountingTimestamps_Update_Fields) (
@@ -30273,12 +30273,6 @@ type Methods interface {
optional Project_Create_Fields) (
project *Project, err error)
- Create_ProjectInvitation(ctx context.Context,
- project_invitation_project_id ProjectInvitation_ProjectId_Field,
- project_invitation_email ProjectInvitation_Email_Field,
- optional ProjectInvitation_Create_Fields) (
- project_invitation *ProjectInvitation, err error)
Create_ProjectMember(ctx context.Context,
project_member_member_id ProjectMember_MemberId_Field,
project_member_project_id ProjectMember_ProjectId_Field) (
@@ -30808,6 +30802,12 @@ type Methods interface {
optional AccountFreezeEvent_Create_Fields) (
account_freeze_event *AccountFreezeEvent, err error)
Replace_ProjectInvitation(ctx context.Context,
project_invitation_project_id ProjectInvitation_ProjectId_Field,
project_invitation_email ProjectInvitation_Email_Field,
optional ProjectInvitation_Create_Fields) (
project_invitation *ProjectInvitation, err error)
UpdateNoReturn_AccountingTimestamps_By_Name(ctx context.Context,
accounting_timestamps_name AccountingTimestamps_Name_Field,
update AccountingTimestamps_Update_Fields) (


@@ -266,62 +266,6 @@ func (cache *overlaycache) Get(ctx context.Context, id storj.NodeID) (dossier *o
return convertDBNode(ctx, node)
}
// GetOnlineNodesForGetDelete returns a map of nodes for the supplied nodeIDs.
func (cache *overlaycache) GetOnlineNodesForGetDelete(ctx context.Context, nodeIDs []storj.NodeID, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) (nodes map[storj.NodeID]*overlay.SelectedNode, err error) {
for {
nodes, err = cache.getOnlineNodesForGetDelete(ctx, nodeIDs, onlineWindow, asOf)
if err != nil {
if cockroachutil.NeedsRetry(err) {
continue
}
return nodes, err
}
break
}
return nodes, err
}
func (cache *overlaycache) getOnlineNodesForGetDelete(ctx context.Context, nodeIDs []storj.NodeID, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) (_ map[storj.NodeID]*overlay.SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)
var rows tagsql.Rows
rows, err = cache.db.Query(ctx, cache.db.Rebind(`
SELECT last_net, id, address, last_ip_port, noise_proto, noise_public_key, debounce_limit, features
FROM nodes
`+cache.db.impl.AsOfSystemInterval(asOf.Interval())+`
WHERE id = any($1::bytea[])
AND disqualified IS NULL
AND exit_finished_at IS NULL
AND last_contact_success > $2
`), pgutil.NodeIDArray(nodeIDs), time.Now().Add(-onlineWindow))
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rows.Close()) }()
nodes := make(map[storj.NodeID]*overlay.SelectedNode)
for rows.Next() {
var node overlay.SelectedNode
node.Address = &pb.NodeAddress{}
var lastIPPort sql.NullString
var noise noiseScanner
err = rows.Scan(&node.LastNet, &node.ID, &node.Address.Address, &lastIPPort, &noise.Proto, &noise.PublicKey, &node.Address.DebounceLimit, &node.Address.Features)
if err != nil {
return nil, err
}
if lastIPPort.Valid {
node.LastIPPort = lastIPPort.String
}
node.Address.NoiseInfo = noise.Convert()
nodes[node.ID] = &node
}
return nodes, Error.Wrap(rows.Err())
}
// GetOnlineNodesForAuditRepair returns a map of nodes for the supplied nodeIDs.
func (cache *overlaycache) GetOnlineNodesForAuditRepair(ctx context.Context, nodeIDs []storj.NodeID, onlineWindow time.Duration) (nodes map[storj.NodeID]*overlay.NodeReputation, err error) {
for {
@@ -378,70 +322,6 @@ func (cache *overlaycache) getOnlineNodesForAuditRepair(ctx context.Context, nod
return nodes, Error.Wrap(rows.Err())
}
// KnownOffline filters a set of nodes to offline nodes.
func (cache *overlaycache) KnownOffline(ctx context.Context, criteria *overlay.NodeCriteria, nodeIDs storj.NodeIDList) (offlineNodes storj.NodeIDList, err error) {
for {
offlineNodes, err = cache.knownOffline(ctx, criteria, nodeIDs)
if err != nil {
if cockroachutil.NeedsRetry(err) {
continue
}
return offlineNodes, err
}
break
}
return offlineNodes, err
}
func (cache *overlaycache) knownOffline(ctx context.Context, criteria *overlay.NodeCriteria, nodeIds storj.NodeIDList) (offlineNodes storj.NodeIDList, err error) {
defer mon.Task()(&ctx)(&err)
if len(nodeIds) == 0 {
return nil, Error.New("no ids provided")
}
// get offline nodes
var rows tagsql.Rows
rows, err = cache.db.Query(ctx, cache.db.Rebind(`
SELECT id FROM nodes
`+cache.db.impl.AsOfSystemInterval(criteria.AsOfSystemInterval)+`
WHERE id = any($1::bytea[])
AND last_contact_success < $2
`), pgutil.NodeIDArray(nodeIds), time.Now().Add(-criteria.OnlineWindow),
)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var id storj.NodeID
err = rows.Scan(&id)
if err != nil {
return nil, err
}
offlineNodes = append(offlineNodes, id)
}
return offlineNodes, Error.Wrap(rows.Err())
}
// KnownUnreliableOrOffline filters a set of nodes to unreliable or offlines node, independent of new.
func (cache *overlaycache) KnownUnreliableOrOffline(ctx context.Context, criteria *overlay.NodeCriteria, nodeIDs storj.NodeIDList) (badNodes storj.NodeIDList, err error) {
for {
badNodes, err = cache.knownUnreliableOrOffline(ctx, criteria, nodeIDs)
if err != nil {
if cockroachutil.NeedsRetry(err) {
continue
}
return badNodes, err
}
break
}
return badNodes, err
}
// GetOfflineNodesForEmail gets nodes that we want to send an email to. These are non-disqualified, non-exited nodes where
// last_contact_success is between two points: the point where it is considered offline (offlineWindow), and the point where we don't want
// to send more emails (cutoff). It also filters nodes where last_offline_email is too recent (cooldown).
@@ -567,102 +447,64 @@ func (cache *overlaycache) knownReliableInExcludedCountries(ctx context.Context,
return reliableInExcluded, Error.Wrap(rows.Err())
}
- func (cache *overlaycache) knownUnreliableOrOffline(ctx context.Context, criteria *overlay.NodeCriteria, nodeIDs storj.NodeIDList) (badNodes storj.NodeIDList, err error) {
- defer mon.Task()(&ctx)(&err)
- if len(nodeIDs) == 0 {
- return nil, Error.New("no ids provided")
- }
- // get reliable and online nodes
- var rows tagsql.Rows
- rows, err = cache.db.Query(ctx, cache.db.Rebind(`
- SELECT id
- FROM nodes
- `+cache.db.impl.AsOfSystemInterval(criteria.AsOfSystemInterval)+`
- WHERE id = any($1::bytea[])
- AND disqualified IS NULL
- AND unknown_audit_suspended IS NULL
- AND offline_suspended IS NULL
- AND exit_finished_at IS NULL
- AND last_contact_success > $2
- `), pgutil.NodeIDArray(nodeIDs), time.Now().Add(-criteria.OnlineWindow),
- )
- if err != nil {
- return nil, err
- }
- defer func() { err = errs.Combine(err, rows.Close()) }()
- goodNodes := make(map[storj.NodeID]struct{}, len(nodeIDs))
- for rows.Next() {
- var id storj.NodeID
- err = rows.Scan(&id)
- if err != nil {
- return nil, err
- }
- goodNodes[id] = struct{}{}
- }
- for _, id := range nodeIDs {
- if _, ok := goodNodes[id]; !ok {
- badNodes = append(badNodes, id)
- }
- }
- return badNodes, Error.Wrap(rows.Err())
- }
- // KnownReliable filters a set of nodes to reliable (online and qualified) nodes.
- func (cache *overlaycache) KnownReliable(ctx context.Context, onlineWindow time.Duration, nodeIDs storj.NodeIDList) (nodes []*pb.Node, err error) {
+ // KnownReliable filters a set of nodes to reliable nodes. List is split into online and offline nodes.
+ func (cache *overlaycache) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []overlay.SelectedNode, offline []overlay.SelectedNode, err error) {
for {
- nodes, err = cache.knownReliable(ctx, onlineWindow, nodeIDs)
+ online, offline, err = cache.knownReliable(ctx, nodeIDs, onlineWindow, asOfSystemInterval)
if err != nil {
if cockroachutil.NeedsRetry(err) {
continue
}
- return nodes, err
+ return nil, nil, err
}
break
}
- return nodes, err
+ return online, offline, err
}
- func (cache *overlaycache) knownReliable(ctx context.Context, onlineWindow time.Duration, nodeIDs storj.NodeIDList) (nodes []*pb.Node, err error) {
+ func (cache *overlaycache) knownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []overlay.SelectedNode, offline []overlay.SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)
if len(nodeIDs) == 0 {
- return nil, Error.New("no ids provided")
+ return nil, nil, Error.New("no ids provided")
}
- // get online nodes
- rows, err := cache.db.Query(ctx, cache.db.Rebind(`
- SELECT id, last_net, last_ip_port, address, protocol, noise_proto, noise_public_key, debounce_limit, features
- FROM nodes
- WHERE id = any($1::bytea[])
- AND disqualified IS NULL
- AND unknown_audit_suspended IS NULL
- AND offline_suspended IS NULL
- AND exit_finished_at IS NULL
- AND last_contact_success > $2
- `), pgutil.NodeIDArray(nodeIDs), time.Now().Add(-onlineWindow),
- )
- if err != nil {
- return nil, err
- }
- defer func() { err = errs.Combine(err, rows.Close()) }()
- for rows.Next() {
- row := &dbx.Node{}
- err = rows.Scan(&row.Id, &row.LastNet, &row.LastIpPort, &row.Address, &row.Protocol, &row.NoiseProto, &row.NoisePublicKey, &row.DebounceLimit, &row.Features)
- if err != nil {
- return nil, err
- }
- node, err := convertDBNode(ctx, row)
- if err != nil {
- return nil, err
- }
- nodes = append(nodes, &node.Node)
- }
- return nodes, Error.Wrap(rows.Err())
+ err = withRows(cache.db.Query(ctx, `
+ SELECT id, address, last_net, last_ip_port, country_code, last_contact_success > $2 as online
+ FROM nodes
+ `+cache.db.impl.AsOfSystemInterval(asOfSystemInterval)+`
+ WHERE id = any($1::bytea[])
+ AND disqualified IS NULL
+ AND unknown_audit_suspended IS NULL
+ AND offline_suspended IS NULL
+ AND exit_finished_at IS NULL
+ `, pgutil.NodeIDArray(nodeIDs), time.Now().Add(-onlineWindow),
+ ))(func(rows tagsql.Rows) error {
+ for rows.Next() {
+ var onlineNode bool
+ var node overlay.SelectedNode
+ node.Address = &pb.NodeAddress{}
+ var lastIPPort sql.NullString
+ err = rows.Scan(&node.ID, &node.Address.Address, &node.LastNet, &lastIPPort, &node.CountryCode, &onlineNode)
+ if err != nil {
+ return err
+ }
+ if lastIPPort.Valid {
+ node.LastIPPort = lastIPPort.String
+ }
+ if onlineNode {
+ online = append(online, node)
+ } else {
+ offline = append(offline, node)
+ }
+ }
+ return nil
+ })
+ return online, offline, Error.Wrap(err)
}
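Callers now get the online/offline split in a single query (the last_contact_success > $2 boolean does the classification in SQL instead of a second round trip); for instance, a usage sketch mirroring the call sites in this change, such as classifySegmentPieces and the cache test below:

// onlineWindow bounds how stale a node's last check-in may be;
// asOfSystemInterval is the CockroachDB follower-read interval (0 disables it).
online, offline, err := cache.KnownReliable(ctx, nodeIDs, time.Hour, 0)
if err != nil {
	return err
}
// Disqualified, suspended and exited nodes appear in neither slice.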
// Reliable returns all reliable nodes.


@@ -418,3 +418,139 @@ func TestOverlayCache_SelectAllStorageNodesDownloadUpload(t *testing.T) {
})
}
func TestOverlayCache_KnownReliable(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
cache := db.OverlayCache()
allNodes := []overlay.SelectedNode{
addNode(ctx, t, cache, "online", "127.0.0.1", true, false, false, false, false),
addNode(ctx, t, cache, "offline", "127.0.0.2", false, false, false, false, false),
addNode(ctx, t, cache, "disqualified", "127.0.0.3", false, true, false, false, false),
addNode(ctx, t, cache, "audit-suspended", "127.0.0.4", false, false, true, false, false),
addNode(ctx, t, cache, "offline-suspended", "127.0.0.5", false, false, false, true, false),
addNode(ctx, t, cache, "exited", "127.0.0.6", false, false, false, false, true),
}
ids := func(nodes ...overlay.SelectedNode) storj.NodeIDList {
nodeIds := storj.NodeIDList{}
for _, node := range nodes {
nodeIds = append(nodeIds, node.ID)
}
return nodeIds
}
nodes := func(nodes ...overlay.SelectedNode) []overlay.SelectedNode {
return append([]overlay.SelectedNode{}, nodes...)
}
type testCase struct {
IDs storj.NodeIDList
Online []overlay.SelectedNode
Offline []overlay.SelectedNode
}
shuffledNodeIDs := ids(allNodes...)
rand.Shuffle(len(shuffledNodeIDs), shuffledNodeIDs.Swap)
for _, tc := range []testCase{
{
IDs: ids(allNodes[0], allNodes[1]),
Online: nodes(allNodes[0]),
Offline: nodes(allNodes[1]),
},
{
IDs: ids(allNodes[0]),
Online: nodes(allNodes[0]),
},
{
IDs: ids(allNodes[1]),
Offline: nodes(allNodes[1]),
},
{ // only unreliable
IDs: ids(allNodes[2], allNodes[3], allNodes[4], allNodes[5]),
},
{ // all nodes
IDs: ids(allNodes...),
Online: nodes(allNodes[0]),
Offline: nodes(allNodes[1]),
},
// all nodes but in shuffled order
{
IDs: shuffledNodeIDs,
Online: nodes(allNodes[0]),
Offline: nodes(allNodes[1]),
},
// all nodes + one ID not from DB
{
IDs: append(ids(allNodes...), testrand.NodeID()),
Online: nodes(allNodes[0]),
Offline: nodes(allNodes[1]),
},
} {
online, offline, err := cache.KnownReliable(ctx, tc.IDs, 1*time.Hour, 0)
require.NoError(t, err)
require.ElementsMatch(t, tc.Online, online)
require.ElementsMatch(t, tc.Offline, offline)
}
_, _, err := cache.KnownReliable(ctx, storj.NodeIDList{}, 1*time.Hour, 0)
require.Error(t, err)
})
}
func addNode(ctx context.Context, t *testing.T, cache overlay.DB, address, lastIPPort string, online, disqualified, auditSuspended, offlineSuspended, exited bool) overlay.SelectedNode {
selectedNode := overlay.SelectedNode{
ID: testrand.NodeID(),
Address: &pb.NodeAddress{Address: address},
LastNet: lastIPPort,
LastIPPort: lastIPPort,
CountryCode: location.Poland,
}
checkInInfo := overlay.NodeCheckInInfo{
IsUp: true,
NodeID: selectedNode.ID,
Address: &pb.NodeAddress{Address: selectedNode.Address.Address},
LastIPPort: selectedNode.LastIPPort,
LastNet: selectedNode.LastNet,
CountryCode: selectedNode.CountryCode,
Version: &pb.NodeVersion{Version: "v0.0.0"},
}
timestamp := time.Now().UTC()
if !online {
timestamp = time.Now().Add(-10 * time.Hour)
}
err := cache.UpdateCheckIn(ctx, checkInInfo, timestamp, overlay.NodeSelectionConfig{})
require.NoError(t, err)
if disqualified {
_, err := cache.DisqualifyNode(ctx, selectedNode.ID, time.Now(), overlay.DisqualificationReasonAuditFailure)
require.NoError(t, err)
}
if auditSuspended {
require.NoError(t, cache.TestSuspendNodeUnknownAudit(ctx, selectedNode.ID, time.Now()))
}
if offlineSuspended {
require.NoError(t, cache.TestSuspendNodeOffline(ctx, selectedNode.ID, time.Now()))
}
if exited {
now := time.Now()
_, err = cache.UpdateExitStatus(ctx, &overlay.ExitStatusRequest{
NodeID: selectedNode.ID,
ExitInitiatedAt: now,
ExitLoopCompletedAt: now,
ExitFinishedAt: now,
ExitSuccess: true,
})
require.NoError(t, err)
}
return selectedNode
}


@ -549,67 +549,55 @@ func (db *ProjectAccounting) GetProjectTotal(ctx context.Context, projectID uuid
func (db *ProjectAccounting) GetProjectTotalByPartner(ctx context.Context, projectID uuid.UUID, partnerNames []string, since, before time.Time) (usages map[string]accounting.ProjectUsage, err error) {
	defer mon.Task()(&ctx)(&err)
	since = timeTruncateDown(since)

+	bucketNames, err := db.getBucketsSinceAndBefore(ctx, projectID, since, before)
-	storageQuery := db.db.Rebind(`
-		SELECT * FROM (
-			SELECT
-				COALESCE(t.bucket_name, rollups.bucket_name) AS bucket_name,
-				COALESCE(t.interval_start, rollups.interval_start) AS interval_start,
-				COALESCE(t.total_bytes, 0) AS total_bytes,
-				COALESCE(t.inline, 0) AS inline,
-				COALESCE(t.remote, 0) AS remote,
-				COALESCE(t.total_segments_count, 0) AS total_segments_count,
-				COALESCE(t.object_count, 0) AS object_count,
-				m.user_agent,
-				COALESCE(rollups.egress, 0) AS egress
-			FROM
-				bucket_storage_tallies AS t
-			FULL OUTER JOIN (
-				SELECT
-					bucket_name,
-					SUM(settled + inline) AS egress,
-					MIN(interval_start) AS interval_start
-				FROM
-					bucket_bandwidth_rollups
-				WHERE
-					project_id = $1 AND
-					interval_start >= $2 AND
-					interval_start < $3 AND
-					action = $4
-				GROUP BY
-					bucket_name
-			) AS rollups ON
-				t.bucket_name = rollups.bucket_name
-			LEFT JOIN bucket_metainfos AS m ON
-				m.project_id = $1 AND
-				m.name = COALESCE(t.bucket_name, rollups.bucket_name)
-			WHERE
-				(t.project_id IS NULL OR t.project_id = $1) AND
-				COALESCE(t.interval_start, rollups.interval_start) >= $2 AND
-				COALESCE(t.interval_start, rollups.interval_start) < $3
-		) AS q` + db.db.impl.AsOfSystemInterval(-10) + ` ORDER BY bucket_name, interval_start DESC`)
-	usages = make(map[string]accounting.ProjectUsage)
-	storageTalliesRows, err := db.db.QueryContext(ctx, storageQuery, projectID[:], since, before, pb.PieceAction_GET)
	if err != nil {
		return nil, err
	}

-	var prevTallyForBucket = make(map[string]*accounting.BucketStorageTally)
-	var recentBucket string
-	for storageTalliesRows.Next() {
-		tally := accounting.BucketStorageTally{}
-		var userAgent []byte
-		var inline, remote, egress int64
-		err = storageTalliesRows.Scan(&tally.BucketName, &tally.IntervalStart, &tally.TotalBytes, &inline, &remote, &tally.TotalSegmentCount, &tally.ObjectCount, &userAgent, &egress)
-		if err != nil {
-			return nil, errs.Combine(err, storageTalliesRows.Close())
+	storageQuery := db.db.Rebind(`
+		SELECT
+			bucket_storage_tallies.interval_start,
+			bucket_storage_tallies.total_bytes,
+			bucket_storage_tallies.inline,
+			bucket_storage_tallies.remote,
+			bucket_storage_tallies.total_segments_count,
+			bucket_storage_tallies.object_count
+		FROM
+			bucket_storage_tallies
+		WHERE
+			bucket_storage_tallies.project_id = ? AND
+			bucket_storage_tallies.bucket_name = ? AND
+			bucket_storage_tallies.interval_start >= ? AND
+			bucket_storage_tallies.interval_start < ?
+		ORDER BY bucket_storage_tallies.interval_start DESC
+	`)
+	totalEgressQuery := db.db.Rebind(`
+		SELECT
+			COALESCE(SUM(settled) + SUM(inline), 0)
+		FROM
+			bucket_bandwidth_rollups
+		WHERE
+			project_id = ? AND
+			bucket_name = ? AND
+			interval_start >= ? AND
+			interval_start < ? AND
+			action = ?;
+	`)
+	usages = make(map[string]accounting.ProjectUsage)
+	for _, bucket := range bucketNames {
+		userAgentRow, err := db.db.Get_BucketMetainfo_UserAgent_By_ProjectId_And_Name(ctx,
+			dbx.BucketMetainfo_ProjectId(projectID[:]),
+			dbx.BucketMetainfo_Name([]byte(bucket)))
+		if err != nil && !errors.Is(err, sql.ErrNoRows) {
+			return nil, err
		}

		var partner string
-		if userAgent != nil {
-			entries, err := useragent.ParseEntries(userAgent)
+		if userAgentRow != nil && userAgentRow.UserAgent != nil {
+			entries, err := useragent.ParseEntries(userAgentRow.UserAgent)
			if err != nil {
				return nil, err
			}
@ -623,40 +611,59 @@ func (db *ProjectAccounting) GetProjectTotalByPartner(ctx context.Context, proje
				}
			}
		}

		if _, ok := usages[partner]; !ok {
			usages[partner] = accounting.ProjectUsage{Since: since, Before: before}
		}
		usage := usages[partner]

-		if tally.TotalBytes == 0 {
-			tally.TotalBytes = inline + remote
+		storageTalliesRows, err := db.db.QueryContext(ctx, storageQuery, projectID[:], []byte(bucket), since, before)
+		if err != nil {
+			return nil, err
		}
-		if tally.BucketName != recentBucket {
-			usage.Egress += egress
-			recentBucket = tally.BucketName
+		var prevTally *accounting.BucketStorageTally
+		for storageTalliesRows.Next() {
+			tally := accounting.BucketStorageTally{}
+			var inline, remote int64
+			err = storageTalliesRows.Scan(&tally.IntervalStart, &tally.TotalBytes, &inline, &remote, &tally.TotalSegmentCount, &tally.ObjectCount)
+			if err != nil {
+				return nil, errs.Combine(err, storageTalliesRows.Close())
+			}
+			if tally.TotalBytes == 0 {
+				tally.TotalBytes = inline + remote
+			}
+			if prevTally == nil {
+				prevTally = &tally
+				continue
+			}
+			hours := prevTally.IntervalStart.Sub(tally.IntervalStart).Hours()
+			usage.Storage += memory.Size(tally.TotalBytes).Float64() * hours
+			usage.SegmentCount += float64(tally.TotalSegmentCount) * hours
+			usage.ObjectCount += float64(tally.ObjectCount) * hours
+			prevTally = &tally
		}
-		if _, ok := prevTallyForBucket[tally.BucketName]; !ok {
-			prevTallyForBucket[tally.BucketName] = &tally
-			usages[partner] = usage
-			continue
+		err = errs.Combine(storageTalliesRows.Err(), storageTalliesRows.Close())
+		if err != nil {
+			return nil, err
		}
-		hours := prevTallyForBucket[tally.BucketName].IntervalStart.Sub(tally.IntervalStart).Hours()
-		usage.Storage += memory.Size(tally.TotalBytes).Float64() * hours
-		usage.SegmentCount += float64(tally.TotalSegmentCount) * hours
-		usage.ObjectCount += float64(tally.ObjectCount) * hours
+		totalEgressRow := db.db.QueryRowContext(ctx, totalEgressQuery, projectID[:], []byte(bucket), since, before, pb.PieceAction_GET)
+		if err != nil {
+			return nil, err
+		}
+		var egress int64
+		if err = totalEgressRow.Scan(&egress); err != nil {
+			return nil, err
+		}
+		usage.Egress += egress

		usages[partner] = usage
-		prevTallyForBucket[tally.BucketName] = &tally
	}
-
-	err = errs.Combine(storageTalliesRows.Err(), storageTalliesRows.Close())
-	if err != nil {
-		return nil, err
-	}

	return usages, nil
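
The restored loop computes storage, segment, and object usage as a time integral: tallies arrive newest-first, and each older tally is weighted by the hours separating it from the next newer one, with the newest tally only anchoring the interval. A self-contained sketch of that step, using a simplified Tally stand-in rather than the real accounting.BucketStorageTally:

package main

import (
	"fmt"
	"time"
)

// Tally is a simplified stand-in for accounting.BucketStorageTally.
type Tally struct {
	IntervalStart time.Time
	TotalBytes    int64
}

// byteHours mirrors the integration in the loop above: tallies are ordered
// newest first, and each older tally contributes its bytes multiplied by the
// hours until the next (newer) tally.
func byteHours(tallies []Tally) float64 {
	var total float64
	for i := 1; i < len(tallies); i++ {
		hours := tallies[i-1].IntervalStart.Sub(tallies[i].IntervalStart).Hours()
		total += float64(tallies[i].TotalBytes) * hours
	}
	return total
}

func main() {
	now := time.Now()
	tallies := []Tally{
		{IntervalStart: now, TotalBytes: 100}, // newest: anchors the interval only
		{IntervalStart: now.Add(-2 * time.Hour), TotalBytes: 100},
	}
	fmt.Println(byteHours(tallies)) // 100 bytes held for 2 hours => 200 byte-hours
}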

View File

@ -207,31 +207,31 @@ func Test_GetProjectTotal(t *testing.T) {
			require.NoError(t, err)
			const epsilon = 1e-8
-			require.InDelta(t, float64(tallies[0].Bytes()+tallies[1].Bytes()), usage.Storage, epsilon)
-			require.InDelta(t, float64(tallies[0].TotalSegmentCount+tallies[1].TotalSegmentCount), usage.SegmentCount, epsilon)
-			require.InDelta(t, float64(tallies[0].ObjectCount+tallies[1].ObjectCount), usage.ObjectCount, epsilon)
-			require.Equal(t, expectedEgress, usage.Egress)
-			require.Equal(t, tallies[0].IntervalStart, usage.Since)
-			require.Equal(t, tallies[2].IntervalStart.Add(time.Minute), usage.Before)
+			require.InDelta(t, usage.Storage, float64(tallies[0].Bytes()+tallies[1].Bytes()), epsilon)
+			require.InDelta(t, usage.SegmentCount, float64(tallies[0].TotalSegmentCount+tallies[1].TotalSegmentCount), epsilon)
+			require.InDelta(t, usage.ObjectCount, float64(tallies[0].ObjectCount+tallies[1].ObjectCount), epsilon)
+			require.Equal(t, usage.Egress, expectedEgress)
+			require.Equal(t, usage.Since, tallies[0].IntervalStart)
+			require.Equal(t, usage.Before, tallies[2].IntervalStart.Add(time.Minute))

			// Ensure that GetProjectTotal treats the 'before' arg as exclusive
			usage, err = db.ProjectAccounting().GetProjectTotal(ctx, projectID, tallies[0].IntervalStart, tallies[2].IntervalStart)
			require.NoError(t, err)
-			require.InDelta(t, float64(tallies[0].Bytes()), usage.Storage, epsilon)
-			require.InDelta(t, float64(tallies[0].TotalSegmentCount), usage.SegmentCount, epsilon)
-			require.InDelta(t, float64(tallies[0].ObjectCount), usage.ObjectCount, epsilon)
-			require.Equal(t, expectedEgress, usage.Egress)
-			require.Equal(t, tallies[0].IntervalStart, usage.Since)
-			require.Equal(t, tallies[2].IntervalStart, usage.Before)
+			require.InDelta(t, usage.Storage, float64(tallies[0].Bytes()), epsilon)
+			require.InDelta(t, usage.SegmentCount, float64(tallies[0].TotalSegmentCount), epsilon)
+			require.InDelta(t, usage.ObjectCount, float64(tallies[0].ObjectCount), epsilon)
+			require.Equal(t, usage.Egress, expectedEgress)
+			require.Equal(t, usage.Since, tallies[0].IntervalStart)
+			require.Equal(t, usage.Before, tallies[2].IntervalStart)

			usage, err = db.ProjectAccounting().GetProjectTotal(ctx, projectID, rollups[0].IntervalStart, rollups[1].IntervalStart)
			require.NoError(t, err)
			require.Zero(t, usage.Storage)
			require.Zero(t, usage.SegmentCount)
			require.Zero(t, usage.ObjectCount)
-			require.Equal(t, rollups[0].Inline+rollups[0].Settled, usage.Egress)
-			require.Equal(t, rollups[0].IntervalStart, usage.Since)
-			require.Equal(t, rollups[1].IntervalStart, usage.Before)
+			require.Equal(t, usage.Egress, rollups[0].Inline+rollups[0].Settled)
+			require.Equal(t, usage.Since, rollups[0].IntervalStart)
+			require.Equal(t, usage.Before, rollups[1].IntervalStart)
		},
	)
}

View File

@ -5,9 +5,6 @@ package satellitedb
import (
	"context"
-	"database/sql"
-	"errors"
-	"time"

	"storj.io/common/uuid"
	"storj.io/storj/satellite/console"
@ -19,11 +16,11 @@ var _ console.ProjectInvitations = (*projectInvitations)(nil)
// projectInvitations is an implementation of console.ProjectInvitations.
type projectInvitations struct {
-	db *satelliteDB
+	db dbx.Methods
}

-// Insert inserts a project member invitation into the database.
-func (invites *projectInvitations) Insert(ctx context.Context, invite *console.ProjectInvitation) (_ *console.ProjectInvitation, err error) {
+// Upsert updates a project member invitation if it exists and inserts it otherwise.
+func (invites *projectInvitations) Upsert(ctx context.Context, invite *console.ProjectInvitation) (_ *console.ProjectInvitation, err error) {
	defer mon.Task()(&ctx)(&err)

	if invite == nil {
@ -36,7 +33,7 @@ func (invites *projectInvitations) Insert(ctx context.Context, invite *console.P
		createFields.InviterId = dbx.ProjectInvitation_InviterId(id)
	}

-	dbxInvite, err := invites.db.Create_ProjectInvitation(ctx,
+	dbxInvite, err := invites.db.Replace_ProjectInvitation(ctx,
		dbx.ProjectInvitation_ProjectId(invite.ProjectID[:]),
		dbx.ProjectInvitation_Email(normalizeEmail(invite.Email)),
		createFields,
@ -87,30 +84,6 @@ func (invites *projectInvitations) GetByEmail(ctx context.Context, email string)
	return projectInvitationSliceFromDBX(dbxInvites)
}

-// Update updates the project member invitation specified by the given project ID and email address.
-func (invites *projectInvitations) Update(ctx context.Context, projectID uuid.UUID, email string, request console.UpdateProjectInvitationRequest) (_ *console.ProjectInvitation, err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	update := dbx.ProjectInvitation_Update_Fields{}
-	if request.CreatedAt != nil {
-		update.CreatedAt = dbx.ProjectInvitation_CreatedAt(*request.CreatedAt)
-	}
-	if request.InviterID != nil {
-		update.InviterId = dbx.ProjectInvitation_InviterId((*request.InviterID)[:])
-	}
-
-	dbxInvite, err := invites.db.Update_ProjectInvitation_By_ProjectId_And_Email(ctx,
-		dbx.ProjectInvitation_ProjectId(projectID[:]),
-		dbx.ProjectInvitation_Email(normalizeEmail(email)),
-		update,
-	)
-	if err != nil {
-		return nil, err
-	}
-
-	return projectInvitationFromDBX(dbxInvite)
-}
-
// Delete removes a project member invitation from the database.
func (invites *projectInvitations) Delete(ctx context.Context, projectID uuid.UUID, email string) (err error) {
	defer mon.Task()(&ctx)(&err)
@ -122,81 +95,6 @@ func (invites *projectInvitations) Delete(ctx context.Context, projectID uuid.UU
	return err
}

-// DeleteBefore deletes project member invitations created prior to some time from the database.
-func (invites *projectInvitations) DeleteBefore(
-	ctx context.Context, before time.Time, asOfSystemTimeInterval time.Duration, pageSize int) (err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	if pageSize <= 0 {
-		return Error.New("expected page size to be positive; got %d", pageSize)
-	}
-
-	var pageCursor, pageEnd struct {
-		ProjectID uuid.UUID
-		Email     string
-	}
-	aost := invites.db.impl.AsOfSystemInterval(asOfSystemTimeInterval)
-	for {
-		// Select the ID beginning this page of records
-		err := invites.db.QueryRowContext(ctx, `
-			SELECT project_id, email FROM project_invitations
-			`+aost+`
-			WHERE (project_id, email) > ($1, $2) AND created_at < $3
-			ORDER BY (project_id, email) LIMIT 1
-		`, pageCursor.ProjectID, pageCursor.Email, before).Scan(&pageCursor.ProjectID, &pageCursor.Email)
-		if err != nil {
-			if errors.Is(err, sql.ErrNoRows) {
-				return nil
-			}
-			return Error.Wrap(err)
-		}
-
-		// Select the ID ending this page of records
-		err = invites.db.QueryRowContext(ctx, `
-			SELECT project_id, email FROM project_invitations
-			`+aost+`
-			WHERE (project_id, email) > ($1, $2)
-			ORDER BY (project_id, email) LIMIT 1 OFFSET $3
-		`, pageCursor.ProjectID, pageCursor.Email, pageSize).Scan(&pageEnd.ProjectID, &pageEnd.Email)
-		if err != nil {
-			if !errors.Is(err, sql.ErrNoRows) {
-				return Error.Wrap(err)
-			}
-			// Since this is the last page, we want to return all remaining records
-			_, err = invites.db.ExecContext(ctx, `
-				DELETE FROM project_invitations
-				WHERE (project_id, email) IN (
-					SELECT project_id, email FROM project_invitations
-					`+aost+`
-					WHERE (project_id, email) >= ($1, $2)
-					AND created_at < $3
-					ORDER BY (project_id, email)
-				)
-			`, pageCursor.ProjectID, pageCursor.Email, before)
-			return Error.Wrap(err)
-		}
-
-		// Delete all old, unverified records in the range between the beginning and ending IDs
-		_, err = invites.db.ExecContext(ctx, `
-			DELETE FROM project_invitations
-			WHERE (project_id, email) IN (
-				SELECT project_id, email FROM project_invitations
-				`+aost+`
-				WHERE (project_id, email) >= ($1, $2)
-				AND (project_id, email) <= ($3, $4)
-				AND created_at < $5
-				ORDER BY (project_id, email)
-			)
-		`, pageCursor.ProjectID, pageCursor.Email, pageEnd.ProjectID, pageEnd.Email, before)
-		if err != nil {
-			return Error.Wrap(err)
-		}
-
-		// Advance the cursor to the next page
-		pageCursor = pageEnd
-	}
-}
-
// projectInvitationFromDBX converts a project member invitation from the database to a *console.ProjectInvitation.
func projectInvitationFromDBX(dbxInvite *dbx.ProjectInvitation) (_ *console.ProjectInvitation, err error) {
	if dbxInvite == nil {

View File

@ -50,7 +50,7 @@ func TestProjectInvitations(t *testing.T) {
	if !t.Run("insert invitations", func(t *testing.T) {
		// Expect failure because no user with inviterID exists.
-		_, err = invitesDB.Insert(ctx, invite)
+		_, err = invitesDB.Upsert(ctx, invite)
		require.Error(t, err)

		_, err = db.Console().Users().Insert(ctx, &console.User{
@ -59,19 +59,15 @@
		})
		require.NoError(t, err)

-		invite, err = invitesDB.Insert(ctx, invite)
+		invite, err = invitesDB.Upsert(ctx, invite)
		require.NoError(t, err)
		require.WithinDuration(t, time.Now(), invite.CreatedAt, time.Minute)
		require.Equal(t, projID, invite.ProjectID)
		require.Equal(t, strings.ToUpper(email), invite.Email)

-		// Duplicate invitations should be rejected.
-		_, err = invitesDB.Insert(ctx, invite)
-		require.Error(t, err)
-
-		inviteSameEmail, err = invitesDB.Insert(ctx, inviteSameEmail)
+		inviteSameEmail, err = invitesDB.Upsert(ctx, inviteSameEmail)
		require.NoError(t, err)
-		inviteSameProject, err = invitesDB.Insert(ctx, inviteSameProject)
+		inviteSameProject, err = invitesDB.Upsert(ctx, inviteSameProject)
		require.NoError(t, err)
	}) {
		// None of the following subtests will pass if invitation insertion failed.
@ -126,22 +122,19 @@
	t.Run("update invitation", func(t *testing.T) {
		ctx := testcontext.New(t)

-		req := console.UpdateProjectInvitationRequest{}
-		newCreatedAt := invite.CreatedAt.Add(time.Hour)
-		req.CreatedAt = &newCreatedAt
-		newInvite, err := invitesDB.Update(ctx, projID, email, req)
-		require.NoError(t, err)
-		require.Equal(t, newCreatedAt, newInvite.CreatedAt)
-
		inviter, err := db.Console().Users().Insert(ctx, &console.User{
			ID:           testrand.UUID(),
			PasswordHash: testrand.Bytes(8),
		})
		require.NoError(t, err)

-		req.InviterID = &inviter.ID
-		newInvite, err = invitesDB.Update(ctx, projID, email, req)
+		invite.InviterID = &inviter.ID
+		oldCreatedAt := invite.CreatedAt
+		invite, err = invitesDB.Upsert(ctx, invite)
		require.NoError(t, err)
-		require.Equal(t, inviter.ID, *newInvite.InviterID)
+		require.Equal(t, inviter.ID, *invite.InviterID)
+		require.True(t, invite.CreatedAt.After(oldCreatedAt))
	})

	t.Run("delete invitation", func(t *testing.T) {
@ -169,45 +162,3 @@
		})
	})
}
-func TestDeleteBefore(t *testing.T) {
-	maxAge := time.Hour
-	now := time.Now()
-	expiration := now.Add(-maxAge)
-
-	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
-		invitesDB := db.Console().ProjectInvitations()
-
-		// Only positive page sizes should be allowed.
-		require.Error(t, invitesDB.DeleteBefore(ctx, time.Time{}, 0, 0))
-		require.Error(t, invitesDB.DeleteBefore(ctx, time.Time{}, 0, -1))
-
-		createInvite := func() *console.ProjectInvitation {
-			projID := testrand.UUID()
-			_, err := db.Console().Projects().Insert(ctx, &console.Project{ID: projID})
-			require.NoError(t, err)
-			invite, err := invitesDB.Insert(ctx, &console.ProjectInvitation{ProjectID: projID})
-			require.NoError(t, err)
-			return invite
-		}
-
-		newInvite := createInvite()
-		oldInvite := createInvite()
-		oldCreatedAt := expiration.Add(-time.Second)
-		oldInvite, err := invitesDB.Update(ctx, oldInvite.ProjectID, oldInvite.Email, console.UpdateProjectInvitationRequest{
-			CreatedAt: &oldCreatedAt,
-		})
-		require.NoError(t, err)
-
-		require.NoError(t, invitesDB.DeleteBefore(ctx, expiration, 0, 1))
-
-		// Ensure that the old invitation record was deleted and the other remains.
-		_, err = invitesDB.Get(ctx, oldInvite.ProjectID, oldInvite.Email)
-		require.ErrorIs(t, err, sql.ErrNoRows)
-		_, err = invitesDB.Get(ctx, newInvite.ProjectID, newInvite.Email)
-		require.NoError(t, err)
-	})
-}
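
The thread running through this file: with Insert and Update collapsed into Upsert (backed by Replace_ProjectInvitation), re-inviting the same email to the same project is no longer an error, so the duplicate-rejection assertions and TestDeleteBefore disappear, and the update subtest now asserts that a repeated call refreshes created_at. A hedged sketch of the resulting call pattern; the wrapper name is hypothetical, only the Upsert signature comes from the diff:

package sketch

import (
	"context"

	"storj.io/storj/satellite/console"
)

// resendInvite is a hypothetical wrapper: the first call inserts the
// invitation, and any later call with the same (project_id, email) replaces
// the row, refreshing created_at instead of failing.
func resendInvite(ctx context.Context, invites console.ProjectInvitations, invite *console.ProjectInvitation) (*console.ProjectInvitation, error) {
	return invites.Upsert(ctx, invite)
}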

View File

@ -36,7 +36,7 @@ func TestGetPagedWithInvitationsByProjectID(t *testing.T) {
	_, err = db.Console().ProjectMembers().Insert(ctx, memberUser.ID, projectID)
	require.NoError(t, err)

-	_, err = db.Console().ProjectInvitations().Insert(ctx, &console.ProjectInvitation{
+	_, err = db.Console().ProjectInvitations().Upsert(ctx, &console.ProjectInvitation{
		ProjectID: projectID,
		Email:     "bob@mail.test",
	})

View File

@ -5,6 +5,8 @@ package satellitedb
import (
	"context"
+	"database/sql"
+	"errors"
	"time"

	"storj.io/common/uuid"
@ -16,7 +18,7 @@ import (
var _ consoleauth.WebappSessions = (*webappSessions)(nil)

type webappSessions struct {
-	db dbx.Methods
+	db *satelliteDB
}

// Create creates a webapp session and returns the session info.
@ -91,6 +93,75 @@ func (db *webappSessions) DeleteAllByUserID(ctx context.Context, userID uuid.UUI
	return db.db.Delete_WebappSession_By_UserId(ctx, dbx.WebappSession_UserId(userID.Bytes()))
}
+// DeleteExpired deletes all sessions that have expired before the provided timestamp.
+func (db *webappSessions) DeleteExpired(ctx context.Context, now time.Time, asOfSystemTimeInterval time.Duration, pageSize int) (err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	if pageSize <= 0 {
+		return Error.New("expected page size to be positive; got %d", pageSize)
+	}
+
+	var pageCursor, pageEnd uuid.UUID
+	aost := db.db.impl.AsOfSystemInterval(asOfSystemTimeInterval)
+	for {
+		// Select the ID beginning this page of records
+		err := db.db.QueryRowContext(ctx, `
+			SELECT id FROM webapp_sessions
+			`+aost+`
+			WHERE id > $1 AND expires_at < $2
+			ORDER BY id LIMIT 1
+		`, pageCursor, now).Scan(&pageCursor)
+		if err != nil {
+			if errors.Is(err, sql.ErrNoRows) {
+				return nil
+			}
+			return Error.Wrap(err)
+		}
+
+		// Select the ID ending this page of records
+		err = db.db.QueryRowContext(ctx, `
+			SELECT id FROM webapp_sessions
+			`+aost+`
+			WHERE id > $1
+			ORDER BY id LIMIT 1 OFFSET $2
+		`, pageCursor, pageSize).Scan(&pageEnd)
+		if err != nil {
+			if !errors.Is(err, sql.ErrNoRows) {
+				return Error.Wrap(err)
+			}
+			// Since this is the last page, we want to return all remaining records
+			_, err = db.db.ExecContext(ctx, `
+				DELETE FROM webapp_sessions
+				WHERE id IN (
+					SELECT id FROM webapp_sessions
+					`+aost+`
+					WHERE id >= $1 AND expires_at < $2
+					ORDER BY id
+				)
+			`, pageCursor, now)
+			return Error.Wrap(err)
+		}
+
+		// Delete all expired records in the range between the beginning and ending IDs
+		_, err = db.db.ExecContext(ctx, `
+			DELETE FROM webapp_sessions
+			WHERE id IN (
+				SELECT id FROM webapp_sessions
+				`+aost+`
+				WHERE id BETWEEN $1 AND $2
+				AND expires_at < $3
+				ORDER BY id
+			)
+		`, pageCursor, pageEnd, now)
+		if err != nil {
+			return Error.Wrap(err)
+		}
+
+		// Advance the cursor to the next page
+		pageCursor = pageEnd
+	}
+}
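
DeleteExpired pages by primary key so each DELETE touches a bounded range: find the first expired ID after the cursor, find the page end with LIMIT 1 OFFSET pageSize, delete the expired rows in between, then advance. A minimal sketch of how the console DB cleanup chore might drive it; the function name, interval, and page size are assumptions (as is the consoleauth package path), only the DeleteExpired signature appears above:

package sketch

import (
	"context"
	"time"

	"storj.io/storj/satellite/console/consoleauth"
)

// deleteExpiredSessions removes sessions that expired before now. A negative
// AS OF SYSTEM TIME interval tolerates slightly stale reads (CockroachDB),
// and the page size bounds how many rows each DELETE statement touches.
func deleteExpiredSessions(ctx context.Context, sessions consoleauth.WebappSessions) error {
	return sessions.DeleteExpired(ctx, time.Now(), -5*time.Minute, 1000)
}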
func getSessionFromDBX(dbxSession *dbx.WebappSession) (consoleauth.WebappSession, error) {
	id, err := uuid.FromBytes(dbxSession.Id)
	if err != nil {

View File

@ -4,6 +4,7 @@
package satellitedb_test

import (
+	"database/sql"
	"testing"
	"time"

@ -186,3 +187,26 @@ func TestWebappSessionsDeleteAllByUserID(t *testing.T) {
		require.Len(t, allSessions, 0)
	})
}
+func TestDeleteExpired(t *testing.T) {
+	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
+		sessionsDB := db.Console().WebappSessions()
+		now := time.Now()
+
+		// Only positive page sizes should be allowed.
+		require.Error(t, sessionsDB.DeleteExpired(ctx, time.Time{}, 0, 0))
+		require.Error(t, sessionsDB.DeleteExpired(ctx, time.Time{}, 0, -1))
+
+		newSession, err := sessionsDB.Create(ctx, testrand.UUID(), testrand.UUID(), "", "", now.Add(time.Second))
+		require.NoError(t, err)
+		oldSession, err := sessionsDB.Create(ctx, testrand.UUID(), testrand.UUID(), "", "", now.Add(-time.Second))
+		require.NoError(t, err)
+		require.NoError(t, sessionsDB.DeleteExpired(ctx, now, 0, 1))
+
+		// Ensure that the old session record was deleted and the other remains.
+		_, err = sessionsDB.GetBySessionID(ctx, oldSession.ID)
+		require.ErrorIs(t, err, sql.ErrNoRows)
+		_, err = sessionsDB.GetBySessionID(ctx, newSession.ID)
+		require.NoError(t, err)
+	})
+}

View File

@ -2,7 +2,7 @@
# account-freeze.enabled: false

# How long to wait between a warning event and freezing an account.
-# account-freeze.grace-period: 720h0m0s
+# account-freeze.grace-period: 360h0m0s

# How often to run this chore, which is how often unpaid invoices are checked.
# account-freeze.interval: 24h0m0s
@ -145,9 +145,6 @@ compensation.withheld-percents: 75,75,75,50,50,50,25,25,25,0,0,0,0,0,0
# interval between chore cycles
# console-db-cleanup.interval: 24h0m0s

-# maximum lifetime of project member invitation records
-# console-db-cleanup.max-project-invitation-age: 168h0m0s
-
# maximum lifetime of unverified user account records
# console-db-cleanup.max-unverified-user-age: 168h0m0s
@ -629,7 +626,7 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# metainfo.max-commit-interval: 48h0m0s

# maximum encrypted object key length
-# metainfo.max-encrypted-object-key-length: 1750
+# metainfo.max-encrypted-object-key-length: 2000

# maximum inline segment size
# metainfo.max-inline-segment-size: 4.0 KiB
@ -889,6 +886,9 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# price user should pay for storage per month in dollars/TB
# payments.usage-price.storage-tb: "4"

+# whether to enable piece tracker observer with ranged loop
+# piece-tracker.use-ranged-loop: true
+
# how often to remove unused project bandwidth rollups
# project-bw-cleanup.interval: 24h0m0s
@ -1099,6 +1099,9 @@ server.private-address: 127.0.0.1:7778
# how large of batches SaveRollup should process at a time
# tally.save-rollup-batch-size: 1000

+# how large should be insert into tallies
+# tally.save-tallies-batch-size: 10000
+
# whether to enable node tally with ranged loop
# tally.use-ranged-loop: true

View File

@ -72,6 +72,8 @@ func TestUploadAndPartialDownload(t *testing.T) {
		}()
	}

+	require.NoError(t, planet.WaitForStorageNodeEndpoints(ctx))
+
	var totalBandwidthUsage bandwidth.Usage
	for _, storagenode := range planet.StorageNodes {
		usage, err := storagenode.DB.Bandwidth().Summary(ctx, time.Now().Add(-10*time.Hour), time.Now().Add(10*time.Hour))
@ -191,6 +193,8 @@ func TestUpload(t *testing.T) {
		}
	}

+	require.NoError(t, planet.WaitForStorageNodeEndpoints(ctx))
+
	from, to := date.MonthBoundary(time.Now().UTC())
	summary, err := planet.StorageNodes[0].DB.Bandwidth().SatelliteIngressSummary(ctx, planet.Satellites[0].ID(), from, to)
	require.NoError(t, err)

View File

@ -10,10 +10,10 @@ require (
	go.uber.org/zap v1.21.0
	golang.org/x/sync v0.1.0
	storj.io/common v0.0.0-20230602145716-d6ea82d58b3d
-	storj.io/private v0.0.0-20230614131149-2ffd1635adea
+	storj.io/private v0.0.0-20230627140631-807a2f00d0e1
	storj.io/storj v1.63.1
	storj.io/storjscan v0.0.0-20220926140643-1623c3b391b0
-	storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33
+	storj.io/uplink v1.10.1-0.20230626081029-035890d408c2
)

require (

View File

@ -1256,9 +1256,9 @@ storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41 h1:SVuEocEhZfFc13J1Aml
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41/go.mod h1:iK+dmHZZXQlW7ahKdNSOo+raMk5BDL2wbD62FIeXLWs=
storj.io/picobuf v0.0.1 h1:ekEvxSQCbEjTVIi/qxj2za13SJyfRE37yE30IBkZeT0=
storj.io/picobuf v0.0.1/go.mod h1:7ZTAMs6VesgTHbbhFU79oQ9hDaJ+MD4uoFQZ1P4SEz0=
-storj.io/private v0.0.0-20230614131149-2ffd1635adea h1:/dv0bYRPgCFvoXF0S14Ien41i12sj9+s4aKhCrFzXHg=
-storj.io/private v0.0.0-20230614131149-2ffd1635adea/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
+storj.io/private v0.0.0-20230627140631-807a2f00d0e1 h1:O2+Xjq8H4TKad2cnhvjitK3BtwkGtJ2TfRCHOIN8e7w=
+storj.io/private v0.0.0-20230627140631-807a2f00d0e1/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
storj.io/storjscan v0.0.0-20220926140643-1623c3b391b0 h1:pSfGf9E9OlUd17W7LSpL4tTONIyFji6dz8I2iTDd8BY=
storj.io/storjscan v0.0.0-20220926140643-1623c3b391b0/go.mod h1:5nLgAOl1KTDVyqORAhvrp+167PtShEuS1L3pJgXPjwo=
-storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33 h1:A6z1FOmqqh44BI/UOPwTi0qaM+/Hdpiwk3QAuvWf03g=
-storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=
+storj.io/uplink v1.10.1-0.20230626081029-035890d408c2 h1:XnJR9egrqvAqx5oCRu2b13ubK0iu0qTX12EAa6lAPhg=
+storj.io/uplink v1.10.1-0.20230626081029-035890d408c2/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=

View File

@ -12,7 +12,7 @@ require (
	go.uber.org/zap v1.23.0
	storj.io/common v0.0.0-20230602145716-d6ea82d58b3d
	storj.io/gateway-mt v1.51.1-0.20230417204402-7d9bb25bc297
-	storj.io/private v0.0.0-20230614131149-2ffd1635adea
+	storj.io/private v0.0.0-20230627140631-807a2f00d0e1
	storj.io/storj v0.12.1-0.20221125175451-ef4b564b82f7
)

@ -223,5 +223,5 @@ require (
	storj.io/minio v0.0.0-20230118205046-c025fcc9eef3 // indirect
	storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41 // indirect
	storj.io/picobuf v0.0.1 // indirect
-	storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33 // indirect
+	storj.io/uplink v1.10.1-0.20230626081029-035890d408c2 // indirect
)

View File

@ -1974,8 +1974,8 @@ storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41 h1:SVuEocEhZfFc13J1Aml
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41/go.mod h1:iK+dmHZZXQlW7ahKdNSOo+raMk5BDL2wbD62FIeXLWs=
storj.io/picobuf v0.0.1 h1:ekEvxSQCbEjTVIi/qxj2za13SJyfRE37yE30IBkZeT0=
storj.io/picobuf v0.0.1/go.mod h1:7ZTAMs6VesgTHbbhFU79oQ9hDaJ+MD4uoFQZ1P4SEz0=
-storj.io/private v0.0.0-20230614131149-2ffd1635adea h1:/dv0bYRPgCFvoXF0S14Ien41i12sj9+s4aKhCrFzXHg=
-storj.io/private v0.0.0-20230614131149-2ffd1635adea/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
+storj.io/private v0.0.0-20230627140631-807a2f00d0e1 h1:O2+Xjq8H4TKad2cnhvjitK3BtwkGtJ2TfRCHOIN8e7w=
+storj.io/private v0.0.0-20230627140631-807a2f00d0e1/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
-storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33 h1:A6z1FOmqqh44BI/UOPwTi0qaM+/Hdpiwk3QAuvWf03g=
-storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=
+storj.io/uplink v1.10.1-0.20230626081029-035890d408c2 h1:XnJR9egrqvAqx5oCRu2b13ubK0iu0qTX12EAa6lAPhg=
+storj.io/uplink v1.10.1-0.20230626081029-035890d408c2/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=
storj.io/zipper v0.0.0-20220124122551-2ac2d53a46f6 h1:vJQmb+uAiYn8hVfkhMl6OqjnUyMWSCPnkzW8IsjF8vE= storj.io/zipper v0.0.0-20220124122551-2ac2d53a46f6 h1:vJQmb+uAiYn8hVfkhMl6OqjnUyMWSCPnkzW8IsjF8vE=

View File

@ -17,13 +17,17 @@ export class AnalyticsHttpApi {
	 * Does not throw any errors so that expected UI behavior is not interrupted if the API call fails.
	 *
	 * @param eventName - name of the event
+	 * @param props - additional properties to send with the event
	 */
-	public async eventTriggered(eventName: string): Promise<void> {
+	public async eventTriggered(eventName: string, props?: {[p:string]:string}): Promise<void> {
		try {
			const path = `${this.ROOT_PATH}/event`;
			const body = {
				eventName: eventName,
			};
+			if (props) {
+				body['props'] = props;
+			}

			const response = await this.http.post(path, JSON.stringify(body));
			if (response.ok) {
				return;

View File

@ -15,7 +15,6 @@ import {
} from '@/types/users';
import { HttpClient } from '@/utils/httpClient';
import { ErrorTokenExpired } from '@/api/errors/ErrorTokenExpired';
-import { Duration } from '@/utils/time';

/**
 * AuthHttpApi is a console Auth API.
@ -173,6 +172,7 @@ export class AuthHttpApi implements UsersApi {
			userResponse.partner,
			userResponse.password,
			userResponse.projectLimit,
+			userResponse.projectStorageLimit,
			userResponse.paidTier,
			userResponse.isMFAEnabled,
			userResponse.isProfessional,

View File

@ -107,6 +107,22 @@ export class ProjectMembersApiGql extends BaseGql implements ProjectMembersApi {
		throw new Error(result.error || 'Failed to send project invitations');
	}

+	/**
+	 * Get invite link for the specified project and email.
+	 *
+	 * @throws Error
+	 */
+	public async getInviteLink(projectID: string, email: string): Promise<string> {
+		const path = `${this.ROOT_PATH}/${projectID}/invite-link?email=${email}`;
+		const httpResponse = await this.http.get(path);
+
+		if (httpResponse.ok) {
+			return await httpResponse.json();
+		}
+
+		throw new Error('Can not get invite link');
+	}
+
	/**
	 * Method for mapping project members page from json to ProjectMembersPage type.
	 *

View File

@ -110,12 +110,7 @@
<div class="access-grants__header-container"> <div class="access-grants__header-container">
<h3 class="access-grants__header-container__title">My Accesses</h3> <h3 class="access-grants__header-container__title">My Accesses</h3>
<div class="access-grants__header-container__divider" /> <div class="access-grants__header-container__divider" />
<VHeader <VSearch :search="fetch" />
class="access-header-component"
placeholder="Accesses"
:search="fetch"
style-type="access"
/>
</div> </div>
<VLoader v-if="areGrantsFetching" width="100px" height="100px" class="grants-loader" /> <VLoader v-if="areGrantsFetching" width="100px" height="100px" class="grants-loader" />
<div class="access-grants-items"> <div class="access-grants-items">
@ -175,8 +170,8 @@ import { MODALS } from '@/utils/constants/appStatePopUps';
import AccessGrantsItem from '@/components/accessGrants/AccessGrantsItem.vue'; import AccessGrantsItem from '@/components/accessGrants/AccessGrantsItem.vue';
import VButton from '@/components/common/VButton.vue'; import VButton from '@/components/common/VButton.vue';
import VLoader from '@/components/common/VLoader.vue'; import VLoader from '@/components/common/VLoader.vue';
import VHeader from '@/components/common/VHeader.vue';
import VTable from '@/components/common/VTable.vue'; import VTable from '@/components/common/VTable.vue';
import VSearch from '@/components/common/VSearch.vue';
import AccessGrantsIcon from '@/../static/images/accessGrants/accessGrantsIcon.svg'; import AccessGrantsIcon from '@/../static/images/accessGrants/accessGrantsIcon.svg';
import CLIIcon from '@/../static/images/accessGrants/cli.svg'; import CLIIcon from '@/../static/images/accessGrants/cli.svg';
@ -465,10 +460,6 @@ onBeforeUnmount(() => {
.access-grants-items { .access-grants-items {
padding-bottom: 55px; padding-bottom: 55px;
@media screen and (width <= 1150px) {
margin-top: -45px;
}
&__content { &__content {
margin-top: 20px; margin-top: 20px;
} }
@ -505,12 +496,7 @@ onBeforeUnmount(() => {
height: 1px; height: 1px;
width: auto; width: auto;
background-color: #dadfe7; background-color: #dadfe7;
margin-top: 10px; margin: 13px 0 16px;
}
&__access-header-component {
height: 55px !important;
margin-top: 15px;
} }
} }
} }

View File

@ -16,7 +16,7 @@
		</div>
		<div class="blured-container__wrap" :class="{justify: !isMnemonic}">
			<p v-if="isMnemonic" tabindex="0" class="blured-container__wrap__mnemonic" @keyup.space="onCopy">{{ value }}</p>
-			<p v-else tabindex="0" class="blured-container__wrap__text" @keyup.space="onCopy">{{ value }}</p>
+			<p v-else tabindex="0" class="blured-container__wrap__text" :class="{ shown: isValueShown }" @keyup.space="onCopy">{{ value }}</p>
			<div
				v-if="!isMnemonic"
				tabindex="0"
@ -135,12 +135,12 @@ function onCopy(): void {
		&__text {
			font-size: 14px;
-			line-height: 20px;
			color: var(--c-grey-7);
-			margin-right: 16px;
			white-space: nowrap;
			overflow: hidden;
			text-overflow: ellipsis;
+			margin-right: 16px;
+			line-height: 24px;
		}

		&__copy {
@ -160,6 +160,14 @@ function onCopy(): void {
	}
}

+.shown {
+	white-space: unset;
+	text-overflow: unset;
+	overflow-wrap: break-word;
+	text-align: left;
+	font-family: 'Courier', monospace;
+}
+
.justify {
	justify-content: space-between;
}

View File

@ -145,7 +145,7 @@ import EndDateSelection from '@/components/accessGrants/createFlow/components/En
import Toggle from '@/components/accessGrants/createFlow/components/Toggle.vue'; import Toggle from '@/components/accessGrants/createFlow/components/Toggle.vue';
import VButton from '@/components/common/VButton.vue'; import VButton from '@/components/common/VButton.vue';
import SearchIcon from '@/../static/images/accessGrants/newCreateFlow/search.svg'; import SearchIcon from '@/../static/images/common/search.svg';
import CloseIcon from '@/../static/images/accessGrants/newCreateFlow/close.svg'; import CloseIcon from '@/../static/images/accessGrants/newCreateFlow/close.svg';
const props = withDefaults(defineProps<{ const props = withDefaults(defineProps<{

View File

@ -0,0 +1,62 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
<template>
<div class="dropzone" @mouseout="close" @mouseleave="close" @dragleave.self="close">
<div class="dropzone__message">
<p class="dropzone__message__text">
Drop your files to put them into the {{ bucket }} bucket.
</p>
</div>
<p class="dropzone__info">Drag and drop files here to upload</p>
</div>
</template>
<script setup lang="ts">
const props = defineProps<{
bucket: string
close: () => void
}>();
</script>
<style scoped lang="scss">
.dropzone {
z-index: 1;
position: fixed;
inset: 0;
background: rgb(0 0 0 / 35%);
border: 1px dashed var(--c-white);
display: flex;
align-items: center;
justify-content: center;
&__message {
padding: 10px 24px;
background: var(--c-green-1);
border: 1px solid var(--c-green-5);
border-radius: 8px;
position: absolute;
top: 24px;
pointer-events: none;
&__text {
font-family: 'font_medium', sans-serif;
font-size: 14px;
line-height: 20px;
color: var(--c-green-5);
text-align: center;
}
}
&__info {
font-family: 'font_bold', sans-serif;
font-size: 40px;
line-height: 50px;
text-align: center;
max-width: 380px;
color: var(--c-white);
text-shadow: 0 7px 20px 0 rgb(0 0 0 / 15%);
pointer-events: none;
}
}
</style>

View File

@ -9,8 +9,10 @@
		v-cloak
		class="div-responsive"
		@drop.prevent="upload"
-		@dragover.prevent
+		@dragover.prevent="showDropzone"
	>
+		<Dropzone v-if="isOver" :bucket="bucketName" :close="hideDropzone" />
		<bread-crumbs @onUpdate="onRouteChange" @bucketClick="goToBuckets" />

		<div class="tile-action-bar">
@ -91,8 +93,14 @@
		<div class="hr-divider" />

		<MultiplePassphraseBanner
-			v-if="lockedFilesNumber > 0 && isBannerShown && !fetchingFilesSpinner && !currentPath"
-			:on-close="closeBanner"
+			v-if="lockedFilesEntryDisplayed && isLockedBanner"
+			:locked-files-count="lockedFilesCount"
+			:on-close="closeLockedBanner"
+		/>
+		<TooManyObjectsBanner
+			v-if="files.length >= NUMBER_OF_DISPLAYED_OBJECTS && isTooManyObjectsBanner"
+			:on-close="closeTooManyObjectsBanner"
		/>

		<v-table items-label="objects" :total-items-count="files.length" selectable :selected="allFilesSelected" show-select class="file-browser-table" @selectAllClicked="toggleSelectAllFiles">
@ -218,7 +226,9 @@ import VButton from '@/components/common/VButton.vue';
import BucketSettingsNav from '@/components/objects/BucketSettingsNav.vue';
import VTable from '@/components/common/VTable.vue';
import MultiplePassphraseBanner from '@/components/browser/MultiplePassphrasesBanner.vue';
+import TooManyObjectsBanner from '@/components/browser/TooManyObjectsBanner.vue';
import UpEntry from '@/components/browser/UpEntry.vue';
+import Dropzone from '@/components/browser/Dropzone.vue';

import FileIcon from '@/../static/images/objects/file.svg';
import BlackArrowExpand from '@/../static/images/common/BlackArrowExpand.svg';
@ -238,7 +248,9 @@ const fileInput = ref<HTMLInputElement>();
const fetchingFilesSpinner = ref<boolean>(false);
const isUploadDropDownShown = ref<boolean>(false);
-const isBannerShown = ref<boolean>(true);
+const isLockedBanner = ref<boolean>(true);
+const isTooManyObjectsBanner = ref<boolean>(true);
+const isOver = ref<boolean>(false);

/**
 * Retrieve the pathMatch from the current route.
 */
@ -285,7 +297,7 @@ const currentPath = computed((): string => {
/**
 * Return locked files number.
 */
-const lockedFilesNumber = computed((): number => {
+const lockedFilesCount = computed((): number => {
	const ownObjectsCount = obStore.state.objectsCount;

	return objectsCount.value - ownObjectsCount;
@ -305,7 +317,7 @@ const objectsCount = computed((): number => {
 * Indicates if locked files entry is displayed.
 */
const lockedFilesEntryDisplayed = computed((): boolean => {
-	return lockedFilesNumber.value > 0 &&
+	return lockedFilesCount.value > 0 &&
		objectsCount.value <= NUMBER_OF_DISPLAYED_OBJECTS &&
		!fetchingFilesSpinner.value &&
		!currentPath.value;
@ -388,8 +400,15 @@ const bucket = computed((): string => {
/**
 * Closes multiple passphrase banner.
 */
-function closeBanner(): void {
-	isBannerShown.value = false;
+function closeLockedBanner(): void {
+	isLockedBanner.value = false;
+}
+
+/**
+ * Closes too many objects banner.
+ */
+function closeTooManyObjectsBanner(): void {
+	isTooManyObjectsBanner.value = false;
}

function calculateRoutePath(): string {
@ -442,8 +461,12 @@ function filename(file: BrowserObject): string {
 * Upload the current selected or dragged-and-dropped file.
 */
async function upload(e: Event): Promise<void> {
+	if (isOver.value) {
+		isOver.value = false;
+	}
+
	await obStore.upload({ e });
-	await analytics.eventTriggered(AnalyticsEvent.OBJECT_UPLOADED);
+	analytics.eventTriggered(AnalyticsEvent.OBJECT_UPLOADED);

	const target = e.target as HTMLInputElement;
	target.value = '';
}
@ -494,6 +517,20 @@ function toggleUploadDropdown(): void {
	isUploadDropDownShown.value = !isUploadDropDownShown.value;
}

+/**
+ * Makes dropzone visible.
+ */
+function showDropzone(): void {
+	isOver.value = true;
+}
+
+/**
+ * Hides dropzone.
+ */
+function hideDropzone(): void {
+	isOver.value = false;
+}
+
/**
 * Closes upload options dropdown.
 */

View File

@ -283,5 +283,4 @@ function cancelDeleteSelection(): void {
		}
	}
}
</style>

View File

@ -419,7 +419,13 @@ function openDropdown(): void {
async function download(): Promise<void> {
	try {
		await obStore.download(props.file);
-		notify.warning('Do not share download link with other people. If you want to share this data better use "Share" option.');
+		const message = `
+			<p class="message-title">Downloading...</p>
+			<p class="message-info">
+				Keep this download link private.<br>If you want to share, use the Share option.
+			</p>
+		`;
+		notify.success('', message);
	} catch (error) {
		notify.error('Can not download your file', AnalyticsErrorEventSource.FILE_BROWSER_ENTRY);
	}
@ -500,7 +506,15 @@ function cancelDeletion(): void {
}

.dropdown-item.action.p-3.action {
-	font-family: 'Inter', sans-serif;
+	font-family: 'font_regular', sans-serif;
+
+	&:first-of-type {
+		border-radius: 6px 6px 0 0;
+	}
+
+	&:last-of-type {
+		border-radius: 0 0 6px 6px;
+	}
}

	&__label {

View File

@ -6,23 +6,15 @@
<div class="banner__left"> <div class="banner__left">
<LockedIcon class="banner__left__icon" /> <LockedIcon class="banner__left__icon" />
<div class="banner__left__labels"> <div class="banner__left__labels">
<template v-if="objectsCount <= NUMBER_OF_DISPLAYED_OBJECTS"> <h2 class="banner__left__labels__title">
<h2 class="banner__left__labels__title"> You have at least {{ lockedFilesCount }} object{{ lockedFilesCount > 1 ? 's' : '' }} locked with a
You have at least {{ lockedFilesNumber }} object{{ lockedFilesNumber > 1 ? 's' : '' }} locked with a different passphrase.
different passphrase. </h2>
</h2> <p class="banner__left__labels__subtitle">Enter your other passphrase to access these files.</p>
<p class="banner__left__labels__subtitle">Enter your other passphrase to access these files.</p>
</template>
<template v-else>
<h2 class="banner__left__labels__title">
Due to the number of objects you have uploaded to this bucket, {{ lockedFilesNumber }} files are
not displayed.
</h2>
</template>
</div> </div>
</div> </div>
<div class="banner__right"> <div class="banner__right">
<p v-if="objectsCount <= NUMBER_OF_DISPLAYED_OBJECTS" class="banner__right__unlock" @click="openManageModal"> <p class="banner__right__unlock" @click="openManageModal">
Unlock now Unlock now
</p> </p>
<CloseIcon class="banner__right__close" @click="onClose" /> <CloseIcon class="banner__right__close" @click="onClose" />
@ -31,48 +23,19 @@
</template> </template>
<script setup lang="ts"> <script setup lang="ts">
import { computed } from 'vue';
import { Bucket } from '@/types/buckets';
import { ManageProjectPassphraseStep } from '@/types/managePassphrase'; import { ManageProjectPassphraseStep } from '@/types/managePassphrase';
import { MODALS } from '@/utils/constants/appStatePopUps'; import { MODALS } from '@/utils/constants/appStatePopUps';
import { useAppStore } from '@/store/modules/appStore'; import { useAppStore } from '@/store/modules/appStore';
import { useBucketsStore } from '@/store/modules/bucketsStore';
import { useObjectBrowserStore } from '@/store/modules/objectBrowserStore';
import LockedIcon from '@/../static/images/browser/locked.svg'; import LockedIcon from '@/../static/images/browser/locked.svg';
import CloseIcon from '@/../static/images/browser/close.svg'; import CloseIcon from '@/../static/images/browser/close.svg';
const props = withDefaults(defineProps<{ const props = defineProps<{
onClose?: () => void; lockedFilesCount: number
}>(), { onClose: () => void
onClose: () => {}, }>();
});
const appStore = useAppStore(); const appStore = useAppStore();
const bucketsStore = useBucketsStore();
const obStore = useObjectBrowserStore();
const NUMBER_OF_DISPLAYED_OBJECTS = 1000;
/**
* Returns locked files number.
*/
const lockedFilesNumber = computed((): number => {
const ownObjectsCount = obStore.state.objectsCount;
return objectsCount.value - ownObjectsCount;
});
/**
* Returns bucket objects count from store.
*/
const objectsCount = computed((): number => {
const name: string = obStore.state.bucket;
const data: Bucket | undefined = bucketsStore.state.page.buckets.find((bucket: Bucket) => bucket.name === name);
return data?.objectCount || 0;
});
/** /**
* Opens switch passphrase modal. * Opens switch passphrase modal.

View File

@ -0,0 +1,79 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
<template>
<div class="banner">
<div class="banner__left">
<LockedIcon class="banner__left__icon" />
<p class="banner__left__title">
Due to the number of objects you have uploaded, some files may not be displayed.
To list all objects, you can use the
<a
class="banner__left__title__link"
href="https://docs.storj.io/dcs/getting-started/quickstart-uplink-cli/prerequisites"
target="_blank"
rel="noopener noreferrer"
>
Uplink CLI
</a>
tool.
</p>
</div>
<CloseIcon class="banner__close" @click="onClose" />
</div>
</template>
<script setup lang="ts">
import LockedIcon from '@/../static/images/browser/locked.svg';
import CloseIcon from '@/../static/images/browser/close.svg';
const props = defineProps<{
onClose: () => void
}>();
</script>
<style scoped lang="scss">
.banner {
padding: 16px;
background: #fec;
border: 1px solid var(--c-yellow-2);
box-shadow: 0 7px 20px rgb(0 0 0 / 15%);
border-radius: 10px;
display: flex;
align-items: center;
justify-content: space-between;
font-family: 'font_regular', sans-serif;
margin-bottom: 21px;
&__left {
display: flex;
align-items: center;
margin-right: 15px;
&__icon {
margin-right: 16px;
min-width: 32px;
}
&__title {
font-family: 'font_bold', sans-serif;
font-size: 14px;
line-height: 20px;
color: var(--c-black);
&__link {
color: var(--c-blue-3);
&:visited {
color: var(--c-blue-3);
}
}
}
}
&__close {
min-width: 12px;
cursor: pointer;
}
}
</style>

View File

@ -2,18 +2,28 @@
// See LICENSE for copying information.

<template>
-	<div class="button-icon" :class="{ active: isActive }" @click="onPress">
-		<component :is="icon" />
-	</div>
+	<VInfo>
+		<template #icon>
+			<div class="button-icon" :class="{ active: isActive }" @click="onPress">
+				<component :is="icon" />
+			</div>
+		</template>
+		<template #message>
+			<p class="message">{{ info }}</p>
+		</template>
+	</VInfo>
</template>

<script setup lang="ts">
import { Component } from 'vue';

+import VInfo from '@/components/common/VInfo.vue';
+
const props = withDefaults(defineProps<{
	isActive?: boolean
	icon: string
	onPress: () => void
+	info: string
}>(), {
	isActive: false,
});
@ -28,9 +38,41 @@ const props = withDefaults(defineProps<{
	align-items: center;
	justify-content: center;
	cursor: pointer;
+
+	&:hover {
+		background: rgb(255 255 255 / 10%);
+	}
}

.active {
	background: rgb(255 255 255 / 10%);
}

+.message {
+	font-family: 'font_medium', sans-serif;
+	font-size: 12px;
+	line-height: 18px;
+	color: var(--c-white);
+	white-space: nowrap;
+}
+
+:deep(.info__box) {
+	width: auto;
+	cursor: default;
+	top: 100%;
+	left: 50%;
+}
+
+:deep(.info__box__message) {
+	background: var(--c-grey-6);
+	border-radius: 4px;
+	padding: 10px 8px;
+}
+
+:deep(.info__box__arrow) {
+	background: var(--c-grey-6);
+	width: 10px;
+	height: 10px;
+	margin-bottom: -3px;
+}
</style>

View File

@@ -3,7 +3,7 @@
 
 <template>
     <Teleport to="#app">
-        <div ref="viewContainer" class="gallery" tabindex="0" @keydown.esc="closeModal">
+        <div ref="viewContainer" class="gallery" tabindex="0" @keydown.esc="closeModal" @keydown.right="onNext" @keydown.left="onPrevious">
            <div class="gallery__header">
                <LogoIcon class="gallery__header__logo" />
                <SmallLogoIcon class="gallery__header__small-logo" />
@@ -19,13 +19,33 @@
                        :icon="DotsIcon"
                        :on-press="toggleDropdown"
                        :is-active="isOptionsDropdown === true"
+                        info="More"
                    />
-                    <ButtonIcon :icon="MapIcon" :on-press="() => setActiveModal(DistributionModal)" />
-                    <ButtonIcon class="gallery__header__functional__item" :icon="DownloadIcon" :on-press="download" />
-                    <ButtonIcon class="gallery__header__functional__item" :icon="ShareIcon" :on-press="() => setActiveModal(ShareModal)" />
-                    <ButtonIcon :icon="CloseIcon" :on-press="closeModal" />
+                    <ButtonIcon
+                        class="gallery__header__functional__item"
+                        :icon="MapIcon"
+                        :on-press="() => setActiveModal(DistributionModal)"
+                        info="Geographic Distribution"
+                    />
+                    <ButtonIcon
+                        :icon="DownloadIcon"
+                        :on-press="download"
+                        info="Download"
+                    />
+                    <ButtonIcon
+                        class="gallery__header__functional__item"
+                        :icon="ShareIcon"
+                        :on-press="() => setActiveModal(ShareModal)"
+                        info="Share"
+                    />
+                    <ButtonIcon
+                        :icon="CloseIcon"
+                        :on-press="closeModal"
+                        info="Close"
+                    />
                    <OptionsDropdown
                        v-if="isOptionsDropdown"
+                        :on-distribution="() => setActiveModal(DistributionModal)"
                        :on-view-details="() => setActiveModal(DetailsModal)"
                        :on-download="download"
                        :on-share="() => setActiveModal(ShareModal)"
@@ -96,10 +116,11 @@ import { Component, computed, onBeforeMount, onMounted, ref, Teleport, watch } f
 import { useRoute } from 'vue-router';
 import prettyBytes from 'pretty-bytes';
 
-import { BrowserObject, useObjectBrowserStore } from '@/store/modules/objectBrowserStore';
+import { BrowserObject, PreviewCache, useObjectBrowserStore } from '@/store/modules/objectBrowserStore';
 import { AnalyticsErrorEventSource } from '@/utils/constants/analyticsEventNames';
 import { useAppStore } from '@/store/modules/appStore';
 import { useNotify } from '@/utils/hooks';
+import { useBucketsStore } from '@/store/modules/bucketsStore';
 import { RouteConfig } from '@/types/router';
 
 import ButtonIcon from '@/components/browser/galleryView/ButtonIcon.vue';
@@ -125,6 +146,7 @@ import ArrowIcon from '@/../static/images/browser/galleryView/arrow.svg';
 
 const appStore = useAppStore();
 const obStore = useObjectBrowserStore();
+const bucketsStore = useBucketsStore();
 const notify = useNotify();
 const route = useRoute();
 
@@ -139,6 +161,13 @@ const objectPreviewUrl = ref<string>('');
 
 const folderType = 'folder';
 
+/**
+ * Returns object preview URLs cache from store.
+ */
+const cachedObjectPreviewURLs = computed((): Map<string, PreviewCache> => {
+    return obStore.state.cachedObjectPreviewURLs;
+});
+
 /**
  * Retrieve the file object that the modal is set to from the store.
  */
@@ -174,6 +203,13 @@ const extension = computed((): string | undefined => {
     return filePath.value.split('.').pop();
 });
 
+/**
+ * Returns bucket name from store.
+ */
+const bucket = computed((): string => {
+    return bucketsStore.state.fileComponentBucketName;
+});
+
 /**
  * Check to see if the current file is an image file.
  */
@@ -243,6 +279,9 @@ async function fetchPreviewAndMapUrl(): Promise<void> {
         return;
     }
 
+    const encodedPath = encodeURIComponent(`${bucket.value}/${filePath.value.trim()}`);
+    obStore.cacheObjectPreviewURL(encodedPath, { url, lastModified: file.value.LastModified.getTime() });
+
     objectMapUrl.value = `${url}?map=1`;
     objectPreviewUrl.value = `${url}?view=1`;
     isLoading.value = false;
@@ -253,7 +292,6 @@ async function fetchPreviewAndMapUrl(): Promise<void> {
  */
 async function onDelete(): Promise<void> {
     try {
-        const objectsCount = obStore.sortedFiles.length;
         let newFile: BrowserObject | undefined = obStore.sortedFiles[fileIndex.value + 1];
         if (!newFile || newFile.type === folderType) {
             newFile = obStore.sortedFiles.find(f => f.type !== folderType && f.Key !== file.value.Key);
@@ -282,7 +320,13 @@ async function onDelete(): Promise<void> {
 async function download(): Promise<void> {
     try {
         await obStore.download(file.value);
-        notify.warning('Do not share download link with other people. If you want to share this data better use "Share" option.');
+        const message = `
+            <p class="message-title">Downloading...</p>
+            <p class="message-info">
+                Keep this download link private.<br>If you want to share, use the Share option.
+            </p>
+        `;
+        notify.success('', message);
     } catch (error) {
         notify.error('Can not download your file', AnalyticsErrorEventSource.OBJECT_DETAILS_MODAL);
     }
@@ -361,11 +405,41 @@ function setNewObjectPath(objectKey: string): void {
     obStore.setObjectPathForModal(`${currentPath.value}${objectKey}`);
 }
 
+/**
+ * Loads object URL from cache or generates new URL.
+ */
+function processFilePath(): void {
+    const url = findCachedURL();
+
+    if (!url) {
+        fetchPreviewAndMapUrl();
+        return;
+    }
+
+    objectMapUrl.value = `${url}?map=1`;
+    objectPreviewUrl.value = `${url}?view=1`;
+}
+
+/**
+ * Try to find current object path in cache.
+ */
+function findCachedURL(): string | undefined {
+    const encodedPath = encodeURIComponent(`${bucket.value}/${filePath.value.trim()}`);
+    const cache = cachedObjectPreviewURLs.value.get(encodedPath);
+
+    if (!cache) return undefined;
+
+    if (cache.lastModified !== file.value.LastModified.getTime()) {
+        obStore.removeFromObjectPreviewCache(encodedPath);
+        return undefined;
+    }
+
+    return cache.url;
+}
+
 /**
  * Call `fetchPreviewAndMapUrl` on before mount lifecycle method.
  */
 onBeforeMount((): void => {
-    fetchPreviewAndMapUrl();
+    processFilePath();
 });
 
 onMounted((): void => {
@@ -378,7 +452,7 @@ onMounted((): void => {
 watch(filePath, () => {
     if (!filePath.value) return;
 
-    fetchPreviewAndMapUrl();
+    processFilePath();
 });
 </script>
 
@@ -500,6 +574,16 @@ watch(filePath, () => {
        cursor: pointer;
        min-width: 46px;
 
+        &:hover {
+
+            :deep(rect) {
+
+                &:first-of-type {
+                    fill: rgb(255 255 255 / 10%);
+                }
+            }
+        }
+
        @media screen and (width <= 600px) {
            display: none;
        }
@@ -564,6 +648,16 @@ watch(filePath, () => {
        svg {
            width: 30px;
            height: 30px;
+
+            &:hover {
+
+                :deep(rect) {
+
+                    &:first-of-type {
+                        fill: rgb(255 255 255 / 10%);
+                    }
+                }
+            }
        }
 
        @media screen and (width <= 600px) {
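Note: the script hunks above introduce a client-side cache for object preview URLs, keyed by the encoded `bucket/path` and invalidated whenever the object's `LastModified` timestamp changes. A minimal standalone sketch of that pattern, where the local `Map` and the `PreviewCache` shape are simplified stand-ins for the store state and its mutations:

    // Simplified stand-in for obStore.state.cachedObjectPreviewURLs.
    interface PreviewCache {
        url: string;
        lastModified: number;
    }

    const cache = new Map<string, PreviewCache>();

    // Cache key: the encoded "bucket/path", matching the diff above.
    function cacheKey(bucket: string, filePath: string): string {
        return encodeURIComponent(`${bucket}/${filePath.trim()}`);
    }

    // Returns the cached URL, or undefined when there is no entry or the
    // object changed since it was cached; stale entries are evicted.
    function findCachedURL(bucket: string, filePath: string, lastModified: number): string | undefined {
        const key = cacheKey(bucket, filePath);
        const entry = cache.get(key);

        if (!entry) return undefined;

        if (entry.lastModified !== lastModified) {
            cache.delete(key);
            return undefined;
        }

        return entry.url;
    }

A hit reuses the cached URL; a miss or stale entry falls back to regenerating it, which is what `processFilePath` does by calling `fetchPreviewAndMapUrl`.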

View File

@@ -3,16 +3,20 @@
 
 <template>
     <div class="options">
+        <div class="options__item" @click="onDistribution">
+            <MapIcon />
+            <p class="options__item__label">Distribution</p>
+        </div>
        <div class="options__item" @click="onViewDetails">
            <DetailsIcon />
            <p class="options__item__label">View details</p>
        </div>
        <div class="options__item" @click="onDownload">
-            <SmallDownloadIcon />
+            <DownloadIcon />
            <p class="options__item__label">Download</p>
        </div>
        <div class="options__item" @click="onShare">
-            <SmallShareIcon />
+            <ShareIcon />
            <p class="options__item__label">Share</p>
        </div>
        <div class="options__item" @click="onDelete">
@@ -24,11 +28,13 @@
 
 <script setup lang="ts">
 import DetailsIcon from '@/../static/images/browser/galleryView/details.svg';
-import SmallDownloadIcon from '@/../static/images/browser/galleryView/downloadSmall.svg';
-import SmallShareIcon from '@/../static/images/browser/galleryView/shareSmall.svg';
+import DownloadIcon from '@/../static/images/browser/galleryView/download.svg';
+import ShareIcon from '@/../static/images/browser/galleryView/share.svg';
 import DeleteIcon from '@/../static/images/browser/galleryView/delete.svg';
+import MapIcon from '@/../static/images/browser/galleryView/map.svg';
 
 const props = defineProps<{
+    onDistribution: () => void
    onViewDetails: () => void
    onDownload: () => void
    onShare: () => void
@@ -59,6 +65,15 @@ const props = defineProps<{
        cursor: pointer;
        padding: 16px;
 
+        svg {
+            width: 18px;
+            height: 18px;
+
+            :deep(path) {
+                fill: var(--c-grey-6);
+            }
+        }
+
        &__label {
            margin-left: 16px;
            font-size: 14px;

View File

@@ -24,7 +24,7 @@
                </p>
            </div>
            <div class="modal__item last">
-                <p class="modal__item__label">Saved in</p>
+                <p class="modal__item__label">Bucket</p>
                <p class="modal__item__label right" :title="bucket">{{ bucket }}</p>
            </div>
            <VButton

File diff suppressed because it is too large

View File

@@ -27,7 +27,7 @@
                </p>
                <p v-else :class="{primary: index === 0}" :title="val" @click.stop="(e) => cellContentClicked(index, e)">
                    <middle-truncate v-if="keyVal === 'fileName'" :text="val" />
-                    <project-ownership-tag v-else-if="keyVal === 'role'" :no-icon="itemType !== 'project' && val !== ProjectRole.Invited" :role="val" />
+                    <project-ownership-tag v-else-if="keyVal === 'role'" :no-icon="!isProjectRoleIconShown(val)" :role="val" />
                    <span v-else>{{ val }}</span>
                </p>
                <div v-if="showBucketGuide(index)" class="animation">
@@ -83,15 +83,19 @@ const icon = computed((): string => ObjectType.findIcon(props.itemType));
 const customIconClasses = computed(() => {
     const classes = {};
     if (props.itemType === 'project') {
-        if (props.item['owner']) {
+        if (props.item['role'] === ProjectRole.Owner) {
             classes['project-owner'] = true;
-        } else {
+        } else if (props.item['role'] === ProjectRole.Member) {
             classes['project-member'] = true;
         }
     }
     return classes;
 });
 
+function isProjectRoleIconShown(role: ProjectRole) {
+    return props.itemType === 'project' || role === ProjectRole.Invited || role === ProjectRole.InviteExpired;
+}
+
 function selectClicked(event: Event): void {
     emit('selectClicked', event);
 }
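Note: `isProjectRoleIconShown` above centralizes the decision of when the ownership tag renders an icon, replacing the inline boolean in the template. A standalone sketch of the check; the enum string values here are assumptions for illustration (the real `ProjectRole` enum is defined elsewhere in web/satellite):

    // Hypothetical stand-in for the ProjectRole enum referenced above.
    enum ProjectRole {
        Member = 'Member',
        Owner = 'Owner',
        Invited = 'Invited',
        InviteExpired = 'Invite Expired',
    }

    // Project rows always show an icon; other rows only when the role
    // marks a pending or expired invitation.
    function isProjectRoleIconShown(itemType: string, role: ProjectRole): boolean {
        return itemType === 'project'
            || role === ProjectRole.Invited
            || role === ProjectRole.InviteExpired;
    }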

View File

@@ -64,6 +64,8 @@ import DocumentIcon from '@/../static/images/common/documentIcon.svg';
 import DownloadIcon from '@/../static/images/common/download.svg';
 import FolderIcon from '@/../static/images/objects/newFolder.svg';
 import ResourcesIcon from '@/../static/images/navigation/resources.svg';
+import UploadIcon from '@/../static/images/common/upload.svg';
+import ProjectIcon from '@/../static/images/navigation/project.svg';
 
 const props = withDefaults(defineProps<{
     link?: string;
@@ -119,6 +121,8 @@ const icons = new Map<string, string>([
     ['resources', ResourcesIcon],
     ['addcircle', AddCircleIcon],
     ['add', WhitePlusIcon],
+    ['upload', UploadIcon],
+    ['project', ProjectIcon],
 ]);
 
 const iconComponent = computed((): string | undefined => icons.get(props.icon.toLowerCase()));
@@ -313,8 +317,7 @@ function handleClick(): void {
        background-color: #0059d0;
 
        &.transparent,
-        &.blue-white,
-        &.white {
+        &.blue-white {
            box-shadow: none !important;
            background-color: #2683ff !important;
            border: 1px solid #2683ff !important;
@@ -329,6 +332,20 @@ function handleClick(): void {
        }
    }
 
+    &.white {
+        box-shadow: none !important;
+        border: 1px solid var(--c-blue-3) !important;
+
+        :deep(path),
+        :deep(rect) {
+            fill: var(--c-blue-3) !important;
+        }
+
+        .label {
+            color: var(--c-blue-3) !important;
+        }
+    }
+
    &.grey-blue {
        background-color: #2683ff !important;
        border-color: #2683ff !important;
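Note: the first two hunks above extend the button component's icon registry. The pattern, confirmed by the diff, is a plain `Map` keyed by a lowercased name that resolves the `icon` prop to a component at runtime. A standalone sketch, with string placeholders standing in for the imported SVG components:

    // String placeholders stand in for imported SVG components such as
    // UploadIcon and ProjectIcon in the diff above.
    const icons = new Map<string, string>([
        ['upload', 'UploadIcon'],
        ['project', 'ProjectIcon'],
    ]);

    // Lookup is case-insensitive because keys are stored lowercase;
    // unknown names resolve to undefined so the caller can fall back.
    function iconComponent(icon: string): string | undefined {
        return icons.get(icon.toLowerCase());
    }

Registering icons in one map keeps the template free of per-icon conditionals: adding a new icon is a single import plus one map entry.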

View File

@@ -1,86 +0,0 @@
-// Copyright (C) 2019 Storj Labs, Inc.
-// See LICENSE for copying information.
-
-<template>
-    <div class="header-container">
-        <div class="header-container__buttons-area">
-            <slot />
-        </div>
-        <div v-if="styleType === 'common'" class="search-container">
-            <VSearch
-                ref="searchInput"
-                :placeholder="placeholder"
-                :search="search"
-            />
-        </div>
-        <div v-if="styleType === 'access'">
-            <VSearchAlternateStyling
-                ref="searchInput"
-                :placeholder="placeholder"
-                :search="search"
-            />
-        </div>
-    </div>
-</template>
-
-<script setup lang="ts">
-import { ref } from 'vue';
-
-import VSearch from '@/components/common/VSearch.vue';
-import VSearchAlternateStyling from '@/components/common/VSearchAlternateStyling.vue';
-
-type searchCallback = (search: string) => Promise<void>;
-
-const props = withDefaults(defineProps<{
-    placeholder: string;
-    search: searchCallback;
-    styleType?: string;
-}>(), {
-    placeholder: '',
-    styleType: 'common',
-});
-
-const searchInput = ref<{ clearSearch: () => void }>();
-
-function clearSearch(): void {
-    searchInput.value?.clearSearch();
-}
-
-defineExpose({ clearSearch });
-</script>
-
-<style scoped lang="scss">
-    .header-container {
-        width: 100%;
-        height: 85px;
-        position: relative;
-        display: flex;
-        align-items: center;
-        justify-content: space-between;
-
-        &__buttons-area {
-            width: auto;
-            display: flex;
-            align-items: center;
-            justify-content: space-between;
-        }
-
-        .search-container {
-            position: relative;
-        }
-    }
-
-    @media screen and (width <= 1150px) {
-
-        .header-container {
-            flex-direction: column;
-            align-items: flex-start;
-            margin-bottom: 75px;
-
-            .search-container {
-                width: 100%;
-                margin-top: 30px;
-            }
-        }
-    }
-</style>

View File

@@ -182,6 +182,11 @@ watch(() => props.initValue, (val, oldVal) => {
 
 onBeforeMount(() => {
     type.value = props.isPassword ? passwordType : textType;
+
+    if (props.initValue) {
+        value.value = props.initValue;
+        emit('setData', props.initValue);
+    }
 });
 </script>
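Note: the added `onBeforeMount` block seeds the field from an `initValue` prop and immediately emits it, so the parent form's state matches what the input displays before the user types anything. A hedged sketch of the same idea extracted from the component (the names are simplified stand-ins for its props and emit):

    import { ref } from 'vue';

    // Seed local input state from an optional initial value and notify
    // the parent once, mirroring the onBeforeMount hook above.
    function useSeededValue(initValue: string | undefined, emitSetData: (v: string) => void) {
        const value = ref<string>('');

        if (initValue) {
            value.value = initValue;
            emitSetData(initValue);
        }

        return value;
    }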

View File

@@ -75,10 +75,29 @@ onMounted((): void => {
 
        &__close {
            position: absolute;
-            right: 24px;
-            top: 24px;
+            right: 3px;
+            top: 3px;
+            padding: 10px;
+            border-radius: 16px;
            cursor: pointer;
+            opacity: 0.55;
+
+            &:hover {
+                background-color: var(--c-grey-2);
+            }
+
+            &:active {
+                background-color: var(--c-grey-4);
+            }
+
+            svg {
+                display: block;
+                width: 12px;
+                height: 12px;
+
+                :deep(path) {
+                    fill: var(--c-black);
+                }
+            }
        }
    }
 }

View File

@@ -2,76 +2,46 @@
 // See LICENSE for copying information.
 
 <template>
-    <input
-        ref="input"
-        v-model="searchQuery"
-        readonly
-        class="common-search-input"
-        :placeholder="`Search ${placeholder}`"
-        :style="style"
-        type="text"
-        autocomplete="off"
-        maxlength="72"
-        @mouseenter="onMouseEnter"
-        @mouseleave="onMouseLeave"
-        @input="processSearchQuery"
-        @focus="removeReadOnly"
-        @blur="addReadOnly"
-    >
+    <div class="search-container">
+        <SearchIcon class="search-container__icon" />
+        <input
+            v-model="searchQuery"
+            class="search-container__input"
+            placeholder="Search"
+            type="text"
+            autocomplete="off"
+            readonly
+            maxlength="72"
+            @input="processSearchQuery"
+            @focus="removeReadOnly"
+            @blur="addReadOnly"
+        >
+    </div>
 </template>
 
 <script setup lang="ts">
-import { computed, ref } from 'vue';
+import { ref } from 'vue';
 
 import { useDOM } from '@/composables/DOM';
 
-type searchCallback = (search: string) => Promise<void>;
-interface SearchStyle {
-    width: string;
-}
+import SearchIcon from '@/../static/images/common/search.svg';
 
-const props = withDefaults(defineProps<{
-    search: searchCallback;
-    placeholder?: string;
-}>(), {
-    placeholder: '',
-});
+declare type searchCallback = (search: string) => Promise<void>;
+
+const props = defineProps<{
+    search: searchCallback,
+}>();
 
 const { removeReadOnly, addReadOnly } = useDOM();
 
-const inputWidth = ref<string>('56px');
 const searchQuery = ref<string>('');
-const input = ref<HTMLInputElement>();
-
-const style = computed((): SearchStyle => {
-    return { width: inputWidth.value };
-});
 
 /**
- * Expands search input.
- */
-function onMouseEnter(): void {
-    inputWidth.value = '540px';
-    input.value?.focus();
-}
-
-/**
- * Collapses search input if no search query.
- */
-function onMouseLeave(): void {
-    if (!searchQuery.value) {
-        inputWidth.value = '56px';
-        input.value?.blur();
-    }
-}
-
-/**
- * Clears search query and collapses input.
+ * Clears search query.
  */
 function clearSearch(): void {
     searchQuery.value = '';
     processSearchQuery();
-    inputWidth.value = '56px';
 }
 
 async function processSearchQuery(): Promise<void> {
@@ -82,31 +52,37 @@ defineExpose({ clearSearch });
 </script>
 
 <style scoped lang="scss">
-    .common-search-input {
-        position: absolute;
-        right: 0;
-        bottom: 50%;
-        transform: translateY(50%);
-        padding: 0 38px 0 18px;
-        border: 1px solid #f2f2f2;
-        box-sizing: border-box;
-        box-shadow: 0 4px 4px rgb(231 232 238 / 60%);
-        outline: none;
-        border-radius: 36px;
-        height: 56px;
-        font-family: 'font_regular', sans-serif;
-        font-size: 16px;
-        transition: all 0.4s ease-in-out;
-        background-image: url('../../../static/images/common/search.png');
-        background-repeat: no-repeat;
-        background-size: 22px 22px;
-        background-position: top 16px right 16px;
-    }
+    .search-container {
+        padding: 8px;
+        display: flex;
+        align-items: center;
+        box-sizing: border-box;
+        border: 1px solid var(--c-grey-3);
+        border-radius: 10px;
+        width: 250px;
+        background-color: #fff;
 
-    @media screen and (width <= 1150px) {
+        @media screen and (width <= 1150px) {
+            width: 100%;
+        }
 
-        .common-search-input {
-            width: 100% !important;
+        &__icon {
+            margin: 0 12px 0 4px;
+        }
+
+        &__input {
+            flex: 1;
+            background-color: transparent;
+            outline: none;
+            border: none;
+            font-family: 'font_regular', sans-serif;
+            font-size: 14px;
+            line-height: 20px;
        }
    }
+
+    ::placeholder {
+        color: var(--c-grey-6);
+        opacity: 0.7;
+    }
 </style>
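Note: the rewritten search input keeps the `readonly` attribute together with the `@focus`/`@blur` handlers. This is a common trick to suppress browser autofill: the field stays readonly (which autofill skips) until the user actually focuses it. Assuming the `useDOM` composable provides handlers along these lines — an assumption; only the attribute toggling is sketched here:

    // Make the input editable on focus; autofill has already been skipped
    // because the element was readonly at page load.
    function removeReadOnly(event: FocusEvent): void {
        (event.target as HTMLInputElement).removeAttribute('readonly');
    }

    // Restore readonly on blur so the field is again ignored by autofill.
    function addReadOnly(event: FocusEvent): void {
        (event.target as HTMLInputElement).setAttribute('readonly', 'readonly');
    }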

View File

@@ -1,78 +0,0 @@
-// Copyright (C) 2019 Storj Labs, Inc.
-// See LICENSE for copying information.
-
-<template>
-    <input
-        v-model="searchQuery"
-        class="access-search-input"
-        :placeholder="`Search ${placeholder}`"
-        type="text"
-        autocomplete="off"
-        readonly
-        maxlength="72"
-        @input="processSearchQuery"
-        @focus="removeReadOnly"
-        @blur="addReadOnly"
-    >
-</template>
-
-<script setup lang="ts">
-import { ref } from 'vue';
-
-import { useDOM } from '@/composables/DOM';
-
-declare type searchCallback = (search: string) => Promise<void>;
-
-const props = withDefaults(defineProps<{
-    placeholder?: string,
-    search: searchCallback,
-}>(), { placeholder: '' });
-
-const { removeReadOnly, addReadOnly } = useDOM();
-
-const searchQuery = ref<string>('');
-
-/**
- * Clears search query.
- */
-function clearSearch(): void {
-    searchQuery.value = '';
-    processSearchQuery();
-}
-
-async function processSearchQuery(): Promise<void> {
-    await props.search(searchQuery.value);
-}
-
-defineExpose({ clearSearch });
-</script>
-
-<style scoped lang="scss">
-    .access-search-input {
-        position: absolute;
-        left: 0;
-        bottom: 0;
-        padding: 0 10px 0 50px;
-        box-sizing: border-box;
-        outline: none;
-        border: 1px solid var(--c-grey-3);
-        border-radius: 10px;
-        height: 40px;
-        width: 250px;
-        font-family: 'font_regular', sans-serif;
-        font-size: 16px;
-        background-color: #fff;
-        background-image: url('../../../static/images/common/search-gray.png');
-        background-repeat: no-repeat;
-        background-size: 22px 22px;
-        background-position: top 8px left 14px;
-
-        @media screen and (width <= 1150px) {
-            width: 100%;
-        }
-    }
-
-    ::placeholder {
-        color: #afb7c1;
-    }
-</style>

Some files were not shown because too many files have changed in this diff