Compare commits

..

8 Commits

Author SHA1 Message Date
f75ec5ba34 HACK: prebuild storagenode gui 2023-07-30 22:55:41 +01:00
littleskunk
cb65ebe81c
release v1.82.1 2023-06-26 14:46:05 +02:00
Michal Niewrzal
e1f8434a03 satellite/accounting/tally: save tallies in batches
Because we are saving all tallies as a single SQL statement, we finally
reached the maximum message size. With this change we will call SaveTallies multiple times in batches.

https://github.com/storj/storj/issues/5977

Change-Id: I0c7dd27779b1743ede66448fb891e65c361aa3b0
2023-06-26 14:42:58 +02:00
Wilfred Asomani
433493a935 web/satellite: prevent unauthorized access to project settings page
This change further restricts projects members from accessing the
projects settings page by navigating to (all) projects dashboard when
/edit-project-details is visited or project is switched.
It also applies a white background to the project ownership tag to
improve contrast and visibility.

Change-Id: Ib855c4e3aa4be7ec9ec1e9b312041118442358ad
2023-06-26 14:42:58 +02:00
Wilfred Asomani
8f1d4a6506 web/satellite: use correct color for projects table icons
This change uses the correct color corresponding to the role of a user
on a project.

Change-Id: Ibd8f9ccae4486a8039f77bae5c2533b060e73be9
2023-06-26 14:42:58 +02:00
Wilfred Asomani
e2603461ab web/satellite: hide project settings option for members
This change follows up on 8f7c59d to hide project settings option on
the all projects dashboard table for members.

Change-Id: I0ac246e0f6018d7b3028b68439049df3081fce29
2023-06-26 14:42:58 +02:00
Cameron
f4297e42d0 satellite/payments/accountfreeze: set grace period default to 15 days
Change-Id: Ied8f3758b579b83ebf04cba0fde9715c689bac4f
2023-06-26 14:42:58 +02:00
Wilfred Asomani
0ad544731d Revert "satellite/db: fix long loadtime for charges endpoint"
This reverts commit 676178299f.

Reason for revert:
The new query used by this commit performs a full table scan.
It's been reverted pending a fix for that.

Change-Id: Idc53954459aa6f5a692056232b8674b11d1928ce
2023-06-26 14:42:58 +02:00
141 changed files with 13099 additions and 3275 deletions

View File

@ -37,13 +37,6 @@ satellite-web:
SAVE ARTIFACT dist AS LOCAL web/satellite/dist
SAVE ARTIFACT static AS LOCAL web/satellite/static
satellite-admin:
FROM node:16
WORKDIR /build
COPY satellite/admin/ui .
RUN ./build.sh
SAVE ARTIFACT build AS LOCAL satellite/admin/ui/build
storagenode-bin:
COPY go.mod go.mod
COPY go.sum go.sum
@ -119,7 +112,6 @@ build-tagged-image:
FROM img.dev.storj.io/storjup/base:20230208-1
COPY +multinode-web/dist /var/lib/storj/storj/web/multinode/dist
COPY +satellite-web/dist /var/lib/storj/storj/web/satellite/dist
COPY +satellite-admin/build /app/satellite-admin/
COPY +satellite-web/static /var/lib/storj/storj/web/satellite/static
COPY +storagenode-web/dist /var/lib/storj/storj/web/storagenode/dist
COPY +storagenode-web/static /var/lib/storj/storj/web/storagenode/static

View File

@ -208,14 +208,7 @@ var (
Long: "Applies free tier coupon to Stripe customers without a coupon",
RunE: cmdApplyFreeTierCoupons,
}
setInvoiceStatusCmd = &cobra.Command{
Use: "set-invoice-status [start-period] [end-period] [status]",
Short: "set all open invoices status",
Long: "set all open invoices in the specified date ranges to the provided status. Period is a UTC date formatted like YYYY-MM.",
Args: cobra.ExactArgs(3),
RunE: cmdSetInvoiceStatus,
}
createCustomerBalanceInvoiceItemsCmd = &cobra.Command{
createCustomerBalanceInvoiceItems = &cobra.Command{
Use: "create-balance-invoice-items",
Short: "Creates stripe invoice line items for stripe customer balance",
Long: "Creates stripe invoice line items for stripe customer balances obtained from past invoices and other miscellaneous charges.",
@ -349,9 +342,6 @@ var (
Database string `help:"satellite database connection string" releaseDefault:"postgres://" devDefault:"postgres://"`
Before string `help:"select only exited nodes before this UTC date formatted like YYYY-MM. Date cannot be newer than the current time (required)"`
}
setInvoiceStatusCfg struct {
DryRun bool `help:"do not update stripe" default:"false"`
}
confDir string
identityDir string
@ -391,8 +381,7 @@ func init() {
compensationCmd.AddCommand(recordPeriodCmd)
compensationCmd.AddCommand(recordOneOffPaymentsCmd)
billingCmd.AddCommand(applyFreeTierCouponsCmd)
billingCmd.AddCommand(setInvoiceStatusCmd)
billingCmd.AddCommand(createCustomerBalanceInvoiceItemsCmd)
billingCmd.AddCommand(createCustomerBalanceInvoiceItems)
billingCmd.AddCommand(prepareCustomerInvoiceRecordsCmd)
billingCmd.AddCommand(createCustomerProjectInvoiceItemsCmd)
billingCmd.AddCommand(createCustomerInvoicesCmd)
@ -424,9 +413,7 @@ func init() {
process.Bind(reportsVerifyGEReceiptCmd, &reportsVerifyGracefulExitReceiptCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
process.Bind(partnerAttributionCmd, &partnerAttribtionCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
process.Bind(applyFreeTierCouponsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
process.Bind(setInvoiceStatusCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
process.Bind(setInvoiceStatusCmd, &setInvoiceStatusCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
process.Bind(createCustomerBalanceInvoiceItemsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
process.Bind(createCustomerBalanceInvoiceItems, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
process.Bind(prepareCustomerInvoiceRecordsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
process.Bind(createCustomerProjectInvoiceItemsCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
process.Bind(createCustomerInvoicesCmd, &runCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.IdentityDir(identityDir))
@ -767,30 +754,6 @@ func cmdValueAttribution(cmd *cobra.Command, args []string) (err error) {
return reports.GenerateAttributionCSV(ctx, partnerAttribtionCfg.Database, start, end, userAgents, file)
}
// cmdSetInvoiceStatus sets the status of all open invoices within the provided period to the provided status.
// args[0] is the start of the period in YYYY-MM format.
// args[1] is the end of the period in YYYY-MM format.
// args[2] is the status to set the invoices to.
func cmdSetInvoiceStatus(cmd *cobra.Command, args []string) (err error) {
ctx, _ := process.Ctx(cmd)
periodStart, err := parseYearMonth(args[0])
if err != nil {
return err
}
periodEnd, err := parseYearMonth(args[1])
if err != nil {
return err
}
// parseYearMonth returns the first day of the month, but we want the period end to be the last day of the month
periodEnd = periodEnd.AddDate(0, 1, -1)
return runBillingCmd(ctx, func(ctx context.Context, payments *stripe.Service, _ satellite.DB) error {
return payments.SetInvoiceStatus(ctx, periodStart, periodEnd, args[2], setInvoiceStatusCfg.DryRun)
})
}
func cmdCreateCustomerBalanceInvoiceItems(cmd *cobra.Command, _ []string) (err error) {
ctx, _ := process.Ctx(cmd)

View File

@ -1,38 +0,0 @@
# Billing Page Testplan
 
## Background
This testplan is going to cover the new Billing Page. It will go over the figma design seen here - [Billing Page](https://www.figma.com/file/HlmasFJNHxs2lzGerq3WYH/Satellite-GUI-Public?node-id=11080%3A68109)
 
 
| Test Scenario | Test Cases | Description | Comments |
|--------------------------------------|--------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|
| | | | |
| Roles behaviour | 1. Owner role. | Can invite, remove members. Make all project operations (upload/list/download/delete/generate/accesses). Update project info and project limits. | |
| | 2. Member role. | Can make all project operations (upload/list/download/delete/generate/accesses). Project member shouldn't see project in the billing screen | |
| | 3. Invited role. | This role signifies that the member has not accepted their invitation and cannot interact with the project in any way. | |
| Adding and removing Project Members. | 4. Adding member who has an account | If an invited member already has an activated user account on the project's satellite, the invitation email will contain a link that directs them to the satellite's login page. | |
| | 5. Adding member who doesn't have an account | If the member does not have an account, the invitation email's link will direct them to the registration page. | |
| | 6. Adding member who has not activated account | If the invited member's account has not been activated, the invitation email will contain an account activation link and a message informing them that they will need to activate their account before they can accept the invitation. | |
| | 7. Security. Existing vs non-existing user invitation | Invite an existing user vs inviting a non-existing user. For security reasons the behavior should be the same. If a user exists, the invite should look the same. That also means at that point we can't display the user name and have to stick with the email name. | |
| | 8. User's name showing. User who accepts the invite vs user who doesn't | For a user who hasn't accepted the invite, we can't display the user name and have to stick with the email name. After the invite is accepted, the list should show the full name of the customer. | |
| | 9. Invite a person who is already a member | Invite a person twice after the first invite was already accepted. -> Show an error message about the user already being a member | |
| | 10. Resend invitation | Invite a person twice without the first invite being accepted. -> Show an info message about the duplicate | |
| | 11. Invitation token expiration | Invite token should have an expiration date. It should be as low as account activation. Maybe a full week would be a good balance. | |
| | 12. Token inactivation after resending | What should happen if Bob accepts the first invite but rejects the second invite (reminder email)? | |
| | 13. Token inactivation after removing | Alice invites Bob, Bob has not accepted the invite yet, Alice deletes the invite, Bob tries to accept the invite. Which error message do we show Bob? Should Alice deleting a project member also send out a notification email? | |
| | 14. Invite after removing | Alice removes Bob from her project and after that sends out a new invite. | |
| | 15. Invitation email - special scenario | Bob creates a user with the normal signup process, Bob doesn't confirm the activation email, Alice sends an invite. Which email do we send? According to the google doc we would send the account creation email but that shouldn't work here because there is already an account in our DB just not activated yet. Maybe just login the user and show him the invite instead of the signup process | |
| | 16. Invite rejection after creating acc | User create account but reject invite. Should they see an empty All project Dashboard? | |
| Billing | 17. Billing estimation | Only Owner can see billing estimation, member can't. Security -> try send API request for estimation https://satellite.qa.storj.io/api/v0/payments/account/charges?from=1680307200&to=168207756 with Member's token | |
| | 18. Invoices | Project is added to invoice only for Owner, not for member | |
| Functional | 19. Search | Search by name & email fields | |
| | 20. Sorting | Sort by name, date added (email?) | |
| | 21. Paginator | Amount of pages should be calculated correctly | |
| | 22. Drop-list for choosing amount of rows | Check that when the rows amount changes, the amount of pages changes accordingly | |
| | 23. Remove user 2 ways | Should be the same behaviour with user email confirmation | |
| | 24. Resend invite 2 ways | Should be called the same endpoints for inviting users | |

4
go.mod
View File

@ -63,8 +63,8 @@ require (
storj.io/common v0.0.0-20230602145716-d6ea82d58b3d
storj.io/drpc v0.0.33
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41
storj.io/private v0.0.0-20230627140631-807a2f00d0e1
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2
storj.io/private v0.0.0-20230614131149-2ffd1635adea
storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33
)
require (

8
go.sum
View File

@ -1022,7 +1022,7 @@ storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41 h1:SVuEocEhZfFc13J1Aml
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41/go.mod h1:iK+dmHZZXQlW7ahKdNSOo+raMk5BDL2wbD62FIeXLWs=
storj.io/picobuf v0.0.1 h1:ekEvxSQCbEjTVIi/qxj2za13SJyfRE37yE30IBkZeT0=
storj.io/picobuf v0.0.1/go.mod h1:7ZTAMs6VesgTHbbhFU79oQ9hDaJ+MD4uoFQZ1P4SEz0=
storj.io/private v0.0.0-20230627140631-807a2f00d0e1 h1:O2+Xjq8H4TKad2cnhvjitK3BtwkGtJ2TfRCHOIN8e7w=
storj.io/private v0.0.0-20230627140631-807a2f00d0e1/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2 h1:XnJR9egrqvAqx5oCRu2b13ubK0iu0qTX12EAa6lAPhg=
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=
storj.io/private v0.0.0-20230614131149-2ffd1635adea h1:/dv0bYRPgCFvoXF0S14Ien41i12sj9+s4aKhCrFzXHg=
storj.io/private v0.0.0-20230614131149-2ffd1635adea/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33 h1:A6z1FOmqqh44BI/UOPwTi0qaM+/Hdpiwk3QAuvWf03g=
storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=

View File

@ -6,16 +6,16 @@ package version
import _ "unsafe" // needed for go:linkname
//go:linkname buildTimestamp storj.io/private/version.buildTimestamp
var buildTimestamp string
var buildTimestamp string = "1687783565"
//go:linkname buildCommitHash storj.io/private/version.buildCommitHash
var buildCommitHash string
var buildCommitHash string = "e1f8434a03290f36202d40a3f887da1e4dc68ee5"
//go:linkname buildVersion storj.io/private/version.buildVersion
var buildVersion string
var buildVersion string = "v1.82.1"
//go:linkname buildRelease storj.io/private/version.buildRelease
var buildRelease string
var buildRelease string = "true"
// ensure that linter understands that the variables are being used.
func init() { use(buildTimestamp, buildCommitHash, buildVersion, buildRelease) }

View File

@ -1098,8 +1098,8 @@ func TestProjectUsage_BandwidthDeadAllocation(t *testing.T) {
total, err := io.ReadFull(reader, p)
require.NoError(t, err)
require.Equal(t, total, len(p))
require.NoError(t, reader.Close())
require.NoError(t, cleanFn())
require.NoError(t, reader.Close())
planet.Satellites[0].Orders.Chore.Loop.TriggerWait()

View File

@ -422,7 +422,6 @@ A successful response body:
},
"owner": {
"id": "12345678-1234-1234-1234-123456789abc",
"fullName": "test user",
"email": "bob@example.test",
"paidTier": true
}

View File

@ -164,7 +164,6 @@ func (server *Server) getAPIKey(w http.ResponseWriter, r *http.Request) {
}
type ownerData struct {
ID uuid.UUID `json:"id"`
FullName string `json:"fullName"`
Email string `json:"email"`
PaidTier bool `json:"paidTier"`
}
@ -184,10 +183,8 @@ func (server *Server) getAPIKey(w http.ResponseWriter, r *http.Request) {
Name: project.Name,
},
Owner: ownerData{
ID: user.ID,
FullName: user.FullName,
Email: user.Email,
PaidTier: user.PaidTier,
ID: user.ID,
Email: user.Email,
},
})
if err != nil {

View File

@ -264,36 +264,9 @@ func TestAPIKeyManagementGet(t *testing.T) {
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
user, err := planet.Satellites[0].AddUser(ctx, console.CreateUser{
FullName: "testuser123",
Email: "test@email.com",
}, 1)
require.NoError(t, err)
project, err := planet.Satellites[0].AddProject(ctx, user.ID, "testproject")
require.NoError(t, err)
secret, err := macaroon.NewSecret()
require.NoError(t, err)
apiKey, err := macaroon.NewAPIKey(secret)
require.NoError(t, err)
apiKeyInfo, err := planet.Satellites[0].DB.Console().APIKeys().Create(ctx, apiKey.Head(), console.APIKeyInfo{
Name: "testkey",
ProjectID: project.ID,
Secret: secret,
})
require.NoError(t, err)
userCtx, err := planet.Satellites[0].UserContext(ctx, user.ID)
require.NoError(t, err)
_, err = planet.Satellites[0].API.Console.Service.Payments().AddCreditCard(userCtx, "test")
require.NoError(t, err)
address := planet.Satellites[0].Admin.Admin.Listener.Addr()
link := fmt.Sprintf("http://"+address.String()+"/api/apikeys/%s", apiKey.Serialize())
apikey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
link := fmt.Sprintf("http://"+address.String()+"/api/apikeys/%s", apikey.Serialize())
req, err := http.NewRequestWithContext(ctx, http.MethodGet, link, nil)
require.NoError(t, err)
@ -315,7 +288,6 @@ func TestAPIKeyManagementGet(t *testing.T) {
}
type ownerData struct {
ID uuid.UUID `json:"id"`
FullName string `json:"fullName"`
Email string `json:"email"`
PaidTier bool `json:"paidTier"`
}
@ -328,21 +300,29 @@ func TestAPIKeyManagementGet(t *testing.T) {
var apiResp response
require.NoError(t, json.NewDecoder(resp.Body).Decode(&apiResp))
apiKeyInfo, err := planet.Satellites[0].DB.Console().APIKeys().GetByHead(ctx, apikey.Head())
require.NoError(t, err)
project, err := planet.Satellites[0].DB.Console().Projects().Get(ctx, apiKeyInfo.ProjectID)
require.NoError(t, err)
owner, err := planet.Satellites[0].DB.Console().Users().Get(ctx, project.OwnerID)
require.NoError(t, err)
require.Equal(t, response{
APIKey: apiKeyData{
ID: apiKeyInfo.ID,
Name: "testkey",
Name: apiKeyInfo.Name,
CreatedAt: apiKeyInfo.CreatedAt.UTC(),
},
Project: projectData{
ID: project.ID,
Name: "testproject",
Name: project.Name,
},
Owner: ownerData{
ID: user.ID,
FullName: "testuser123",
Email: "test@email.com",
PaidTier: true,
ID: owner.ID,
Email: owner.Email,
PaidTier: owner.PaidTier,
},
}, apiResp)
})

View File

@ -88,10 +88,6 @@ const (
eventExpiredCreditRemoved = "Expired Credit Removed"
eventProjectInvitationAccepted = "Project Invitation Accepted"
eventProjectInvitationDeclined = "Project Invitation Declined"
eventGalleryViewClicked = "Gallery View Clicked"
eventResendInviteClicked = "Resend Invite Clicked"
eventCopyInviteLinkClicked = "Copy Invite Link Clicked"
eventRemoveProjectMemberCLicked = "Remove Member Clicked"
)
var (
@ -160,8 +156,7 @@ func NewService(log *zap.Logger, config Config, satelliteName string) *Service {
eventSeePaymentsClicked, eventEditPaymentMethodClicked, eventUsageDetailedInfoClicked, eventAddNewPaymentMethodClicked,
eventApplyNewCouponClicked, eventCreditCardRemoved, eventCouponCodeApplied, eventInvoiceDownloaded, eventCreditCardAddedFromBilling,
eventStorjTokenAddedFromBilling, eventAddFundsClicked, eventProjectMembersInviteSent, eventError, eventProjectNameUpdated, eventProjectDescriptionUpdated,
eventProjectStorageLimitUpdated, eventProjectBandwidthLimitUpdated, eventProjectInvitationAccepted, eventProjectInvitationDeclined,
eventGalleryViewClicked, eventResendInviteClicked, eventRemoveProjectMemberCLicked, eventCopyInviteLinkClicked} {
eventProjectStorageLimitUpdated, eventProjectBandwidthLimitUpdated, eventProjectInvitationAccepted, eventProjectInvitationDeclined} {
service.clientEvents[name] = true
}
@ -468,7 +463,7 @@ func (service *Service) TrackAccountVerified(userID uuid.UUID, email string) {
// TrackEvent sends an arbitrary event associated with user ID to Segment.
// It is used for tracking occurrences of client-side events.
func (service *Service) TrackEvent(eventName string, userID uuid.UUID, email string, customProps map[string]string) {
func (service *Service) TrackEvent(eventName string, userID uuid.UUID, email string) {
if !service.config.Enabled {
return
}
@ -482,10 +477,6 @@ func (service *Service) TrackEvent(eventName string, userID uuid.UUID, email str
props := segment.NewProperties()
props.Set("email", email)
for key, value := range customProps {
props.Set(key, value)
}
service.enqueueMessage(segment.Track{
UserId: userID.String(),
Event: service.satelliteName + " " + eventName,

View File

@ -48,7 +48,7 @@ func TestDisqualificationTooManyFailedAudits(t *testing.T) {
satellitePeer = planet.Satellites[0]
nodeID = planet.StorageNodes[0].ID()
report = audit.Report{
Fails: metabase.Pieces{{StorageNode: nodeID}},
Fails: storj.NodeIDList{nodeID},
}
)
satellitePeer.Audit.Worker.Loop.Pause()

View File

@ -11,7 +11,6 @@ import (
"go.uber.org/zap"
"storj.io/common/storj"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/reputation"
)
@ -23,7 +22,6 @@ type reporter struct {
log *zap.Logger
reputations *reputation.Service
overlay *overlay.Service
metabase *metabase.DB
containment Containment
maxRetries int
maxReverifyCount int32
@ -42,10 +40,8 @@ type Reporter interface {
// succeeded, failed, were offline, have pending audits, or failed for unknown
// reasons and their current reputation status.
type Report struct {
Segment *metabase.Segment
Successes storj.NodeIDList
Fails metabase.Pieces
Fails storj.NodeIDList
Offlines storj.NodeIDList
PendingAudits []*ReverificationJob
Unknown storj.NodeIDList
@ -53,12 +49,11 @@ type Report struct {
}
// NewReporter instantiates a reporter.
func NewReporter(log *zap.Logger, reputations *reputation.Service, overlay *overlay.Service, metabase *metabase.DB, containment Containment, maxRetries int, maxReverifyCount int32) Reporter {
func NewReporter(log *zap.Logger, reputations *reputation.Service, overlay *overlay.Service, containment Containment, maxRetries int, maxReverifyCount int32) Reporter {
return &reporter{
log: log,
reputations: reputations,
overlay: overlay,
metabase: metabase,
containment: containment,
maxRetries: maxRetries,
maxReverifyCount: maxReverifyCount,
@ -77,11 +72,7 @@ func (reporter *reporter) RecordAudits(ctx context.Context, req Report) {
offlines := req.Offlines
pendingAudits := req.PendingAudits
logger := reporter.log
if req.Segment != nil {
logger = logger.With(zap.Stringer("stream ID", req.Segment.StreamID), zap.Uint64("position", req.Segment.Position.Encode()))
}
logger.Debug("Reporting audits",
reporter.log.Debug("Reporting audits",
zap.Int("successes", len(successes)),
zap.Int("failures", len(fails)),
zap.Int("unknowns", len(unknowns)),
@ -111,8 +102,8 @@ func (reporter *reporter) RecordAudits(ctx context.Context, req Report) {
successes, err = reporter.recordAuditStatus(ctx, successes, nodesReputation, reputation.AuditSuccess)
reportFailures(tries, "successful", err, successes, nil)
fails, err = reporter.recordFailedAudits(ctx, req.Segment, fails, nodesReputation)
reportFailures(tries, "failed", err, nil, nil)
fails, err = reporter.recordAuditStatus(ctx, fails, nodesReputation, reputation.AuditFailure)
reportFailures(tries, "failed", err, fails, nil)
unknowns, err = reporter.recordAuditStatus(ctx, unknowns, nodesReputation, reputation.AuditUnknown)
reportFailures(tries, "unknown", err, unknowns, nil)
offlines, err = reporter.recordAuditStatus(ctx, offlines, nodesReputation, reputation.AuditOffline)
@ -133,7 +124,7 @@ func (reporter *reporter) recordAuditStatus(ctx context.Context, nodeIDs storj.N
err = reporter.reputations.ApplyAudit(ctx, nodeID, nodesReputation[nodeID], auditOutcome)
if err != nil {
failed = append(failed, nodeID)
errors.Add(Error.New("failed to record audit status %s in overlay for node %s: %w", auditOutcome.String(), nodeID, err))
errors.Add(Error.New("failed to record audit status %s in overlay for node %s: %w", auditOutcome.String(), nodeID.String(), err))
}
}
return failed, errors.Err()
@ -191,50 +182,6 @@ func (reporter *reporter) recordPendingAudits(ctx context.Context, pendingAudits
return nil, nil
}
const maxPiecesToRemoveAtOnce = 6
// recordFailedAudits performs reporting and response to hard-failed audits. Failed audits generally
// mean the piece is gone. Remove the pieces from the relevant pointers so that the segment can be
// repaired if appropriate, and so that we don't continually dock reputation for the same missing
// piece(s).
func (reporter *reporter) recordFailedAudits(ctx context.Context, segment *metabase.Segment, failures []metabase.Piece, nodesReputation map[storj.NodeID]overlay.ReputationStatus) (failedToRecord []metabase.Piece, err error) {
defer mon.Task()(&ctx)(&err)
piecesToRemove := make(metabase.Pieces, 0, len(failures))
var errors errs.Group
for _, f := range failures {
err = reporter.reputations.ApplyAudit(ctx, f.StorageNode, nodesReputation[f.StorageNode], reputation.AuditFailure)
if err != nil {
failedToRecord = append(failedToRecord, f)
errors.Add(Error.New("failed to record audit failure in overlay for node %s: %w", f.StorageNode, err))
}
piecesToRemove = append(piecesToRemove, f)
}
if segment != nil {
// Safety check. If, say, 30 pieces all started having audit failures at the same time, the
// problem is more likely with the audit system itself and not with the pieces.
if len(piecesToRemove) > maxPiecesToRemoveAtOnce {
reporter.log.Error("cowardly refusing to remove large number of pieces for failed audit",
zap.Int("piecesToRemove", len(piecesToRemove)),
zap.Int("threshold", maxPiecesToRemoveAtOnce))
return failedToRecord, errors.Err()
}
pieces, err := segment.Pieces.Remove(piecesToRemove)
if err != nil {
errors.Add(err)
return failedToRecord, errors.Err()
}
errors.Add(reporter.metabase.UpdateSegmentPieces(ctx, metabase.UpdateSegmentPieces{
StreamID: segment.StreamID,
Position: segment.Position,
OldPieces: segment.Pieces,
NewRedundancy: segment.Redundancy,
NewPieces: pieces,
}))
}
return failedToRecord, errors.Err()
}
func (reporter *reporter) ReportReverificationNeeded(ctx context.Context, piece *PieceLocator) (err error) {
defer mon.Task()(&ctx)(&err)
@ -267,26 +214,7 @@ func (reporter *reporter) RecordReverificationResult(ctx context.Context, pendin
report.Successes = append(report.Successes, pendingJob.Locator.NodeID)
keepInQueue = false
case OutcomeFailure:
// We have to look up the segment metainfo and pass it on to RecordAudits so that
// the segment can be modified (removing this piece). We don't persist this
// information through the reverification queue.
segmentInfo, err := reporter.metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
StreamID: pendingJob.Locator.StreamID,
Position: pendingJob.Locator.Position,
})
if err != nil {
reporter.log.Error("could not look up segment after audit reverification",
zap.Stringer("stream ID", pendingJob.Locator.StreamID),
zap.Uint64("position", pendingJob.Locator.Position.Encode()),
zap.Error(err),
)
} else {
report.Segment = &segmentInfo
}
report.Fails = append(report.Fails, metabase.Piece{
StorageNode: pendingJob.Locator.NodeID,
Number: uint16(pendingJob.Locator.PieceNum),
})
report.Fails = append(report.Fails, pendingJob.Locator.NodeID)
keepInQueue = false
case OutcomeTimedOut:
// This will get re-added to the reverification queue, but that is idempotent

View File

@ -11,14 +11,11 @@ import (
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"storj.io/common/memory"
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/overlay"
)
@ -101,7 +98,7 @@ func TestRecordAuditsCorrectOutcome(t *testing.T) {
report := audit.Report{
Successes: []storj.NodeID{goodNode},
Fails: metabase.Pieces{{StorageNode: dqNode}},
Fails: []storj.NodeID{dqNode},
Unknown: []storj.NodeID{suspendedNode},
PendingAudits: []*audit.ReverificationJob{
{
@ -216,7 +213,7 @@ func TestGracefullyExitedNotUpdated(t *testing.T) {
}
report = audit.Report{
Successes: storj.NodeIDList{successNode.ID()},
Fails: metabase.Pieces{{StorageNode: failedNode.ID()}},
Fails: storj.NodeIDList{failedNode.ID()},
Offlines: storj.NodeIDList{offlineNode.ID()},
PendingAudits: []*audit.ReverificationJob{&pending},
Unknown: storj.NodeIDList{unknownNode.ID()},
@ -264,52 +261,3 @@ func TestReportOfflineAudits(t *testing.T) {
require.EqualValues(t, 0, info.UnknownAuditReputationBeta)
})
}
func TestReportingAuditFailureResultsInRemovalOfPiece(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.Combine(
func(log *zap.Logger, index int, config *satellite.Config) {
// disable reputation write cache so changes are immediate
config.Reputation.FlushInterval = 0
},
testplanet.ReconfigureRS(4, 5, 6, 6),
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
ul := planet.Uplinks[0]
testData := testrand.Bytes(1 * memory.MiB)
err := ul.Upload(ctx, satellite, "bucket-for-test", "path/of/testness", testData)
require.NoError(t, err)
segment, _ := getRemoteSegment(ctx, t, satellite, ul.Projects[0].ID, "bucket-for-test")
report := audit.Report{
Segment: &segment,
Fails: metabase.Pieces{
metabase.Piece{
Number: segment.Pieces[0].Number,
StorageNode: segment.Pieces[0].StorageNode,
},
},
}
satellite.Audit.Reporter.RecordAudits(ctx, report)
// piece marked as failed is no longer in the segment
afterSegment, _ := getRemoteSegment(ctx, t, satellite, ul.Projects[0].ID, "bucket-for-test")
require.Len(t, afterSegment.Pieces, len(segment.Pieces)-1)
for i, p := range afterSegment.Pieces {
assert.NotEqual(t, segment.Pieces[0].Number, p.Number, i)
assert.NotEqual(t, segment.Pieces[0].StorageNode, p.StorageNode, i)
}
// segment is still retrievable
gotData, err := ul.Download(ctx, satellite, "bucket-for-test", "path/of/testness")
require.NoError(t, err)
require.Equal(t, testData, gotData)
})
}

View File

@ -130,7 +130,7 @@ func (verifier *Verifier) Verify(ctx context.Context, segment Segment, skip map[
}
var offlineNodes storj.NodeIDList
var failedNodes metabase.Pieces
var failedNodes storj.NodeIDList
var unknownNodes storj.NodeIDList
containedNodes := make(map[int]storj.NodeID)
sharesToAudit := make(map[int]Share)
@ -206,10 +206,7 @@ func (verifier *Verifier) Verify(ctx context.Context, segment Segment, skip map[
case RequestFailure:
if errs2.IsRPC(share.Error, rpcstatus.NotFound) {
// missing share
failedNodes = append(failedNodes, metabase.Piece{
Number: uint16(share.PieceNum),
StorageNode: share.NodeID,
})
failedNodes = append(failedNodes, share.NodeID)
errLogger.Info("Verify: piece not found (audit failed)")
continue
}
@ -261,7 +258,6 @@ func (verifier *Verifier) Verify(ctx context.Context, segment Segment, skip map[
mon.Counter("could_not_verify_audit_shares").Inc(1) //mon:locked
verifier.log.Error("could not verify shares", zap.String("Segment", segmentInfoString(segment)), zap.Error(err))
return Report{
Segment: &segmentInfo,
Fails: failedNodes,
Offlines: offlineNodes,
Unknown: unknownNodes,
@ -272,10 +268,7 @@ func (verifier *Verifier) Verify(ctx context.Context, segment Segment, skip map[
verifier.log.Info("Verify: share data altered (audit failed)",
zap.Stringer("Node ID", shares[pieceNum].NodeID),
zap.String("Segment", segmentInfoString(segment)))
failedNodes = append(failedNodes, metabase.Piece{
StorageNode: shares[pieceNum].NodeID,
Number: uint16(pieceNum),
})
failedNodes = append(failedNodes, shares[pieceNum].NodeID)
}
successNodes := getSuccessNodes(ctx, shares, failedNodes, offlineNodes, unknownNodes, containedNodes)
@ -283,7 +276,6 @@ func (verifier *Verifier) Verify(ctx context.Context, segment Segment, skip map[
pendingAudits, err := createPendingAudits(ctx, containedNodes, segment)
if err != nil {
return Report{
Segment: &segmentInfo,
Successes: successNodes,
Fails: failedNodes,
Offlines: offlineNodes,
@ -292,7 +284,6 @@ func (verifier *Verifier) Verify(ctx context.Context, segment Segment, skip map[
}
return Report{
Segment: &segmentInfo,
Successes: successNodes,
Fails: failedNodes,
Offlines: offlineNodes,
@ -551,11 +542,11 @@ func getOfflineNodes(segment metabase.Segment, limits []*pb.AddressedOrderLimit,
}
// getSuccessNodes uses the failed nodes, offline nodes and contained nodes arrays to determine which nodes passed the audit.
func getSuccessNodes(ctx context.Context, shares map[int]Share, failedNodes metabase.Pieces, offlineNodes, unknownNodes storj.NodeIDList, containedNodes map[int]storj.NodeID) (successNodes storj.NodeIDList) {
func getSuccessNodes(ctx context.Context, shares map[int]Share, failedNodes, offlineNodes, unknownNodes storj.NodeIDList, containedNodes map[int]storj.NodeID) (successNodes storj.NodeIDList) {
defer mon.Task()(&ctx)(nil)
fails := make(map[storj.NodeID]bool)
for _, fail := range failedNodes {
fails[fail.StorageNode] = true
fails[fail] = true
}
for _, offline := range offlineNodes {
fails[offline] = true

View File

@ -968,15 +968,7 @@ func TestVerifierModifiedSegmentFailsOnce(t *testing.T) {
assert.Len(t, report.Successes, origNumPieces-1)
require.Len(t, report.Fails, 1)
assert.Equal(t, metabase.Piece{
StorageNode: piece.StorageNode,
Number: piece.Number,
}, report.Fails[0])
require.NotNil(t, report.Segment)
assert.Equal(t, segment.StreamID, report.Segment.StreamID)
assert.Equal(t, segment.Position, report.Segment.Position)
assert.Equal(t, segment.Redundancy, report.Segment.Redundancy)
assert.Equal(t, segment.Pieces, report.Segment.Pieces)
assert.Equal(t, report.Fails[0], piece.StorageNode)
assert.Len(t, report.Offlines, 0)
require.Len(t, report.PendingAudits, 0)
})
@ -1204,15 +1196,7 @@ func TestAuditRepairedSegmentInExcludedCountries(t *testing.T) {
}, nil)
require.NoError(t, err)
require.Len(t, report.Fails, 1)
require.Equal(t, metabase.Piece{
StorageNode: lastPiece.StorageNode,
Number: lastPiece.Number,
}, report.Fails[0])
require.NotNil(t, report.Segment)
assert.Equal(t, segmentAfterRepair.StreamID, report.Segment.StreamID)
assert.Equal(t, segmentAfterRepair.Position, report.Segment.Position)
assert.Equal(t, segmentAfterRepair.Redundancy, report.Segment.Redundancy)
assert.Equal(t, segmentAfterRepair.Pieces, report.Segment.Pieces)
require.Equal(t, report.Fails[0], lastPiece.StorageNode)
})
}

View File

@ -219,7 +219,6 @@ func NewAuditor(log *zap.Logger, full *identity.FullIdentity,
log.Named("reporter"),
peer.Reputation,
peer.Overlay,
metabaseDB,
containmentDB,
config.Audit.MaxRetriesStatDB,
int32(config.Audit.MaxReverifyCount))

View File

@ -23,9 +23,7 @@ type WebappSessions interface {
// DeleteAllByUserID deletes all webapp sessions by user ID.
DeleteAllByUserID(ctx context.Context, userID uuid.UUID) (int64, error)
// UpdateExpiration updates the expiration time of the session.
UpdateExpiration(ctx context.Context, sessionID uuid.UUID, expiresAt time.Time) error
// DeleteExpired deletes all sessions that have expired before the provided timestamp.
DeleteExpired(ctx context.Context, now time.Time, asOfSystemTimeInterval time.Duration, pageSize int) error
UpdateExpiration(ctx context.Context, sessionID uuid.UUID, expiresAt time.Time) (err error)
}
// WebappSession represents a session on the satellite web app.

View File

@ -250,10 +250,8 @@ func TestSetPermission_Uplink(t *testing.T) {
require.NoError(t, err)
err = upload.Commit()
require.True(t, errors.Is(err, uplink.ErrPermissionDenied))
download, err := project.DownloadObject(ctx, testbucket1, testfilename1, nil)
_, err = project.DownloadObject(ctx, testbucket1, testfilename1, nil)
require.NoError(t, err)
require.NoError(t, download.Close())
// Only one bucket should be visible
buckets := getAllBuckets(ctx, project)
@ -359,9 +357,8 @@ func TestSetPermission_Uplink(t *testing.T) {
objects := getAllObjects(ctx, project, testbucket3)
require.Equal(t, 1, len(objects))
download, err := project.DownloadObject(ctx, testbucket3, testfilename2, nil)
_, err = project.DownloadObject(ctx, testbucket3, testfilename2, nil)
require.NoError(t, err)
require.NoError(t, download.Close())
_, err = project.DeleteBucketWithObjects(ctx, testbucket3)
require.NoError(t, err)

View File

@ -36,10 +36,9 @@ func NewAnalytics(log *zap.Logger, service *console.Service, a *analytics.Servic
}
type eventTriggeredBody struct {
EventName string `json:"eventName"`
Link string `json:"link"`
ErrorEventSource string `json:"errorEventSource"`
Props map[string]string `json:"props"`
EventName string `json:"eventName"`
Link string `json:"link"`
ErrorEventSource string `json:"errorEventSource"`
}
type pageVisitBody struct {
@ -73,7 +72,7 @@ func (a *Analytics) EventTriggered(w http.ResponseWriter, r *http.Request) {
} else if et.Link != "" {
a.analytics.TrackLinkEvent(et.EventName, user.ID, user.Email, et.Link)
} else {
a.analytics.TrackEvent(et.EventName, user.ID, user.Email, et.Props)
a.analytics.TrackEvent(et.EventName, user.ID, user.Email)
}
w.WriteHeader(http.StatusOK)
}

View File

@ -31,6 +31,12 @@ var (
// errNotImplemented is the error value used by handlers of this package to
// response with status Not Implemented.
errNotImplemented = errs.New("not implemented")
// supportedCORSOrigins allows us to support visitors who sign up from the website.
supportedCORSOrigins = map[string]bool{
"https://storj.io": true,
"https://www.storj.io": true,
}
)
// Auth is an api controller that exposes all auth functionality.
@ -204,6 +210,19 @@ func (a *Auth) Register(w http.ResponseWriter, r *http.Request) {
var err error
defer mon.Task()(&ctx)(&err)
origin := r.Header.Get("Origin")
if supportedCORSOrigins[origin] {
// we should send the exact origin back, rather than a wildcard
w.Header().Set("Access-Control-Allow-Origin", origin)
w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS")
w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
}
// OPTIONS is a pre-flight check for cross-origin (CORS) permissions
if r.Method == "OPTIONS" {
return
}
var registerData struct {
FullName string `json:"fullName"`
ShortName string `json:"shortName"`
@ -333,7 +352,7 @@ func (a *Auth) Register(w http.ResponseWriter, r *http.Request) {
FullName: user.FullName,
Email: user.Email,
Type: analytics.Personal,
OriginHeader: r.Header.Get("Origin"),
OriginHeader: origin,
Referrer: referrer,
HubspotUTK: hubspotUTK,
UserAgent: string(user.UserAgent),
@ -446,7 +465,6 @@ func (a *Auth) GetAccount(w http.ResponseWriter, r *http.Request) {
Email string `json:"email"`
Partner string `json:"partner"`
ProjectLimit int `json:"projectLimit"`
ProjectStorageLimit int64 `json:"projectStorageLimit"`
IsProfessional bool `json:"isProfessional"`
Position string `json:"position"`
CompanyName string `json:"companyName"`
@ -472,7 +490,6 @@ func (a *Auth) GetAccount(w http.ResponseWriter, r *http.Request) {
user.Partner = string(consoleUser.UserAgent)
}
user.ProjectLimit = consoleUser.ProjectLimit
user.ProjectStorageLimit = consoleUser.ProjectStorageLimit
user.IsProfessional = consoleUser.IsProfessional
user.CompanyName = consoleUser.CompanyName
user.Position = consoleUser.Position

View File

@ -107,6 +107,103 @@ func TestAuth_Register(t *testing.T) {
})
}
func TestAuth_Register_CORS(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Console.OpenRegistrationEnabled = true
config.Console.RateLimit.Burst = 10
config.Mail.AuthType = "nomail"
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
email := "user@test.com"
fullName := "testuser"
jsonBody := []byte(fmt.Sprintf(`{"email":"%s","fullName":"%s","password":"abc123","shortName":"test"}`, email, fullName))
url := planet.Satellites[0].ConsoleURL() + "/api/v0/auth/register"
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(jsonBody))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/json")
// 1. OPTIONS request
// 1.1 CORS headers should not be set with origin other than storj.io or www.storj.io
req.Header.Set("Origin", "https://someexternalorigin.test")
req.Method = http.MethodOptions
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
require.Equal(t, "", resp.Header.Get("Access-Control-Allow-Origin"))
require.Equal(t, "", resp.Header.Get("Access-Control-Allow-Methods"))
require.Equal(t, "", resp.Header.Get("Access-Control-Allow-Headers"))
require.NoError(t, resp.Body.Close())
// 1.2 CORS headers should be set with a domain of storj.io
req.Header.Set("Origin", "https://storj.io")
resp, err = http.DefaultClient.Do(req)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
require.Equal(t, "https://storj.io", resp.Header.Get("Access-Control-Allow-Origin"))
require.Equal(t, "POST, OPTIONS", resp.Header.Get("Access-Control-Allow-Methods"))
allowedHeaders := strings.Split(resp.Header.Get("Access-Control-Allow-Headers"), ", ")
require.ElementsMatch(t, allowedHeaders, []string{
"Content-Type",
"Content-Length",
"Accept",
"Accept-Encoding",
"X-CSRF-Token",
"Authorization",
})
require.NoError(t, resp.Body.Close())
// 1.3 CORS headers should be set with a domain of www.storj.io
req.Header.Set("Origin", "https://www.storj.io")
resp, err = http.DefaultClient.Do(req)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
require.Equal(t, "https://www.storj.io", resp.Header.Get("Access-Control-Allow-Origin"))
require.Equal(t, "POST, OPTIONS", resp.Header.Get("Access-Control-Allow-Methods"))
allowedHeaders = strings.Split(resp.Header.Get("Access-Control-Allow-Headers"), ", ")
require.ElementsMatch(t, allowedHeaders, []string{
"Content-Type",
"Content-Length",
"Accept",
"Accept-Encoding",
"X-CSRF-Token",
"Authorization",
})
require.NoError(t, resp.Body.Close())
// 2. POST request with origin www.storj.io
req.Method = http.MethodPost
resp, err = http.DefaultClient.Do(req)
require.NoError(t, err)
defer func() {
err = resp.Body.Close()
require.NoError(t, err)
}()
require.Equal(t, http.StatusOK, resp.StatusCode)
require.Equal(t, "https://www.storj.io", resp.Header.Get("Access-Control-Allow-Origin"))
require.Equal(t, "POST, OPTIONS", resp.Header.Get("Access-Control-Allow-Methods"))
allowedHeaders = strings.Split(resp.Header.Get("Access-Control-Allow-Headers"), ", ")
require.ElementsMatch(t, allowedHeaders, []string{
"Content-Type",
"Content-Length",
"Accept",
"Accept-Encoding",
"X-CSRF-Token",
"Authorization",
})
require.Len(t, planet.Satellites, 1)
// this works only because we configured 'nomail' above. Mail send simulator won't click to activation link.
_, users, err := planet.Satellites[0].API.Console.Service.GetUserByEmailWithUnverified(ctx, email)
require.NoError(t, err)
require.Len(t, users, 1)
require.Equal(t, fullName, users[0].FullName)
})
}
func TestDeleteAccount(t *testing.T) {
ctx := testcontext.New(t)
log := testplanet.NewLogger(t)

View File

@ -870,7 +870,7 @@ func TestWrongUser(t *testing.T) {
}`}))
require.Contains(t, body, "not authorized")
// TODO: wrong error code
require.Equal(t, http.StatusUnauthorized, resp.StatusCode)
require.Equal(t, http.StatusInternalServerError, resp.StatusCode)
}
{ // get bucket usages

View File

@ -132,7 +132,6 @@ type Server struct {
listener net.Listener
server http.Server
router *mux.Router
cookieAuth *consolewebauth.CookieAuth
ipRateLimiter *web.RateLimiter
userIDRateLimiter *web.RateLimiter
@ -240,7 +239,6 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, oidc
}
router := mux.NewRouter()
server.router = router
// N.B. This middleware has to be the first one because it has to be called
// the earliest in the HTTP chain.
router.Use(newTraceRequestMiddleware(logger, router))
@ -254,104 +252,95 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, oidc
consoleapi.NewUserManagement(logger, mon, server.service, router, &apiAuth{&server})
}
router.Handle("/api/v0/config", server.withCORS(http.HandlerFunc(server.frontendConfigHandler)))
router.HandleFunc("/api/v0/config", server.frontendConfigHandler)
router.Handle("/api/v0/graphql", server.withCORS(server.withAuth(http.HandlerFunc(server.graphqlHandler))))
router.Handle("/api/v0/graphql", server.withAuth(http.HandlerFunc(server.graphqlHandler)))
router.HandleFunc("/registrationToken/", server.createRegistrationTokenHandler)
router.HandleFunc("/robots.txt", server.seoHandler)
projectsController := consoleapi.NewProjects(logger, service)
projectsRouter := router.PathPrefix("/api/v0/projects").Subrouter()
projectsRouter.Use(server.withCORS)
projectsRouter.Use(server.withAuth)
projectsRouter.Handle("/{id}/salt", http.HandlerFunc(projectsController.GetSalt)).Methods(http.MethodGet, http.MethodOptions)
projectsRouter.Handle("/{id}/invite", http.HandlerFunc(projectsController.InviteUsers)).Methods(http.MethodPost, http.MethodOptions)
projectsRouter.Handle("/{id}/invite-link", http.HandlerFunc(projectsController.GetInviteLink)).Methods(http.MethodGet, http.MethodOptions)
projectsRouter.Handle("/invitations", http.HandlerFunc(projectsController.GetUserInvitations)).Methods(http.MethodGet, http.MethodOptions)
projectsRouter.Handle("/invitations/{id}/respond", http.HandlerFunc(projectsController.RespondToInvitation)).Methods(http.MethodPost, http.MethodOptions)
projectsRouter.Handle("/{id}/salt", server.withAuth(http.HandlerFunc(projectsController.GetSalt))).Methods(http.MethodGet)
projectsRouter.Handle("/{id}/invite", server.withAuth(http.HandlerFunc(projectsController.InviteUsers))).Methods(http.MethodPost)
projectsRouter.Handle("/{id}/invite-link", server.withAuth(http.HandlerFunc(projectsController.GetInviteLink))).Methods(http.MethodGet)
projectsRouter.Handle("/invitations", server.withAuth(http.HandlerFunc(projectsController.GetUserInvitations))).Methods(http.MethodGet)
projectsRouter.Handle("/invitations/{id}/respond", server.withAuth(http.HandlerFunc(projectsController.RespondToInvitation))).Methods(http.MethodPost)
usageLimitsController := consoleapi.NewUsageLimits(logger, service)
projectsRouter.Handle("/{id}/usage-limits", http.HandlerFunc(usageLimitsController.ProjectUsageLimits)).Methods(http.MethodGet, http.MethodOptions)
projectsRouter.Handle("/usage-limits", http.HandlerFunc(usageLimitsController.TotalUsageLimits)).Methods(http.MethodGet, http.MethodOptions)
projectsRouter.Handle("/{id}/daily-usage", http.HandlerFunc(usageLimitsController.DailyUsage)).Methods(http.MethodGet, http.MethodOptions)
projectsRouter.Handle("/{id}/usage-limits", server.withAuth(http.HandlerFunc(usageLimitsController.ProjectUsageLimits))).Methods(http.MethodGet)
projectsRouter.Handle("/usage-limits", server.withAuth(http.HandlerFunc(usageLimitsController.TotalUsageLimits))).Methods(http.MethodGet)
projectsRouter.Handle("/{id}/daily-usage", server.withAuth(http.HandlerFunc(usageLimitsController.DailyUsage))).Methods(http.MethodGet)
authController := consoleapi.NewAuth(logger, service, accountFreezeService, mailService, server.cookieAuth, server.analytics, config.SatelliteName, server.config.ExternalAddress, config.LetUsKnowURL, config.TermsAndConditionsURL, config.ContactInfoURL, config.GeneralRequestURL)
authRouter := router.PathPrefix("/api/v0/auth").Subrouter()
authRouter.Use(server.withCORS)
authRouter.Handle("/account", server.withAuth(http.HandlerFunc(authController.GetAccount))).Methods(http.MethodGet, http.MethodOptions)
authRouter.Handle("/account", server.withAuth(http.HandlerFunc(authController.UpdateAccount))).Methods(http.MethodPatch, http.MethodOptions)
authRouter.Handle("/account/change-email", server.withAuth(http.HandlerFunc(authController.ChangeEmail))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/account/change-password", server.withAuth(server.userIDRateLimiter.Limit(http.HandlerFunc(authController.ChangePassword)))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/account/freezestatus", server.withAuth(http.HandlerFunc(authController.GetFreezeStatus))).Methods(http.MethodGet, http.MethodOptions)
authRouter.Handle("/account/settings", server.withAuth(http.HandlerFunc(authController.GetUserSettings))).Methods(http.MethodGet, http.MethodOptions)
authRouter.Handle("/account/settings", server.withAuth(http.HandlerFunc(authController.SetUserSettings))).Methods(http.MethodPatch, http.MethodOptions)
authRouter.Handle("/account/onboarding", server.withAuth(http.HandlerFunc(authController.SetOnboardingStatus))).Methods(http.MethodPatch, http.MethodOptions)
authRouter.Handle("/account/delete", server.withAuth(http.HandlerFunc(authController.DeleteAccount))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/mfa/enable", server.withAuth(http.HandlerFunc(authController.EnableUserMFA))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/mfa/disable", server.withAuth(http.HandlerFunc(authController.DisableUserMFA))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/mfa/generate-secret-key", server.withAuth(http.HandlerFunc(authController.GenerateMFASecretKey))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/mfa/generate-recovery-codes", server.withAuth(http.HandlerFunc(authController.GenerateMFARecoveryCodes))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/logout", server.withAuth(http.HandlerFunc(authController.Logout))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/token", server.ipRateLimiter.Limit(http.HandlerFunc(authController.Token))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/token-by-api-key", server.ipRateLimiter.Limit(http.HandlerFunc(authController.TokenByAPIKey))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/account", server.withAuth(http.HandlerFunc(authController.GetAccount))).Methods(http.MethodGet)
authRouter.Handle("/account", server.withAuth(http.HandlerFunc(authController.UpdateAccount))).Methods(http.MethodPatch)
authRouter.Handle("/account/change-email", server.withAuth(http.HandlerFunc(authController.ChangeEmail))).Methods(http.MethodPost)
authRouter.Handle("/account/change-password", server.withAuth(server.userIDRateLimiter.Limit(http.HandlerFunc(authController.ChangePassword)))).Methods(http.MethodPost)
authRouter.Handle("/account/freezestatus", server.withAuth(http.HandlerFunc(authController.GetFreezeStatus))).Methods(http.MethodGet)
authRouter.Handle("/account/settings", server.withAuth(http.HandlerFunc(authController.GetUserSettings))).Methods(http.MethodGet)
authRouter.Handle("/account/settings", server.withAuth(http.HandlerFunc(authController.SetUserSettings))).Methods(http.MethodPatch)
authRouter.Handle("/account/onboarding", server.withAuth(http.HandlerFunc(authController.SetOnboardingStatus))).Methods(http.MethodPatch)
authRouter.Handle("/account/delete", server.withAuth(http.HandlerFunc(authController.DeleteAccount))).Methods(http.MethodPost)
authRouter.Handle("/mfa/enable", server.withAuth(http.HandlerFunc(authController.EnableUserMFA))).Methods(http.MethodPost)
authRouter.Handle("/mfa/disable", server.withAuth(http.HandlerFunc(authController.DisableUserMFA))).Methods(http.MethodPost)
authRouter.Handle("/mfa/generate-secret-key", server.withAuth(http.HandlerFunc(authController.GenerateMFASecretKey))).Methods(http.MethodPost)
authRouter.Handle("/mfa/generate-recovery-codes", server.withAuth(http.HandlerFunc(authController.GenerateMFARecoveryCodes))).Methods(http.MethodPost)
authRouter.Handle("/logout", server.withAuth(http.HandlerFunc(authController.Logout))).Methods(http.MethodPost)
authRouter.Handle("/token", server.ipRateLimiter.Limit(http.HandlerFunc(authController.Token))).Methods(http.MethodPost)
authRouter.Handle("/token-by-api-key", server.ipRateLimiter.Limit(http.HandlerFunc(authController.TokenByAPIKey))).Methods(http.MethodPost)
authRouter.Handle("/register", server.ipRateLimiter.Limit(http.HandlerFunc(authController.Register))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/forgot-password", server.ipRateLimiter.Limit(http.HandlerFunc(authController.ForgotPassword))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/resend-email/{email}", server.ipRateLimiter.Limit(http.HandlerFunc(authController.ResendEmail))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/reset-password", server.ipRateLimiter.Limit(http.HandlerFunc(authController.ResetPassword))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/refresh-session", server.withAuth(http.HandlerFunc(authController.RefreshSession))).Methods(http.MethodPost, http.MethodOptions)
authRouter.Handle("/forgot-password", server.ipRateLimiter.Limit(http.HandlerFunc(authController.ForgotPassword))).Methods(http.MethodPost)
authRouter.Handle("/resend-email/{email}", server.ipRateLimiter.Limit(http.HandlerFunc(authController.ResendEmail))).Methods(http.MethodPost)
authRouter.Handle("/reset-password", server.ipRateLimiter.Limit(http.HandlerFunc(authController.ResetPassword))).Methods(http.MethodPost)
authRouter.Handle("/refresh-session", server.withAuth(http.HandlerFunc(authController.RefreshSession))).Methods(http.MethodPost)
if config.ABTesting.Enabled {
abController := consoleapi.NewABTesting(logger, abTesting)
abRouter := router.PathPrefix("/api/v0/ab").Subrouter()
abRouter.Use(server.withCORS)
abRouter.Use(server.withAuth)
abRouter.Handle("/values", http.HandlerFunc(abController.GetABValues)).Methods(http.MethodGet, http.MethodOptions)
abRouter.Handle("/hit/{action}", http.HandlerFunc(abController.SendHit)).Methods(http.MethodPost, http.MethodOptions)
abRouter.Handle("/values", server.withAuth(http.HandlerFunc(abController.GetABValues))).Methods(http.MethodGet)
abRouter.Handle("/hit/{action}", server.withAuth(http.HandlerFunc(abController.SendHit))).Methods(http.MethodPost)
}
paymentController := consoleapi.NewPayments(logger, service, accountFreezeService, packagePlans)
paymentsRouter := router.PathPrefix("/api/v0/payments").Subrouter()
paymentsRouter.Use(server.withCORS)
paymentsRouter.Use(server.withAuth)
paymentsRouter.Handle("/cards", server.userIDRateLimiter.Limit(http.HandlerFunc(paymentController.AddCreditCard))).Methods(http.MethodPost, http.MethodOptions)
paymentsRouter.HandleFunc("/cards", paymentController.MakeCreditCardDefault).Methods(http.MethodPatch, http.MethodOptions)
paymentsRouter.HandleFunc("/cards", paymentController.ListCreditCards).Methods(http.MethodGet, http.MethodOptions)
paymentsRouter.HandleFunc("/cards/{cardId}", paymentController.RemoveCreditCard).Methods(http.MethodDelete, http.MethodOptions)
paymentsRouter.HandleFunc("/account/charges", paymentController.ProjectsCharges).Methods(http.MethodGet, http.MethodOptions)
paymentsRouter.HandleFunc("/account/balance", paymentController.AccountBalance).Methods(http.MethodGet, http.MethodOptions)
paymentsRouter.HandleFunc("/account", paymentController.SetupAccount).Methods(http.MethodPost, http.MethodOptions)
paymentsRouter.HandleFunc("/wallet", paymentController.GetWallet).Methods(http.MethodGet, http.MethodOptions)
paymentsRouter.HandleFunc("/wallet", paymentController.ClaimWallet).Methods(http.MethodPost, http.MethodOptions)
paymentsRouter.HandleFunc("/wallet/payments", paymentController.WalletPayments).Methods(http.MethodGet, http.MethodOptions)
paymentsRouter.HandleFunc("/billing-history", paymentController.BillingHistory).Methods(http.MethodGet, http.MethodOptions)
paymentsRouter.Handle("/coupon/apply", server.userIDRateLimiter.Limit(http.HandlerFunc(paymentController.ApplyCouponCode))).Methods(http.MethodPatch, http.MethodOptions)
paymentsRouter.HandleFunc("/coupon", paymentController.GetCoupon).Methods(http.MethodGet, http.MethodOptions)
paymentsRouter.HandleFunc("/pricing", paymentController.GetProjectUsagePriceModel).Methods(http.MethodGet, http.MethodOptions)
paymentsRouter.Handle("/cards", server.userIDRateLimiter.Limit(http.HandlerFunc(paymentController.AddCreditCard))).Methods(http.MethodPost)
paymentsRouter.HandleFunc("/cards", paymentController.MakeCreditCardDefault).Methods(http.MethodPatch)
paymentsRouter.HandleFunc("/cards", paymentController.ListCreditCards).Methods(http.MethodGet)
paymentsRouter.HandleFunc("/cards/{cardId}", paymentController.RemoveCreditCard).Methods(http.MethodDelete)
paymentsRouter.HandleFunc("/account/charges", paymentController.ProjectsCharges).Methods(http.MethodGet)
paymentsRouter.HandleFunc("/account/balance", paymentController.AccountBalance).Methods(http.MethodGet)
paymentsRouter.HandleFunc("/account", paymentController.SetupAccount).Methods(http.MethodPost)
paymentsRouter.HandleFunc("/wallet", paymentController.GetWallet).Methods(http.MethodGet)
paymentsRouter.HandleFunc("/wallet", paymentController.ClaimWallet).Methods(http.MethodPost)
paymentsRouter.HandleFunc("/wallet/payments", paymentController.WalletPayments).Methods(http.MethodGet)
paymentsRouter.HandleFunc("/billing-history", paymentController.BillingHistory).Methods(http.MethodGet)
paymentsRouter.Handle("/coupon/apply", server.userIDRateLimiter.Limit(http.HandlerFunc(paymentController.ApplyCouponCode))).Methods(http.MethodPatch)
paymentsRouter.HandleFunc("/coupon", paymentController.GetCoupon).Methods(http.MethodGet)
paymentsRouter.HandleFunc("/pricing", paymentController.GetProjectUsagePriceModel).Methods(http.MethodGet)
if config.PricingPackagesEnabled {
paymentsRouter.HandleFunc("/purchase-package", paymentController.PurchasePackage).Methods(http.MethodPost, http.MethodOptions)
paymentsRouter.HandleFunc("/package-available", paymentController.PackageAvailable).Methods(http.MethodGet, http.MethodOptions)
paymentsRouter.HandleFunc("/purchase-package", paymentController.PurchasePackage).Methods(http.MethodPost)
paymentsRouter.HandleFunc("/package-available", paymentController.PackageAvailable).Methods(http.MethodGet)
}
bucketsController := consoleapi.NewBuckets(logger, service)
bucketsRouter := router.PathPrefix("/api/v0/buckets").Subrouter()
bucketsRouter.Use(server.withCORS)
bucketsRouter.Use(server.withAuth)
bucketsRouter.HandleFunc("/bucket-names", bucketsController.AllBucketNames).Methods(http.MethodGet, http.MethodOptions)
bucketsRouter.HandleFunc("/bucket-names", bucketsController.AllBucketNames).Methods(http.MethodGet)
apiKeysController := consoleapi.NewAPIKeys(logger, service)
apiKeysRouter := router.PathPrefix("/api/v0/api-keys").Subrouter()
apiKeysRouter.Use(server.withCORS)
apiKeysRouter.Use(server.withAuth)
apiKeysRouter.HandleFunc("/delete-by-name", apiKeysController.DeleteByNameAndProjectID).Methods(http.MethodDelete, http.MethodOptions)
apiKeysRouter.HandleFunc("/api-key-names", apiKeysController.GetAllAPIKeyNames).Methods(http.MethodGet, http.MethodOptions)
apiKeysRouter.HandleFunc("/delete-by-name", apiKeysController.DeleteByNameAndProjectID).Methods(http.MethodDelete)
apiKeysRouter.HandleFunc("/api-key-names", apiKeysController.GetAllAPIKeyNames).Methods(http.MethodGet)
analyticsController := consoleapi.NewAnalytics(logger, service, server.analytics)
analyticsRouter := router.PathPrefix("/api/v0/analytics").Subrouter()
analyticsRouter.Use(server.withCORS)
analyticsRouter.Use(server.withAuth)
analyticsRouter.HandleFunc("/event", analyticsController.EventTriggered).Methods(http.MethodPost, http.MethodOptions)
analyticsRouter.HandleFunc("/page", analyticsController.PageEventTriggered).Methods(http.MethodPost, http.MethodOptions)
analyticsRouter.HandleFunc("/event", analyticsController.EventTriggered).Methods(http.MethodPost)
analyticsRouter.HandleFunc("/page", analyticsController.PageEventTriggered).Methods(http.MethodPost)
if server.config.StaticDir != "" {
oidc := oidc.NewEndpoint(
@ -367,7 +356,7 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, oidc
router.Handle("/oauth/v2/clients/{id}", server.withAuth(http.HandlerFunc(oidc.GetClient))).Methods(http.MethodGet)
fs := http.FileServer(http.Dir(server.config.StaticDir))
router.PathPrefix("/static/").Handler(server.withCORS(server.brotliMiddleware(http.StripPrefix("/static", fs))))
router.PathPrefix("/static/").Handler(server.brotliMiddleware(http.StripPrefix("/static", fs)))
router.HandleFunc("/invited", server.handleInvited)
@ -378,9 +367,9 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, oidc
slashRouter.HandleFunc("/cancel-password-recovery", server.cancelPasswordRecoveryHandler)
if server.config.UseVuetifyProject {
router.PathPrefix("/vuetifypoc").Handler(server.withCORS(http.HandlerFunc(server.vuetifyAppHandler)))
router.PathPrefix("/vuetifypoc").Handler(http.HandlerFunc(server.vuetifyAppHandler))
}
router.PathPrefix("/").Handler(server.withCORS(http.HandlerFunc(server.appHandler)))
router.PathPrefix("/").Handler(http.HandlerFunc(server.appHandler))
}
server.server = http.Server{
@ -517,29 +506,6 @@ func (server *Server) vuetifyAppHandler(w http.ResponseWriter, r *http.Request)
http.ServeContent(w, r, path, info.ModTime(), file)
}
// withCORS handles setting CORS-related headers on an http request.
func (server *Server) withCORS(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", strings.Trim(server.config.ExternalAddress, "/"))
w.Header().Set("Access-Control-Allow-Credentials", "true")
w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
w.Header().Set("Access-Control-Expose-Headers", "*, Authorization")
if r.Method == http.MethodOptions {
match := &mux.RouteMatch{}
if server.router.Match(r, match) {
methods, err := match.Route.GetMethods()
if err == nil && len(methods) > 0 {
w.Header().Set("Access-Control-Allow-Methods", strings.Join(methods, ", "))
}
}
return
}
handler.ServeHTTP(w, r)
})
}
// withAuth performs initial authorization before every request.
func (server *Server) withAuth(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@ -776,43 +742,8 @@ func (server *Server) handleInvited(w http.ResponseWriter, r *http.Request) {
return
}
user, _, err := server.service.GetUserByEmailWithUnverified(ctx, invite.Email)
if err != nil && !console.ErrEmailNotFound.Has(err) {
server.log.Error("error getting invitation recipient", zap.Error(err))
server.serveError(w, http.StatusInternalServerError)
return
}
if user != nil {
http.Redirect(w, r, loginLink+"?email="+user.Email, http.StatusTemporaryRedirect)
return
}
params := url.Values{"email": {strings.ToLower(invite.Email)}}
if invite.InviterID != nil {
inviter, err := server.service.GetUser(ctx, *invite.InviterID)
if err != nil {
server.log.Error("error getting invitation sender", zap.Error(err))
server.serveError(w, http.StatusInternalServerError)
return
}
name := inviter.ShortName
if name == "" {
name = inviter.FullName
}
params.Add("inviter", name)
params.Add("inviter_email", inviter.Email)
}
proj, err := server.service.GetProjectNoAuth(ctx, invite.ProjectID)
if err != nil {
server.log.Error("error getting invitation project", zap.Error(err))
server.serveError(w, http.StatusInternalServerError)
return
}
params.Add("project", proj.Name)
http.Redirect(w, r, server.config.ExternalAddress+"signup?"+params.Encode(), http.StatusTemporaryRedirect)
email := strings.ToLower(invite.Email)
http.Redirect(w, r, loginLink+"?email="+email, http.StatusTemporaryRedirect)
}
// graphqlHandler is graphql endpoint http handler function.

View File

@ -85,15 +85,23 @@ func TestInvitedRouting(t *testing.T) {
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
sat := planet.Satellites[0]
service := sat.API.Console.Service
invitedEmail := "invited@mail.test"
owner, err := sat.AddUser(ctx, console.CreateUser{
FullName: "Project Owner",
Email: "owner@mail.test",
user, err := sat.AddUser(ctx, console.CreateUser{
FullName: "Test User",
Email: "u@mail.test",
}, 1)
require.NoError(t, err)
project, err := sat.AddProject(ctx, owner.ID, "Test Project")
user2, err := sat.AddUser(ctx, console.CreateUser{
FullName: "Test User2",
Email: "u2@mail.test",
}, 1)
require.NoError(t, err)
ctx1, err := sat.UserContext(ctx, user.ID)
require.NoError(t, err)
project, err := sat.AddProject(ctx1, user.ID, "Test Project")
require.NoError(t, err)
client := http.Client{}
@ -120,34 +128,24 @@ func TestInvitedRouting(t *testing.T) {
loginURL := baseURL + "login"
invalidURL := loginURL + "?invite_invalid=true"
tokenInvalidProj, err := service.CreateInviteToken(ctx, project.ID, invitedEmail, time.Now())
tokenInvalidProj, err := service.CreateInviteToken(ctx, project.ID, user2.Email, time.Now())
require.NoError(t, err)
token, err := service.CreateInviteToken(ctx, project.PublicID, invitedEmail, time.Now())
token, err := service.CreateInviteToken(ctx, project.PublicID, user2.Email, time.Now())
require.NoError(t, err)
checkInvitedRedirect("Invited - Invalid projectID", invalidURL, tokenInvalidProj)
checkInvitedRedirect("Invited - User not invited", invalidURL, token)
ownerCtx, err := sat.UserContext(ctx, owner.ID)
require.NoError(t, err)
_, err = service.InviteProjectMembers(ownerCtx, project.ID, []string{invitedEmail})
_, err = service.InviteProjectMembers(ctx1, project.ID, []string{user2.Email})
require.NoError(t, err)
// Valid invite for nonexistent user should redirect to registration page with
// query parameters containing invitation information.
params := "email=invited%40mail.test&inviter=Project+Owner&inviter_email=owner%40mail.test&project=Test+Project"
checkInvitedRedirect("Invited - Nonexistent user", baseURL+"signup?"+params, token)
invitedUser, err := sat.AddUser(ctx, console.CreateUser{
FullName: "Invited User",
Email: invitedEmail,
}, 1)
token, err = service.CreateInviteToken(ctx, project.PublicID, user2.Email, time.Now())
require.NoError(t, err)
// valid invite should redirect to login page with email.
checkInvitedRedirect("Invited - User invited", loginURL+"?email="+invitedUser.Email, token)
checkInvitedRedirect("Invited - User invited", loginURL+"?email="+user2.Email, token)
})
}

View File

@ -24,7 +24,8 @@ type Config struct {
AsOfSystemTimeInterval time.Duration `help:"interval for 'AS OF SYSTEM TIME' clause (CockroachDB specific) to read from the DB at a specific time in the past" default:"-5m" testDefault:"0"`
PageSize int `help:"maximum number of database records to scan at once" default:"1000"`
MaxUnverifiedUserAge time.Duration `help:"maximum lifetime of unverified user account records" default:"168h"`
MaxUnverifiedUserAge time.Duration `help:"maximum lifetime of unverified user account records" default:"168h"`
MaxProjectInvitationAge time.Duration `help:"maximum lifetime of project member invitation records" default:"168h"`
}
// Chore periodically removes unwanted records from the satellite console database.
@ -55,9 +56,10 @@ func (chore *Chore) Run(ctx context.Context) (err error) {
chore.log.Error("Error deleting unverified users", zap.Error(err))
}
err = chore.db.WebappSessions().DeleteExpired(ctx, time.Now(), chore.config.AsOfSystemTimeInterval, chore.config.PageSize)
before = time.Now().Add(-chore.config.MaxProjectInvitationAge)
err = chore.db.ProjectInvitations().DeleteBefore(ctx, before, chore.config.AsOfSystemTimeInterval, chore.config.PageSize)
if err != nil {
chore.log.Error("Error deleting expired webapp sessions", zap.Error(err))
chore.log.Error("Error deleting project member invitations", zap.Error(err))
}
return nil

View File

@ -14,16 +14,20 @@ import (
//
// architecture: Database
type ProjectInvitations interface {
// Upsert updates a project member invitation if it exists and inserts it otherwise.
Upsert(ctx context.Context, invite *ProjectInvitation) (*ProjectInvitation, error)
// Insert inserts a project member invitation into the database.
Insert(ctx context.Context, invite *ProjectInvitation) (*ProjectInvitation, error)
// Get returns a project member invitation from the database.
Get(ctx context.Context, projectID uuid.UUID, email string) (*ProjectInvitation, error)
// GetByProjectID returns all of the project member invitations for the project specified by the given ID.
GetByProjectID(ctx context.Context, projectID uuid.UUID) ([]ProjectInvitation, error)
// GetByEmail returns all of the project member invitations for the specified email address.
GetByEmail(ctx context.Context, email string) ([]ProjectInvitation, error)
// Update updates the project member invitation specified by the given project ID and email address.
Update(ctx context.Context, projectID uuid.UUID, email string, request UpdateProjectInvitationRequest) (*ProjectInvitation, error)
// Delete removes a project member invitation from the database.
Delete(ctx context.Context, projectID uuid.UUID, email string) error
// DeleteBefore deletes project member invitations created prior to some time from the database.
DeleteBefore(ctx context.Context, before time.Time, asOfSystemTimeInterval time.Duration, pageSize int) error
}
// ProjectInvitation represents a pending project member invitation.
@ -33,3 +37,9 @@ type ProjectInvitation struct {
InviterID *uuid.UUID
CreatedAt time.Time
}
// UpdateProjectInvitationRequest contains all fields which may be updated by ProjectInvitations.Update.
type UpdateProjectInvitationRequest struct {
	// CreatedAt, if non-nil, replaces the invitation's creation timestamp.
	CreatedAt *time.Time
	// InviterID, if non-nil, replaces the ID of the user who sent the invitation.
	InviterID *uuid.UUID
}

View File

@ -1813,11 +1813,12 @@ func (s *Service) UpdateProject(ctx context.Context, projectID uuid.UUID, update
return nil, Error.Wrap(err)
}
_, project, err := s.isProjectOwner(ctx, user.ID, projectID)
isMember, err := s.isProjectMember(ctx, user.ID, projectID)
if err != nil {
return nil, Error.Wrap(err)
}
project := isMember.project
if updatedProject.Name != project.Name {
passesNameCheck, err := s.checkProjectName(ctx, updatedProject, user.ID)
if err != nil || !passesNameCheck {
@ -3544,6 +3545,7 @@ func (s *Service) RespondToProjectInvitation(ctx context.Context, projectID uuid
}
if s.IsProjectInvitationExpired(invite) {
deleteWithLog()
return ErrProjectInviteInvalid.New(projInviteInvalidErrMsg)
}
@ -3578,8 +3580,8 @@ func (s *Service) InviteProjectMembers(ctx context.Context, projectID uuid.UUID,
}
projectID = isMember.project.ID
var users []*User
var newUserEmails []string
// collect user querying errors
users := make([]*User, 0)
for _, email := range emails {
invitedUser, err := s.store.Users().GetByEmail(ctx, email)
if err == nil {
@ -3598,9 +3600,7 @@ func (s *Service) InviteProjectMembers(ctx context.Context, projectID uuid.UUID,
return nil, ErrProjectInviteActive.New(projInviteActiveErrMsg, invitedUser.Email)
}
users = append(users, invitedUser)
} else if errs.Is(err, sql.ErrNoRows) {
newUserEmails = append(newUserEmails, email)
} else {
} else if !errs.Is(err, sql.ErrNoRows) {
return nil, Error.Wrap(err)
}
}
@ -3608,20 +3608,30 @@ func (s *Service) InviteProjectMembers(ctx context.Context, projectID uuid.UUID,
inviteTokens := make(map[string]string)
// add project invites in transaction scope
err = s.store.WithTx(ctx, func(ctx context.Context, tx DBTx) error {
for _, email := range emails {
invite, err := tx.ProjectInvitations().Upsert(ctx, &ProjectInvitation{
for _, invited := range users {
invite, err := tx.ProjectInvitations().Insert(ctx, &ProjectInvitation{
ProjectID: projectID,
Email: email,
Email: invited.Email,
InviterID: &user.ID,
})
if err != nil {
return err
if !dbx.IsConstraintError(err) {
return err
}
now := time.Now()
invite, err = tx.ProjectInvitations().Update(ctx, projectID, invited.Email, UpdateProjectInvitationRequest{
CreatedAt: &now,
InviterID: &user.ID,
})
if err != nil {
return err
}
}
token, err := s.CreateInviteToken(ctx, isMember.project.PublicID, email, invite.CreatedAt)
token, err := s.CreateInviteToken(ctx, isMember.project.PublicID, invited.Email, invite.CreatedAt)
if err != nil {
return err
}
inviteTokens[email] = token
inviteTokens[invited.Email] = token
invites = append(invites, *invite)
}
return nil
@ -3648,18 +3658,6 @@ func (s *Service) InviteProjectMembers(ctx context.Context, projectID uuid.UUID,
},
)
}
for _, email := range newUserEmails {
inviteLink := fmt.Sprintf("%s?invite=%s", baseLink, inviteTokens[email])
s.mailService.SendRenderedAsync(
ctx,
[]post.Address{{Address: email}},
&NewUserProjectInvitationEmail{
InviterEmail: user.Email,
Region: s.satelliteName,
SignUpLink: inviteLink,
},
)
}
return invites, nil
}

View File

@ -11,7 +11,6 @@ import (
"fmt"
"math/rand"
"sort"
"strings"
"testing"
"time"
@ -270,19 +269,6 @@ func TestService(t *testing.T) {
})
require.Error(t, err)
require.Nil(t, updatedProject)
user2, userCtx2 := getOwnerAndCtx(ctx, up2Proj)
_, err = service.AddProjectMembers(userCtx1, up1Proj.ID, []string{user2.Email})
require.NoError(t, err)
// Members should not be able to update project.
_, err = service.UpdateProject(userCtx2, up1Proj.ID, console.ProjectInfo{
Name: updatedName,
})
require.Error(t, err)
require.True(t, console.ErrUnauthorized.Has(err))
// remove user2.
err = service.DeleteProjectMembersAndInvitations(userCtx1, up1Proj.ID, []string{user2.Email})
require.NoError(t, err)
})
t.Run("AddProjectMembers", func(t *testing.T) {
@ -328,7 +314,7 @@ func TestService(t *testing.T) {
require.NoError(t, err)
for _, id := range []uuid.UUID{up1Proj.ID, up2Proj.ID} {
_, err = sat.DB.Console().ProjectInvitations().Upsert(ctx, &console.ProjectInvitation{
_, err = sat.DB.Console().ProjectInvitations().Insert(ctx, &console.ProjectInvitation{
ProjectID: id,
Email: invitedUser.Email,
})
@ -1989,7 +1975,7 @@ func TestProjectInvitations(t *testing.T) {
}
addInvite := func(t *testing.T, ctx context.Context, project *console.Project, email string) *console.ProjectInvitation {
invite, err := sat.DB.Console().ProjectInvitations().Upsert(ctx, &console.ProjectInvitation{
invite, err := sat.DB.Console().ProjectInvitations().Insert(ctx, &console.ProjectInvitation{
ProjectID: project.ID,
Email: email,
InviterID: &project.OwnerID,
@ -1999,18 +1985,11 @@ func TestProjectInvitations(t *testing.T) {
return invite
}
setInviteDate := func(t *testing.T, ctx context.Context, invite *console.ProjectInvitation, createdAt time.Time) {
result, err := sat.DB.Testing().RawDB().ExecContext(ctx,
"UPDATE project_invitations SET created_at = $1 WHERE project_id = $2 AND email = $3",
createdAt, invite.ProjectID, strings.ToUpper(invite.Email),
)
require.NoError(t, err)
count, err := result.RowsAffected()
require.NoError(t, err)
require.EqualValues(t, 1, count)
newInvite, err := sat.DB.Console().ProjectInvitations().Get(ctx, invite.ProjectID, invite.Email)
expireInvite := func(t *testing.T, ctx context.Context, invite *console.ProjectInvitation) {
createdAt := time.Now().Add(-sat.Config.Console.ProjectInvitationExpiration)
newInvite, err := sat.DB.Console().ProjectInvitations().Update(ctx, invite.ProjectID, invite.Email, console.UpdateProjectInvitationRequest{
CreatedAt: &createdAt,
})
require.NoError(t, err)
*invite = *newInvite
}
@ -2031,14 +2010,15 @@ func TestProjectInvitations(t *testing.T) {
require.NoError(t, err)
require.Len(t, invites, 1)
// adding in a non-existent user should work.
// adding in a non-existent user should not fail the invitation.
invites, err = service.InviteProjectMembers(ctx, project.ID, []string{user3.Email, "notauser@mail.com"})
require.NoError(t, err)
require.Len(t, invites, 2)
require.Len(t, invites, 1)
invites, err = service.GetUserProjectInvitations(ctx3)
require.NoError(t, err)
require.Len(t, invites, 1)
user3Invite := invites[0]
// prevent unauthorized users from inviting others (user2 is not a member of the project yet).
_, err = service.InviteProjectMembers(ctx2, project.ID, []string{"other@mail.com"})
@ -2053,12 +2033,10 @@ func TestProjectInvitations(t *testing.T) {
require.Empty(t, invites)
// expire the invitation.
user3Invite, err := sat.DB.Console().ProjectInvitations().Get(ctx, project.ID, user3.Email)
require.NoError(t, err)
require.False(t, service.IsProjectInvitationExpired(user3Invite))
require.False(t, service.IsProjectInvitationExpired(&user3Invite))
oldCreatedAt := user3Invite.CreatedAt
setInviteDate(t, ctx, user3Invite, time.Now().Add(-sat.Config.Console.ProjectInvitationExpiration))
require.True(t, service.IsProjectInvitationExpired(user3Invite))
expireInvite(t, ctx, &user3Invite)
require.True(t, service.IsProjectInvitationExpired(&user3Invite))
// resending an expired invitation should succeed.
invites, err = service.InviteProjectMembers(ctx2, project.ID, []string{user3.Email})
@ -2088,7 +2066,7 @@ func TestProjectInvitations(t *testing.T) {
require.Equal(t, invite.InviterID, invites[0].InviterID)
require.WithinDuration(t, invite.CreatedAt, invites[0].CreatedAt, time.Second)
setInviteDate(t, ctx, &invites[0], time.Now().Add(-sat.Config.Console.ProjectInvitationExpiration))
expireInvite(t, ctx, &invites[0])
invites, err = service.GetUserProjectInvitations(ctx)
require.NoError(t, err)
require.Empty(t, invites)
@ -2177,7 +2155,7 @@ func TestProjectInvitations(t *testing.T) {
require.NotNil(t, inviteFromToken)
require.Equal(t, invite, inviteFromToken)
setInviteDate(t, ctx, invite, time.Now().Add(-sat.Config.Console.ProjectInvitationExpiration))
expireInvite(t, ctx, invite)
invites, err := service.GetUserProjectInvitations(ctx)
require.NoError(t, err)
require.Empty(t, invites)
@ -2200,24 +2178,16 @@ func TestProjectInvitations(t *testing.T) {
proj := addProject(t, ctx)
invite := addInvite(t, ctx, proj, user.Email)
// Expect an error when accepting an expired invitation.
// The invitation should remain in the database.
setInviteDate(t, ctx, invite, time.Now().Add(-sat.Config.Console.ProjectInvitationExpiration))
expireInvite(t, ctx, invite)
err := service.RespondToProjectInvitation(ctx, proj.ID, console.ProjectInvitationAccept)
require.True(t, console.ErrProjectInviteInvalid.Has(err))
_, err = sat.DB.Console().ProjectInvitations().Get(ctx, proj.ID, user.Email)
require.NoError(t, err)
// Expect no error when accepting an active invitation.
// The invitation should be removed from the database, and the user should be added as a member.
setInviteDate(t, ctx, invite, time.Now())
require.NoError(t, err)
addInvite(t, ctx, proj, user.Email)
require.NoError(t, service.RespondToProjectInvitation(ctx, proj.ID, console.ProjectInvitationAccept))
_, err = sat.DB.Console().ProjectInvitations().Get(ctx, proj.ID, user.Email)
require.ErrorIs(t, err, sql.ErrNoRows)
invites, err := service.GetUserProjectInvitations(ctx)
require.NoError(t, err)
require.Empty(t, invites)
memberships, err := sat.DB.Console().ProjectMembers().GetByMemberID(ctx, user.ID)
require.NoError(t, err)
@ -2236,25 +2206,12 @@ func TestProjectInvitations(t *testing.T) {
user, ctx := getUserAndCtx(t)
proj := addProject(t, ctx)
invite := addInvite(t, ctx, proj, user.Email)
// Expect an error when rejecting an expired invitation.
// The invitation should remain in the database.
setInviteDate(t, ctx, invite, time.Now().Add(-sat.Config.Console.ProjectInvitationExpiration))
err := service.RespondToProjectInvitation(ctx, proj.ID, console.ProjectInvitationDecline)
require.True(t, console.ErrProjectInviteInvalid.Has(err))
_, err = sat.DB.Console().ProjectInvitations().Get(ctx, proj.ID, user.Email)
require.NoError(t, err)
// Expect no error when rejecting an active invitation.
// The invitation should be removed from the database.
setInviteDate(t, ctx, invite, time.Now())
require.NoError(t, err)
addInvite(t, ctx, proj, user.Email)
require.NoError(t, service.RespondToProjectInvitation(ctx, proj.ID, console.ProjectInvitationDecline))
_, err = sat.DB.Console().ProjectInvitations().Get(ctx, proj.ID, user.Email)
require.ErrorIs(t, err, sql.ErrNoRows)
invites, err := service.GetUserProjectInvitations(ctx)
require.NoError(t, err)
require.Empty(t, invites)
memberships, err := sat.DB.Console().ProjectMembers().GetByMemberID(ctx, user.ID)
require.NoError(t, err)

View File

@ -1,135 +0,0 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package piecetracker
import (
"context"
"time"
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/common/storj"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/metabase/rangedloop"
"storj.io/storj/satellite/overlay"
)
var (
	// Error is a standard error class for this package.
	Error = errs.Class("piecetracker")
	// mon provides package-level monkit instrumentation.
	mon = monkit.Package()

	// check if Observer and Partial interfaces are satisfied.
	_ rangedloop.Observer = (*Observer)(nil)
	_ rangedloop.Partial  = (*observerFork)(nil)
)
// Observer implements the piecetracker ranged loop observer.
//
// The piecetracker counts the number of pieces currently expected to reside on each node,
// then passes the counts to the overlay with UpdatePieceCounts().
type Observer struct {
	log        *zap.Logger
	config     Config
	overlay    overlay.DB
	metabaseDB *metabase.DB

	// pieceCounts accumulates per-node piece totals keyed by node alias;
	// it is reset in Start and populated by merging forks in Join.
	pieceCounts map[metabase.NodeAlias]int64
}
// NewObserver creates new piecetracker ranged loop observer.
func NewObserver(log *zap.Logger, metabaseDB *metabase.DB, overlay overlay.DB, config Config) *Observer {
	observer := &Observer{
		log:        log,
		overlay:    overlay,
		metabaseDB: metabaseDB,
		config:     config,
	}
	// Start() resets this map anyway, but initialize it so the zero-value
	// observer is immediately usable.
	observer.pieceCounts = make(map[metabase.NodeAlias]int64)
	return observer
}
// Start implements ranged loop observer start method.
func (observer *Observer) Start(ctx context.Context, time time.Time) (err error) {
	defer mon.Task()(&ctx)(&err)

	// Discard counts accumulated by any previous loop iteration.
	observer.pieceCounts = make(map[metabase.NodeAlias]int64)
	return nil
}
// Fork implements ranged loop observer fork method.
func (observer *Observer) Fork(ctx context.Context) (_ rangedloop.Partial, err error) {
	defer mon.Task()(&ctx)(&err)

	// Each range gets its own fork so ranges can be processed without
	// sharing mutable state; forks are merged back in Join.
	fork := newObserverFork()
	return fork, nil
}
// Join merges a piecetracker ranged loop partial into the main observer's
// piece counts map.
func (observer *Observer) Join(ctx context.Context, partial rangedloop.Partial) (err error) {
	defer mon.Task()(&ctx)(&err)

	fork, ok := partial.(*observerFork)
	if !ok {
		return Error.New("expected %T but got %T", fork, partial)
	}

	// Merge piece counts for each node.
	for alias, count := range fork.pieceCounts {
		observer.pieceCounts[alias] += count
	}

	return nil
}
// Finish resolves the accumulated per-alias piece counts to node IDs and
// updates the piece counts in the overlay DB.
func (observer *Observer) Finish(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

	observer.log.Info("piecetracker observer finished")

	nodeAliasMap, err := observer.metabaseDB.LatestNodesAliasMap(ctx)
	if err != nil {
		// BUG FIX: this error was previously ignored, so a failed alias map
		// lookup would proceed with an unusable map and silently drop counts.
		return Error.Wrap(err)
	}

	pieceCounts := make(map[storj.NodeID]int64, len(observer.pieceCounts))
	for nodeAlias, count := range observer.pieceCounts {
		nodeID, ok := nodeAliasMap.Node(nodeAlias)
		if !ok {
			// Unknown alias: log and skip rather than fail the whole update.
			observer.log.Error("unrecognized node alias in piecetracker ranged-loop", zap.Int32("node-alias", int32(nodeAlias)))
			continue
		}
		pieceCounts[nodeID] = count
	}

	err = observer.overlay.UpdatePieceCounts(ctx, pieceCounts)
	if err != nil {
		observer.log.Error("error updating piece counts", zap.Error(err))
		return Error.Wrap(err)
	}

	return nil
}
// observerFork is the per-range partial that accumulates piece counts
// independently until the Observer merges it in Join.
type observerFork struct {
	pieceCounts map[metabase.NodeAlias]int64
}

// newObserverFork creates new piecetracker ranged loop fork.
func newObserverFork() *observerFork {
	fork := &observerFork{}
	fork.pieceCounts = make(map[metabase.NodeAlias]int64)
	return fork
}
// Process iterates over a segment range, updating the partial piece counts
// for each node that holds a piece of a remote segment.
func (fork *observerFork) Process(ctx context.Context, segments []rangedloop.Segment) error {
	for i := range segments {
		segment := &segments[i]
		// Inline segments have no remote pieces to count.
		if segment.Inline() {
			continue
		}

		for _, piece := range segment.AliasPieces {
			fork.pieceCounts[piece.Alias]++
		}
	}

	return nil
}

View File

@ -1,82 +0,0 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package piecetracker_test
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"storj.io/common/memory"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
)
// TestObserverPieceTracker is an integration test: it uploads objects through
// a testplanet uplink, runs the ranged loop, and verifies the piece tracker
// observer stores the expected per-node piece counts in the overlay DB.
func TestObserverPieceTracker(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.PieceTracker.UseRangedLoop = true
				config.RangedLoop.Parallelism = 4
				config.RangedLoop.BatchSize = 4

				// configure RS
				config.Metainfo.RS.Min = 2
				config.Metainfo.RS.Repair = 3
				config.Metainfo.RS.Success = 4
				config.Metainfo.RS.Total = 4
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// ensure that the piece counts are empty
		pieceCounts, err := planet.Satellites[0].Overlay.DB.AllPieceCounts(ctx)
		require.NoError(t, err)
		require.Equal(t, 0, len(pieceCounts))

		// Setup: create 50KiB of data for the uplink to upload
		testdata := testrand.Bytes(50 * memory.KiB)
		testBucket := "testbucket"

		err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], testBucket, "test/path", testdata)
		require.NoError(t, err)

		// Run the ranged loop
		_, err = planet.Satellites[0].RangedLoop.RangedLoop.Service.RunOnce(ctx)
		require.NoError(t, err)

		// Check that the piece counts are correct: with RS Success=4 and 4
		// storage nodes, every node is expected to hold one piece per object.
		pieceCounts, err = planet.Satellites[0].Overlay.DB.AllPieceCounts(ctx)
		require.NoError(t, err)
		require.True(t, len(pieceCounts) > 0)

		for node, count := range pieceCounts {
			require.Equal(t, int64(1), count, "node %s should have 1 piece", node)
		}

		// upload more objects
		numOfObjects := 10
		for i := 0; i < numOfObjects; i++ {
			err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], testBucket, fmt.Sprintf("test/path%d", i), testdata)
			require.NoError(t, err)
		}

		// Run the ranged loop again
		_, err = planet.Satellites[0].RangedLoop.RangedLoop.Service.RunOnce(ctx)
		require.NoError(t, err)

		// Check that the piece counts are correct after the second pass
		// (original object plus numOfObjects new ones).
		pieceCounts, err = planet.Satellites[0].Overlay.DB.AllPieceCounts(ctx)
		require.NoError(t, err)
		require.True(t, len(pieceCounts) > 0)

		for node, count := range pieceCounts {
			require.Equal(t, int64(numOfObjects+1), count, "node %s should have %d pieces", node, numOfObjects+1)
		}
	})
}

View File

@ -1,9 +0,0 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package piecetracker
// Config is the configuration for the piecetracker.
type Config struct {
	// UseRangedLoop toggles whether the piece tracker observer participates in the ranged loop.
	UseRangedLoop bool `help:"whether to enable piece tracker observer with ranged loop" default:"true"`
}

View File

@ -253,7 +253,7 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
)
RETURNING
created_at`,
opts.ProjectID, []byte(opts.NewBucket), opts.NewEncryptedObjectKey, nextAvailableVersion, opts.NewStreamID,
opts.ProjectID, opts.NewBucket, opts.NewEncryptedObjectKey, nextAvailableVersion, opts.NewStreamID,
sourceObject.ExpiresAt, sourceObject.SegmentCount,
encryptionParameters{&sourceObject.Encryption},
copyMetadata, opts.NewEncryptedMetadataKeyNonce, opts.NewEncryptedMetadataKey,

View File

@ -249,8 +249,7 @@ func (db *DB) deleteInactiveObjectsAndSegments(ctx context.Context, objects []Ob
for _, obj := range objects {
batch.Queue(`
WITH check_segments AS (
SELECT 1 FROM segments
WHERE stream_id = $5::BYTEA AND created_at > $6
SELECT 1 FROM segments WHERE stream_id = $5::BYTEA AND created_at > $6
), deleted_objects AS (
DELETE FROM objects
WHERE
@ -259,7 +258,9 @@ func (db *DB) deleteInactiveObjectsAndSegments(ctx context.Context, objects []Ob
RETURNING stream_id
)
DELETE FROM segments
WHERE segments.stream_id IN (SELECT stream_id FROM deleted_objects)
`+db.impl.AsOfSystemInterval(opts.AsOfSystemInterval)+`
WHERE
segments.stream_id IN (SELECT stream_id FROM deleted_objects)
`, obj.ProjectID, []byte(obj.BucketName), []byte(obj.ObjectKey), obj.Version, obj.StreamID, opts.InactiveDeadline)
}

View File

@ -332,9 +332,8 @@ func TestDeleteZombieObjects(t *testing.T) {
// object will be checked if is inactive and will be deleted with segment
metabasetest.DeleteZombieObjects{
Opts: metabase.DeleteZombieObjects{
DeadlineBefore: now.Add(1 * time.Hour),
InactiveDeadline: now.Add(2 * time.Hour),
AsOfSystemInterval: -1 * time.Microsecond,
DeadlineBefore: now.Add(1 * time.Hour),
InactiveDeadline: now.Add(2 * time.Hour),
},
}.Check(ctx, t, db)

View File

@ -62,7 +62,7 @@ func (db *DB) ListObjects(ctx context.Context, opts ListObjects) (result ListObj
var entries []ObjectEntry
err = withRows(db.db.QueryContext(ctx, opts.getSQLQuery(),
opts.ProjectID, []byte(opts.BucketName), opts.startKey(), opts.Cursor.Version,
opts.ProjectID, opts.BucketName, opts.startKey(), opts.Cursor.Version,
opts.stopKey(), opts.Status,
opts.Limit+1, len(opts.Prefix)+1))(func(rows tagsql.Rows) error {
entries, err = scanListObjectsResult(rows, opts)

View File

@ -196,7 +196,7 @@ func (db *DB) ListBucketsStreamIDs(ctx context.Context, opts ListBucketsStreamID
LIMIT $3
`, pgutil.UUIDArray(projectIDs), pgutil.ByteaArray(bucketNamesBytes),
opts.Limit,
opts.CursorBucket.ProjectID, []byte(opts.CursorBucket.BucketName), opts.CursorStreamID,
opts.CursorBucket.ProjectID, opts.CursorBucket.BucketName, opts.CursorStreamID,
))(func(rows tagsql.Rows) error {
for rows.Next() {
var streamID uuid.UUID

View File

@ -27,7 +27,7 @@ func TestZombieDeletion(t *testing.T) {
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.ZombieDeletion.Interval = 500 * time.Millisecond
config.ZombieDeletion.AsOfSystemInterval = -1 * time.Microsecond
config.ZombieDeletion.AsOfSystemInterval = 0
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {

View File

@ -130,7 +130,7 @@ type Config struct {
MaxInlineSegmentSize memory.Size `default:"4KiB" help:"maximum inline segment size"`
// we have such default value because max value for ObjectKey is 1024(1 Kib) but EncryptedObjectKey
// has encryption overhead 16 bytes. So overall size is 1024 + 16 * 16.
MaxEncryptedObjectKeyLength int `default:"2000" help:"maximum encrypted object key length"`
MaxEncryptedObjectKeyLength int `default:"1750" help:"maximum encrypted object key length"`
MaxSegmentSize memory.Size `default:"64MiB" help:"maximum segment size"`
MaxMetadataSize memory.Size `default:"2KiB" help:"maximum segment metadata size"`
MaxCommitInterval time.Duration `default:"48h" testDefault:"1h" help:"maximum time allowed to pass between creating and committing a segment"`

View File

@ -64,12 +64,14 @@ func BenchmarkOverlay(b *testing.B) {
check = append(check, testrand.NodeID())
}
b.Run("KnownReliable", func(b *testing.B) {
onlineWindow := 1000 * time.Hour
b.Run("KnownUnreliableOrOffline", func(b *testing.B) {
criteria := &overlay.NodeCriteria{
OnlineWindow: 1000 * time.Hour,
}
for i := 0; i < b.N; i++ {
online, _, err := overlaydb.KnownReliable(ctx, check, onlineWindow, 0)
badNodes, err := overlaydb.KnownUnreliableOrOffline(ctx, criteria, check)
require.NoError(b, err)
require.Len(b, online, OnlineCount)
require.Len(b, badNodes, OfflineCount)
}
})

View File

@ -17,7 +17,6 @@ import (
"github.com/stretchr/testify/require"
"github.com/zeebo/errs"
"go.uber.org/zap"
"golang.org/x/exp/slices"
"storj.io/common/memory"
"storj.io/common/pb"
@ -114,45 +113,36 @@ func TestMinimumDiskSpace(t *testing.T) {
})
}
func TestOnlineOffline(t *testing.T) {
func TestOffline(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
service := satellite.Overlay.Service
// TODO: handle cleanup
online, offline, err := service.KnownReliable(ctx, []storj.NodeID{
result, err := service.KnownUnreliableOrOffline(ctx, []storj.NodeID{
planet.StorageNodes[0].ID(),
})
require.NoError(t, err)
require.Empty(t, offline)
require.Len(t, online, 1)
require.Empty(t, result)
online, offline, err = service.KnownReliable(ctx, []storj.NodeID{
result, err = service.KnownUnreliableOrOffline(ctx, []storj.NodeID{
planet.StorageNodes[0].ID(),
planet.StorageNodes[1].ID(),
planet.StorageNodes[2].ID(),
})
require.NoError(t, err)
require.Empty(t, offline)
require.Len(t, online, 3)
require.Empty(t, result)
unreliableNodeID := storj.NodeID{1, 2, 3, 4}
online, offline, err = service.KnownReliable(ctx, []storj.NodeID{
result, err = service.KnownUnreliableOrOffline(ctx, []storj.NodeID{
planet.StorageNodes[0].ID(),
unreliableNodeID,
{1, 2, 3, 4}, // note that this succeeds by design
planet.StorageNodes[2].ID(),
})
require.NoError(t, err)
require.Empty(t, offline)
require.Len(t, online, 2)
require.False(t, slices.ContainsFunc(online, func(node overlay.SelectedNode) bool {
return node.ID == unreliableNodeID
}))
require.False(t, slices.ContainsFunc(offline, func(node overlay.SelectedNode) bool {
return node.ID == unreliableNodeID
}))
require.Len(t, result, 1)
require.Equal(t, result[0], storj.NodeID{1, 2, 3, 4})
})
}

View File

@ -11,7 +11,6 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"golang.org/x/exp/maps"
"storj.io/common/pb"
"storj.io/common/storj"
@ -48,6 +47,8 @@ var ErrLowDifficulty = errs.Class("node id difficulty too low")
//
// architecture: Database
type DB interface {
// GetOnlineNodesForGetDelete returns a map of nodes for the supplied nodeIDs
GetOnlineNodesForGetDelete(ctx context.Context, nodeIDs []storj.NodeID, onlineWindow time.Duration, asOf AsOfSystemTimeConfig) (map[storj.NodeID]*SelectedNode, error)
// GetOnlineNodesForAuditRepair returns a map of nodes for the supplied nodeIDs.
// The return value contains necessary information to create orders as well as nodes'
// current reputation status.
@ -61,10 +62,14 @@ type DB interface {
// Get looks up the node by nodeID
Get(ctx context.Context, nodeID storj.NodeID) (*NodeDossier, error)
// KnownOffline filters a set of nodes to offline nodes
KnownOffline(context.Context, *NodeCriteria, storj.NodeIDList) (storj.NodeIDList, error)
// KnownUnreliableOrOffline filters a set of nodes to unhealth or offlines node, independent of new
KnownUnreliableOrOffline(context.Context, *NodeCriteria, storj.NodeIDList) (storj.NodeIDList, error)
// KnownReliableInExcludedCountries filters healthy nodes that are in excluded countries.
KnownReliableInExcludedCountries(context.Context, *NodeCriteria, storj.NodeIDList) (storj.NodeIDList, error)
// KnownReliable filters a set of nodes to reliable (online and qualified) nodes.
KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []SelectedNode, offline []SelectedNode, err error)
KnownReliable(ctx context.Context, onlineWindow time.Duration, nodeIDs storj.NodeIDList) ([]*pb.Node, error)
// Reliable returns all nodes that are reliable
Reliable(context.Context, *NodeCriteria) (storj.NodeIDList, error)
// UpdateReputation updates the DB columns for all reputation fields in ReputationStatus.
@ -391,6 +396,13 @@ func (service *Service) Get(ctx context.Context, nodeID storj.NodeID) (_ *NodeDo
return service.db.Get(ctx, nodeID)
}
// GetOnlineNodesForGetDelete looks up the supplied node IDs and returns the
// matching online nodes as a map keyed by node ID.
func (service *Service) GetOnlineNodesForGetDelete(ctx context.Context, nodeIDs []storj.NodeID) (_ map[storj.NodeID]*SelectedNode, err error) {
	defer mon.Task()(&ctx)(&err)

	nodeCfg := service.config.Node
	return service.db.GetOnlineNodesForGetDelete(ctx, nodeIDs, nodeCfg.OnlineWindow, nodeCfg.AsOfSystemTime)
}
// CachedGetOnlineNodesForGet returns a map of nodes from the download selection cache from the suppliedIDs.
func (service *Service) CachedGetOnlineNodesForGet(ctx context.Context, nodeIDs []storj.NodeID) (_ map[storj.NodeID]*SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)
@ -539,6 +551,24 @@ func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req
return nodes, nil
}
// KnownOffline filters a set of nodes to offline nodes, using the service's
// configured online window as the offline criterion.
func (service *Service) KnownOffline(ctx context.Context, nodeIds storj.NodeIDList) (offlineNodes storj.NodeIDList, err error) {
	defer mon.Task()(&ctx)(&err)

	return service.db.KnownOffline(ctx, &NodeCriteria{OnlineWindow: service.config.Node.OnlineWindow}, nodeIds)
}
// KnownUnreliableOrOffline filters a set of nodes to those considered
// unhealthy or offline, independent of whether they are new.
func (service *Service) KnownUnreliableOrOffline(ctx context.Context, nodeIds storj.NodeIDList) (badNodes storj.NodeIDList, err error) {
	defer mon.Task()(&ctx)(&err)

	criteria := NodeCriteria{OnlineWindow: service.config.Node.OnlineWindow}
	return service.db.KnownUnreliableOrOffline(ctx, &criteria, nodeIds)
}
// InsertOfflineNodeEvents inserts offline events into node events.
func (service *Service) InsertOfflineNodeEvents(ctx context.Context, cooldown time.Duration, cutoff time.Duration, limit int) (count int, err error) {
defer mon.Task()(&ctx)(&err)
@ -584,11 +614,9 @@ func (service *Service) KnownReliableInExcludedCountries(ctx context.Context, no
}
// KnownReliable filters a set of nodes to reliable (online and qualified) nodes.
func (service *Service) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList) (onlineNodes []SelectedNode, offlineNodes []SelectedNode, err error) {
func (service *Service) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList) (nodes []*pb.Node, err error) {
defer mon.Task()(&ctx)(&err)
// TODO add as of system time
return service.db.KnownReliable(ctx, nodeIDs, service.config.Node.OnlineWindow, 0)
return service.db.KnownReliable(ctx, service.config.Node.OnlineWindow, nodeIDs)
}
// Reliable filters a set of nodes that are reliable, independent of new.
@ -763,23 +791,23 @@ func (service *Service) UpdateCheckIn(ctx context.Context, node NodeCheckInInfo,
// GetMissingPieces returns the list of offline nodes and the corresponding pieces.
func (service *Service) GetMissingPieces(ctx context.Context, pieces metabase.Pieces) (missingPieces []uint16, err error) {
defer mon.Task()(&ctx)(&err)
// TODO this method will be removed completely in subsequent change
var nodeIDs storj.NodeIDList
missingPiecesMap := map[storj.NodeID]uint16{}
for _, p := range pieces {
nodeIDs = append(nodeIDs, p.StorageNode)
missingPiecesMap[p.StorageNode] = p.Number
}
onlineNodes, _, err := service.KnownReliable(ctx, nodeIDs)
badNodeIDs, err := service.KnownUnreliableOrOffline(ctx, nodeIDs)
if err != nil {
return nil, Error.New("error getting nodes %s", err)
}
for _, node := range onlineNodes {
delete(missingPiecesMap, node.ID)
for _, p := range pieces {
for _, nodeID := range badNodeIDs {
if nodeID == p.StorageNode {
missingPieces = append(missingPieces, p.Number)
}
}
}
return maps.Values(missingPiecesMap), nil
return missingPieces, nil
}
// GetReliablePiecesInExcludedCountries returns the list of pieces held by nodes located in excluded countries.

View File

@ -388,6 +388,47 @@ func TestNodeInfo(t *testing.T) {
})
}
func TestGetOnlineNodesForGetDelete(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 2, UplinkCount: 0,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// pause chores that might update node data
planet.Satellites[0].RangedLoop.RangedLoop.Service.Loop.Stop()
planet.Satellites[0].Repair.Repairer.Loop.Pause()
for _, node := range planet.StorageNodes {
node.Contact.Chore.Pause(ctx)
}
// should not return anything if nodeIDs aren't in the nodes table
actualNodes, err := planet.Satellites[0].Overlay.Service.GetOnlineNodesForGetDelete(ctx, []storj.NodeID{})
require.NoError(t, err)
require.Equal(t, 0, len(actualNodes))
actualNodes, err = planet.Satellites[0].Overlay.Service.GetOnlineNodesForGetDelete(ctx, []storj.NodeID{testrand.NodeID()})
require.NoError(t, err)
require.Equal(t, 0, len(actualNodes))
expectedNodes := make(map[storj.NodeID]*overlay.SelectedNode, len(planet.StorageNodes))
nodeIDs := make([]storj.NodeID, len(planet.StorageNodes)+1)
for i, node := range planet.StorageNodes {
nodeIDs[i] = node.ID()
dossier, err := planet.Satellites[0].Overlay.Service.Get(ctx, node.ID())
require.NoError(t, err)
expectedNodes[dossier.Id] = &overlay.SelectedNode{
ID: dossier.Id,
Address: dossier.Address,
LastNet: dossier.LastNet,
LastIPPort: dossier.LastIPPort,
}
}
// add a fake node ID to make sure GetOnlineNodesForGetDelete doesn't error and still returns the expected nodes.
nodeIDs[len(planet.StorageNodes)] = testrand.NodeID()
actualNodes, err = planet.Satellites[0].Overlay.Service.GetOnlineNodesForGetDelete(ctx, nodeIDs)
require.NoError(t, err)
require.Equal(t, expectedNodes, actualNodes)
})
}
func TestKnownReliable(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
@ -434,7 +475,7 @@ func TestKnownReliable(t *testing.T) {
require.NoError(t, err)
// Check that only storage nodes #4 and #5 are reliable
online, _, err := service.KnownReliable(ctx, []storj.NodeID{
result, err := service.KnownReliable(ctx, []storj.NodeID{
planet.StorageNodes[0].ID(),
planet.StorageNodes[1].ID(),
planet.StorageNodes[2].ID(),
@ -443,7 +484,7 @@ func TestKnownReliable(t *testing.T) {
planet.StorageNodes[5].ID(),
})
require.NoError(t, err)
require.Len(t, online, 2)
require.Len(t, result, 2)
// Sort the storage nodes for predictable checks
expectedReliable := []storj.NodeURL{
@ -451,11 +492,11 @@ func TestKnownReliable(t *testing.T) {
planet.StorageNodes[5].NodeURL(),
}
sort.Slice(expectedReliable, func(i, j int) bool { return expectedReliable[i].ID.Less(expectedReliable[j].ID) })
sort.Slice(online, func(i, j int) bool { return online[i].ID.Less(online[j].ID) })
sort.Slice(result, func(i, j int) bool { return result[i].Id.Less(result[j].Id) })
// Assert the reliable nodes are the expected ones
for i, node := range online {
assert.Equal(t, expectedReliable[i].ID, node.ID)
for i, node := range result {
assert.Equal(t, expectedReliable[i].ID, node.Id)
assert.Equal(t, expectedReliable[i].Address, node.Address.Address)
}
})

View File

@ -11,7 +11,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
"storj.io/common/pb"
"storj.io/common/storj"
@ -26,10 +25,13 @@ func TestStatDB(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
testDatabase(ctx, t, db.OverlayCache())
})
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
testDatabase(ctx, t, db.OverlayCache())
})
}
func testDatabase(ctx context.Context, t *testing.T, cache overlay.DB) {
{ // Test KnownReliable and Reliable
{ // TestKnownUnreliableOrOffline and TestReliable
for i, tt := range []struct {
nodeID storj.NodeID
unknownAuditSuspended bool
@ -106,24 +108,16 @@ func testDatabase(ctx context.Context, t *testing.T, cache overlay.DB) {
ExcludedCountries: []string{"FR", "BE"},
}
contains := func(nodeID storj.NodeID) func(node overlay.SelectedNode) bool {
return func(node overlay.SelectedNode) bool {
return node.ID == nodeID
}
}
online, offline, err := cache.KnownReliable(ctx, nodeIds, criteria.OnlineWindow, criteria.AsOfSystemInterval)
invalid, err := cache.KnownUnreliableOrOffline(ctx, criteria, nodeIds)
require.NoError(t, err)
// unrealiable nodes shouldn't be in results
require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{2}))) // disqualified
require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{3}))) // unknown audit suspended
require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{5}))) // gracefully exited
require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{6}))) // offline suspended
require.False(t, slices.ContainsFunc(append(online, offline...), contains(storj.NodeID{9}))) // not in db
require.True(t, slices.ContainsFunc(offline, contains(storj.NodeID{4}))) // offline
require.Len(t, append(online, offline...), 4)
require.Contains(t, invalid, storj.NodeID{2}) // disqualified
require.Contains(t, invalid, storj.NodeID{3}) // unknown audit suspended
require.Contains(t, invalid, storj.NodeID{4}) // offline
require.Contains(t, invalid, storj.NodeID{5}) // gracefully exited
require.Contains(t, invalid, storj.NodeID{6}) // offline suspended
require.Contains(t, invalid, storj.NodeID{9}) // not in db
require.Len(t, invalid, 6)
valid, err := cache.Reliable(ctx, criteria)
require.NoError(t, err)
@ -245,5 +239,6 @@ func testDatabase(ctx context.Context, t *testing.T, cache overlay.DB) {
require.NoError(t, err)
_, err = cache.Get(ctx, nodeID)
require.NoError(t, err)
}
}

View File

@ -64,8 +64,6 @@ type Invoices interface {
Pay(id string, params *stripe.InvoicePayParams) (*stripe.Invoice, error)
Del(id string, params *stripe.InvoiceParams) (*stripe.Invoice, error)
Get(id string, params *stripe.InvoiceParams) (*stripe.Invoice, error)
MarkUncollectible(id string, params *stripe.InvoiceMarkUncollectibleParams) (*stripe.Invoice, error)
VoidInvoice(id string, params *stripe.InvoiceVoidParams) (*stripe.Invoice, error)
}
// InvoiceItems Stripe InvoiceItems interface.

View File

@ -860,86 +860,6 @@ func (service *Service) createInvoices(ctx context.Context, customers []Customer
return scheduled, draft, errGrp.Err()
}
// SetInvoiceStatus will set all open invoices within the specified date range to the requested status.
func (service *Service) SetInvoiceStatus(ctx context.Context, startPeriod, endPeriod time.Time, status string, dryRun bool) (err error) {
defer mon.Task()(&ctx)(&err)
switch stripe.InvoiceStatus(strings.ToLower(status)) {
case stripe.InvoiceStatusUncollectible:
err = service.iterateInvoicesInTimeRange(ctx, startPeriod, endPeriod, func(invoiceId string) error {
service.log.Info("updating invoice status to uncollectible", zap.String("invoiceId", invoiceId))
if !dryRun {
_, err := service.stripeClient.Invoices().MarkUncollectible(invoiceId, &stripe.InvoiceMarkUncollectibleParams{})
if err != nil {
return Error.Wrap(err)
}
}
return nil
})
case stripe.InvoiceStatusVoid:
err = service.iterateInvoicesInTimeRange(ctx, startPeriod, endPeriod, func(invoiceId string) error {
service.log.Info("updating invoice status to void", zap.String("invoiceId", invoiceId))
if !dryRun {
_, err = service.stripeClient.Invoices().VoidInvoice(invoiceId, &stripe.InvoiceVoidParams{})
if err != nil {
return Error.Wrap(err)
}
}
return nil
})
case stripe.InvoiceStatusPaid:
err = service.iterateInvoicesInTimeRange(ctx, startPeriod, endPeriod, func(invoiceId string) error {
service.log.Info("updating invoice status to paid", zap.String("invoiceId", invoiceId))
if !dryRun {
payParams := &stripe.InvoicePayParams{
Params: stripe.Params{Context: ctx},
PaidOutOfBand: stripe.Bool(true),
}
_, err = service.stripeClient.Invoices().Pay(invoiceId, payParams)
if err != nil {
return Error.Wrap(err)
}
}
return nil
})
default:
// unknown
service.log.Error("Unknown status provided. Valid options are uncollectible, void, or paid.", zap.String("status", status))
return Error.New("unknown status provided")
}
return err
}
func (service *Service) iterateInvoicesInTimeRange(ctx context.Context, startPeriod, endPeriod time.Time, updateStatus func(string) error) (err error) {
defer mon.Task()(&ctx)(&err)
params := &stripe.InvoiceListParams{
ListParams: stripe.ListParams{
Context: ctx,
Limit: stripe.Int64(100),
},
Status: stripe.String("open"),
CreatedRange: &stripe.RangeQueryParams{
GreaterThanOrEqual: startPeriod.Unix(),
LesserThanOrEqual: endPeriod.Unix(),
},
}
numInvoices := 0
invoicesIterator := service.stripeClient.Invoices().List(params)
for invoicesIterator.Next() {
numInvoices++
stripeInvoice := invoicesIterator.Invoice()
err := updateStatus(stripeInvoice.ID)
if err != nil {
return Error.Wrap(err)
}
}
service.log.Info("found " + strconv.Itoa(numInvoices) + " total invoices")
return Error.Wrap(invoicesIterator.Err())
}
// CreateBalanceInvoiceItems will find users with a stripe balance, create an invoice
// item with the charges due, and zero out the stripe balance.
func (service *Service) CreateBalanceInvoiceItems(ctx context.Context) (err error) {

View File

@ -36,292 +36,6 @@ import (
stripe1 "storj.io/storj/satellite/payments/stripe"
)
func TestService_SetInvoiceStatusUncollectible(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Payments.StripeCoinPayments.ListingLimit = 4
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
payments := satellite.API.Payments
invoiceBalance := currency.AmountFromBaseUnits(800, currency.USDollars)
usdCurrency := string(stripe.CurrencyUSD)
user, err := satellite.AddUser(ctx, console.CreateUser{
FullName: "testuser",
Email: "user@test",
}, 1)
require.NoError(t, err)
customer, err := satellite.DB.StripeCoinPayments().Customers().GetCustomerID(ctx, user.ID)
require.NoError(t, err)
// create invoice item
invItem, err := satellite.API.Payments.StripeClient.InvoiceItems().New(&stripe.InvoiceItemParams{
Params: stripe.Params{Context: ctx},
Amount: stripe.Int64(invoiceBalance.BaseUnits()),
Currency: stripe.String(usdCurrency),
Customer: &customer,
})
require.NoError(t, err)
InvItems := make([]*stripe.InvoiceUpcomingInvoiceItemParams, 0, 1)
InvItems = append(InvItems, &stripe.InvoiceUpcomingInvoiceItemParams{
InvoiceItem: &invItem.ID,
Amount: &invItem.Amount,
Currency: stripe.String(usdCurrency),
})
// create invoice
inv, err := satellite.API.Payments.StripeClient.Invoices().New(&stripe.InvoiceParams{
Params: stripe.Params{Context: ctx},
Customer: &customer,
InvoiceItems: InvItems,
})
require.NoError(t, err)
finalizeParams := &stripe.InvoiceFinalizeParams{Params: stripe.Params{Context: ctx}}
// finalize invoice
inv, err = satellite.API.Payments.StripeClient.Invoices().FinalizeInvoice(inv.ID, finalizeParams)
require.NoError(t, err)
require.Equal(t, stripe.InvoiceStatusOpen, inv.Status)
// run update invoice status to uncollectible
// beginning of last month
startPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, -1, 0)
// end of current month
endPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, 1, -1)
t.Run("update invoice status to uncollectible", func(t *testing.T) {
err = payments.StripeService.SetInvoiceStatus(ctx, startPeriod, endPeriod, "uncollectible", false)
require.NoError(t, err)
iter := satellite.API.Payments.StripeClient.Invoices().List(&stripe.InvoiceListParams{
ListParams: stripe.ListParams{Context: ctx},
})
iter.Next()
require.Equal(t, stripe.InvoiceStatusUncollectible, iter.Invoice().Status)
})
})
}
func TestService_SetInvoiceStatusVoid(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Payments.StripeCoinPayments.ListingLimit = 4
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
payments := satellite.API.Payments
invoiceBalance := currency.AmountFromBaseUnits(800, currency.USDollars)
usdCurrency := string(stripe.CurrencyUSD)
user, err := satellite.AddUser(ctx, console.CreateUser{
FullName: "testuser",
Email: "user@test",
}, 1)
require.NoError(t, err)
customer, err := satellite.DB.StripeCoinPayments().Customers().GetCustomerID(ctx, user.ID)
require.NoError(t, err)
// create invoice item
invItem, err := satellite.API.Payments.StripeClient.InvoiceItems().New(&stripe.InvoiceItemParams{
Params: stripe.Params{Context: ctx},
Amount: stripe.Int64(invoiceBalance.BaseUnits()),
Currency: stripe.String(usdCurrency),
Customer: &customer,
})
require.NoError(t, err)
InvItems := make([]*stripe.InvoiceUpcomingInvoiceItemParams, 0, 1)
InvItems = append(InvItems, &stripe.InvoiceUpcomingInvoiceItemParams{
InvoiceItem: &invItem.ID,
Amount: &invItem.Amount,
Currency: stripe.String(usdCurrency),
})
// create invoice
inv, err := satellite.API.Payments.StripeClient.Invoices().New(&stripe.InvoiceParams{
Params: stripe.Params{Context: ctx},
Customer: &customer,
InvoiceItems: InvItems,
})
require.NoError(t, err)
finalizeParams := &stripe.InvoiceFinalizeParams{Params: stripe.Params{Context: ctx}}
// finalize invoice
inv, err = satellite.API.Payments.StripeClient.Invoices().FinalizeInvoice(inv.ID, finalizeParams)
require.NoError(t, err)
require.Equal(t, stripe.InvoiceStatusOpen, inv.Status)
// run update invoice status to uncollectible
// beginning of last month
startPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, -1, 0)
// end of current month
endPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, 1, -1)
t.Run("update invoice status to void", func(t *testing.T) {
err = payments.StripeService.SetInvoiceStatus(ctx, startPeriod, endPeriod, "void", false)
require.NoError(t, err)
iter := satellite.API.Payments.StripeClient.Invoices().List(&stripe.InvoiceListParams{
ListParams: stripe.ListParams{Context: ctx},
})
iter.Next()
require.Equal(t, stripe.InvoiceStatusVoid, iter.Invoice().Status)
})
})
}
func TestService_SetInvoiceStatusPaid(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Payments.StripeCoinPayments.ListingLimit = 4
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
payments := satellite.API.Payments
invoiceBalance := currency.AmountFromBaseUnits(800, currency.USDollars)
usdCurrency := string(stripe.CurrencyUSD)
user, err := satellite.AddUser(ctx, console.CreateUser{
FullName: "testuser",
Email: "user@test",
}, 1)
require.NoError(t, err)
customer, err := satellite.DB.StripeCoinPayments().Customers().GetCustomerID(ctx, user.ID)
require.NoError(t, err)
// create invoice item
invItem, err := satellite.API.Payments.StripeClient.InvoiceItems().New(&stripe.InvoiceItemParams{
Params: stripe.Params{Context: ctx},
Amount: stripe.Int64(invoiceBalance.BaseUnits()),
Currency: stripe.String(usdCurrency),
Customer: &customer,
})
require.NoError(t, err)
InvItems := make([]*stripe.InvoiceUpcomingInvoiceItemParams, 0, 1)
InvItems = append(InvItems, &stripe.InvoiceUpcomingInvoiceItemParams{
InvoiceItem: &invItem.ID,
Amount: &invItem.Amount,
Currency: stripe.String(usdCurrency),
})
// create invoice
inv, err := satellite.API.Payments.StripeClient.Invoices().New(&stripe.InvoiceParams{
Params: stripe.Params{Context: ctx},
Customer: &customer,
InvoiceItems: InvItems,
})
require.NoError(t, err)
finalizeParams := &stripe.InvoiceFinalizeParams{Params: stripe.Params{Context: ctx}}
// finalize invoice
inv, err = satellite.API.Payments.StripeClient.Invoices().FinalizeInvoice(inv.ID, finalizeParams)
require.NoError(t, err)
require.Equal(t, stripe.InvoiceStatusOpen, inv.Status)
// run update invoice status to uncollectible
// beginning of last month
startPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, -1, 0)
// end of current month
endPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, 1, -1)
t.Run("update invoice status to paid", func(t *testing.T) {
err = payments.StripeService.SetInvoiceStatus(ctx, startPeriod, endPeriod, "paid", false)
require.NoError(t, err)
iter := satellite.API.Payments.StripeClient.Invoices().List(&stripe.InvoiceListParams{
ListParams: stripe.ListParams{Context: ctx},
})
iter.Next()
require.Equal(t, stripe.InvoiceStatusPaid, iter.Invoice().Status)
})
})
}
func TestService_SetInvoiceStatusInvalid(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Payments.StripeCoinPayments.ListingLimit = 4
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
payments := satellite.API.Payments
invoiceBalance := currency.AmountFromBaseUnits(800, currency.USDollars)
usdCurrency := string(stripe.CurrencyUSD)
user, err := satellite.AddUser(ctx, console.CreateUser{
FullName: "testuser",
Email: "user@test",
}, 1)
require.NoError(t, err)
customer, err := satellite.DB.StripeCoinPayments().Customers().GetCustomerID(ctx, user.ID)
require.NoError(t, err)
// create invoice item
invItem, err := satellite.API.Payments.StripeClient.InvoiceItems().New(&stripe.InvoiceItemParams{
Params: stripe.Params{Context: ctx},
Amount: stripe.Int64(invoiceBalance.BaseUnits()),
Currency: stripe.String(usdCurrency),
Customer: &customer,
})
require.NoError(t, err)
InvItems := make([]*stripe.InvoiceUpcomingInvoiceItemParams, 0, 1)
InvItems = append(InvItems, &stripe.InvoiceUpcomingInvoiceItemParams{
InvoiceItem: &invItem.ID,
Amount: &invItem.Amount,
Currency: stripe.String(usdCurrency),
})
// create invoice
inv, err := satellite.API.Payments.StripeClient.Invoices().New(&stripe.InvoiceParams{
Params: stripe.Params{Context: ctx},
Customer: &customer,
InvoiceItems: InvItems,
})
require.NoError(t, err)
finalizeParams := &stripe.InvoiceFinalizeParams{Params: stripe.Params{Context: ctx}}
// finalize invoice
inv, err = satellite.API.Payments.StripeClient.Invoices().FinalizeInvoice(inv.ID, finalizeParams)
require.NoError(t, err)
require.Equal(t, stripe.InvoiceStatusOpen, inv.Status)
// run update invoice status to uncollectible
// beginning of last month
startPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, -1, 0)
// end of current month
endPeriod := time.Date(time.Now().Year(), time.Now().Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, 1, -1)
t.Run("update invoice status to invalid", func(t *testing.T) {
err = payments.StripeService.SetInvoiceStatus(ctx, startPeriod, endPeriod, "not a real status", false)
require.Error(t, err)
})
})
}
func TestService_BalanceInvoiceItems(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,

View File

@ -497,32 +497,6 @@ type mockInvoices struct {
invoiceItems *mockInvoiceItems
}
func (m *mockInvoices) MarkUncollectible(id string, params *stripe.InvoiceMarkUncollectibleParams) (*stripe.Invoice, error) {
for _, invoices := range m.invoices {
for _, invoice := range invoices {
if invoice.ID == id {
invoice.Status = stripe.InvoiceStatusUncollectible
return invoice, nil
}
}
}
return nil, errors.New("invoice not found")
}
func (m *mockInvoices) VoidInvoice(id string, params *stripe.InvoiceVoidParams) (*stripe.Invoice, error) {
for _, invoices := range m.invoices {
for _, invoice := range invoices {
if invoice.ID == id {
invoice.Status = stripe.InvoiceStatusVoid
return invoice, nil
}
}
}
return nil, errors.New("invoice not found")
}
func newMockInvoices(root *mockStripeState, invoiceItems *mockInvoiceItems) *mockInvoices {
return &mockInvoices{
root: root,
@ -665,9 +639,8 @@ func (m *mockInvoices) Pay(id string, params *stripe.InvoicePayParams) (*stripe.
invoice.AmountRemaining = 0
return invoice, nil
}
} else if invoice.AmountRemaining == 0 || (params.PaidOutOfBand != nil && *params.PaidOutOfBand) {
} else if invoice.AmountRemaining == 0 {
invoice.Status = stripe.InvoiceStatusPaid
invoice.AmountRemaining = 0
}
return invoice, nil
}

View File

@ -43,7 +43,6 @@ import (
"storj.io/storj/satellite/console/userinfo"
"storj.io/storj/satellite/contact"
"storj.io/storj/satellite/gc/bloomfilter"
"storj.io/storj/satellite/gc/piecetracker"
"storj.io/storj/satellite/gc/sender"
"storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/mailservice"
@ -216,8 +215,6 @@ type Config struct {
ProjectLimit accounting.ProjectLimitConfig
Analytics analytics.Config
PieceTracker piecetracker.Config
}
func setupMailService(log *zap.Logger, config Config) (*mailservice.Service, error) {

View File

@ -18,7 +18,6 @@ import (
"storj.io/storj/private/lifecycle"
"storj.io/storj/satellite/accounting/nodetally"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/gc/piecetracker"
"storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/metabase/rangedloop"
@ -66,10 +65,6 @@ type RangedLoop struct {
NodeTallyObserver *nodetally.Observer
}
PieceTracker struct {
Observer *piecetracker.Observer
}
RangedLoop struct {
Service *rangedloop.Service
}
@ -129,15 +124,6 @@ func NewRangedLoop(log *zap.Logger, db DB, metabaseDB *metabase.DB, config *Conf
metabaseDB)
}
{ // setup piece tracker observer
peer.PieceTracker.Observer = piecetracker.NewObserver(
log.Named("piecetracker"),
metabaseDB,
peer.DB.OverlayCache(),
config.PieceTracker,
)
}
{ // setup overlay
peer.Overlay.Service, err = overlay.NewService(peer.Log.Named("overlay"), peer.DB.OverlayCache(), peer.DB.NodeEvents(), config.Console.ExternalAddress, config.Console.SatelliteName, config.Overlay)
if err != nil {
@ -181,10 +167,6 @@ func NewRangedLoop(log *zap.Logger, db DB, metabaseDB *metabase.DB, config *Conf
observers = append(observers, peer.Repair.Observer)
}
if config.PieceTracker.UseRangedLoop {
observers = append(observers, peer.PieceTracker.Observer)
}
segments := rangedloop.NewMetabaseRangeSplitter(metabaseDB, config.RangedLoop.AsOfSystemInterval, config.RangedLoop.BatchSize)
peer.RangedLoop.Service = rangedloop.NewService(log.Named("rangedloop"), config.RangedLoop, segments, observers)

View File

@ -15,6 +15,7 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
"storj.io/common/pb"
"storj.io/common/storj"
@ -194,15 +195,65 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
mon.IntVal("repair_segment_size").Observe(int64(segment.EncryptedSize)) //mon:locked
stats.repairSegmentSize.Observe(int64(segment.EncryptedSize))
piecesCheck, err := repairer.classifySegmentPieces(ctx, segment)
if err != nil {
return false, err
}
pieces := segment.Pieces
numRetrievable := len(pieces) - len(piecesCheck.MissingPiecesSet)
numHealthy := len(pieces) - len(piecesCheck.MissingPiecesSet) - piecesCheck.NumUnhealthyRetrievable
allNodeIDs := make([]storj.NodeID, len(pieces))
for i, p := range pieces {
allNodeIDs[i] = p.StorageNode
}
excludeNodeIDs := allNodeIDs
missingPieces, err := repairer.overlay.GetMissingPieces(ctx, pieces)
if err != nil {
return false, overlayQueryError.New("error identifying missing pieces: %w", err)
}
var clumpedPieces metabase.Pieces
var clumpedPiecesSet map[uint16]bool
if repairer.doDeclumping {
// if multiple pieces are on the same last_net, keep only the first one. The rest are
// to be considered retrievable but unhealthy.
lastNets, err := repairer.overlay.GetNodesNetworkInOrder(ctx, allNodeIDs)
if err != nil {
return false, metainfoGetError.Wrap(err)
}
clumpedPieces = repair.FindClumpedPieces(segment.Pieces, lastNets)
clumpedPiecesSet = make(map[uint16]bool)
for _, clumpedPiece := range clumpedPieces {
clumpedPiecesSet[clumpedPiece.Number] = true
}
}
var outOfPlacementPieces metabase.Pieces
var outOfPlacementPiecesSet map[uint16]bool
if repairer.doPlacementCheck && segment.Placement != storj.EveryCountry {
var err error
outOfPlacementNodes, err := repairer.overlay.GetNodesOutOfPlacement(ctx, allNodeIDs, segment.Placement)
if err != nil {
return false, metainfoGetError.Wrap(err)
}
outOfPlacementPiecesSet = make(map[uint16]bool)
for _, piece := range pieces {
if slices.Contains(outOfPlacementNodes, piece.StorageNode) {
outOfPlacementPieces = append(outOfPlacementPieces, piece)
outOfPlacementPiecesSet[piece.Number] = true
}
}
}
numUnhealthyRetrievable := len(clumpedPieces) + len(outOfPlacementPieces)
if len(clumpedPieces) != 0 && len(outOfPlacementPieces) != 0 {
// verify that some of clumped pieces and out of placement pieces are not the same
unhealthyRetrievableSet := map[uint16]bool{}
maps.Copy(unhealthyRetrievableSet, clumpedPiecesSet)
maps.Copy(unhealthyRetrievableSet, outOfPlacementPiecesSet)
numUnhealthyRetrievable = len(unhealthyRetrievableSet)
}
numRetrievable := len(pieces) - len(missingPieces)
numHealthy := len(pieces) - len(missingPieces) - numUnhealthyRetrievable
// irreparable segment
if numRetrievable < int(segment.Redundancy.RequiredShares) {
mon.Counter("repairer_segments_below_min_req").Inc(1) //mon:locked
@ -246,15 +297,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
// repair not needed
if numHealthy-numHealthyInExcludedCountries > int(repairThreshold) {
// remove pieces out of placement without repairing as we are above repair threshold
if len(piecesCheck.OutOfPlacementPiecesSet) > 0 {
var outOfPlacementPieces metabase.Pieces
for _, piece := range pieces {
if _, ok := piecesCheck.OutOfPlacementPiecesSet[piece.Number]; ok {
outOfPlacementPieces = append(outOfPlacementPieces, piece)
}
}
if len(outOfPlacementPieces) > 0 {
newPieces, err := segment.Pieces.Update(nil, outOfPlacementPieces)
if err != nil {
return false, metainfoPutError.Wrap(err)
@ -274,13 +317,13 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
return false, metainfoPutError.Wrap(err)
}
mon.Meter("dropped_out_of_placement_pieces").Mark(len(piecesCheck.OutOfPlacementPiecesSet))
mon.Meter("dropped_out_of_placement_pieces").Mark(len(outOfPlacementPieces))
}
mon.Meter("repair_unnecessary").Mark(1) //mon:locked
stats.repairUnnecessary.Mark(1)
repairer.log.Debug("segment above repair threshold", zap.Int("numHealthy", numHealthy), zap.Int32("repairThreshold", repairThreshold),
zap.Int("numClumped", len(piecesCheck.ClumpedPiecesSet)), zap.Int("numOffPieces", len(piecesCheck.OutOfPlacementPiecesSet)))
zap.Int("numClumped", len(clumpedPieces)), zap.Int("numOffPieces", len(outOfPlacementPieces)))
return true, nil
}
@ -291,7 +334,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
mon.FloatVal("healthy_ratio_before_repair").Observe(healthyRatioBeforeRepair) //mon:locked
stats.healthyRatioBeforeRepair.Observe(healthyRatioBeforeRepair)
lostPiecesSet := piecesCheck.MissingPiecesSet
lostPiecesSet := sliceToSet(missingPieces)
var retrievablePieces metabase.Pieces
unhealthyPieces := make(map[metabase.Piece]struct{})
@ -299,11 +342,12 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
// Populate retrievablePieces with all pieces from the segment except those correlating to indices in lostPieces.
// Populate unhealthyPieces with all pieces in lostPieces, clumpedPieces or outOfPlacementPieces.
for _, piece := range pieces {
excludeNodeIDs = append(excludeNodeIDs, piece.StorageNode)
if lostPiecesSet[piece.Number] {
unhealthyPieces[piece] = struct{}{}
} else {
retrievablePieces = append(retrievablePieces, piece)
if piecesCheck.ClumpedPiecesSet[piece.Number] || piecesCheck.OutOfPlacementPiecesSet[piece.Number] {
if clumpedPiecesSet[piece.Number] || outOfPlacementPiecesSet[piece.Number] {
unhealthyPieces[piece] = struct{}{}
} else {
healthySet[int32(piece.Number)] = struct{}{}
@ -355,7 +399,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
// Request Overlay for n-h new storage nodes
request := overlay.FindStorageNodesRequest{
RequestedCount: requestCount,
ExcludedIDs: piecesCheck.ExcludeNodeIDs,
ExcludedIDs: excludeNodeIDs,
Placement: segment.Placement,
}
newNodes, err := repairer.overlay.FindStorageNodesForUpload(ctx, request)
@ -486,7 +530,6 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
}
report := audit.Report{
Segment: &segment,
NodesReputation: cachedNodesReputation,
}
@ -494,10 +537,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
report.Successes = append(report.Successes, outcome.Piece.StorageNode)
}
for _, outcome := range piecesReport.Failed {
report.Fails = append(report.Fails, metabase.Piece{
StorageNode: outcome.Piece.StorageNode,
Number: outcome.Piece.Number,
})
report.Fails = append(report.Fails, outcome.Piece.StorageNode)
}
for _, outcome := range piecesReport.Offline {
report.Offlines = append(report.Offlines, outcome.Piece.StorageNode)
@ -627,8 +667,8 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
repairer.log.Debug("repaired segment",
zap.Stringer("Stream ID", segment.StreamID),
zap.Uint64("Position", segment.Position.Encode()),
zap.Int("clumped pieces", len(piecesCheck.ClumpedPiecesSet)),
zap.Int("out of placement pieces", len(piecesCheck.OutOfPlacementPiecesSet)),
zap.Int("clumped pieces", len(clumpedPieces)),
zap.Int("out of placement pieces", len(outOfPlacementPieces)),
zap.Int("in excluded countries", numHealthyInExcludedCountries),
zap.Int("removed pieces", len(toRemove)),
zap.Int("repaired pieces", len(repairedPieces)),
@ -637,98 +677,6 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, queueSegment *queue
return true, nil
}
type piecesCheckResult struct {
ExcludeNodeIDs []storj.NodeID
MissingPiecesSet map[uint16]bool
ClumpedPiecesSet map[uint16]bool
OutOfPlacementPiecesSet map[uint16]bool
NumUnhealthyRetrievable int
}
func (repairer *SegmentRepairer) classifySegmentPieces(ctx context.Context, segment metabase.Segment) (result piecesCheckResult, err error) {
defer mon.Task()(&ctx)(&err)
pieces := segment.Pieces
allNodeIDs := make([]storj.NodeID, len(pieces))
nodeIDPieceMap := map[storj.NodeID]uint16{}
result.MissingPiecesSet = map[uint16]bool{}
for i, p := range pieces {
allNodeIDs[i] = p.StorageNode
nodeIDPieceMap[p.StorageNode] = p.Number
result.MissingPiecesSet[p.Number] = true
}
result.ExcludeNodeIDs = allNodeIDs
online, offline, err := repairer.overlay.KnownReliable(ctx, allNodeIDs)
if err != nil {
return piecesCheckResult{}, overlayQueryError.New("error identifying missing pieces: %w", err)
}
// remove online nodes from missing pieces
for _, onlineNode := range online {
pieceNum := nodeIDPieceMap[onlineNode.ID]
delete(result.MissingPiecesSet, pieceNum)
}
if repairer.doDeclumping {
// if multiple pieces are on the same last_net, keep only the first one. The rest are
// to be considered retrievable but unhealthy.
lastNets := make([]string, 0, len(allNodeIDs))
reliablePieces := metabase.Pieces{}
collectLastNets := func(reliable []overlay.SelectedNode) {
for _, node := range reliable {
pieceNum := nodeIDPieceMap[node.ID]
reliablePieces = append(reliablePieces, metabase.Piece{
Number: pieceNum,
StorageNode: node.ID,
})
lastNets = append(lastNets, node.LastNet)
}
}
collectLastNets(online)
collectLastNets(offline)
clumpedPieces := repair.FindClumpedPieces(reliablePieces, lastNets)
result.ClumpedPiecesSet = map[uint16]bool{}
for _, clumpedPiece := range clumpedPieces {
result.ClumpedPiecesSet[clumpedPiece.Number] = true
}
}
if repairer.doPlacementCheck && segment.Placement != storj.EveryCountry {
result.OutOfPlacementPiecesSet = map[uint16]bool{}
checkPlacement := func(reliable []overlay.SelectedNode) {
for _, node := range reliable {
if segment.Placement.AllowedCountry(node.CountryCode) {
continue
}
result.OutOfPlacementPiecesSet[nodeIDPieceMap[node.ID]] = true
}
}
checkPlacement(online)
checkPlacement(offline)
}
result.NumUnhealthyRetrievable = len(result.ClumpedPiecesSet) + len(result.OutOfPlacementPiecesSet)
if len(result.ClumpedPiecesSet) != 0 && len(result.OutOfPlacementPiecesSet) != 0 {
// verify that some of clumped pieces and out of placement pieces are not the same
unhealthyRetrievableSet := map[uint16]bool{}
maps.Copy(unhealthyRetrievableSet, result.ClumpedPiecesSet)
maps.Copy(unhealthyRetrievableSet, result.OutOfPlacementPiecesSet)
result.NumUnhealthyRetrievable = len(unhealthyRetrievableSet)
}
return result, nil
}
// checkIfSegmentAltered checks if oldSegment has been altered since it was selected for audit.
func (repairer *SegmentRepairer) checkIfSegmentAltered(ctx context.Context, oldSegment metabase.Segment) (err error) {
defer mon.Task()(&ctx)(&err)
@ -845,6 +793,15 @@ func (repairer *SegmentRepairer) AdminFetchPieces(ctx context.Context, seg *meta
return pieceInfos, nil
}
// sliceToSet converts the given slice to a set.
func sliceToSet(slice []uint16) map[uint16]bool {
set := make(map[uint16]bool, len(slice))
for _, value := range slice {
set[value] = true
}
return set
}
// commaSeparatedArray concatenates an array into a comma-separated string,
// lazily.
type commaSeparatedArray []string

View File

@ -5,7 +5,6 @@ package repairer_test
import (
"context"
"strconv"
"testing"
"time"
@ -15,7 +14,6 @@ import (
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/storj/location"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
@ -29,15 +27,13 @@ import (
func TestSegmentRepairPlacement(t *testing.T) {
piecesCount := 4
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 12, UplinkCount: 1,
SatelliteCount: 1, StorageNodeCount: 10, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.ReconfigureRS(1, 1, piecesCount, piecesCount),
Satellite: testplanet.ReconfigureRS(1, 2, piecesCount, piecesCount),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
require.NoError(t, planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "testbucket"))
defaultLocation := location.Poland
_, err := planet.Satellites[0].API.Buckets.Service.UpdateBucket(ctx, buckets.Bucket{
ProjectID: planet.Uplinks[0].Projects[0].ID,
Name: "testbucket",
@ -45,85 +41,65 @@ func TestSegmentRepairPlacement(t *testing.T) {
})
require.NoError(t, err)
for _, node := range planet.StorageNodes {
require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), "PL"))
}
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "object", testrand.Bytes(5*memory.KiB))
require.NoError(t, err)
type testCase struct {
piecesOutOfPlacement int
piecesAfterRepair int
// how many from out of placement pieces should be also offline
piecesOutOfPlacementOffline int
}
for i, tc := range []testCase{
for _, tc := range []testCase{
// all pieces/nodes are out of placement, repair download/upload should be triggered
{piecesOutOfPlacement: piecesCount, piecesAfterRepair: piecesCount},
// all pieces/nodes are out of placement, repair download/upload should be triggered, some pieces are offline
{piecesOutOfPlacement: piecesCount, piecesAfterRepair: piecesCount, piecesOutOfPlacementOffline: 1},
{piecesOutOfPlacement: piecesCount, piecesAfterRepair: piecesCount, piecesOutOfPlacementOffline: 2},
// few pieces/nodes are out of placement, repair download/upload should be triggered
{piecesOutOfPlacement: piecesCount - 1, piecesAfterRepair: piecesCount},
{piecesOutOfPlacement: piecesCount - 1, piecesAfterRepair: piecesCount, piecesOutOfPlacementOffline: 1},
{piecesOutOfPlacement: piecesCount - 2, piecesAfterRepair: piecesCount},
// single piece/node is out of placement, NO download/upload repair, we are only removing piece from segment
// as segment is still above repair threshold
{piecesOutOfPlacement: 1, piecesAfterRepair: piecesCount - 1},
{piecesOutOfPlacement: 1, piecesAfterRepair: piecesCount - 1, piecesOutOfPlacementOffline: 1},
{piecesOutOfPlacement: 1, piecesAfterRepair: piecesCount - 1, piecesOutOfPlacementOffline: 1},
} {
t.Run("#"+strconv.Itoa(i), func(t *testing.T) {
for _, node := range planet.StorageNodes {
require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), defaultLocation.String()))
}
for _, node := range planet.StorageNodes {
require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), "PL"))
}
require.NoError(t, planet.Satellites[0].Repairer.Overlay.DownloadSelectionCache.Refresh(ctx))
require.NoError(t, planet.Satellites[0].Repairer.Overlay.DownloadSelectionCache.Refresh(ctx))
expectedData := testrand.Bytes(5 * memory.KiB)
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "object", expectedData)
require.NoError(t, err)
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
require.NoError(t, err)
require.Len(t, segments, 1)
require.Len(t, segments[0].Pieces, piecesCount)
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
require.NoError(t, err)
require.Len(t, segments, 1)
require.Len(t, segments[0].Pieces, piecesCount)
for _, piece := range segments[0].Pieces[:tc.piecesOutOfPlacement] {
require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, piece.StorageNode, "US"))
}
for index, piece := range segments[0].Pieces {
// make node offline if needed
require.NoError(t, updateNodeStatus(ctx, planet.Satellites[0], planet.FindNode(piece.StorageNode), index < tc.piecesOutOfPlacementOffline, defaultLocation))
// confirm that some pieces are out of placement
ok, err := allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement)
require.NoError(t, err)
require.False(t, ok)
if index < tc.piecesOutOfPlacement {
require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, piece.StorageNode, "US"))
}
}
require.NoError(t, planet.Satellites[0].Repairer.Overlay.DownloadSelectionCache.Refresh(ctx))
// confirm that some pieces are out of placement
ok, err := allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement)
require.NoError(t, err)
require.False(t, ok)
require.NoError(t, planet.Satellites[0].Repairer.Overlay.DownloadSelectionCache.Refresh(ctx))
_, err = planet.Satellites[0].Repairer.SegmentRepairer.Repair(ctx, &queue.InjuredSegment{
StreamID: segments[0].StreamID,
Position: segments[0].Position,
})
require.NoError(t, err)
// confirm that all pieces have correct placement
segments, err = planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
require.NoError(t, err)
require.Len(t, segments, 1)
require.NotNil(t, segments[0].RepairedAt)
require.Len(t, segments[0].Pieces, tc.piecesAfterRepair)
ok, err = allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement)
require.NoError(t, err)
require.True(t, ok)
data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "testbucket", "object")
require.NoError(t, err)
require.Equal(t, expectedData, data)
_, err = planet.Satellites[0].Repairer.SegmentRepairer.Repair(ctx, &queue.InjuredSegment{
StreamID: segments[0].StreamID,
Position: segments[0].Position,
})
require.NoError(t, err)
// confirm that all pieces have correct placement
segments, err = planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
require.NoError(t, err)
require.Len(t, segments, 1)
require.NotNil(t, segments[0].RepairedAt)
require.Len(t, segments[0].Pieces, tc.piecesAfterRepair)
ok, err = allPiecesInPlacement(ctx, planet.Satellites[0].Overlay.Service, segments[0].Pieces, segments[0].Placement)
require.NoError(t, err)
require.True(t, ok)
}
})
}
@ -214,52 +190,6 @@ func TestSegmentRepairPlacementAndClumped(t *testing.T) {
})
}
func TestSegmentRepairPlacementNotEnoughNodes(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 8, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.ReconfigureRS(1, 2, 4, 4),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
require.NoError(t, planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "testbucket"))
_, err := planet.Satellites[0].API.Buckets.Service.UpdateBucket(ctx, buckets.Bucket{
ProjectID: planet.Uplinks[0].Projects[0].ID,
Name: "testbucket",
Placement: storj.EU,
})
require.NoError(t, err)
for _, node := range planet.StorageNodes {
require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), "PL"))
}
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "object", testrand.Bytes(5*memory.KiB))
require.NoError(t, err)
// change all nodes location to US
for _, node := range planet.StorageNodes {
require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, node.ID(), "US"))
}
require.NoError(t, planet.Satellites[0].Repairer.Overlay.DownloadSelectionCache.Refresh(ctx))
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
require.NoError(t, err)
require.Len(t, segments, 1)
require.Len(t, segments[0].Pieces, 4)
// we have bucket geofenced to EU but now all nodes are in US, repairing should fail because
// not enough nodes are available but segment shouldn't be deleted from repair queue
shouldDelete, err := planet.Satellites[0].Repairer.SegmentRepairer.Repair(ctx, &queue.InjuredSegment{
StreamID: segments[0].StreamID,
Position: segments[0].Position,
})
require.Error(t, err)
require.False(t, shouldDelete)
})
}
func allPiecesInPlacement(ctx context.Context, overaly *overlay.Service, pieces metabase.Pieces, placement storj.PlacementConstraint) (bool, error) {
for _, piece := range pieces {
nodeDossier, err := overaly.Get(ctx, piece.StorageNode)
@ -272,26 +202,3 @@ func allPiecesInPlacement(ctx context.Context, overaly *overlay.Service, pieces
}
return true, nil
}
func updateNodeStatus(ctx context.Context, satellite *testplanet.Satellite, node *testplanet.StorageNode, offline bool, countryCode location.CountryCode) error {
timestamp := time.Now()
if offline {
timestamp = time.Now().Add(-4 * time.Hour)
}
return satellite.DB.OverlayCache().UpdateCheckIn(ctx, overlay.NodeCheckInInfo{
NodeID: node.ID(),
Address: &pb.NodeAddress{Address: node.Addr()},
IsUp: true,
Version: &pb.NodeVersion{
Version: "v0.0.0",
CommitHash: "",
Timestamp: time.Time{},
Release: true,
},
Capacity: &pb.NodeCapacity{
FreeDisk: 1 * memory.GiB.Int64(),
},
CountryCode: countryCode,
}, timestamp, satellite.Config.Overlay.Node)
}

View File

@ -195,7 +195,6 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity,
log.Named("reporter"),
peer.Reputation,
peer.Overlay,
metabaseDB,
containmentDB,
config.Audit.MaxRetriesStatDB,
int32(config.Audit.MaxReverifyCount))

View File

@ -16,7 +16,6 @@ import (
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/audit"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/reputation"
)
@ -183,7 +182,7 @@ func TestAuditSuspendExceedGracePeriod(t *testing.T) {
// give one node a successful audit, one a failed audit, one an offline audit, and one an unknown audit
report := audit.Report{
Successes: storj.NodeIDList{successNodeID},
Fails: metabase.Pieces{{StorageNode: failNodeID}},
Fails: storj.NodeIDList{failNodeID},
Offlines: storj.NodeIDList{offlineNodeID},
Unknown: storj.NodeIDList{unknownNodeID},
NodesReputation: nodesStatus,
@ -249,7 +248,7 @@ func TestAuditSuspendDQDisabled(t *testing.T) {
// give one node a successful audit, one a failed audit, one an offline audit, and one an unknown audit
report := audit.Report{
Successes: storj.NodeIDList{successNodeID},
Fails: metabase.Pieces{{StorageNode: failNodeID}},
Fails: storj.NodeIDList{failNodeID},
Offlines: storj.NodeIDList{offlineNodeID},
Unknown: storj.NodeIDList{unknownNodeID},
NodesReputation: nodesStatus,

View File

@ -48,7 +48,7 @@ func (db *ConsoleDB) ProjectMembers() console.ProjectMembers {
// ProjectInvitations is a getter for ProjectInvitations repository.
func (db *ConsoleDB) ProjectInvitations() console.ProjectInvitations {
return &projectInvitations{db.methods}
return &projectInvitations{db.db}
}
// APIKeys is a getter for APIKeys repository.
@ -78,7 +78,7 @@ func (db *ConsoleDB) ResetPasswordTokens() console.ResetPasswordTokens {
// WebappSessions is a getter for WebappSessions repository.
func (db *ConsoleDB) WebappSessions() consoleauth.WebappSessions {
return &webappSessions{db.db}
return &webappSessions{db.methods}
}
// AccountFreezeEvents is a getter for AccountFreezeEvents repository.

View File

@ -169,7 +169,7 @@ model project_invitation (
field created_at timestamp ( autoinsert, updatable )
)
create project_invitation ( replace )
create project_invitation ( )
read one (
select project_invitation

View File

@ -12869,7 +12869,7 @@ func (obj *pgxImpl) Create_ProjectMember(ctx context.Context,
}
func (obj *pgxImpl) Replace_ProjectInvitation(ctx context.Context,
func (obj *pgxImpl) Create_ProjectInvitation(ctx context.Context,
project_invitation_project_id ProjectInvitation_ProjectId_Field,
project_invitation_email ProjectInvitation_Email_Field,
optional ProjectInvitation_Create_Fields) (
@ -12882,7 +12882,7 @@ func (obj *pgxImpl) Replace_ProjectInvitation(ctx context.Context,
__inviter_id_val := optional.InviterId.value()
__created_at_val := __now
var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_invitations ( project_id, email, inviter_id, created_at ) VALUES ( ?, ?, ?, ? ) ON CONFLICT ( project_id, email ) DO UPDATE SET project_id = EXCLUDED.project_id, email = EXCLUDED.email, inviter_id = EXCLUDED.inviter_id, created_at = EXCLUDED.created_at RETURNING project_invitations.project_id, project_invitations.email, project_invitations.inviter_id, project_invitations.created_at")
var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_invitations ( project_id, email, inviter_id, created_at ) VALUES ( ?, ?, ?, ? ) RETURNING project_invitations.project_id, project_invitations.email, project_invitations.inviter_id, project_invitations.created_at")
var __values []interface{}
__values = append(__values, __project_id_val, __email_val, __inviter_id_val, __created_at_val)
@ -20876,7 +20876,7 @@ func (obj *pgxcockroachImpl) Create_ProjectMember(ctx context.Context,
}
func (obj *pgxcockroachImpl) Replace_ProjectInvitation(ctx context.Context,
func (obj *pgxcockroachImpl) Create_ProjectInvitation(ctx context.Context,
project_invitation_project_id ProjectInvitation_ProjectId_Field,
project_invitation_email ProjectInvitation_Email_Field,
optional ProjectInvitation_Create_Fields) (
@ -20889,7 +20889,7 @@ func (obj *pgxcockroachImpl) Replace_ProjectInvitation(ctx context.Context,
__inviter_id_val := optional.InviterId.value()
__created_at_val := __now
var __embed_stmt = __sqlbundle_Literal("UPSERT INTO project_invitations ( project_id, email, inviter_id, created_at ) VALUES ( ?, ?, ?, ? ) RETURNING project_invitations.project_id, project_invitations.email, project_invitations.inviter_id, project_invitations.created_at")
var __embed_stmt = __sqlbundle_Literal("INSERT INTO project_invitations ( project_id, email, inviter_id, created_at ) VALUES ( ?, ?, ?, ? ) RETURNING project_invitations.project_id, project_invitations.email, project_invitations.inviter_id, project_invitations.created_at")
var __values []interface{}
__values = append(__values, __project_id_val, __email_val, __inviter_id_val, __created_at_val)
@ -28506,6 +28506,19 @@ func (rx *Rx) Create_Project(ctx context.Context,
}
func (rx *Rx) Create_ProjectInvitation(ctx context.Context,
project_invitation_project_id ProjectInvitation_ProjectId_Field,
project_invitation_email ProjectInvitation_Email_Field,
optional ProjectInvitation_Create_Fields) (
project_invitation *ProjectInvitation, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Create_ProjectInvitation(ctx, project_invitation_project_id, project_invitation_email, optional)
}
func (rx *Rx) Create_ProjectMember(ctx context.Context,
project_member_member_id ProjectMember_MemberId_Field,
project_member_project_id ProjectMember_ProjectId_Field) (
@ -29694,19 +29707,6 @@ func (rx *Rx) Replace_AccountFreezeEvent(ctx context.Context,
}
func (rx *Rx) Replace_ProjectInvitation(ctx context.Context,
project_invitation_project_id ProjectInvitation_ProjectId_Field,
project_invitation_email ProjectInvitation_Email_Field,
optional ProjectInvitation_Create_Fields) (
project_invitation *ProjectInvitation, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Replace_ProjectInvitation(ctx, project_invitation_project_id, project_invitation_email, optional)
}
func (rx *Rx) UpdateNoReturn_AccountingTimestamps_By_Name(ctx context.Context,
accounting_timestamps_name AccountingTimestamps_Name_Field,
update AccountingTimestamps_Update_Fields) (
@ -30273,6 +30273,12 @@ type Methods interface {
optional Project_Create_Fields) (
project *Project, err error)
Create_ProjectInvitation(ctx context.Context,
project_invitation_project_id ProjectInvitation_ProjectId_Field,
project_invitation_email ProjectInvitation_Email_Field,
optional ProjectInvitation_Create_Fields) (
project_invitation *ProjectInvitation, err error)
Create_ProjectMember(ctx context.Context,
project_member_member_id ProjectMember_MemberId_Field,
project_member_project_id ProjectMember_ProjectId_Field) (
@ -30802,12 +30808,6 @@ type Methods interface {
optional AccountFreezeEvent_Create_Fields) (
account_freeze_event *AccountFreezeEvent, err error)
Replace_ProjectInvitation(ctx context.Context,
project_invitation_project_id ProjectInvitation_ProjectId_Field,
project_invitation_email ProjectInvitation_Email_Field,
optional ProjectInvitation_Create_Fields) (
project_invitation *ProjectInvitation, err error)
UpdateNoReturn_AccountingTimestamps_By_Name(ctx context.Context,
accounting_timestamps_name AccountingTimestamps_Name_Field,
update AccountingTimestamps_Update_Fields) (

View File

@ -266,6 +266,62 @@ func (cache *overlaycache) Get(ctx context.Context, id storj.NodeID) (dossier *o
return convertDBNode(ctx, node)
}
// GetOnlineNodesForGetDelete returns a map of nodes for the supplied nodeIDs.
func (cache *overlaycache) GetOnlineNodesForGetDelete(ctx context.Context, nodeIDs []storj.NodeID, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) (nodes map[storj.NodeID]*overlay.SelectedNode, err error) {
for {
nodes, err = cache.getOnlineNodesForGetDelete(ctx, nodeIDs, onlineWindow, asOf)
if err != nil {
if cockroachutil.NeedsRetry(err) {
continue
}
return nodes, err
}
break
}
return nodes, err
}
func (cache *overlaycache) getOnlineNodesForGetDelete(ctx context.Context, nodeIDs []storj.NodeID, onlineWindow time.Duration, asOf overlay.AsOfSystemTimeConfig) (_ map[storj.NodeID]*overlay.SelectedNode, err error) {
defer mon.Task()(&ctx)(&err)
var rows tagsql.Rows
rows, err = cache.db.Query(ctx, cache.db.Rebind(`
SELECT last_net, id, address, last_ip_port, noise_proto, noise_public_key, debounce_limit, features
FROM nodes
`+cache.db.impl.AsOfSystemInterval(asOf.Interval())+`
WHERE id = any($1::bytea[])
AND disqualified IS NULL
AND exit_finished_at IS NULL
AND last_contact_success > $2
`), pgutil.NodeIDArray(nodeIDs), time.Now().Add(-onlineWindow))
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rows.Close()) }()
nodes := make(map[storj.NodeID]*overlay.SelectedNode)
for rows.Next() {
var node overlay.SelectedNode
node.Address = &pb.NodeAddress{}
var lastIPPort sql.NullString
var noise noiseScanner
err = rows.Scan(&node.LastNet, &node.ID, &node.Address.Address, &lastIPPort, &noise.Proto, &noise.PublicKey, &node.Address.DebounceLimit, &node.Address.Features)
if err != nil {
return nil, err
}
if lastIPPort.Valid {
node.LastIPPort = lastIPPort.String
}
node.Address.NoiseInfo = noise.Convert()
nodes[node.ID] = &node
}
return nodes, Error.Wrap(rows.Err())
}
// GetOnlineNodesForAuditRepair returns a map of nodes for the supplied nodeIDs.
func (cache *overlaycache) GetOnlineNodesForAuditRepair(ctx context.Context, nodeIDs []storj.NodeID, onlineWindow time.Duration) (nodes map[storj.NodeID]*overlay.NodeReputation, err error) {
for {
@ -322,6 +378,70 @@ func (cache *overlaycache) getOnlineNodesForAuditRepair(ctx context.Context, nod
return nodes, Error.Wrap(rows.Err())
}
// KnownOffline filters a set of nodes to offline nodes.
func (cache *overlaycache) KnownOffline(ctx context.Context, criteria *overlay.NodeCriteria, nodeIDs storj.NodeIDList) (offlineNodes storj.NodeIDList, err error) {
for {
offlineNodes, err = cache.knownOffline(ctx, criteria, nodeIDs)
if err != nil {
if cockroachutil.NeedsRetry(err) {
continue
}
return offlineNodes, err
}
break
}
return offlineNodes, err
}
func (cache *overlaycache) knownOffline(ctx context.Context, criteria *overlay.NodeCriteria, nodeIds storj.NodeIDList) (offlineNodes storj.NodeIDList, err error) {
defer mon.Task()(&ctx)(&err)
if len(nodeIds) == 0 {
return nil, Error.New("no ids provided")
}
// get offline nodes
var rows tagsql.Rows
rows, err = cache.db.Query(ctx, cache.db.Rebind(`
SELECT id FROM nodes
`+cache.db.impl.AsOfSystemInterval(criteria.AsOfSystemInterval)+`
WHERE id = any($1::bytea[])
AND last_contact_success < $2
`), pgutil.NodeIDArray(nodeIds), time.Now().Add(-criteria.OnlineWindow),
)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var id storj.NodeID
err = rows.Scan(&id)
if err != nil {
return nil, err
}
offlineNodes = append(offlineNodes, id)
}
return offlineNodes, Error.Wrap(rows.Err())
}
// KnownUnreliableOrOffline filters a set of nodes to unreliable or offlines node, independent of new.
func (cache *overlaycache) KnownUnreliableOrOffline(ctx context.Context, criteria *overlay.NodeCriteria, nodeIDs storj.NodeIDList) (badNodes storj.NodeIDList, err error) {
for {
badNodes, err = cache.knownUnreliableOrOffline(ctx, criteria, nodeIDs)
if err != nil {
if cockroachutil.NeedsRetry(err) {
continue
}
return badNodes, err
}
break
}
return badNodes, err
}
// GetOfflineNodesForEmail gets nodes that we want to send an email to. These are non-disqualified, non-exited nodes where
// last_contact_success is between two points: the point where it is considered offline (offlineWindow), and the point where we don't want
// to send more emails (cutoff). It also filters nodes where last_offline_email is too recent (cooldown).
@ -447,64 +567,102 @@ func (cache *overlaycache) knownReliableInExcludedCountries(ctx context.Context,
return reliableInExcluded, Error.Wrap(rows.Err())
}
// KnownReliable filters a set of nodes to reliable nodes. List is split into online and offline nodes.
func (cache *overlaycache) KnownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []overlay.SelectedNode, offline []overlay.SelectedNode, err error) {
for {
online, offline, err = cache.knownReliable(ctx, nodeIDs, onlineWindow, asOfSystemInterval)
if err != nil {
if cockroachutil.NeedsRetry(err) {
continue
}
return nil, nil, err
}
break
}
return online, offline, err
}
func (cache *overlaycache) knownReliable(ctx context.Context, nodeIDs storj.NodeIDList, onlineWindow, asOfSystemInterval time.Duration) (online []overlay.SelectedNode, offline []overlay.SelectedNode, err error) {
func (cache *overlaycache) knownUnreliableOrOffline(ctx context.Context, criteria *overlay.NodeCriteria, nodeIDs storj.NodeIDList) (badNodes storj.NodeIDList, err error) {
defer mon.Task()(&ctx)(&err)
if len(nodeIDs) == 0 {
return nil, nil, Error.New("no ids provided")
return nil, Error.New("no ids provided")
}
err = withRows(cache.db.Query(ctx, `
SELECT id, address, last_net, last_ip_port, country_code, last_contact_success > $2 as online
FROM nodes
`+cache.db.impl.AsOfSystemInterval(asOfSystemInterval)+`
WHERE id = any($1::bytea[])
// get reliable and online nodes
var rows tagsql.Rows
rows, err = cache.db.Query(ctx, cache.db.Rebind(`
SELECT id
FROM nodes
`+cache.db.impl.AsOfSystemInterval(criteria.AsOfSystemInterval)+`
WHERE id = any($1::bytea[])
AND disqualified IS NULL
AND unknown_audit_suspended IS NULL
AND offline_suspended IS NULL
AND exit_finished_at IS NULL
`, pgutil.NodeIDArray(nodeIDs), time.Now().Add(-onlineWindow),
))(func(rows tagsql.Rows) error {
for rows.Next() {
var onlineNode bool
var node overlay.SelectedNode
node.Address = &pb.NodeAddress{}
var lastIPPort sql.NullString
err = rows.Scan(&node.ID, &node.Address.Address, &node.LastNet, &lastIPPort, &node.CountryCode, &onlineNode)
if err != nil {
return err
}
AND last_contact_success > $2
`), pgutil.NodeIDArray(nodeIDs), time.Now().Add(-criteria.OnlineWindow),
)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rows.Close()) }()
if lastIPPort.Valid {
node.LastIPPort = lastIPPort.String
}
if onlineNode {
online = append(online, node)
} else {
offline = append(offline, node)
}
goodNodes := make(map[storj.NodeID]struct{}, len(nodeIDs))
for rows.Next() {
var id storj.NodeID
err = rows.Scan(&id)
if err != nil {
return nil, err
}
return nil
})
goodNodes[id] = struct{}{}
}
for _, id := range nodeIDs {
if _, ok := goodNodes[id]; !ok {
badNodes = append(badNodes, id)
}
}
return badNodes, Error.Wrap(rows.Err())
}
return online, offline, Error.Wrap(err)
// KnownReliable filters a set of nodes to reliable (online and qualified) nodes.
func (cache *overlaycache) KnownReliable(ctx context.Context, onlineWindow time.Duration, nodeIDs storj.NodeIDList) (nodes []*pb.Node, err error) {
for {
nodes, err = cache.knownReliable(ctx, onlineWindow, nodeIDs)
if err != nil {
if cockroachutil.NeedsRetry(err) {
continue
}
return nodes, err
}
break
}
return nodes, err
}
func (cache *overlaycache) knownReliable(ctx context.Context, onlineWindow time.Duration, nodeIDs storj.NodeIDList) (nodes []*pb.Node, err error) {
defer mon.Task()(&ctx)(&err)
if len(nodeIDs) == 0 {
return nil, Error.New("no ids provided")
}
// get online nodes
rows, err := cache.db.Query(ctx, cache.db.Rebind(`
SELECT id, last_net, last_ip_port, address, protocol, noise_proto, noise_public_key, debounce_limit, features
FROM nodes
WHERE id = any($1::bytea[])
AND disqualified IS NULL
AND unknown_audit_suspended IS NULL
AND offline_suspended IS NULL
AND exit_finished_at IS NULL
AND last_contact_success > $2
`), pgutil.NodeIDArray(nodeIDs), time.Now().Add(-onlineWindow),
)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
row := &dbx.Node{}
err = rows.Scan(&row.Id, &row.LastNet, &row.LastIpPort, &row.Address, &row.Protocol, &row.NoiseProto, &row.NoisePublicKey, &row.DebounceLimit, &row.Features)
if err != nil {
return nil, err
}
node, err := convertDBNode(ctx, row)
if err != nil {
return nil, err
}
nodes = append(nodes, &node.Node)
}
return nodes, Error.Wrap(rows.Err())
}
// Reliable returns all reliable nodes.

View File

@ -418,139 +418,3 @@ func TestOverlayCache_SelectAllStorageNodesDownloadUpload(t *testing.T) {
})
}
func TestOverlayCache_KnownReliable(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
cache := db.OverlayCache()
allNodes := []overlay.SelectedNode{
addNode(ctx, t, cache, "online", "127.0.0.1", true, false, false, false, false),
addNode(ctx, t, cache, "offline", "127.0.0.2", false, false, false, false, false),
addNode(ctx, t, cache, "disqalified", "127.0.0.3", false, true, false, false, false),
addNode(ctx, t, cache, "audit-suspended", "127.0.0.4", false, false, true, false, false),
addNode(ctx, t, cache, "offline-suspended", "127.0.0.5", false, false, false, true, false),
addNode(ctx, t, cache, "exited", "127.0.0.6", false, false, false, false, true),
}
ids := func(nodes ...overlay.SelectedNode) storj.NodeIDList {
nodeIds := storj.NodeIDList{}
for _, node := range nodes {
nodeIds = append(nodeIds, node.ID)
}
return nodeIds
}
nodes := func(nodes ...overlay.SelectedNode) []overlay.SelectedNode {
return append([]overlay.SelectedNode{}, nodes...)
}
type testCase struct {
IDs storj.NodeIDList
Online []overlay.SelectedNode
Offline []overlay.SelectedNode
}
shuffledNodeIDs := ids(allNodes...)
rand.Shuffle(len(shuffledNodeIDs), shuffledNodeIDs.Swap)
for _, tc := range []testCase{
{
IDs: ids(allNodes[0], allNodes[1]),
Online: nodes(allNodes[0]),
Offline: nodes(allNodes[1]),
},
{
IDs: ids(allNodes[0]),
Online: nodes(allNodes[0]),
},
{
IDs: ids(allNodes[1]),
Offline: nodes(allNodes[1]),
},
{ // only unreliable
IDs: ids(allNodes[2], allNodes[3], allNodes[4], allNodes[5]),
},
{ // all nodes
IDs: ids(allNodes...),
Online: nodes(allNodes[0]),
Offline: nodes(allNodes[1]),
},
// all nodes but in shuffled order
{
IDs: shuffledNodeIDs,
Online: nodes(allNodes[0]),
Offline: nodes(allNodes[1]),
},
// all nodes + one ID not from DB
{
IDs: append(ids(allNodes...), testrand.NodeID()),
Online: nodes(allNodes[0]),
Offline: nodes(allNodes[1]),
},
} {
online, offline, err := cache.KnownReliable(ctx, tc.IDs, 1*time.Hour, 0)
require.NoError(t, err)
require.ElementsMatch(t, tc.Online, online)
require.ElementsMatch(t, tc.Offline, offline)
}
_, _, err := cache.KnownReliable(ctx, storj.NodeIDList{}, 1*time.Hour, 0)
require.Error(t, err)
})
}
// addNode inserts a node into the overlay cache with the requested reachability
// and reputation state, returning the corresponding SelectedNode.
//
// online controls the check-in timestamp (offline nodes are checked in 10 hours
// in the past so they fall outside the reliability window); the remaining flags
// mark the node disqualified, audit-suspended, offline-suspended, or as having
// completed graceful exit. Fixes the previous "disqalified" parameter spelling.
func addNode(ctx context.Context, t *testing.T, cache overlay.DB, address, lastIPPort string, online, disqualified, auditSuspended, offlineSuspended, exited bool) overlay.SelectedNode {
	selectedNode := overlay.SelectedNode{
		ID:          testrand.NodeID(),
		Address:     &pb.NodeAddress{Address: address},
		LastNet:     lastIPPort,
		LastIPPort:  lastIPPort,
		CountryCode: location.Poland,
	}

	checkInInfo := overlay.NodeCheckInInfo{
		IsUp:        true,
		NodeID:      selectedNode.ID,
		Address:     &pb.NodeAddress{Address: selectedNode.Address.Address},
		LastIPPort:  selectedNode.LastIPPort,
		LastNet:     selectedNode.LastNet,
		CountryCode: selectedNode.CountryCode,
		Version:     &pb.NodeVersion{Version: "v0.0.0"},
	}

	timestamp := time.Now().UTC()
	if !online {
		// An old check-in time makes the node appear offline to callers
		// that filter by a recent contact window.
		timestamp = time.Now().Add(-10 * time.Hour)
	}

	err := cache.UpdateCheckIn(ctx, checkInInfo, timestamp, overlay.NodeSelectionConfig{})
	require.NoError(t, err)

	if disqualified {
		_, err := cache.DisqualifyNode(ctx, selectedNode.ID, time.Now(), overlay.DisqualificationReasonAuditFailure)
		require.NoError(t, err)
	}
	if auditSuspended {
		require.NoError(t, cache.TestSuspendNodeUnknownAudit(ctx, selectedNode.ID, time.Now()))
	}
	if offlineSuspended {
		require.NoError(t, cache.TestSuspendNodeOffline(ctx, selectedNode.ID, time.Now()))
	}
	if exited {
		now := time.Now()
		_, err := cache.UpdateExitStatus(ctx, &overlay.ExitStatusRequest{
			NodeID:              selectedNode.ID,
			ExitInitiatedAt:     now,
			ExitLoopCompletedAt: now,
			ExitFinishedAt:      now,
			ExitSuccess:         true,
		})
		require.NoError(t, err)
	}

	return selectedNode
}

View File

@ -5,6 +5,9 @@ package satellitedb
import (
"context"
"database/sql"
"errors"
"time"
"storj.io/common/uuid"
"storj.io/storj/satellite/console"
@ -16,11 +19,11 @@ var _ console.ProjectInvitations = (*projectInvitations)(nil)
// projectInvitations is an implementation of console.ProjectInvitations.
type projectInvitations struct {
db dbx.Methods
db *satelliteDB
}
// Upsert updates a project member invitation if it exists and inserts it otherwise.
func (invites *projectInvitations) Upsert(ctx context.Context, invite *console.ProjectInvitation) (_ *console.ProjectInvitation, err error) {
// Insert inserts a project member invitation into the database.
func (invites *projectInvitations) Insert(ctx context.Context, invite *console.ProjectInvitation) (_ *console.ProjectInvitation, err error) {
defer mon.Task()(&ctx)(&err)
if invite == nil {
@ -33,7 +36,7 @@ func (invites *projectInvitations) Upsert(ctx context.Context, invite *console.P
createFields.InviterId = dbx.ProjectInvitation_InviterId(id)
}
dbxInvite, err := invites.db.Replace_ProjectInvitation(ctx,
dbxInvite, err := invites.db.Create_ProjectInvitation(ctx,
dbx.ProjectInvitation_ProjectId(invite.ProjectID[:]),
dbx.ProjectInvitation_Email(normalizeEmail(invite.Email)),
createFields,
@ -84,6 +87,30 @@ func (invites *projectInvitations) GetByEmail(ctx context.Context, email string)
return projectInvitationSliceFromDBX(dbxInvites)
}
// Update updates the project member invitation specified by the given project ID and email address.
// Only the fields set on the request are modified; nil fields are left untouched.
func (invites *projectInvitations) Update(ctx context.Context, projectID uuid.UUID, email string, request console.UpdateProjectInvitationRequest) (_ *console.ProjectInvitation, err error) {
	defer mon.Task()(&ctx)(&err)

	// Build the dbx update set from only the provided optional fields.
	updateFields := dbx.ProjectInvitation_Update_Fields{}
	if createdAt := request.CreatedAt; createdAt != nil {
		updateFields.CreatedAt = dbx.ProjectInvitation_CreatedAt(*createdAt)
	}
	if inviterID := request.InviterID; inviterID != nil {
		updateFields.InviterId = dbx.ProjectInvitation_InviterId((*inviterID)[:])
	}

	updated, err := invites.db.Update_ProjectInvitation_By_ProjectId_And_Email(ctx,
		dbx.ProjectInvitation_ProjectId(projectID[:]),
		dbx.ProjectInvitation_Email(normalizeEmail(email)),
		updateFields,
	)
	if err != nil {
		return nil, err
	}

	return projectInvitationFromDBX(updated)
}
// Delete removes a project member invitation from the database.
func (invites *projectInvitations) Delete(ctx context.Context, projectID uuid.UUID, email string) (err error) {
defer mon.Task()(&ctx)(&err)
@ -95,6 +122,81 @@ func (invites *projectInvitations) Delete(ctx context.Context, projectID uuid.UU
return err
}
// DeleteBefore deletes project member invitations created prior to some time from the database.
//
// Records are removed in keyset-paginated batches of pageSize, ordered by the
// (project_id, email) primary key, so a single huge DELETE is avoided.
// asOfSystemTimeInterval is passed through to the database implementation's
// AS OF SYSTEM TIME clause (a no-op on backends that do not support it).
func (invites *projectInvitations) DeleteBefore(
	ctx context.Context, before time.Time, asOfSystemTimeInterval time.Duration, pageSize int) (err error) {
	defer mon.Task()(&ctx)(&err)

	if pageSize <= 0 {
		return Error.New("expected page size to be positive; got %d", pageSize)
	}

	// Cursor tuples over the (project_id, email) primary key: pageCursor is the
	// first key of the current page, pageEnd the last.
	var pageCursor, pageEnd struct {
		ProjectID uuid.UUID
		Email     string
	}
	aost := invites.db.impl.AsOfSystemInterval(asOfSystemTimeInterval)
	for {
		// Select the (project_id, email) key beginning this page of records.
		err := invites.db.QueryRowContext(ctx, `
			SELECT project_id, email FROM project_invitations
			`+aost+`
			WHERE (project_id, email) > ($1, $2) AND created_at < $3
			ORDER BY (project_id, email) LIMIT 1
		`, pageCursor.ProjectID, pageCursor.Email, before).Scan(&pageCursor.ProjectID, &pageCursor.Email)
		if err != nil {
			if errors.Is(err, sql.ErrNoRows) {
				// No expired invitations remain past the cursor; we are done.
				return nil
			}
			return Error.Wrap(err)
		}

		// Select the (project_id, email) key ending this page of records.
		err = invites.db.QueryRowContext(ctx, `
			SELECT project_id, email FROM project_invitations
			`+aost+`
			WHERE (project_id, email) > ($1, $2)
			ORDER BY (project_id, email) LIMIT 1 OFFSET $3
		`, pageCursor.ProjectID, pageCursor.Email, pageSize).Scan(&pageEnd.ProjectID, &pageEnd.Email)
		if err != nil {
			if !errors.Is(err, sql.ErrNoRows) {
				return Error.Wrap(err)
			}
			// Since this is the last page, delete all remaining old records.
			_, err = invites.db.ExecContext(ctx, `
				DELETE FROM project_invitations
				WHERE (project_id, email) IN (
					SELECT project_id, email FROM project_invitations
					`+aost+`
					WHERE (project_id, email) >= ($1, $2)
					AND created_at < $3
					ORDER BY (project_id, email)
				)
			`, pageCursor.ProjectID, pageCursor.Email, before)
			return Error.Wrap(err)
		}

		// Delete all old invitation records in the range between the beginning and ending keys.
		_, err = invites.db.ExecContext(ctx, `
			DELETE FROM project_invitations
			WHERE (project_id, email) IN (
				SELECT project_id, email FROM project_invitations
				`+aost+`
				WHERE (project_id, email) >= ($1, $2)
				AND (project_id, email) <= ($3, $4)
				AND created_at < $5
				ORDER BY (project_id, email)
			)
		`, pageCursor.ProjectID, pageCursor.Email, pageEnd.ProjectID, pageEnd.Email, before)
		if err != nil {
			return Error.Wrap(err)
		}

		// Advance the cursor to the next page.
		pageCursor = pageEnd
	}
}
// projectInvitationFromDBX converts a project member invitation from the database to a *console.ProjectInvitation.
func projectInvitationFromDBX(dbxInvite *dbx.ProjectInvitation) (_ *console.ProjectInvitation, err error) {
if dbxInvite == nil {

View File

@ -50,7 +50,7 @@ func TestProjectInvitations(t *testing.T) {
if !t.Run("insert invitations", func(t *testing.T) {
// Expect failure because no user with inviterID exists.
_, err = invitesDB.Upsert(ctx, invite)
_, err = invitesDB.Insert(ctx, invite)
require.Error(t, err)
_, err = db.Console().Users().Insert(ctx, &console.User{
@ -59,15 +59,19 @@ func TestProjectInvitations(t *testing.T) {
})
require.NoError(t, err)
invite, err = invitesDB.Upsert(ctx, invite)
invite, err = invitesDB.Insert(ctx, invite)
require.NoError(t, err)
require.WithinDuration(t, time.Now(), invite.CreatedAt, time.Minute)
require.Equal(t, projID, invite.ProjectID)
require.Equal(t, strings.ToUpper(email), invite.Email)
inviteSameEmail, err = invitesDB.Upsert(ctx, inviteSameEmail)
// Duplicate invitations should be rejected.
_, err = invitesDB.Insert(ctx, invite)
require.Error(t, err)
inviteSameEmail, err = invitesDB.Insert(ctx, inviteSameEmail)
require.NoError(t, err)
inviteSameProject, err = invitesDB.Upsert(ctx, inviteSameProject)
inviteSameProject, err = invitesDB.Insert(ctx, inviteSameProject)
require.NoError(t, err)
}) {
// None of the following subtests will pass if invitation insertion failed.
@ -122,19 +126,22 @@ func TestProjectInvitations(t *testing.T) {
t.Run("update invitation", func(t *testing.T) {
ctx := testcontext.New(t)
req := console.UpdateProjectInvitationRequest{}
newCreatedAt := invite.CreatedAt.Add(time.Hour)
req.CreatedAt = &newCreatedAt
newInvite, err := invitesDB.Update(ctx, projID, email, req)
require.NoError(t, err)
require.Equal(t, newCreatedAt, newInvite.CreatedAt)
inviter, err := db.Console().Users().Insert(ctx, &console.User{
ID: testrand.UUID(),
PasswordHash: testrand.Bytes(8),
})
require.NoError(t, err)
invite.InviterID = &inviter.ID
oldCreatedAt := invite.CreatedAt
invite, err = invitesDB.Upsert(ctx, invite)
req.InviterID = &inviter.ID
newInvite, err = invitesDB.Update(ctx, projID, email, req)
require.NoError(t, err)
require.Equal(t, inviter.ID, *invite.InviterID)
require.True(t, invite.CreatedAt.After(oldCreatedAt))
require.Equal(t, inviter.ID, *newInvite.InviterID)
})
t.Run("delete invitation", func(t *testing.T) {
@ -162,3 +169,45 @@ func TestProjectInvitations(t *testing.T) {
})
})
}
// TestDeleteBefore verifies that ProjectInvitations.DeleteBefore removes only
// invitations created before the cutoff and rejects non-positive page sizes.
func TestDeleteBefore(t *testing.T) {
	const maxAge = time.Hour
	now := time.Now()
	expiration := now.Add(-maxAge)

	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
		invitesDB := db.Console().ProjectInvitations()

		// Page sizes of zero or less must be rejected.
		for _, pageSize := range []int{0, -1} {
			require.Error(t, invitesDB.DeleteBefore(ctx, time.Time{}, 0, pageSize))
		}

		// newInvitation creates a fresh project and an invitation for it.
		newInvitation := func() *console.ProjectInvitation {
			projID := testrand.UUID()
			_, err := db.Console().Projects().Insert(ctx, &console.Project{ID: projID})
			require.NoError(t, err)

			invite, err := invitesDB.Insert(ctx, &console.ProjectInvitation{ProjectID: projID})
			require.NoError(t, err)
			return invite
		}

		freshInvite := newInvitation()
		staleInvite := newInvitation()

		// Backdate the stale invitation to just past the cutoff.
		staleCreatedAt := expiration.Add(-time.Second)
		staleInvite, err := invitesDB.Update(ctx, staleInvite.ProjectID, staleInvite.Email, console.UpdateProjectInvitationRequest{
			CreatedAt: &staleCreatedAt,
		})
		require.NoError(t, err)

		require.NoError(t, invitesDB.DeleteBefore(ctx, expiration, 0, 1))

		// The stale invitation must be gone while the fresh one remains.
		_, err = invitesDB.Get(ctx, staleInvite.ProjectID, staleInvite.Email)
		require.ErrorIs(t, err, sql.ErrNoRows)
		_, err = invitesDB.Get(ctx, freshInvite.ProjectID, freshInvite.Email)
		require.NoError(t, err)
	})
}

View File

@ -36,7 +36,7 @@ func TestGetPagedWithInvitationsByProjectID(t *testing.T) {
_, err = db.Console().ProjectMembers().Insert(ctx, memberUser.ID, projectID)
require.NoError(t, err)
_, err = db.Console().ProjectInvitations().Upsert(ctx, &console.ProjectInvitation{
_, err = db.Console().ProjectInvitations().Insert(ctx, &console.ProjectInvitation{
ProjectID: projectID,
Email: "bob@mail.test",
})

View File

@ -5,8 +5,6 @@ package satellitedb
import (
"context"
"database/sql"
"errors"
"time"
"storj.io/common/uuid"
@ -18,7 +16,7 @@ import (
var _ consoleauth.WebappSessions = (*webappSessions)(nil)
type webappSessions struct {
db *satelliteDB
db dbx.Methods
}
// Create creates a webapp session and returns the session info.
@ -93,75 +91,6 @@ func (db *webappSessions) DeleteAllByUserID(ctx context.Context, userID uuid.UUI
return db.db.Delete_WebappSession_By_UserId(ctx, dbx.WebappSession_UserId(userID.Bytes()))
}
// DeleteExpired deletes all sessions that have expired before the provided timestamp.
//
// Rows are removed in keyset-paginated batches of pageSize, ordered by the id
// primary key, to avoid one unbounded DELETE. asOfSystemTimeInterval is passed
// through to the database implementation's AS OF SYSTEM TIME clause (a no-op on
// backends without support for it).
func (db *webappSessions) DeleteExpired(ctx context.Context, now time.Time, asOfSystemTimeInterval time.Duration, pageSize int) (err error) {
	defer mon.Task()(&ctx)(&err)

	if pageSize <= 0 {
		return Error.New("expected page size to be positive; got %d", pageSize)
	}

	// pageCursor is the first ID of the current page, pageEnd the last.
	var pageCursor, pageEnd uuid.UUID
	aost := db.db.impl.AsOfSystemInterval(asOfSystemTimeInterval)
	for {
		// Select the ID beginning this page of records.
		err := db.db.QueryRowContext(ctx, `
			SELECT id FROM webapp_sessions
			`+aost+`
			WHERE id > $1 AND expires_at < $2
			ORDER BY id LIMIT 1
		`, pageCursor, now).Scan(&pageCursor)
		if err != nil {
			if errors.Is(err, sql.ErrNoRows) {
				// No expired sessions remain past the cursor; we are done.
				return nil
			}
			return Error.Wrap(err)
		}

		// Select the ID ending this page of records.
		err = db.db.QueryRowContext(ctx, `
			SELECT id FROM webapp_sessions
			`+aost+`
			WHERE id > $1
			ORDER BY id LIMIT 1 OFFSET $2
		`, pageCursor, pageSize).Scan(&pageEnd)
		if err != nil {
			if !errors.Is(err, sql.ErrNoRows) {
				return Error.Wrap(err)
			}
			// Since this is the last page, delete all remaining expired records.
			_, err = db.db.ExecContext(ctx, `
				DELETE FROM webapp_sessions
				WHERE id IN (
					SELECT id FROM webapp_sessions
					`+aost+`
					WHERE id >= $1 AND expires_at < $2
					ORDER BY id
				)
			`, pageCursor, now)
			return Error.Wrap(err)
		}

		// Delete all expired records in the range between the beginning and ending IDs.
		_, err = db.db.ExecContext(ctx, `
			DELETE FROM webapp_sessions
			WHERE id IN (
				SELECT id FROM webapp_sessions
				`+aost+`
				WHERE id BETWEEN $1 AND $2
				AND expires_at < $3
				ORDER BY id
			)
		`, pageCursor, pageEnd, now)
		if err != nil {
			return Error.Wrap(err)
		}

		// Advance the cursor to the next page.
		pageCursor = pageEnd
	}
}
func getSessionFromDBX(dbxSession *dbx.WebappSession) (consoleauth.WebappSession, error) {
id, err := uuid.FromBytes(dbxSession.Id)
if err != nil {

View File

@ -4,7 +4,6 @@
package satellitedb_test
import (
"database/sql"
"testing"
"time"
@ -187,26 +186,3 @@ func TestWebappSessionsDeleteAllByUserID(t *testing.T) {
require.Len(t, allSessions, 0)
})
}
// TestDeleteExpired checks that WebappSessions.DeleteExpired removes only
// sessions whose expiration precedes the given timestamp and rejects
// non-positive page sizes.
func TestDeleteExpired(t *testing.T) {
	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
		sessionsDB := db.Console().WebappSessions()
		now := time.Now()

		// Page sizes of zero or less must be rejected.
		for _, pageSize := range []int{0, -1} {
			require.Error(t, sessionsDB.DeleteExpired(ctx, time.Time{}, 0, pageSize))
		}

		liveSession, err := sessionsDB.Create(ctx, testrand.UUID(), testrand.UUID(), "", "", now.Add(time.Second))
		require.NoError(t, err)
		expiredSession, err := sessionsDB.Create(ctx, testrand.UUID(), testrand.UUID(), "", "", now.Add(-time.Second))
		require.NoError(t, err)

		require.NoError(t, sessionsDB.DeleteExpired(ctx, now, 0, 1))

		// The expired session must be deleted while the live one survives.
		_, err = sessionsDB.GetBySessionID(ctx, expiredSession.ID)
		require.ErrorIs(t, err, sql.ErrNoRows)
		_, err = sessionsDB.GetBySessionID(ctx, liveSession.ID)
		require.NoError(t, err)
	})
}

View File

@ -145,6 +145,9 @@ compensation.withheld-percents: 75,75,75,50,50,50,25,25,25,0,0,0,0,0,0
# interval between chore cycles
# console-db-cleanup.interval: 24h0m0s
# maximum lifetime of project member invitation records
# console-db-cleanup.max-project-invitation-age: 168h0m0s
# maximum lifetime of unverified user account records
# console-db-cleanup.max-unverified-user-age: 168h0m0s
@ -626,7 +629,7 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# metainfo.max-commit-interval: 48h0m0s
# maximum encrypted object key length
# metainfo.max-encrypted-object-key-length: 2000
# metainfo.max-encrypted-object-key-length: 1750
# maximum inline segment size
# metainfo.max-inline-segment-size: 4.0 KiB
@ -886,9 +889,6 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# price user should pay for storage per month in dollars/TB
# payments.usage-price.storage-tb: "4"
# whether to enable piece tracker observer with ranged loop
# piece-tracker.use-ranged-loop: true
# how often to remove unused project bandwidth rollups
# project-bw-cleanup.interval: 24h0m0s

View File

@ -72,8 +72,6 @@ func TestUploadAndPartialDownload(t *testing.T) {
}()
}
require.NoError(t, planet.WaitForStorageNodeEndpoints(ctx))
var totalBandwidthUsage bandwidth.Usage
for _, storagenode := range planet.StorageNodes {
usage, err := storagenode.DB.Bandwidth().Summary(ctx, time.Now().Add(-10*time.Hour), time.Now().Add(10*time.Hour))
@ -193,8 +191,6 @@ func TestUpload(t *testing.T) {
}
}
require.NoError(t, planet.WaitForStorageNodeEndpoints(ctx))
from, to := date.MonthBoundary(time.Now().UTC())
summary, err := planet.StorageNodes[0].DB.Bandwidth().SatelliteIngressSummary(ctx, planet.Satellites[0].ID(), from, to)
require.NoError(t, err)

View File

@ -10,10 +10,10 @@ require (
go.uber.org/zap v1.21.0
golang.org/x/sync v0.1.0
storj.io/common v0.0.0-20230602145716-d6ea82d58b3d
storj.io/private v0.0.0-20230627140631-807a2f00d0e1
storj.io/private v0.0.0-20230614131149-2ffd1635adea
storj.io/storj v1.63.1
storj.io/storjscan v0.0.0-20220926140643-1623c3b391b0
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2
storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33
)
require (

View File

@ -1256,9 +1256,9 @@ storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41 h1:SVuEocEhZfFc13J1Aml
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41/go.mod h1:iK+dmHZZXQlW7ahKdNSOo+raMk5BDL2wbD62FIeXLWs=
storj.io/picobuf v0.0.1 h1:ekEvxSQCbEjTVIi/qxj2za13SJyfRE37yE30IBkZeT0=
storj.io/picobuf v0.0.1/go.mod h1:7ZTAMs6VesgTHbbhFU79oQ9hDaJ+MD4uoFQZ1P4SEz0=
storj.io/private v0.0.0-20230627140631-807a2f00d0e1 h1:O2+Xjq8H4TKad2cnhvjitK3BtwkGtJ2TfRCHOIN8e7w=
storj.io/private v0.0.0-20230627140631-807a2f00d0e1/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
storj.io/private v0.0.0-20230614131149-2ffd1635adea h1:/dv0bYRPgCFvoXF0S14Ien41i12sj9+s4aKhCrFzXHg=
storj.io/private v0.0.0-20230614131149-2ffd1635adea/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
storj.io/storjscan v0.0.0-20220926140643-1623c3b391b0 h1:pSfGf9E9OlUd17W7LSpL4tTONIyFji6dz8I2iTDd8BY=
storj.io/storjscan v0.0.0-20220926140643-1623c3b391b0/go.mod h1:5nLgAOl1KTDVyqORAhvrp+167PtShEuS1L3pJgXPjwo=
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2 h1:XnJR9egrqvAqx5oCRu2b13ubK0iu0qTX12EAa6lAPhg=
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=
storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33 h1:A6z1FOmqqh44BI/UOPwTi0qaM+/Hdpiwk3QAuvWf03g=
storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=

View File

@ -12,7 +12,7 @@ require (
go.uber.org/zap v1.23.0
storj.io/common v0.0.0-20230602145716-d6ea82d58b3d
storj.io/gateway-mt v1.51.1-0.20230417204402-7d9bb25bc297
storj.io/private v0.0.0-20230627140631-807a2f00d0e1
storj.io/private v0.0.0-20230614131149-2ffd1635adea
storj.io/storj v0.12.1-0.20221125175451-ef4b564b82f7
)
@ -223,5 +223,5 @@ require (
storj.io/minio v0.0.0-20230118205046-c025fcc9eef3 // indirect
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41 // indirect
storj.io/picobuf v0.0.1 // indirect
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2 // indirect
storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33 // indirect
)

View File

@ -1974,8 +1974,8 @@ storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41 h1:SVuEocEhZfFc13J1Aml
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41/go.mod h1:iK+dmHZZXQlW7ahKdNSOo+raMk5BDL2wbD62FIeXLWs=
storj.io/picobuf v0.0.1 h1:ekEvxSQCbEjTVIi/qxj2za13SJyfRE37yE30IBkZeT0=
storj.io/picobuf v0.0.1/go.mod h1:7ZTAMs6VesgTHbbhFU79oQ9hDaJ+MD4uoFQZ1P4SEz0=
storj.io/private v0.0.0-20230627140631-807a2f00d0e1 h1:O2+Xjq8H4TKad2cnhvjitK3BtwkGtJ2TfRCHOIN8e7w=
storj.io/private v0.0.0-20230627140631-807a2f00d0e1/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2 h1:XnJR9egrqvAqx5oCRu2b13ubK0iu0qTX12EAa6lAPhg=
storj.io/uplink v1.10.1-0.20230626081029-035890d408c2/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=
storj.io/private v0.0.0-20230614131149-2ffd1635adea h1:/dv0bYRPgCFvoXF0S14Ien41i12sj9+s4aKhCrFzXHg=
storj.io/private v0.0.0-20230614131149-2ffd1635adea/go.mod h1:mfdHEaAcTARpd4/Hc6N5uxwB1ZG3jtPdVlle57xzQxQ=
storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33 h1:A6z1FOmqqh44BI/UOPwTi0qaM+/Hdpiwk3QAuvWf03g=
storj.io/uplink v1.10.1-0.20230607180240-72bcffbeac33/go.mod h1:cDlpDWGJykXfYE7NtO1EeArGFy12K5Xj8pV8ufpUCKE=
storj.io/zipper v0.0.0-20220124122551-2ac2d53a46f6 h1:vJQmb+uAiYn8hVfkhMl6OqjnUyMWSCPnkzW8IsjF8vE=

View File

@ -17,17 +17,13 @@ export class AnalyticsHttpApi {
* Does not throw any errors so that expected UI behavior is not interrupted if the API call fails.
*
* @param eventName - name of the event
* @param props - additional properties to send with the event
*/
public async eventTriggered(eventName: string, props?: {[p:string]:string}): Promise<void> {
public async eventTriggered(eventName: string): Promise<void> {
try {
const path = `${this.ROOT_PATH}/event`;
const body = {
eventName: eventName,
};
if (props) {
body['props'] = props;
}
const response = await this.http.post(path, JSON.stringify(body));
if (response.ok) {
return;

View File

@ -15,6 +15,7 @@ import {
} from '@/types/users';
import { HttpClient } from '@/utils/httpClient';
import { ErrorTokenExpired } from '@/api/errors/ErrorTokenExpired';
import { Duration } from '@/utils/time';
/**
* AuthHttpApi is a console Auth API.
@ -172,7 +173,6 @@ export class AuthHttpApi implements UsersApi {
userResponse.partner,
userResponse.password,
userResponse.projectLimit,
userResponse.projectStorageLimit,
userResponse.paidTier,
userResponse.isMFAEnabled,
userResponse.isProfessional,

View File

@ -107,22 +107,6 @@ export class ProjectMembersApiGql extends BaseGql implements ProjectMembersApi {
throw new Error(result.error || 'Failed to send project invitations');
}
/**
* Get invite link for the specified project and email.
*
* @throws Error
*/
public async getInviteLink(projectID: string, email: string): Promise<string> {
const path = `${this.ROOT_PATH}/${projectID}/invite-link?email=${email}`;
const httpResponse = await this.http.get(path);
if (httpResponse.ok) {
return await httpResponse.json();
}
throw new Error('Can not get invite link');
}
/**
* Method for mapping project members page from json to ProjectMembersPage type.
*

View File

@ -110,7 +110,12 @@
<div class="access-grants__header-container">
<h3 class="access-grants__header-container__title">My Accesses</h3>
<div class="access-grants__header-container__divider" />
<VSearch :search="fetch" />
<VHeader
class="access-header-component"
placeholder="Accesses"
:search="fetch"
style-type="access"
/>
</div>
<VLoader v-if="areGrantsFetching" width="100px" height="100px" class="grants-loader" />
<div class="access-grants-items">
@ -170,8 +175,8 @@ import { MODALS } from '@/utils/constants/appStatePopUps';
import AccessGrantsItem from '@/components/accessGrants/AccessGrantsItem.vue';
import VButton from '@/components/common/VButton.vue';
import VLoader from '@/components/common/VLoader.vue';
import VHeader from '@/components/common/VHeader.vue';
import VTable from '@/components/common/VTable.vue';
import VSearch from '@/components/common/VSearch.vue';
import AccessGrantsIcon from '@/../static/images/accessGrants/accessGrantsIcon.svg';
import CLIIcon from '@/../static/images/accessGrants/cli.svg';
@ -460,6 +465,10 @@ onBeforeUnmount(() => {
.access-grants-items {
padding-bottom: 55px;
@media screen and (width <= 1150px) {
margin-top: -45px;
}
&__content {
margin-top: 20px;
}
@ -496,7 +505,12 @@ onBeforeUnmount(() => {
height: 1px;
width: auto;
background-color: #dadfe7;
margin: 13px 0 16px;
margin-top: 10px;
}
&__access-header-component {
height: 55px !important;
margin-top: 15px;
}
}
}

View File

@ -16,7 +16,7 @@
</div>
<div class="blured-container__wrap" :class="{justify: !isMnemonic}">
<p v-if="isMnemonic" tabindex="0" class="blured-container__wrap__mnemonic" @keyup.space="onCopy">{{ value }}</p>
<p v-else tabindex="0" class="blured-container__wrap__text" :class="{ shown: isValueShown }" @keyup.space="onCopy">{{ value }}</p>
<p v-else tabindex="0" class="blured-container__wrap__text" @keyup.space="onCopy">{{ value }}</p>
<div
v-if="!isMnemonic"
tabindex="0"
@ -135,12 +135,12 @@ function onCopy(): void {
&__text {
font-size: 14px;
line-height: 20px;
color: var(--c-grey-7);
margin-right: 16px;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
line-height: 24px;
margin-right: 16px;
}
&__copy {
@ -160,14 +160,6 @@ function onCopy(): void {
}
}
.shown {
white-space: unset;
text-overflow: unset;
overflow-wrap: break-word;
text-align: left;
font-family: 'Courier', monospace;
}
.justify {
justify-content: space-between;
}

View File

@ -145,7 +145,7 @@ import EndDateSelection from '@/components/accessGrants/createFlow/components/En
import Toggle from '@/components/accessGrants/createFlow/components/Toggle.vue';
import VButton from '@/components/common/VButton.vue';
import SearchIcon from '@/../static/images/common/search.svg';
import SearchIcon from '@/../static/images/accessGrants/newCreateFlow/search.svg';
import CloseIcon from '@/../static/images/accessGrants/newCreateFlow/close.svg';
const props = withDefaults(defineProps<{

View File

@ -1,62 +0,0 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

<template>
    <!-- Full-viewport drag-and-drop overlay. Any mouse-out or drag-leave on the
         zone itself invokes the close callback to dismiss it. -->
    <div class="dropzone" @mouseout="close" @mouseleave="close" @dragleave.self="close">
        <div class="dropzone__message">
            <p class="dropzone__message__text">
                Drop your files to put it into the {{ bucket }} bucket.
            </p>
        </div>
        <p class="dropzone__info">Drag and drop files here to upload</p>
    </div>
</template>

<script setup lang="ts">
// Props:
//  bucket - name of the bucket files will be uploaded into.
//  close  - callback that hides this overlay.
const props = defineProps<{
    bucket: string
    close: () => void
}>();
</script>

<style scoped lang="scss">
    .dropzone {
        z-index: 1;
        position: fixed;
        inset: 0;
        background: rgb(0 0 0 / 35%);
        border: 1px dashed var(--c-white);
        display: flex;
        align-items: center;
        justify-content: center;

        &__message {
            padding: 10px 24px;
            background: var(--c-green-1);
            border: 1px solid var(--c-green-5);
            border-radius: 8px;
            position: absolute;
            top: 24px;
            pointer-events: none;

            &__text {
                font-family: 'font_medium', sans-serif;
                font-size: 14px;
                line-height: 20px;
                color: var(--c-green-5);
                text-align: center;
            }
        }

        &__info {
            font-family: 'font_bold', sans-serif;
            font-size: 40px;
            line-height: 50px;
            text-align: center;
            max-width: 380px;
            color: var(--c-white);
            text-shadow: 0 7px 20px 0 rgb(0 0 0 / 15%);
            pointer-events: none;
        }
    }
</style>

View File

@ -9,10 +9,8 @@
v-cloak
class="div-responsive"
@drop.prevent="upload"
@dragover.prevent="showDropzone"
@dragover.prevent
>
<Dropzone v-if="isOver" :bucket="bucketName" :close="hideDropzone" />
<bread-crumbs @onUpdate="onRouteChange" @bucketClick="goToBuckets" />
<div class="tile-action-bar">
@ -93,14 +91,8 @@
<div class="hr-divider" />
<MultiplePassphraseBanner
v-if="lockedFilesEntryDisplayed && isLockedBanner"
:locked-files-count="lockedFilesCount"
:on-close="closeLockedBanner"
/>
<TooManyObjectsBanner
v-if="files.length >= NUMBER_OF_DISPLAYED_OBJECTS && isTooManyObjectsBanner"
:on-close="closeTooManyObjectsBanner"
v-if="lockedFilesNumber > 0 && isBannerShown && !fetchingFilesSpinner && !currentPath"
:on-close="closeBanner"
/>
<v-table items-label="objects" :total-items-count="files.length" selectable :selected="allFilesSelected" show-select class="file-browser-table" @selectAllClicked="toggleSelectAllFiles">
@ -226,9 +218,7 @@ import VButton from '@/components/common/VButton.vue';
import BucketSettingsNav from '@/components/objects/BucketSettingsNav.vue';
import VTable from '@/components/common/VTable.vue';
import MultiplePassphraseBanner from '@/components/browser/MultiplePassphrasesBanner.vue';
import TooManyObjectsBanner from '@/components/browser/TooManyObjectsBanner.vue';
import UpEntry from '@/components/browser/UpEntry.vue';
import Dropzone from '@/components/browser/Dropzone.vue';
import FileIcon from '@/../static/images/objects/file.svg';
import BlackArrowExpand from '@/../static/images/common/BlackArrowExpand.svg';
@ -248,9 +238,7 @@ const fileInput = ref<HTMLInputElement>();
const fetchingFilesSpinner = ref<boolean>(false);
const isUploadDropDownShown = ref<boolean>(false);
const isLockedBanner = ref<boolean>(true);
const isTooManyObjectsBanner = ref<boolean>(true);
const isOver = ref<boolean>(false);
const isBannerShown = ref<boolean>(true);
/**
* Retrieve the pathMatch from the current route.
*/
@ -297,7 +285,7 @@ const currentPath = computed((): string => {
/**
* Return locked files number.
*/
const lockedFilesCount = computed((): number => {
const lockedFilesNumber = computed((): number => {
const ownObjectsCount = obStore.state.objectsCount;
return objectsCount.value - ownObjectsCount;
@ -317,7 +305,7 @@ const objectsCount = computed((): number => {
* Indicates if locked files entry is displayed.
*/
const lockedFilesEntryDisplayed = computed((): boolean => {
return lockedFilesCount.value > 0 &&
return lockedFilesNumber.value > 0 &&
objectsCount.value <= NUMBER_OF_DISPLAYED_OBJECTS &&
!fetchingFilesSpinner.value &&
!currentPath.value;
@ -400,15 +388,8 @@ const bucket = computed((): string => {
/**
* Closes multiple passphrase banner.
*/
function closeLockedBanner(): void {
isLockedBanner.value = false;
}
/**
* Closes too many objects banner.
*/
function closeTooManyObjectsBanner(): void {
isTooManyObjectsBanner.value = false;
function closeBanner(): void {
isBannerShown.value = false;
}
function calculateRoutePath(): string {
@ -461,12 +442,8 @@ function filename(file: BrowserObject): string {
* Upload the current selected or dragged-and-dropped file.
*/
async function upload(e: Event): Promise<void> {
if (isOver.value) {
isOver.value = false;
}
await obStore.upload({ e });
analytics.eventTriggered(AnalyticsEvent.OBJECT_UPLOADED);
await analytics.eventTriggered(AnalyticsEvent.OBJECT_UPLOADED);
const target = e.target as HTMLInputElement;
target.value = '';
}
@ -517,20 +494,6 @@ function toggleUploadDropdown(): void {
isUploadDropDownShown.value = !isUploadDropDownShown.value;
}
/**
* Makes dropzone visible.
*/
function showDropzone(): void {
isOver.value = true;
}
/**
* Hides dropzone.
*/
function hideDropzone(): void {
isOver.value = false;
}
/**
* Closes upload options dropdown.
*/

View File

@ -283,4 +283,5 @@ function cancelDeleteSelection(): void {
}
}
}
</style>

View File

@ -419,13 +419,7 @@ function openDropdown(): void {
async function download(): Promise<void> {
try {
await obStore.download(props.file);
const message = `
<p class="message-title">Downloading...</p>
<p class="message-info">
Keep this download link private.<br>If you want to share, use the Share option.
</p>
`;
notify.success('', message);
notify.warning('Do not share download link with other people. If you want to share this data better use "Share" option.');
} catch (error) {
notify.error('Can not download your file', AnalyticsErrorEventSource.FILE_BROWSER_ENTRY);
}
@ -506,15 +500,7 @@ function cancelDeletion(): void {
}
.dropdown-item.action.p-3.action {
font-family: 'font_regular', sans-serif;
}
&:first-of-type {
border-radius: 6px 6px 0 0;
}
&:last-of-type {
border-radius: 0 0 6px 6px;
font-family: 'Inter', sans-serif;
}
&__label {

View File

@ -6,15 +6,23 @@
<div class="banner__left">
<LockedIcon class="banner__left__icon" />
<div class="banner__left__labels">
<h2 class="banner__left__labels__title">
You have at least {{ lockedFilesCount }} object{{ lockedFilesCount > 1 ? 's' : '' }} locked with a
different passphrase.
</h2>
<p class="banner__left__labels__subtitle">Enter your other passphrase to access these files.</p>
<template v-if="objectsCount <= NUMBER_OF_DISPLAYED_OBJECTS">
<h2 class="banner__left__labels__title">
You have at least {{ lockedFilesNumber }} object{{ lockedFilesNumber > 1 ? 's' : '' }} locked with a
different passphrase.
</h2>
<p class="banner__left__labels__subtitle">Enter your other passphrase to access these files.</p>
</template>
<template v-else>
<h2 class="banner__left__labels__title">
Due to the number of objects you have uploaded to this bucket, {{ lockedFilesNumber }} files are
not displayed.
</h2>
</template>
</div>
</div>
<div class="banner__right">
<p class="banner__right__unlock" @click="openManageModal">
<p v-if="objectsCount <= NUMBER_OF_DISPLAYED_OBJECTS" class="banner__right__unlock" @click="openManageModal">
Unlock now
</p>
<CloseIcon class="banner__right__close" @click="onClose" />
@ -23,19 +31,48 @@
</template>
<script setup lang="ts">
import { computed } from 'vue';
import { Bucket } from '@/types/buckets';
import { ManageProjectPassphraseStep } from '@/types/managePassphrase';
import { MODALS } from '@/utils/constants/appStatePopUps';
import { useAppStore } from '@/store/modules/appStore';
import { useBucketsStore } from '@/store/modules/bucketsStore';
import { useObjectBrowserStore } from '@/store/modules/objectBrowserStore';
import LockedIcon from '@/../static/images/browser/locked.svg';
import CloseIcon from '@/../static/images/browser/close.svg';
const props = defineProps<{
lockedFilesCount: number
onClose: () => void
}>();
const props = withDefaults(defineProps<{
onClose?: () => void;
}>(), {
onClose: () => {},
});
const appStore = useAppStore();
const bucketsStore = useBucketsStore();
const obStore = useObjectBrowserStore();
const NUMBER_OF_DISPLAYED_OBJECTS = 1000;
/**
* Returns locked files number.
*/
const lockedFilesNumber = computed((): number => {
const ownObjectsCount = obStore.state.objectsCount;
return objectsCount.value - ownObjectsCount;
});
/**
* Returns bucket objects count from store.
*/
const objectsCount = computed((): number => {
const name: string = obStore.state.bucket;
const data: Bucket | undefined = bucketsStore.state.page.buckets.find((bucket: Bucket) => bucket.name === name);
return data?.objectCount || 0;
});
/**
* Opens switch passphrase modal.

View File

@ -1,79 +0,0 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
<template>
<div class="banner">
<div class="banner__left">
<LockedIcon class="banner__left__icon" />
<p class="banner__left__title">
Due to the number of objects you have uploaded, some files may not be displayed.
To list all objects you can use
<a
class="banner__left__title__link"
href="https://docs.storj.io/dcs/getting-started/quickstart-uplink-cli/prerequisites"
target="_blank"
rel="noopener noreferrer"
>
Uplink CLI
</a>
tool.
</p>
</div>
<CloseIcon class="banner__close" @click="onClose" />
</div>
</template>
<script setup lang="ts">
import LockedIcon from '@/../static/images/browser/locked.svg';
import CloseIcon from '@/../static/images/browser/close.svg';
const props = defineProps<{
onClose: () => void
}>();
</script>
<style scoped lang="scss">
.banner {
padding: 16px;
background: #fec;
border: 1px solid var(--c-yellow-2);
box-shadow: 0 7px 20px rgb(0 0 0 / 15%);
border-radius: 10px;
display: flex;
align-items: center;
justify-content: space-between;
font-family: 'font_regular', sans-serif;
margin-bottom: 21px;
&__left {
display: flex;
align-items: center;
margin-right: 15px;
&__icon {
margin-right: 16px;
min-width: 32px;
}
&__title {
font-family: 'font_bold', sans-serif;
font-size: 14px;
line-height: 20px;
color: var(--c-black);
&__link {
color: var(--c-blue-3);
&:visited {
color: var(--c-blue-3);
}
}
}
}
&__close {
min-width: 12px;
cursor: pointer;
}
}
</style>

View File

@ -2,28 +2,18 @@
// See LICENSE for copying information.
<template>
<VInfo>
<template #icon>
<div class="button-icon" :class="{ active: isActive }" @click="onPress">
<component :is="icon" />
</div>
</template>
<template #message>
<p class="message">{{ info }}</p>
</template>
</VInfo>
<div class="button-icon" :class="{ active: isActive }" @click="onPress">
<component :is="icon" />
</div>
</template>
<script setup lang="ts">
import { Component } from 'vue';
import VInfo from '@/components/common/VInfo.vue';
const props = withDefaults(defineProps<{
isActive?: boolean
icon: string
onPress: () => void
info: string
}>(), {
isActive: false,
});
@ -38,41 +28,9 @@ const props = withDefaults(defineProps<{
align-items: center;
justify-content: center;
cursor: pointer;
&:hover {
background: rgb(255 255 255 / 10%);
}
}
.active {
background: rgb(255 255 255 / 10%);
}
.message {
font-family: 'font_medium', sans-serif;
font-size: 12px;
line-height: 18px;
color: var(--c-white);
white-space: nowrap;
}
:deep(.info__box) {
width: auto;
cursor: default;
top: 100%;
left: 50%;
}
:deep(.info__box__message) {
background: var(--c-grey-6);
border-radius: 4px;
padding: 10px 8px;
}
:deep(.info__box__arrow) {
background: var(--c-grey-6);
width: 10px;
height: 10px;
margin-bottom: -3px;
}
</style>

View File

@ -3,7 +3,7 @@
<template>
<Teleport to="#app">
<div ref="viewContainer" class="gallery" tabindex="0" @keydown.esc="closeModal" @keydown.right="onNext" @keydown.left="onPrevious">
<div ref="viewContainer" class="gallery" tabindex="0" @keydown.esc="closeModal">
<div class="gallery__header">
<LogoIcon class="gallery__header__logo" />
<SmallLogoIcon class="gallery__header__small-logo" />
@ -19,33 +19,13 @@
:icon="DotsIcon"
:on-press="toggleDropdown"
:is-active="isOptionsDropdown === true"
info="More"
/>
<ButtonIcon
class="gallery__header__functional__item"
:icon="MapIcon"
:on-press="() => setActiveModal(DistributionModal)"
info="Geographic Distribution"
/>
<ButtonIcon
:icon="DownloadIcon"
:on-press="download"
info="Download"
/>
<ButtonIcon
class="gallery__header__functional__item"
:icon="ShareIcon"
:on-press="() => setActiveModal(ShareModal)"
info="Share"
/>
<ButtonIcon
:icon="CloseIcon"
:on-press="closeModal"
info="Close"
/>
<ButtonIcon :icon="MapIcon" :on-press="() => setActiveModal(DistributionModal)" />
<ButtonIcon class="gallery__header__functional__item" :icon="DownloadIcon" :on-press="download" />
<ButtonIcon class="gallery__header__functional__item" :icon="ShareIcon" :on-press="() => setActiveModal(ShareModal)" />
<ButtonIcon :icon="CloseIcon" :on-press="closeModal" />
<OptionsDropdown
v-if="isOptionsDropdown"
:on-distribution="() => setActiveModal(DistributionModal)"
:on-view-details="() => setActiveModal(DetailsModal)"
:on-download="download"
:on-share="() => setActiveModal(ShareModal)"
@ -116,11 +96,10 @@ import { Component, computed, onBeforeMount, onMounted, ref, Teleport, watch } f
import { useRoute } from 'vue-router';
import prettyBytes from 'pretty-bytes';
import { BrowserObject, PreviewCache, useObjectBrowserStore } from '@/store/modules/objectBrowserStore';
import { BrowserObject, useObjectBrowserStore } from '@/store/modules/objectBrowserStore';
import { AnalyticsErrorEventSource } from '@/utils/constants/analyticsEventNames';
import { useAppStore } from '@/store/modules/appStore';
import { useNotify } from '@/utils/hooks';
import { useBucketsStore } from '@/store/modules/bucketsStore';
import { RouteConfig } from '@/types/router';
import ButtonIcon from '@/components/browser/galleryView/ButtonIcon.vue';
@ -146,7 +125,6 @@ import ArrowIcon from '@/../static/images/browser/galleryView/arrow.svg';
const appStore = useAppStore();
const obStore = useObjectBrowserStore();
const bucketsStore = useBucketsStore();
const notify = useNotify();
const route = useRoute();
@ -161,13 +139,6 @@ const objectPreviewUrl = ref<string>('');
const folderType = 'folder';
/**
* Returns object preview URLs cache from store.
*/
const cachedObjectPreviewURLs = computed((): Map<string, PreviewCache> => {
return obStore.state.cachedObjectPreviewURLs;
});
/**
* Retrieve the file object that the modal is set to from the store.
*/
@ -203,13 +174,6 @@ const extension = computed((): string | undefined => {
return filePath.value.split('.').pop();
});
/**
* Returns bucket name from store.
*/
const bucket = computed((): string => {
return bucketsStore.state.fileComponentBucketName;
});
/**
* Check to see if the current file is an image file.
*/
@ -279,9 +243,6 @@ async function fetchPreviewAndMapUrl(): Promise<void> {
return;
}
const encodedPath = encodeURIComponent(`${bucket.value}/${filePath.value.trim()}`);
obStore.cacheObjectPreviewURL(encodedPath, { url, lastModified: file.value.LastModified.getTime() });
objectMapUrl.value = `${url}?map=1`;
objectPreviewUrl.value = `${url}?view=1`;
isLoading.value = false;
@ -292,6 +253,7 @@ async function fetchPreviewAndMapUrl(): Promise<void> {
*/
async function onDelete(): Promise<void> {
try {
const objectsCount = obStore.sortedFiles.length;
let newFile: BrowserObject | undefined = obStore.sortedFiles[fileIndex.value + 1];
if (!newFile || newFile.type === folderType) {
newFile = obStore.sortedFiles.find(f => f.type !== folderType && f.Key !== file.value.Key);
@ -320,13 +282,7 @@ async function onDelete(): Promise<void> {
async function download(): Promise<void> {
try {
await obStore.download(file.value);
const message = `
<p class="message-title">Downloading...</p>
<p class="message-info">
Keep this download link private.<br>If you want to share, use the Share option.
</p>
`;
notify.success('', message);
notify.warning('Do not share download link with other people. If you want to share this data better use "Share" option.');
} catch (error) {
notify.error('Can not download your file', AnalyticsErrorEventSource.OBJECT_DETAILS_MODAL);
}
@ -405,41 +361,11 @@ function setNewObjectPath(objectKey: string): void {
obStore.setObjectPathForModal(`${currentPath.value}${objectKey}`);
}
/**
* Loads object URL from cache or generates new URL.
*/
function processFilePath(): void {
const url = findCachedURL();
if (!url) {
fetchPreviewAndMapUrl();
return;
}
objectMapUrl.value = `${url}?map=1`;
objectPreviewUrl.value = `${url}?view=1`;
}
/**
* Try to find current object path in cache.
*/
function findCachedURL(): string | undefined {
const encodedPath = encodeURIComponent(`${bucket.value}/${filePath.value.trim()}`);
const cache = cachedObjectPreviewURLs.value.get(encodedPath);
if (!cache) return undefined;
if (cache.lastModified !== file.value.LastModified.getTime()) {
obStore.removeFromObjectPreviewCache(encodedPath);
return undefined;
}
return cache.url;
}
/**
* Call `fetchPreviewAndMapUrl` on before mount lifecycle method.
*/
onBeforeMount((): void => {
processFilePath();
fetchPreviewAndMapUrl();
});
onMounted((): void => {
@ -452,7 +378,7 @@ onMounted((): void => {
watch(filePath, () => {
if (!filePath.value) return;
processFilePath();
fetchPreviewAndMapUrl();
});
</script>
@ -574,16 +500,6 @@ watch(filePath, () => {
cursor: pointer;
min-width: 46px;
&:hover {
:deep(rect) {
&:first-of-type {
fill: rgb(255 255 255 / 10%);
}
}
}
@media screen and (width <= 600px) {
display: none;
}
@ -648,16 +564,6 @@ watch(filePath, () => {
svg {
width: 30px;
height: 30px;
&:hover {
:deep(rect) {
&:first-of-type {
fill: rgb(255 255 255 / 10%);
}
}
}
}
@media screen and (width <= 600px) {

View File

@ -3,20 +3,16 @@
<template>
<div class="options">
<div class="options__item" @click="onDistribution">
<MapIcon />
<p class="options__item__label">Distribution</p>
</div>
<div class="options__item" @click="onViewDetails">
<DetailsIcon />
<p class="options__item__label">View details</p>
</div>
<div class="options__item" @click="onDownload">
<DownloadIcon />
<SmallDownloadIcon />
<p class="options__item__label">Download</p>
</div>
<div class="options__item" @click="onShare">
<ShareIcon />
<SmallShareIcon />
<p class="options__item__label">Share</p>
</div>
<div class="options__item" @click="onDelete">
@ -28,13 +24,11 @@
<script setup lang="ts">
import DetailsIcon from '@/../static/images/browser/galleryView/details.svg';
import DownloadIcon from '@/../static/images/browser/galleryView/download.svg';
import ShareIcon from '@/../static/images/browser/galleryView/share.svg';
import SmallDownloadIcon from '@/../static/images/browser/galleryView/downloadSmall.svg';
import SmallShareIcon from '@/../static/images/browser/galleryView/shareSmall.svg';
import DeleteIcon from '@/../static/images/browser/galleryView/delete.svg';
import MapIcon from '@/../static/images/browser/galleryView/map.svg';
const props = defineProps<{
onDistribution: () => void
onViewDetails: () => void
onDownload: () => void
onShare: () => void
@ -65,15 +59,6 @@ const props = defineProps<{
cursor: pointer;
padding: 16px;
svg {
width: 18px;
height: 18px;
:deep(path) {
fill: var(--c-grey-6);
}
}
&__label {
margin-left: 16px;
font-size: 14px;

View File

@ -24,7 +24,7 @@
</p>
</div>
<div class="modal__item last">
<p class="modal__item__label">Bucket</p>
<p class="modal__item__label">Saved in</p>
<p class="modal__item__label right" :title="bucket">{{ bucket }}</p>
</div>
<VButton

File diff suppressed because it is too large Load Diff

View File

@ -27,7 +27,7 @@
</p>
<p v-else :class="{primary: index === 0}" :title="val" @click.stop="(e) => cellContentClicked(index, e)">
<middle-truncate v-if="keyVal === 'fileName'" :text="val" />
<project-ownership-tag v-else-if="keyVal === 'role'" :no-icon="!isProjectRoleIconShown(val)" :role="val" />
<project-ownership-tag v-else-if="keyVal === 'role'" :no-icon="itemType !== 'project' && val !== ProjectRole.Invited" :role="val" />
<span v-else>{{ val }}</span>
</p>
<div v-if="showBucketGuide(index)" class="animation">
@ -92,10 +92,6 @@ const customIconClasses = computed(() => {
return classes;
});
function isProjectRoleIconShown(role: ProjectRole) {
return props.itemType === 'project' || role === ProjectRole.Invited || role === ProjectRole.InviteExpired;
}
function selectClicked(event: Event): void {
emit('selectClicked', event);
}

View File

@ -64,8 +64,6 @@ import DocumentIcon from '@/../static/images/common/documentIcon.svg';
import DownloadIcon from '@/../static/images/common/download.svg';
import FolderIcon from '@/../static/images/objects/newFolder.svg';
import ResourcesIcon from '@/../static/images/navigation/resources.svg';
import UploadIcon from '@/../static/images/common/upload.svg';
import ProjectIcon from '@/../static/images/navigation/project.svg';
const props = withDefaults(defineProps<{
link?: string;
@ -121,8 +119,6 @@ const icons = new Map<string, string>([
['resources', ResourcesIcon],
['addcircle', AddCircleIcon],
['add', WhitePlusIcon],
['upload', UploadIcon],
['project', ProjectIcon],
]);
const iconComponent = computed((): string | undefined => icons.get(props.icon.toLowerCase()));
@ -317,7 +313,8 @@ function handleClick(): void {
background-color: #0059d0;
&.transparent,
&.blue-white {
&.blue-white,
&.white {
box-shadow: none !important;
background-color: #2683ff !important;
border: 1px solid #2683ff !important;
@ -332,20 +329,6 @@ function handleClick(): void {
}
}
&.white {
box-shadow: none !important;
border: 1px solid var(--c-blue-3) !important;
:deep(path),
:deep(rect) {
fill: var(--c-blue-3) !important;
}
.label {
color: var(--c-blue-3) !important;
}
}
&.grey-blue {
background-color: #2683ff !important;
border-color: #2683ff !important;

View File

@ -0,0 +1,86 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
<template>
<div class="header-container">
<div class="header-container__buttons-area">
<slot />
</div>
<div v-if="styleType === 'common'" class="search-container">
<VSearch
ref="searchInput"
:placeholder="placeholder"
:search="search"
/>
</div>
<div v-if="styleType === 'access'">
<VSearchAlternateStyling
ref="searchInput"
:placeholder="placeholder"
:search="search"
/>
</div>
</div>
</template>
<script setup lang="ts">
import { ref } from 'vue';
import VSearch from '@/components/common/VSearch.vue';
import VSearchAlternateStyling from '@/components/common/VSearchAlternateStyling.vue';
type searchCallback = (search: string) => Promise<void>;
const props = withDefaults(defineProps<{
placeholder: string;
search: searchCallback;
styleType?: string;
}>(), {
placeholder: '',
styleType: 'common',
});
const searchInput = ref<{ clearSearch: () => void }>();
function clearSearch(): void {
searchInput.value?.clearSearch();
}
defineExpose({ clearSearch });
</script>
<style scoped lang="scss">
.header-container {
width: 100%;
height: 85px;
position: relative;
display: flex;
align-items: center;
justify-content: space-between;
&__buttons-area {
width: auto;
display: flex;
align-items: center;
justify-content: space-between;
}
.search-container {
position: relative;
}
}
@media screen and (width <= 1150px) {
.header-container {
flex-direction: column;
align-items: flex-start;
margin-bottom: 75px;
.search-container {
width: 100%;
margin-top: 30px;
}
}
}
</style>

View File

@ -182,11 +182,6 @@ watch(() => props.initValue, (val, oldVal) => {
onBeforeMount(() => {
type.value = props.isPassword ? passwordType : textType;
if (props.initValue) {
value.value = props.initValue;
emit('setData', props.initValue);
}
});
</script>

View File

@ -75,29 +75,10 @@ onMounted((): void => {
&__close {
position: absolute;
right: 3px;
top: 3px;
padding: 10px;
border-radius: 16px;
right: 24px;
top: 24px;
cursor: pointer;
&:hover {
background-color: var(--c-grey-2);
}
&:active {
background-color: var(--c-grey-4);
}
svg {
display: block;
width: 12px;
height: 12px;
:deep(path) {
fill: var(--c-black);
}
}
opacity: 0.55;
}
}
}

View File

@ -2,46 +2,76 @@
// See LICENSE for copying information.
<template>
<div class="search-container">
<SearchIcon class="search-container__icon" />
<input
v-model="searchQuery"
class="search-container__input"
placeholder="Search"
type="text"
autocomplete="off"
readonly
maxlength="72"
@input="processSearchQuery"
@focus="removeReadOnly"
@blur="addReadOnly"
>
</div>
<input
ref="input"
v-model="searchQuery"
readonly
class="common-search-input"
:placeholder="`Search ${placeholder}`"
:style="style"
type="text"
autocomplete="off"
maxlength="72"
@mouseenter="onMouseEnter"
@mouseleave="onMouseLeave"
@input="processSearchQuery"
@focus="removeReadOnly"
@blur="addReadOnly"
>
</template>
<script setup lang="ts">
import { ref } from 'vue';
import { computed, ref } from 'vue';
import { useDOM } from '@/composables/DOM';
import SearchIcon from '@/../static/images/common/search.svg';
type searchCallback = (search: string) => Promise<void>;
interface SearchStyle {
width: string;
}
declare type searchCallback = (search: string) => Promise<void>;
const props = defineProps<{
search: searchCallback,
}>();
const props = withDefaults(defineProps<{
search: searchCallback;
placeholder?: string;
}>(), {
placeholder: '',
});
const { removeReadOnly, addReadOnly } = useDOM();
const inputWidth = ref<string>('56px');
const searchQuery = ref<string>('');
const input = ref<HTMLInputElement>();
const style = computed((): SearchStyle => {
return { width: inputWidth.value };
});
/**
* Clears search query.
* Expands search input.
*/
function onMouseEnter(): void {
inputWidth.value = '540px';
input.value?.focus();
}
/**
* Collapses search input if no search query.
*/
function onMouseLeave(): void {
if (!searchQuery.value) {
inputWidth.value = '56px';
input.value?.blur();
}
}
/**
* Clears search query and collapses input.
*/
function clearSearch(): void {
searchQuery.value = '';
processSearchQuery();
inputWidth.value = '56px';
}
async function processSearchQuery(): Promise<void> {
@ -52,37 +82,31 @@ defineExpose({ clearSearch });
</script>
<style scoped lang="scss">
.search-container {
padding: 8px;
display: flex;
align-items: center;
.common-search-input {
position: absolute;
right: 0;
bottom: 50%;
transform: translateY(50%);
padding: 0 38px 0 18px;
border: 1px solid #f2f2f2;
box-sizing: border-box;
border: 1px solid var(--c-grey-3);
border-radius: 10px;
width: 250px;
background-color: #fff;
@media screen and (width <= 1150px) {
width: 100%;
}
&__icon {
margin: 0 12px 0 4px;
}
&__input {
flex: 1;
background-color: transparent;
outline: none;
border: none;
font-family: 'font_regular', sans-serif;
font-size: 14px;
line-height: 20px;
}
box-shadow: 0 4px 4px rgb(231 232 238 / 60%);
outline: none;
border-radius: 36px;
height: 56px;
font-family: 'font_regular', sans-serif;
font-size: 16px;
transition: all 0.4s ease-in-out;
background-image: url('../../../static/images/common/search.png');
background-repeat: no-repeat;
background-size: 22px 22px;
background-position: top 16px right 16px;
}
::placeholder {
color: var(--c-grey-6);
opacity: 0.7;
@media screen and (width <= 1150px) {
.common-search-input {
width: 100% !important;
}
}
</style>

View File

@ -0,0 +1,78 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
<template>
<input
v-model="searchQuery"
class="access-search-input"
:placeholder="`Search ${placeholder}`"
type="text"
autocomplete="off"
readonly
maxlength="72"
@input="processSearchQuery"
@focus="removeReadOnly"
@blur="addReadOnly"
>
</template>
<script setup lang="ts">
import { ref } from 'vue';
import { useDOM } from '@/composables/DOM';
declare type searchCallback = (search: string) => Promise<void>;
const props = withDefaults(defineProps<{
placeholder?: string,
search: searchCallback,
}>(), { placeholder: '' });
const { removeReadOnly, addReadOnly } = useDOM();
const searchQuery = ref<string>('');
/**
* Clears search query.
*/
function clearSearch(): void {
searchQuery.value = '';
processSearchQuery();
}
async function processSearchQuery(): Promise<void> {
await props.search(searchQuery.value);
}
defineExpose({ clearSearch });
</script>
<style scoped lang="scss">
.access-search-input {
position: absolute;
left: 0;
bottom: 0;
padding: 0 10px 0 50px;
box-sizing: border-box;
outline: none;
border: 1px solid var(--c-grey-3);
border-radius: 10px;
height: 40px;
width: 250px;
font-family: 'font_regular', sans-serif;
font-size: 16px;
background-color: #fff;
background-image: url('../../../static/images/common/search-gray.png');
background-repeat: no-repeat;
background-size: 22px 22px;
background-position: top 8px left 14px;
@media screen and (width <= 1150px) {
width: 100%;
}
}
::placeholder {
color: #afb7c1;
}
</style>

View File

@ -18,10 +18,8 @@
<div class="timeout-modal__divider" />
<div>
<p class="timeout-modal__label">Session timeout duration</p>
<timeout-selector :selected="sessionDuration" @select="durationChange" />
</div>
<p class="timeout-modal__label">Session timeout duration</p>
<timeout-selector :selected="sessionDuration" @select="durationChange" />
<div class="timeout-modal__divider" />
@ -29,19 +27,21 @@
<VButton
label="Cancel"
width="100%"
height="40px"
border-radius="10px"
font-size="13px"
is-white
class="timeout-modal__buttons__button cancel"
class="timeout-modal__buttons__button"
:on-press="() => withLoading(onClose)"
:is-disabled="isLoading"
/>
<VButton
label="Save"
width="100%"
height="40px"
border-radius="10px"
font-size="13px"
class="timeout-modal__buttons__button"
class="timeout-modal__buttons__button save"
:on-press="() => withLoading(save)"
:is-disabled="isLoading || !hasChanged"
/>
@ -133,55 +133,51 @@ function onClose(): void {
<style scoped lang="scss">
.timeout-modal {
width: calc(100vw - 48px);
max-width: 410px;
padding: 32px;
display: flex;
flex-direction: column;
gap: 16px;
box-sizing: border-box;
font-family: 'font_regular', sans-serif;
text-align: left;
@media screen and (width <= 400px) {
width: 100vw;
}
&__header {
display: flex;
align-items: center;
gap: 20px;
margin: 20px 0;
@media screen and (width <= 500px) {
flex-direction: column;
align-items: flex-start;
gap: 10px;
}
&__icon {
height: 40px;
width: 40px;
flex-shrink: 0;
}
&__title {
font-family: 'font_bold', sans-serif;
font-size: 24px;
line-height: 31px;
font-size: 28px;
line-height: 36px;
}
}
&__divider {
height: 1px;
background-color: var(--c-grey-2);
margin: 20px 0;
border: 1px solid var(--c-grey-2);
}
&__info {
font-family: 'font_regular', sans-serif;
font-size: 14px;
line-height: 20px;
font-size: 16px;
line-height: 24px;
}
&__label {
margin-bottom: 4px;
font-family: 'font_medium', sans-serif;
font-family: 'font_regular', sans-serif;
font-size: 14px;
line-height: 20px;
color: var(--c-blue-6);
line-height: 24px;
margin-bottom: 10px;
}
&__buttons {
@ -195,14 +191,6 @@ function onClose(): void {
&__button {
padding: 16px;
box-sizing: border-box;
&.cancel {
box-shadow: 0 0 20px rgb(0 0 0 / 4%);
:deep(.label) {
color: var(--c-black) !important;
}
}
}
}
}

View File

@ -50,7 +50,7 @@ import { ref } from 'vue';
import AccessEncryptionIcon from '../../../static/images/accessGrants/newCreateFlow/accessEncryption.svg';
import { AnalyticsHttpApi } from '@/api/analytics';
import { AnalyticsErrorEventSource, AnalyticsEvent } from '@/utils/constants/analyticsEventNames';
import { AnalyticsErrorEventSource } from '@/utils/constants/analyticsEventNames';
import { useAppStore } from '@/store/modules/appStore';
import { useBucketsStore } from '@/store/modules/bucketsStore';
import { MODALS } from '@/utils/constants/appStatePopUps';
@ -78,10 +78,6 @@ function onContinue(): void {
return;
}
analytics.eventTriggered(AnalyticsEvent.PASSPHRASE_CREATED, {
method: 'enter',
});
bucketsStore.setPassphrase(passphrase.value);
bucketsStore.setPromptForPassphrase(false);

View File

@ -46,8 +46,7 @@ import { RouteConfig } from '@/types/router';
import { EdgeCredentials } from '@/types/accessGrants';
import { useAppStore } from '@/store/modules/appStore';
import { useBucketsStore } from '@/store/modules/bucketsStore';
import { AnalyticsErrorEventSource, AnalyticsEvent } from '@/utils/constants/analyticsEventNames';
import { AnalyticsHttpApi } from '@/api/analytics';
import { AnalyticsErrorEventSource } from '@/utils/constants/analyticsEventNames';
import VModal from '@/components/common/VModal.vue';
import SelectPassphraseModeStep from '@/components/modals/createProjectPassphrase/SelectPassphraseModeStep.vue';
@ -73,8 +72,6 @@ const notify = useNotify();
const router = useRouter();
const route = useRoute();
const analytics: AnalyticsHttpApi = new AnalyticsHttpApi();
const generatedPassphrase = generateMnemonic();
const selectedOption = ref<CreatePassphraseOption>(CreatePassphraseOption.Generate);
@ -139,10 +136,6 @@ async function onContinue(): Promise<void> {
return;
}
analytics.eventTriggered(AnalyticsEvent.PASSPHRASE_CREATED, {
method: selectedOption.value === CreatePassphraseOption.Enter ? 'enter' : 'generate',
});
bucketsStore.setEdgeCredentials(new EdgeCredentials());
bucketsStore.setPassphrase(passphrase.value);
bucketsStore.setPromptForPassphrase(false);

View File

@ -23,7 +23,7 @@
@click.stop="() => select(option)"
@keyup.enter="() => select(option)"
>
<span>{{ option.shortString }}</span>
<span class="selector__dropdown__item__label">{{ option.shortString }}</span>
</div>
</div>
</div>
@ -118,13 +118,13 @@ function toggleSelector() {
justify-content: space-between;
position: relative;
padding: 10px 14px;
cursor: pointer;
&__label {
font-family: 'font_regular', sans-serif;
font-size: 14px;
line-height: 20px;
color: var(--c-grey-6);
cursor: default;
}
&__arrow {
@ -149,7 +149,10 @@ function toggleSelector() {
&__item {
padding: 10px;
cursor: pointer;
&__label {
cursor: default;
}
&.selected {
background: var(--c-grey-1);

View File

@ -5,7 +5,9 @@
<div class="navigation-area">
<div class="navigation-area__container">
<header class="navigation-area__container__header">
<LogoIcon class="navigation-area__container__header__logo" @click.stop="onLogoClick" />
<div class="navigation-area__container__header__logo" @click.stop="onLogoClick">
<LogoIcon />
</div>
<CrossIcon v-if="isOpened" @click="toggleNavigation" />
<MenuIcon v-else @click="toggleNavigation" />
</header>
@ -27,59 +29,24 @@
<div v-if="isLoading" class="project-selection__dropdown__loader-container">
<VLoader width="30px" height="30px" />
</div>
<template v-else>
<div v-if="ownProjects.length" class="project-selection__dropdown__section-head">
<ProjectIcon />
<span class="project-selection__dropdown__section-head__tag">My Projects</span>
</div>
<div class="project-selection__dropdown__items">
<div
v-for="project in ownProjects"
:key="project.id"
class="project-selection__dropdown__items__choice"
@click.prevent.stop="onProjectSelected(project.id)"
@keyup.enter="onProjectSelected(project.id)"
>
<div v-if="project.isSelected" class="project-selection__dropdown__items__choice__mark-container">
<CheckmarkIcon class="project-selection__dropdown__items__choice__mark-container__image" />
</div>
<p
:class="{
'project-selection__dropdown__items__choice__unselected': !project.isSelected,
'project-selection__dropdown__items__choice__selected': project.isSelected,
}"
>
{{ project.name }}
</p>
<div v-else class="project-selection__dropdown__items">
<div class="project-selection__dropdown__items__choice" @click.prevent.stop="toggleProjectDropdown">
<div class="project-selection__dropdown__items__choice__mark-container">
<CheckmarkIcon class="project-selection__dropdown__items__choice__mark-container__image" />
</div>
<p class="project-selection__dropdown__items__choice__selected">
{{ selectedProject.name }}
</p>
</div>
<div v-if="sharedProjects.length" class="project-selection__dropdown__section-head shared">
<ProjectIcon />
<span class="project-selection__dropdown__section-head__tag shared">Shared with me</span>
<div
v-for="project in projects"
:key="project.id"
class="project-selection__dropdown__items__choice"
@click.prevent.stop="onProjectSelected(project.id)"
>
<p class="project-selection__dropdown__items__choice__unselected">{{ project.name }}</p>
</div>
<div class="project-selection__dropdown__items">
<div
v-for="project in sharedProjects"
:key="project.id"
class="project-selection__dropdown__items__choice"
@click.prevent.stop="onProjectSelected(project.id)"
@keyup.enter="onProjectSelected(project.id)"
>
<div v-if="project.isSelected" class="project-selection__dropdown__items__choice__mark-container">
<CheckmarkIcon class="project-selection__dropdown__items__choice__mark-container__image" />
</div>
<p
:class="{
'project-selection__dropdown__items__choice__unselected': !project.isSelected,
'project-selection__dropdown__items__choice__selected': project.isSelected,
}"
>
{{ project.name }}
</p>
</div>
</div>
</template>
</div>
<div v-if="isAllProjectsDashboard && isProjectOwner" tabindex="0" class="project-selection__dropdown__link-container" @click.stop="onProjectDetailsClick" @keyup.enter="onProjectDetailsClick">
<SettingsIcon />
<p class="project-selection__dropdown__link-container__label">Project Settings</p>
@ -307,19 +274,10 @@ const isAllProjectsDashboard = computed((): boolean => {
});
/**
* Returns user's own projects.
* Returns projects list from store.
*/
const ownProjects = computed((): Project[] => {
const projects = projectsStore.projects.filter((p) => p.ownerId === usersStore.state.user.id);
return projects.sort(compareProjects);
});
/**
* Returns projects the user is invited to.
*/
const sharedProjects = computed((): Project[] => {
const projects = projectsStore.projects.filter((p) => p.ownerId !== usersStore.state.user.id);
return projects.sort(compareProjects);
const projects = computed((): Project[] => {
return projectsStore.projectsWithoutSelected;
});
/**
@ -350,15 +308,6 @@ const user = computed((): User => {
return usersStore.state.user;
});
/**
* This comparator is used to sort projects by isSelected.
*/
function compareProjects(a: Project, b: Project) {
if (a.isSelected) return -1;
if (b.isSelected) return 1;
return 0;
}
/**
* Redirects to project dashboard.
*/
@ -545,7 +494,10 @@ function navigateToBilling(): void {
isOpened.value = false;
if (route.path.includes(RouteConfig.Billing.path)) return;
const link = RouteConfig.Account.with(RouteConfig.Billing.with(RouteConfig.BillingOverview));
let link = RouteConfig.Account.with(RouteConfig.Billing);
if (configStore.state.config.newBillingScreen) {
link = link.with(RouteConfig.BillingOverview);
}
router.push(link.path);
analytics.pageVisit(link.path);
}
@ -618,14 +570,21 @@ async function onLogout(): Promise<void> {
display: flex;
width: 100%;
box-sizing: border-box;
padding: 0 24px;
padding: 0 32px;
justify-content: space-between;
align-items: center;
height: 4rem;
&__logo {
height: 30px;
width: auto;
width: 211px;
max-width: 211px;
height: 37px;
max-height: 37px;
svg {
width: 211px;
height: 37px;
}
}
}
@ -804,36 +763,6 @@ async function onLogout(): Promise<void> {
border-radius: 8px 8px 0 0;
}
&__section-head {
display: flex;
align-items: center;
gap: 24px;
height: 48px;
box-sizing: border-box;
padding: 8px 32px;
&.shared {
border-top: 1px solid var(--c-grey-2);
}
&__tag {
border: 1px solid var(--c-purple-2);
border-radius: 24px;
padding: 2px 8px;
text-align: center;
font-size: 12px;
font-weight: 600;
line-height: 18px;
color: var(--c-purple-4);
background: var(--c-white);
&.shared {
border: 1px solid var(--c-yellow-2);
color: var(--c-yellow-5);
}
}
}
&__items {
overflow-y: auto;
background-color: #fff;

View File

@ -23,61 +23,25 @@
<div v-if="isLoading" class="project-selection__dropdown__loader-container">
<VLoader width="30px" height="30px" />
</div>
<template v-else>
<div v-if="ownProjects.length" class="project-selection__dropdown__section-head">
<ProjectIcon />
<span class="project-selection__dropdown__section-head__tag">My Projects</span>
</div>
<div class="project-selection__dropdown__items">
<div
v-for="project in ownProjects"
:key="project.id"
class="project-selection__dropdown__items__choice"
@click.prevent.stop="onProjectSelected(project.id)"
@keyup.enter="onProjectSelected(project.id)"
>
<div v-if="project.isSelected" class="project-selection__dropdown__items__choice__mark-container">
<CheckmarkIcon class="project-selection__dropdown__items__choice__mark-container__image" />
</div>
<p
:class="{
'project-selection__dropdown__items__choice__unselected': !project.isSelected,
'project-selection__dropdown__items__choice__selected': project.isSelected,
}"
>
{{ project.name }}
</p>
<div v-else class="project-selection__dropdown__items">
<div tabindex="0" class="project-selection__dropdown__items__choice" @click.prevent.stop="closeDropdown">
<div class="project-selection__dropdown__items__choice__mark-container">
<CheckmarkIcon class="project-selection__dropdown__items__choice__mark-container__image" />
</div>
<p class="project-selection__dropdown__items__choice__selected">
{{ selectedProject.name }}
</p>
</div>
<div v-if="sharedProjects.length" class="project-selection__dropdown__section-head shared">
<ProjectIcon />
<span class="project-selection__dropdown__section-head__tag shared">Shared with me</span>
<div
v-for="project in projects"
:key="project.id"
class="project-selection__dropdown__items__choice"
@click.prevent.stop="onProjectSelected(project.id)"
@keyup.enter="onProjectSelected(project.id)"
>
<p class="project-selection__dropdown__items__choice__unselected">{{ project.name }}</p>
</div>
<div class="project-selection__dropdown__items">
<div
v-for="project in sharedProjects"
:key="project.id"
class="project-selection__dropdown__items__choice"
@click.prevent.stop="onProjectSelected(project.id)"
@keyup.enter="onProjectSelected(project.id)"
>
<div v-if="project.isSelected" class="project-selection__dropdown__items__choice__mark-container">
<CheckmarkIcon class="project-selection__dropdown__items__choice__mark-container__image" />
</div>
<p
:class="{
'project-selection__dropdown__items__choice__unselected': !project.isSelected,
'project-selection__dropdown__items__choice__selected': project.isSelected,
}"
>
{{ project.name }}
</p>
</div>
</div>
</template>
</div>
<div v-if="isAllProjectsDashboard && isProjectOwner" tabindex="0" class="project-selection__dropdown__link-container" @click.stop="onProjectDetailsClick" @keyup.enter="onProjectDetailsClick">
<SettingsIcon />
<p class="project-selection__dropdown__link-container__label">Project Settings</p>
@ -189,19 +153,10 @@ const isDropdownShown = computed((): boolean => {
});
/**
* Returns user's own projects.
* Returns projects list from store.
*/
const ownProjects = computed((): Project[] => {
const projects = projectsStore.projects.filter((p) => p.ownerId === userStore.state.user.id);
return projects.sort(compareProjects);
});
/**
* Returns projects the user is invited to.
*/
const sharedProjects = computed((): Project[] => {
const projects = projectsStore.projects.filter((p) => p.ownerId !== userStore.state.user.id);
return projects.sort(compareProjects);
const projects = computed((): Project[] => {
return projectsStore.projectsWithoutSelected;
});
/**
@ -218,15 +173,6 @@ const isBucketsView = computed((): boolean => {
return route.path.includes(RouteConfig.Buckets.path);
});
/**
* This comparator is used to sort projects by isSelected.
*/
function compareProjects(a: Project, b: Project) {
if (a.isSelected) return -1;
if (b.isSelected) return 1;
return 0;
}
/**
* Fetches projects related information and than toggles selection popup.
*/
@ -482,36 +428,6 @@ function onCreateLinkClick(): void {
border-radius: 8px 8px 0 0;
}
&__section-head {
display: flex;
align-items: center;
gap: 24px;
height: 48px;
box-sizing: border-box;
padding: 8px 16px;
&.shared {
border-top: 1px solid var(--c-grey-2);
}
&__tag {
border: 1px solid var(--c-purple-2);
border-radius: 24px;
padding: 2px 8px;
text-align: center;
font-size: 12px;
font-weight: 600;
line-height: 18px;
color: var(--c-purple-4);
background: var(--c-white);
&.shared {
border: 1px solid var(--c-yellow-2);
color: var(--c-yellow-5);
}
}
}
&__items {
overflow-y: auto;
max-height: 250px;
@ -530,7 +446,7 @@ function onCreateLinkClick(): void {
&__unselected {
font-size: 14px;
line-height: 20px;
color: var(--c-grey-6);
color: #1b2533;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;

Some files were not shown because too many files have changed in this diff Show More