satellite/satellitedb: move IterateBucketLocations sql to dbx

This is an attempt to move the query back to dbx.

It also removes one unused method.

Change-Id: I8182dd8ecf794cdf0cb3158c36cc00810fc683df
Authored by Michal Niewrzal on 2023-09-14 11:04:33 +02:00; committed by Storj Robot
parent 7ba8a627bc
commit d7af97c919
4 changed files with 138 additions and 104 deletions
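
For quick orientation, the change swaps the hand-written pagination query in IterateBucketLocations

    SELECT project_id, name
    FROM bucket_metainfos
    WHERE (project_id, name) > ($1, $2)
    GROUP BY (project_id, name)
    ORDER BY (project_id, name) ASC LIMIT $3

for a dbx-defined read whose generated SQL (quoted from the generated code further down, reflowed here for readability) is

    SELECT bucket_metainfos.project_id, bucket_metainfos.name
    FROM bucket_metainfos
    WHERE bucket_metainfos.project_id >= ? AND bucket_metainfos.name > ?
    GROUP BY bucket_metainfos.project_id, bucket_metainfos.name
    ORDER BY bucket_metainfos.project_id, bucket_metainfos.name
    LIMIT ? OFFSET ?

Note that the dbx read expresses the cursor as two separate comparisons (project_id >= ?, name > ?) rather than the row-value comparison (project_id, name) > ($1, $2) used by the raw query.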


@@ -100,8 +100,6 @@ type DB interface {
 	GetMinimalBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (bucket MinimalBucket, err error)
 	// HasBucket returns if a bucket exists.
 	HasBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (exists bool, err error)
-	// GetBucketID returns an existing bucket id.
-	GetBucketID(ctx context.Context, bucket metabase.BucketLocation) (id uuid.UUID, err error)
 	// UpdateBucket updates an existing bucket
 	UpdateBucket(ctx context.Context, bucket Bucket) (_ Bucket, err error)
 	// UpdateUserAgent updates buckets user agent.


@@ -8,8 +8,6 @@ import (
 	"database/sql"
 	"errors"
 
-	"github.com/zeebo/errs"
-
 	"storj.io/common/macaroon"
 	"storj.io/common/storj"
 	"storj.io/common/uuid"
@@ -131,27 +129,6 @@ func (db *bucketsDB) HasBucket(ctx context.Context, bucketName []byte, projectID
 	return exists, buckets.ErrBucket.Wrap(err)
 }
 
-// GetBucketID returns an existing bucket id.
-func (db *bucketsDB) GetBucketID(ctx context.Context, bucket metabase.BucketLocation) (_ uuid.UUID, err error) {
-	defer mon.Task()(&ctx)(&err)
-	dbxID, err := db.db.Get_BucketMetainfo_Id_By_ProjectId_And_Name(ctx,
-		dbx.BucketMetainfo_ProjectId(bucket.ProjectID[:]),
-		dbx.BucketMetainfo_Name([]byte(bucket.BucketName)),
-	)
-	if err != nil {
-		if errors.Is(err, sql.ErrNoRows) {
-			return uuid.UUID{}, buckets.ErrBucketNotFound.New("%s", bucket.BucketName)
-		}
-		return uuid.UUID{}, buckets.ErrBucket.Wrap(err)
-	}
-
-	id, err := uuid.FromBytes(dbxID.Id)
-	if err != nil {
-		return id, buckets.ErrBucket.Wrap(err)
-	}
-	return id, err
-}
-
 // UpdateBucket updates a bucket.
 func (db *bucketsDB) UpdateBucket(ctx context.Context, bucket buckets.Bucket) (_ buckets.Bucket, err error) {
 	defer mon.Task()(&ctx)(&err)
@@ -336,32 +313,26 @@ func (db *bucketsDB) IterateBucketLocations(ctx context.Context, projectID uuid.
 	var result []metabase.BucketLocation
 
 	moreLimit := limit + 1
-	rows, err := db.db.QueryContext(ctx, `
-		SELECT project_id, name
-		FROM bucket_metainfos
-		WHERE (project_id, name) > ($1, $2)
-		GROUP BY (project_id, name)
-		ORDER BY (project_id, name) ASC LIMIT $3
-	`, projectID, []byte(bucketName), moreLimit)
+	rows, err := db.db.Limited_BucketMetainfo_ProjectId_BucketMetainfo_Name_By_ProjectId_GreaterOrEqual_And_Name_Greater_GroupBy_ProjectId_Name_OrderBy_Asc_ProjectId_Asc_Name(
+		ctx,
+		dbx.BucketMetainfo_ProjectId(projectID[:]),
+		dbx.BucketMetainfo_Name([]byte(bucketName)),
+		moreLimit,
+		0,
+	)
 	if err != nil {
-		return false, buckets.ErrBucket.New("BatchBuckets query error: %s", err)
+		return false, Error.Wrap(err)
 	}
-	defer func() {
-		err = errs.Combine(err, Error.Wrap(rows.Close()))
-	}()
 
-	for rows.Next() {
-		var bucketLocation metabase.BucketLocation
-		if err = rows.Scan(&bucketLocation.ProjectID, &bucketLocation.BucketName); err != nil {
-			return false, buckets.ErrBucket.New("bucket location scan error: %s", err)
+	for _, row := range rows {
+		projectID, err := uuid.FromBytes(row.ProjectId)
+		if err != nil {
+			return false, Error.Wrap(err)
 		}
-		result = append(result, bucketLocation)
-	}
-	if err = rows.Err(); err != nil {
-		return false, buckets.ErrBucket.Wrap(err)
+		result = append(result, metabase.BucketLocation{
+			ProjectID:  projectID,
+			BucketName: string(row.Name),
+		})
 	}
 
 	if len(result) == 0 {
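
The moreLimit := limit + 1 above is a look-ahead: the method asks for one row more than the caller wants so it can tell whether another page exists. The tail of the function is cut off in this view; a minimal sketch of how such a look-ahead is typically resolved (illustrative only, not the exact satellite code) looks like this:

    // Illustrative sketch of the limit+1 look-ahead, assuming the method
    // reports a `more` flag to its caller; names here are for illustration.
    more := len(result) > limit
    if more {
        // Drop the extra look-ahead row before handing results to the caller.
        result = result[:limit]
    }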


@@ -82,12 +82,6 @@ read one (
 	where bucket_metainfo.name = ?
 )
 
-read one (
-	select bucket_metainfo.id
-	where bucket_metainfo.project_id = ?
-	where bucket_metainfo.name = ?
-)
-
 read one (
 	select bucket_metainfo.placement
 	where bucket_metainfo.project_id = ?
@@ -130,6 +124,17 @@ read count (
 	where bucket_metainfo.project_id = ?
 )
 
+read limitoffset (
+	select bucket_metainfo.project_id bucket_metainfo.name
+	where bucket_metainfo.project_id >= ?
+	where bucket_metainfo.name > ?
+	groupby bucket_metainfo.project_id bucket_metainfo.name
+	orderby (
+		asc bucket_metainfo.project_id
+		asc bucket_metainfo.name
+	)
+)
+
 // value_attribution table contains information about which user-agent
 // is used to create the project. It's being stored outside of the projects
 // table because this information can be still needed after deleting the
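
For reference, dbx expands a read limitoffset definition into a Limited_* method on the generated Methods interface; the definition added above corresponds to the following generated signature (it appears verbatim in the generated code below):

    Limited_BucketMetainfo_ProjectId_BucketMetainfo_Name_By_ProjectId_GreaterOrEqual_And_Name_Greater_GroupBy_ProjectId_Name_OrderBy_Asc_ProjectId_Asc_Name(ctx context.Context,
        bucket_metainfo_project_id_greater_or_equal BucketMetainfo_ProjectId_Field,
        bucket_metainfo_name_greater BucketMetainfo_Name_Field,
        limit int, offset int64) (
        rows []*ProjectId_Name_Row, err error)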


@@ -12012,6 +12012,11 @@ type Placement_Row struct {
 	Placement *int
 }
 
+type ProjectId_Name_Row struct {
+	ProjectId []byte
+	Name      []byte
+}
+
 type ProjectLimit_Row struct {
 	ProjectLimit int
 }
@@ -16300,29 +16305,6 @@ func (obj *pgxImpl) Get_BucketMetainfo_CreatedAt_By_ProjectId_And_Name(ctx conte
 }
 
-func (obj *pgxImpl) Get_BucketMetainfo_Id_By_ProjectId_And_Name(ctx context.Context,
-	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
-	bucket_metainfo_name BucketMetainfo_Name_Field) (
-	row *Id_Row, err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")
-
-	var __values []interface{}
-	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())
-
-	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
-	obj.logStmt(__stmt, __values...)
-
-	row = &Id_Row{}
-	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.Id)
-	if err != nil {
-		return (*Id_Row)(nil), obj.makeErr(err)
-	}
-	return row, nil
-}
-
 func (obj *pgxImpl) Get_BucketMetainfo_Placement_By_ProjectId_And_Name(ctx context.Context,
 	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
 	bucket_metainfo_name BucketMetainfo_Name_Field) (
@@ -16513,6 +16495,56 @@ func (obj *pgxImpl) Count_BucketMetainfo_Name_By_ProjectId(ctx context.
 }
 
+func (obj *pgxImpl) Limited_BucketMetainfo_ProjectId_BucketMetainfo_Name_By_ProjectId_GreaterOrEqual_And_Name_Greater_GroupBy_ProjectId_Name_OrderBy_Asc_ProjectId_Asc_Name(ctx context.Context,
+	bucket_metainfo_project_id_greater_or_equal BucketMetainfo_ProjectId_Field,
+	bucket_metainfo_name_greater BucketMetainfo_Name_Field,
+	limit int, offset int64) (
+	rows []*ProjectId_Name_Row, err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.project_id, bucket_metainfos.name FROM bucket_metainfos WHERE bucket_metainfos.project_id >= ? AND bucket_metainfos.name > ? GROUP BY bucket_metainfos.project_id, bucket_metainfos.name ORDER BY bucket_metainfos.project_id, bucket_metainfos.name LIMIT ? OFFSET ?")
+
+	var __values []interface{}
+	__values = append(__values, bucket_metainfo_project_id_greater_or_equal.value(), bucket_metainfo_name_greater.value())
+
+	__values = append(__values, limit, offset)
+
+	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
+	obj.logStmt(__stmt, __values...)
+
+	for {
+		rows, err = func() (rows []*ProjectId_Name_Row, err error) {
+			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
+			if err != nil {
+				return nil, err
+			}
+			defer __rows.Close()
+
+			for __rows.Next() {
+				row := &ProjectId_Name_Row{}
+				err = __rows.Scan(&row.ProjectId, &row.Name)
+				if err != nil {
+					return nil, err
+				}
+				rows = append(rows, row)
+			}
+			err = __rows.Err()
+			if err != nil {
+				return nil, err
+			}
+			return rows, nil
+		}()
+		if err != nil {
+			if obj.shouldRetry(err) {
+				continue
+			}
+			return nil, obj.makeErr(err)
+		}
+		return rows, nil
+	}
+}
+
 func (obj *pgxImpl) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
 	value_attribution_project_id ValueAttribution_ProjectId_Field,
 	value_attribution_bucket_name ValueAttribution_BucketName_Field) (
@@ -24437,29 +24469,6 @@ func (obj *pgxcockroachImpl) Get_BucketMetainfo_CreatedAt_By_ProjectId_And_Name(
 }
 
-func (obj *pgxcockroachImpl) Get_BucketMetainfo_Id_By_ProjectId_And_Name(ctx context.Context,
-	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
-	bucket_metainfo_name BucketMetainfo_Name_Field) (
-	row *Id_Row, err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?")
-
-	var __values []interface{}
-	__values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value())
-
-	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
-	obj.logStmt(__stmt, __values...)
-
-	row = &Id_Row{}
-	err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.Id)
-	if err != nil {
-		return (*Id_Row)(nil), obj.makeErr(err)
-	}
-	return row, nil
-}
-
 func (obj *pgxcockroachImpl) Get_BucketMetainfo_Placement_By_ProjectId_And_Name(ctx context.Context,
 	bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
 	bucket_metainfo_name BucketMetainfo_Name_Field) (
@@ -24650,6 +24659,56 @@ func (obj *pgxcockroachImpl) Count_BucketMetainfo_Name_By_ProjectId(ctx context.
 }
 
+func (obj *pgxcockroachImpl) Limited_BucketMetainfo_ProjectId_BucketMetainfo_Name_By_ProjectId_GreaterOrEqual_And_Name_Greater_GroupBy_ProjectId_Name_OrderBy_Asc_ProjectId_Asc_Name(ctx context.Context,
+	bucket_metainfo_project_id_greater_or_equal BucketMetainfo_ProjectId_Field,
+	bucket_metainfo_name_greater BucketMetainfo_Name_Field,
+	limit int, offset int64) (
+	rows []*ProjectId_Name_Row, err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.project_id, bucket_metainfos.name FROM bucket_metainfos WHERE bucket_metainfos.project_id >= ? AND bucket_metainfos.name > ? GROUP BY bucket_metainfos.project_id, bucket_metainfos.name ORDER BY bucket_metainfos.project_id, bucket_metainfos.name LIMIT ? OFFSET ?")
+
+	var __values []interface{}
+	__values = append(__values, bucket_metainfo_project_id_greater_or_equal.value(), bucket_metainfo_name_greater.value())
+
+	__values = append(__values, limit, offset)
+
+	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
+	obj.logStmt(__stmt, __values...)
+
+	for {
+		rows, err = func() (rows []*ProjectId_Name_Row, err error) {
+			__rows, err := obj.driver.QueryContext(ctx, __stmt, __values...)
+			if err != nil {
+				return nil, err
+			}
+			defer __rows.Close()
+
+			for __rows.Next() {
+				row := &ProjectId_Name_Row{}
+				err = __rows.Scan(&row.ProjectId, &row.Name)
+				if err != nil {
+					return nil, err
+				}
+				rows = append(rows, row)
+			}
+			err = __rows.Err()
+			if err != nil {
+				return nil, err
+			}
+			return rows, nil
+		}()
+		if err != nil {
+			if obj.shouldRetry(err) {
+				continue
+			}
+			return nil, obj.makeErr(err)
+		}
+		return rows, nil
+	}
+}
+
 func (obj *pgxcockroachImpl) Get_ValueAttribution_By_ProjectId_And_BucketName(ctx context.Context,
 	value_attribution_project_id ValueAttribution_ProjectId_Field,
 	value_attribution_bucket_name ValueAttribution_BucketName_Field) (
@@ -28817,11 +28876,6 @@ type Methods interface {
 		bucket_metainfo_name BucketMetainfo_Name_Field) (
 		row *CreatedAt_Row, err error)
 
-	Get_BucketMetainfo_Id_By_ProjectId_And_Name(ctx context.Context,
-		bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
-		bucket_metainfo_name BucketMetainfo_Name_Field) (
-		row *Id_Row, err error)
-
 	Get_BucketMetainfo_Placement_By_ProjectId_And_Name(ctx context.Context,
 		bucket_metainfo_project_id BucketMetainfo_ProjectId_Field,
 		bucket_metainfo_name BucketMetainfo_Name_Field) (
@@ -29027,6 +29081,12 @@ type Methods interface {
 		limit int, offset int64) (
 		rows []*BucketMetainfo, err error)
 
+	Limited_BucketMetainfo_ProjectId_BucketMetainfo_Name_By_ProjectId_GreaterOrEqual_And_Name_Greater_GroupBy_ProjectId_Name_OrderBy_Asc_ProjectId_Asc_Name(ctx context.Context,
+		bucket_metainfo_project_id_greater_or_equal BucketMetainfo_ProjectId_Field,
+		bucket_metainfo_name_greater BucketMetainfo_Name_Field,
+		limit int, offset int64) (
+		rows []*ProjectId_Name_Row, err error)
+
 	Limited_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Context,
 		project_created_at_less Project_CreatedAt_Field,
 		limit int, offset int64) (