2019-01-24 20:15:10 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
2018-10-25 18:11:28 +01:00
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package postgreskv
|
|
|
|
|
|
|
|
import (
|
2019-06-05 15:23:10 +01:00
|
|
|
"context"
|
2018-10-25 18:11:28 +01:00
|
|
|
"database/sql"
|
2020-07-14 14:04:38 +01:00
|
|
|
"errors"
|
2020-07-09 16:51:04 +01:00
|
|
|
"sort"
|
2018-10-25 18:11:28 +01:00
|
|
|
|
2019-11-08 20:40:39 +00:00
|
|
|
"github.com/spacemonkeygo/monkit/v3"
|
2018-10-25 18:11:28 +01:00
|
|
|
"github.com/zeebo/errs"
|
|
|
|
|
2019-11-14 19:46:15 +00:00
|
|
|
"storj.io/storj/private/dbutil"
|
2020-01-16 23:48:59 +00:00
|
|
|
"storj.io/storj/private/dbutil/pgutil"
|
2020-01-17 20:07:00 +00:00
|
|
|
"storj.io/storj/private/tagsql"
|
2018-10-25 18:11:28 +01:00
|
|
|
"storj.io/storj/storage"
|
|
|
|
"storj.io/storj/storage/postgreskv/schema"
|
|
|
|
)
|
|
|
|
|
2019-06-04 22:30:21 +01:00
|
|
|
var (
|
|
|
|
mon = monkit.Package()
|
|
|
|
)
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// Client is the entrypoint into a postgreskv data store.
type Client struct {
	// db is the tagged database handle used for all queries.
	db tagsql.DB
	// dbURL is the connection URL; retained so MigrateToLatest can run schema preparation.
	dbURL string
	// lookupLimit caps the number of keys accepted by bulk operations (see GetAll, Iterate).
	lookupLimit int
}
|
|
|
|
|
2020-10-29 08:01:53 +00:00
|
|
|
// Open connects a new postgreskv client given db URL.
|
|
|
|
func Open(ctx context.Context, dbURL string) (*Client, error) {
|
2020-01-16 23:48:59 +00:00
|
|
|
dbURL = pgutil.CheckApplicationName(dbURL)
|
|
|
|
|
2020-06-28 04:56:29 +01:00
|
|
|
db, err := tagsql.Open("pgx", dbURL)
|
2018-10-25 18:11:28 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-05-21 15:30:06 +01:00
|
|
|
|
2020-03-23 19:58:36 +00:00
|
|
|
dbutil.Configure(db, "postgreskv", mon)
|
2020-04-24 20:15:27 +01:00
|
|
|
return NewWith(db, dbURL), nil
|
2020-01-16 23:48:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// NewWith instantiates a new postgreskv client given db.
|
2020-04-24 20:15:27 +01:00
|
|
|
func NewWith(db tagsql.DB, dbURL string) *Client {
|
|
|
|
return &Client{db: db, lookupLimit: storage.DefaultLookupLimit, dbURL: dbURL}
|
|
|
|
}
|
|
|
|
|
|
|
|
// MigrateToLatest migrates to latest schema version.
func (client *Client) MigrateToLatest(ctx context.Context) error {
	// PrepareDB creates/updates the pathdata schema; it also receives the original
	// URL — presumably to detect database flavor, confirm in the schema package.
	return schema.PrepareDB(ctx, client.db, client.dbURL)
}
|
|
|
|
|
2020-01-22 19:00:46 +00:00
|
|
|
// SetLookupLimit sets the lookup limit.
|
|
|
|
func (client *Client) SetLookupLimit(v int) { client.lookupLimit = v }
|
|
|
|
|
|
|
|
// LookupLimit returns the maximum limit that is allowed.
|
|
|
|
func (client *Client) LookupLimit() int { return client.lookupLimit }
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// Close closes the client.
|
2020-01-16 23:48:59 +00:00
|
|
|
func (client *Client) Close() error {
|
|
|
|
return client.db.Close()
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Put sets the value for the provided key.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) Put(ctx context.Context, key storage.Key, value storage.Value) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-10-25 18:11:28 +01:00
|
|
|
|
|
|
|
if key.IsZero() {
|
2018-11-15 15:31:33 +00:00
|
|
|
return storage.ErrEmptyKey.New("")
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
2020-01-16 23:48:59 +00:00
|
|
|
|
2018-10-25 18:11:28 +01:00
|
|
|
q := `
|
2020-01-16 23:48:59 +00:00
|
|
|
INSERT INTO pathdata (fullpath, metadata)
|
|
|
|
VALUES ($1::BYTEA, $2::BYTEA)
|
|
|
|
ON CONFLICT (fullpath) DO UPDATE SET metadata = EXCLUDED.metadata
|
2018-10-25 18:11:28 +01:00
|
|
|
`
|
2020-01-16 23:48:59 +00:00
|
|
|
|
2020-01-17 20:07:00 +00:00
|
|
|
_, err = client.db.Exec(ctx, q, []byte(key), []byte(value))
|
2020-01-16 23:48:59 +00:00
|
|
|
return Error.Wrap(err)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Get looks up the provided key and returns its value (or an error).
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) Get(ctx context.Context, key storage.Key) (_ storage.Value, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-10-25 18:11:28 +01:00
|
|
|
|
2018-11-15 15:31:33 +00:00
|
|
|
if key.IsZero() {
|
|
|
|
return nil, storage.ErrEmptyKey.New("")
|
|
|
|
}
|
|
|
|
|
2020-01-16 23:48:59 +00:00
|
|
|
q := "SELECT metadata FROM pathdata WHERE fullpath = $1::BYTEA"
|
2020-01-17 20:07:00 +00:00
|
|
|
row := client.db.QueryRow(ctx, q, []byte(key))
|
2019-07-23 15:28:06 +01:00
|
|
|
|
2018-10-25 18:11:28 +01:00
|
|
|
var val []byte
|
2019-06-05 15:23:10 +01:00
|
|
|
err = row.Scan(&val)
|
2020-07-14 14:04:38 +01:00
|
|
|
if errors.Is(err, sql.ErrNoRows) {
|
2019-08-21 17:30:29 +01:00
|
|
|
return nil, storage.ErrKeyNotFound.New("%q", key)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
2019-07-23 15:28:06 +01:00
|
|
|
|
|
|
|
return val, Error.Wrap(err)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
2020-01-22 19:00:46 +00:00
|
|
|
// GetAll finds all values for the provided keys (up to LookupLimit).
|
2018-10-25 18:11:28 +01:00
|
|
|
// If more keys are provided than the maximum, an error will be returned.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) GetAll(ctx context.Context, keys storage.Keys) (_ storage.Values, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-10-25 18:11:28 +01:00
|
|
|
|
2020-01-22 19:00:46 +00:00
|
|
|
if len(keys) > client.lookupLimit {
|
2020-07-02 23:51:25 +01:00
|
|
|
return nil, storage.ErrLimitExceeded.New("lookup limit exceeded")
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
q := `
|
|
|
|
SELECT metadata
|
|
|
|
FROM pathdata pd
|
|
|
|
RIGHT JOIN
|
2020-01-16 23:48:59 +00:00
|
|
|
unnest($1::BYTEA[]) WITH ORDINALITY pk(request, ord)
|
|
|
|
ON (pd.fullpath = pk.request)
|
2018-10-25 18:11:28 +01:00
|
|
|
ORDER BY pk.ord
|
|
|
|
`
|
2020-06-28 04:56:29 +01:00
|
|
|
rows, err := client.db.Query(ctx, q, pgutil.ByteaArray(keys.ByteSlices()))
|
2018-10-25 18:11:28 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, errs.Wrap(err)
|
|
|
|
}
|
2020-01-16 23:48:59 +00:00
|
|
|
defer func() { err = errs.Combine(err, Error.Wrap(rows.Close())) }()
|
|
|
|
|
2018-10-25 18:11:28 +01:00
|
|
|
values := make([]storage.Value, 0, len(keys))
|
|
|
|
for rows.Next() {
|
|
|
|
var value []byte
|
|
|
|
if err := rows.Scan(&value); err != nil {
|
2020-01-16 23:48:59 +00:00
|
|
|
return nil, Error.Wrap(err)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
values = append(values, storage.Value(value))
|
|
|
|
}
|
|
|
|
|
2020-01-16 23:48:59 +00:00
|
|
|
return values, Error.Wrap(rows.Err())
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
2020-01-16 23:48:59 +00:00
|
|
|
// Delete deletes the given key and its associated value.
|
|
|
|
func (client *Client) Delete(ctx context.Context, key storage.Key) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
if key.IsZero() {
|
|
|
|
return storage.ErrEmptyKey.New("")
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
2020-01-16 23:48:59 +00:00
|
|
|
|
|
|
|
q := "DELETE FROM pathdata WHERE fullpath = $1::BYTEA"
|
2020-01-17 20:07:00 +00:00
|
|
|
result, err := client.db.Exec(ctx, q, []byte(key))
|
2018-10-25 18:11:28 +01:00
|
|
|
if err != nil {
|
2020-01-16 23:48:59 +00:00
|
|
|
return err
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
2020-01-16 23:48:59 +00:00
|
|
|
numRows, err := result.RowsAffected()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
2020-01-16 23:48:59 +00:00
|
|
|
if numRows == 0 {
|
|
|
|
return storage.ErrKeyNotFound.New("%q", key)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
2020-01-16 23:48:59 +00:00
|
|
|
return nil
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// DeleteMultiple deletes keys ignoring missing keys.
|
2020-01-28 20:52:04 +00:00
|
|
|
func (client *Client) DeleteMultiple(ctx context.Context, keys []storage.Key) (_ storage.Items, err error) {
|
|
|
|
defer mon.Task()(&ctx, len(keys))(&err)
|
|
|
|
|
2020-07-09 16:51:04 +01:00
|
|
|
// make sure deletes always happen in the same order
|
|
|
|
sort.Slice(keys, func(i, j int) bool {
|
|
|
|
return keys[i].Less(keys[j])
|
|
|
|
})
|
|
|
|
|
2020-01-28 20:52:04 +00:00
|
|
|
rows, err := client.db.QueryContext(ctx, `
|
|
|
|
DELETE FROM pathdata
|
|
|
|
WHERE fullpath = any($1::BYTEA[])
|
|
|
|
RETURNING fullpath, metadata`,
|
2020-06-28 04:56:29 +01:00
|
|
|
pgutil.ByteaArray(storage.Keys(keys).ByteSlices()))
|
2020-01-28 20:52:04 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
err = errs.Combine(err, rows.Close())
|
|
|
|
}()
|
|
|
|
|
|
|
|
var items storage.Items
|
|
|
|
for rows.Next() {
|
|
|
|
var key, value []byte
|
|
|
|
err := rows.Scan(&key, &value)
|
|
|
|
if err != nil {
|
|
|
|
return items, err
|
|
|
|
}
|
|
|
|
items = append(items, storage.ListItem{
|
|
|
|
Key: key,
|
|
|
|
Value: value,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return items, rows.Err()
|
|
|
|
}
|
|
|
|
|
2020-01-16 23:48:59 +00:00
|
|
|
// List returns either a list of known keys, in order, or an error.
func (client *Client) List(ctx context.Context, first storage.Key, limit int) (_ storage.Keys, err error) {
	defer mon.Task()(&ctx)(&err)
	// Delegates to the shared storage.ListKeys helper, using this client as the store.
	return storage.ListKeys(ctx, client, first, limit)
}
|
|
|
|
|
2020-01-16 23:48:59 +00:00
|
|
|
// Iterate calls the callback with an iterator over the keys.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) Iterate(ctx context.Context, opts storage.IterateOptions, fn func(context.Context, storage.Iterator) error) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2020-01-16 23:48:59 +00:00
|
|
|
|
2020-01-22 19:00:46 +00:00
|
|
|
if opts.Limit <= 0 || opts.Limit > client.lookupLimit {
|
|
|
|
opts.Limit = client.lookupLimit
|
2020-01-16 23:48:59 +00:00
|
|
|
}
|
|
|
|
|
2020-05-05 07:51:24 +01:00
|
|
|
return client.IterateWithoutLookupLimit(ctx, opts, fn)
|
|
|
|
}
|
|
|
|
|
|
|
|
// IterateWithoutLookupLimit calls the callback with an iterator over the keys, but doesn't enforce default limit on opts.
|
|
|
|
func (client *Client) IterateWithoutLookupLimit(ctx context.Context, opts storage.IterateOptions, fn func(context.Context, storage.Iterator) error) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2020-01-19 19:56:51 +00:00
|
|
|
opi, err := newOrderedPostgresIterator(ctx, client, opts)
|
2018-10-25 18:11:28 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer func() {
|
2018-12-21 10:54:20 +00:00
|
|
|
err = errs.Combine(err, opi.Close())
|
2018-10-25 18:11:28 +01:00
|
|
|
}()
|
|
|
|
|
2019-06-05 15:23:10 +01:00
|
|
|
return fn(ctx, opi)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
2019-07-23 20:46:33 +01:00
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// CompareAndSwap atomically compares and swaps oldValue with newValue.
//
// Semantics by argument combination:
//   - oldValue == nil && newValue == nil: succeed only if the key is absent.
//   - oldValue == nil: insert newValue only if the key is absent.
//   - newValue == nil: delete the key only if its value equals oldValue.
//   - both non-nil: replace the value only if it equals oldValue.
func (client *Client) CompareAndSwap(ctx context.Context, key storage.Key, oldValue, newValue storage.Value) (err error) {
	defer mon.Task()(&ctx)(&err)

	if key.IsZero() {
		return storage.ErrEmptyKey.New("")
	}

	if oldValue == nil && newValue == nil {
		// Expect absent, leave absent: success only when the key does not exist.
		q := "SELECT metadata FROM pathdata WHERE fullpath = $1::BYTEA"
		row := client.db.QueryRow(ctx, q, []byte(key))

		var val []byte
		err = row.Scan(&val)
		if errors.Is(err, sql.ErrNoRows) {
			return nil
		}
		if err != nil {
			return Error.Wrap(err)
		}
		// Key exists although the caller expected it to be absent.
		return storage.ErrValueChanged.New("%q", key)
	}

	if oldValue == nil {
		// Expect absent, insert newValue. ON CONFLICT DO NOTHING produces no
		// returned row when the key already exists, so Scan yields sql.ErrNoRows.
		q := `
		INSERT INTO pathdata (fullpath, metadata) VALUES ($1::BYTEA, $2::BYTEA)
			ON CONFLICT DO NOTHING
			RETURNING 1
		`
		row := client.db.QueryRow(ctx, q, []byte(key), []byte(newValue))

		var val []byte
		err = row.Scan(&val)
		if errors.Is(err, sql.ErrNoRows) {
			return storage.ErrValueChanged.New("%q", key)
		}
		return Error.Wrap(err)
	}

	var row *sql.Row
	if newValue == nil {
		// Expect oldValue, delete: the CTE reports both whether the key exists
		// and whether the conditional delete actually fired.
		q := `
		WITH matching_key AS (
			SELECT * FROM pathdata WHERE fullpath = $1::BYTEA
		), updated AS (
			DELETE FROM pathdata
			USING matching_key mk
			WHERE pathdata.metadata = $2::BYTEA
				AND pathdata.fullpath = mk.fullpath
			RETURNING 1
		)
		SELECT EXISTS(SELECT 1 FROM matching_key) AS key_present, EXISTS(SELECT 1 FROM updated) AS value_updated
		`
		row = client.db.QueryRow(ctx, q, []byte(key), []byte(oldValue))
	} else {
		// Expect oldValue, replace with newValue under the same two-flag scheme.
		q := `
		WITH matching_key AS (
			SELECT * FROM pathdata WHERE fullpath = $1::BYTEA
		), updated AS (
			UPDATE pathdata
			SET metadata = $3::BYTEA
			FROM matching_key mk
			WHERE pathdata.metadata = $2::BYTEA
				AND pathdata.fullpath = mk.fullpath
			RETURNING 1
		)
		SELECT EXISTS(SELECT 1 FROM matching_key) AS key_present, EXISTS(SELECT 1 FROM updated) AS value_updated;
		`
		row = client.db.QueryRow(ctx, q, []byte(key), []byte(oldValue), []byte(newValue))
	}

	var keyPresent, valueUpdated bool
	err = row.Scan(&keyPresent, &valueUpdated)
	if err != nil {
		return Error.Wrap(err)
	}

	if !keyPresent {
		return storage.ErrKeyNotFound.New("%q", key)
	}

	if !valueUpdated {
		// Key exists, but its current metadata did not match oldValue.
		return storage.ErrValueChanged.New("%q", key)
	}

	return nil
}
|