2019-01-24 20:15:10 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
2018-10-25 18:11:28 +01:00
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package postgreskv
|
|
|
|
|
|
|
|
import (
|
2019-06-05 15:23:10 +01:00
|
|
|
"context"
|
2018-10-25 18:11:28 +01:00
|
|
|
"database/sql"
|
|
|
|
|
|
|
|
"github.com/lib/pq"
|
|
|
|
"github.com/zeebo/errs"
|
2019-06-04 22:30:21 +01:00
|
|
|
monkit "gopkg.in/spacemonkeygo/monkit.v2"
|
2018-10-25 18:11:28 +01:00
|
|
|
|
2019-11-14 19:46:15 +00:00
|
|
|
"storj.io/storj/private/dbutil"
|
|
|
|
"storj.io/storj/private/dbutil/pgutil"
|
2018-10-25 18:11:28 +01:00
|
|
|
"storj.io/storj/storage"
|
|
|
|
"storj.io/storj/storage/postgreskv/schema"
|
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// defaultBatchSize is the number of rows fetched per iterator batch query.
	defaultBatchSize = 10000
	// defaultBucket is the bucket used by the non-Path methods (Put, Get, ...).
	defaultBucket = ""
)
|
|
|
|
|
2019-06-04 22:30:21 +01:00
|
|
|
var (
	// mon is the package-level monkit scope used to instrument all methods.
	mon = monkit.Package()
)
|
|
|
|
|
2018-10-25 18:11:28 +01:00
|
|
|
// Client is the entrypoint into a postgreskv data store
type Client struct {
	// URL is the postgres connection string the client was opened with.
	URL string
	// pgConn is the underlying database handle used for all queries.
	pgConn *sql.DB
}
|
|
|
|
|
|
|
|
// New instantiates a new postgreskv client given db URL
|
|
|
|
func New(dbURL string) (*Client, error) {
|
|
|
|
pgConn, err := sql.Open("postgres", dbURL)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-05-21 15:30:06 +01:00
|
|
|
|
2019-06-04 22:30:21 +01:00
|
|
|
dbutil.Configure(pgConn, mon)
|
2019-05-21 15:30:06 +01:00
|
|
|
|
2019-05-14 16:13:18 +01:00
|
|
|
err = schema.PrepareDB(pgConn, dbURL)
|
2018-10-25 18:11:28 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return &Client{
|
|
|
|
URL: dbURL,
|
|
|
|
pgConn: pgConn,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2019-10-10 19:06:26 +01:00
|
|
|
// DropSchema drops the schema.
|
|
|
|
func (client *Client) DropSchema(schema string) error {
|
|
|
|
return pgutil.DropSchema(client.pgConn, schema)
|
|
|
|
}
|
|
|
|
|
2018-10-25 18:11:28 +01:00
|
|
|
// Put sets the value for the provided key.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) Put(ctx context.Context, key storage.Key, value storage.Value) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
return client.PutPath(ctx, storage.Key(defaultBucket), key, value)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// PutPath sets the value for the provided key (in the given bucket).
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) PutPath(ctx context.Context, bucket, key storage.Key, value storage.Value) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-10-25 18:11:28 +01:00
|
|
|
if key.IsZero() {
|
2018-11-15 15:31:33 +00:00
|
|
|
return storage.ErrEmptyKey.New("")
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
q := `
|
|
|
|
INSERT INTO pathdata (bucket, fullpath, metadata)
|
|
|
|
VALUES ($1::BYTEA, $2::BYTEA, $3::BYTEA)
|
|
|
|
ON CONFLICT (bucket, fullpath) DO UPDATE SET metadata = EXCLUDED.metadata
|
|
|
|
`
|
2019-06-05 15:23:10 +01:00
|
|
|
_, err = client.pgConn.Exec(q, []byte(bucket), []byte(key), []byte(value))
|
2018-10-25 18:11:28 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get looks up the provided key and returns its value (or an error).
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) Get(ctx context.Context, key storage.Key) (_ storage.Value, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
return client.GetPath(ctx, storage.Key(defaultBucket), key)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetPath looks up the provided key (in the given bucket) and returns its value (or an error).
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) GetPath(ctx context.Context, bucket, key storage.Key) (_ storage.Value, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-11-15 15:31:33 +00:00
|
|
|
if key.IsZero() {
|
|
|
|
return nil, storage.ErrEmptyKey.New("")
|
|
|
|
}
|
|
|
|
|
2018-10-25 18:11:28 +01:00
|
|
|
q := "SELECT metadata FROM pathdata WHERE bucket = $1::BYTEA AND fullpath = $2::BYTEA"
|
|
|
|
row := client.pgConn.QueryRow(q, []byte(bucket), []byte(key))
|
2019-07-23 15:28:06 +01:00
|
|
|
|
2018-10-25 18:11:28 +01:00
|
|
|
var val []byte
|
2019-06-05 15:23:10 +01:00
|
|
|
err = row.Scan(&val)
|
2018-10-25 18:11:28 +01:00
|
|
|
if err == sql.ErrNoRows {
|
2019-08-21 17:30:29 +01:00
|
|
|
return nil, storage.ErrKeyNotFound.New("%q", key)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
2019-07-23 15:28:06 +01:00
|
|
|
|
|
|
|
return val, Error.Wrap(err)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Delete deletes the given key and its associated value.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) Delete(ctx context.Context, key storage.Key) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
return client.DeletePath(ctx, storage.Key(defaultBucket), key)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// DeletePath deletes the given key (in the given bucket) and its associated value.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) DeletePath(ctx context.Context, bucket, key storage.Key) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-11-15 15:31:33 +00:00
|
|
|
if key.IsZero() {
|
|
|
|
return storage.ErrEmptyKey.New("")
|
|
|
|
}
|
|
|
|
|
2018-10-25 18:11:28 +01:00
|
|
|
q := "DELETE FROM pathdata WHERE bucket = $1::BYTEA AND fullpath = $2::BYTEA"
|
|
|
|
result, err := client.pgConn.Exec(q, []byte(bucket), []byte(key))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
numRows, err := result.RowsAffected()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if numRows == 0 {
|
2019-08-21 17:30:29 +01:00
|
|
|
return storage.ErrKeyNotFound.New("%q", key)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// List returns either a list of known keys, in order, or an error.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) List(ctx context.Context, first storage.Key, limit int) (_ storage.Keys, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
return storage.ListKeys(ctx, client, first, limit)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Close closes the client
func (client *Client) Close() error {
	// Closing the underlying *sql.DB releases all pooled connections.
	return client.pgConn.Close()
}
|
|
|
|
|
|
|
|
// GetAll finds all values for the provided keys (up to storage.LookupLimit).
|
|
|
|
// If more keys are provided than the maximum, an error will be returned.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) GetAll(ctx context.Context, keys storage.Keys) (_ storage.Values, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
return client.GetAllPath(ctx, storage.Key(defaultBucket), keys)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetAllPath finds all values for the provided keys (up to storage.LookupLimit)
|
|
|
|
// in the given bucket. if more keys are provided than the maximum, an error
|
|
|
|
// will be returned.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) GetAllPath(ctx context.Context, bucket storage.Key, keys storage.Keys) (_ storage.Values, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-10-25 18:11:28 +01:00
|
|
|
if len(keys) > storage.LookupLimit {
|
|
|
|
return nil, storage.ErrLimitExceeded
|
|
|
|
}
|
|
|
|
|
|
|
|
q := `
|
|
|
|
SELECT metadata
|
|
|
|
FROM pathdata pd
|
|
|
|
RIGHT JOIN
|
|
|
|
unnest($2::BYTEA[]) WITH ORDINALITY pk(request, ord)
|
|
|
|
ON (pd.fullpath = pk.request AND pd.bucket = $1::BYTEA)
|
|
|
|
ORDER BY pk.ord
|
|
|
|
`
|
|
|
|
rows, err := client.pgConn.Query(q, []byte(bucket), pq.ByteaArray(keys.ByteSlices()))
|
|
|
|
if err != nil {
|
|
|
|
return nil, errs.Wrap(err)
|
|
|
|
}
|
|
|
|
values := make([]storage.Value, 0, len(keys))
|
|
|
|
for rows.Next() {
|
|
|
|
var value []byte
|
|
|
|
if err := rows.Scan(&value); err != nil {
|
2018-12-21 10:54:20 +00:00
|
|
|
return nil, errs.Wrap(errs.Combine(err, rows.Close()))
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
values = append(values, storage.Value(value))
|
|
|
|
}
|
2018-12-21 10:54:20 +00:00
|
|
|
return values, errs.Combine(rows.Err(), rows.Close())
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// orderedPostgresIterator pages through pathdata rows in key order, one
// batch query at a time.
type orderedPostgresIterator struct {
	// client runs the batch queries.
	client *Client
	// opts holds the iteration options (Prefix, First, Recurse).
	opts *storage.IterateOptions
	// bucket scopes every query to a single bucket.
	bucket storage.Key
	// delimiter marks directory-style prefixes; keys ending in it are
	// reported with IsPrefix set (see Next).
	delimiter byte
	// batchSize is the number of rows requested per batch.
	batchSize int
	// curIndex counts rows consumed from the current batch.
	curIndex int
	// curRows is the result set of the current batch.
	curRows *sql.Rows
	// lastKeySeen is where the next batch resumes; it is re-fetched by the
	// next query and skipped as a duplicate.
	lastKeySeen storage.Key
	// errEncountered records the first error hit during iteration; it is
	// returned from Close.
	errEncountered error
	// nextQuery fetches the next batch of rows (normally doNextQuery).
	nextQuery func(context.Context) (*sql.Rows, error)
}
|
|
|
|
|
|
|
|
// Next fills in info for the next item in an ongoing listing.
func (opi *orderedPostgresIterator) Next(ctx context.Context, item *storage.ListItem) bool {
	defer mon.Task()(&ctx)(nil)
	if !opi.curRows.Next() {
		// Current batch exhausted: close it and decide whether to fetch more.
		if err := opi.curRows.Close(); err != nil {
			opi.errEncountered = errs.Wrap(err)
			return false
		}
		// A short batch (fewer rows than batchSize consumed) means the query
		// returned everything there was; the listing is complete.
		if opi.curIndex < opi.batchSize {
			return false
		}
		if err := opi.curRows.Err(); err != nil {
			opi.errEncountered = errs.Wrap(err)
			return false
		}
		// Fetch the next batch, resuming from lastKeySeen.
		newRows, err := opi.nextQuery(ctx)
		if err != nil {
			opi.errEncountered = errs.Wrap(err)
			return false
		}
		opi.curRows = newRows
		opi.curIndex = 0
		if !opi.curRows.Next() {
			// The new batch is empty; clean up and stop.
			if err := opi.curRows.Close(); err != nil {
				opi.errEncountered = errs.Wrap(err)
			}
			return false
		}
	}
	var k, v []byte
	err := opi.curRows.Scan(&k, &v)
	if err != nil {
		opi.errEncountered = errs.Combine(errs.Wrap(err), errs.Wrap(opi.curRows.Close()))
		return false
	}
	item.Key = storage.Key(k)
	item.Value = storage.Value(v)
	opi.curIndex++
	// Each new batch starts at lastKeySeen, so its first row duplicates the
	// last row of the previous batch; skip it by recursing once.
	if opi.curIndex == 1 && opi.lastKeySeen.Equal(item.Key) {
		return opi.Next(ctx, item)
	}
	// In non-recursive mode, a key ending with the delimiter (other than the
	// prefix itself) represents a synthesized directory entry.
	if !opi.opts.Recurse && item.Key[len(item.Key)-1] == opi.delimiter && !item.Key.Equal(opi.opts.Prefix) {
		item.IsPrefix = true
		// i don't think this makes the most sense, but it's necessary to pass the storage testsuite
		item.Value = nil
	} else {
		item.IsPrefix = false
	}
	opi.lastKeySeen = item.Key
	return true
}
|
|
|
|
|
2019-06-05 15:23:10 +01:00
|
|
|
func (opi *orderedPostgresIterator) doNextQuery(ctx context.Context) (_ *sql.Rows, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-10-25 18:11:28 +01:00
|
|
|
start := opi.lastKeySeen
|
|
|
|
if start == nil {
|
|
|
|
start = opi.opts.First
|
|
|
|
}
|
|
|
|
var query string
|
|
|
|
if !opi.opts.Recurse {
|
2019-09-25 22:30:41 +01:00
|
|
|
query = "SELECT p, m FROM list_directory($1::BYTEA, $2::BYTEA, $3::BYTEA, $4) ld(p, m)"
|
2018-10-25 18:11:28 +01:00
|
|
|
} else {
|
2019-09-25 22:30:41 +01:00
|
|
|
query = `
|
2018-10-25 18:11:28 +01:00
|
|
|
SELECT fullpath, metadata
|
|
|
|
FROM pathdata
|
|
|
|
WHERE bucket = $1::BYTEA
|
|
|
|
AND ($2::BYTEA = ''::BYTEA OR fullpath >= $2::BYTEA)
|
|
|
|
AND ($2::BYTEA = ''::BYTEA OR fullpath < bytea_increment($2::BYTEA))
|
2019-09-25 22:30:41 +01:00
|
|
|
AND ($3::BYTEA = ''::BYTEA OR fullpath >= $3::BYTEA)
|
|
|
|
ORDER BY fullpath
|
2018-10-25 18:11:28 +01:00
|
|
|
LIMIT $4
|
2019-09-25 22:30:41 +01:00
|
|
|
`
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
return opi.client.pgConn.Query(query, []byte(opi.bucket), []byte(opi.opts.Prefix), []byte(start), opi.batchSize+1)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (opi *orderedPostgresIterator) Close() error {
|
2018-12-21 10:54:20 +00:00
|
|
|
return errs.Combine(opi.errEncountered, opi.curRows.Close())
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
2019-06-05 15:23:10 +01:00
|
|
|
func newOrderedPostgresIterator(ctx context.Context, pgClient *Client, opts storage.IterateOptions, batchSize int) (_ *orderedPostgresIterator, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-10-25 18:11:28 +01:00
|
|
|
if opts.Prefix == nil {
|
|
|
|
opts.Prefix = storage.Key("")
|
|
|
|
}
|
|
|
|
if opts.First == nil {
|
|
|
|
opts.First = storage.Key("")
|
|
|
|
}
|
|
|
|
opi := &orderedPostgresIterator{
|
|
|
|
client: pgClient,
|
|
|
|
opts: &opts,
|
|
|
|
bucket: storage.Key(defaultBucket),
|
|
|
|
delimiter: byte('/'),
|
|
|
|
batchSize: batchSize,
|
|
|
|
curIndex: 0,
|
|
|
|
}
|
|
|
|
opi.nextQuery = opi.doNextQuery
|
2019-06-05 15:23:10 +01:00
|
|
|
newRows, err := opi.nextQuery(ctx)
|
2018-10-25 18:11:28 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
opi.curRows = newRows
|
|
|
|
return opi, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Iterate iterates over items based on opts
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) Iterate(ctx context.Context, opts storage.IterateOptions, fn func(context.Context, storage.Iterator) error) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
opi, err := newOrderedPostgresIterator(ctx, client, opts, defaultBatchSize)
|
2018-10-25 18:11:28 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer func() {
|
2018-12-21 10:54:20 +00:00
|
|
|
err = errs.Combine(err, opi.Close())
|
2018-10-25 18:11:28 +01:00
|
|
|
}()
|
|
|
|
|
2019-06-05 15:23:10 +01:00
|
|
|
return fn(ctx, opi)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
2019-07-23 20:46:33 +01:00
|
|
|
|
|
|
|
// CompareAndSwap atomically compares and swaps oldValue with newValue
|
|
|
|
func (client *Client) CompareAndSwap(ctx context.Context, key storage.Key, oldValue, newValue storage.Value) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
return client.CompareAndSwapPath(ctx, storage.Key(defaultBucket), key, oldValue, newValue)
|
|
|
|
}
|
|
|
|
|
|
|
|
// CompareAndSwapPath atomically compares and swaps oldValue with newValue in the given bucket
//
// Semantics by argument combination:
//   - oldValue == nil && newValue == nil: assert the key is absent.
//   - oldValue == nil: insert newValue only if the key is absent.
//   - newValue == nil: delete the key only if its value equals oldValue.
//   - otherwise: update the key to newValue only if its value equals oldValue.
func (client *Client) CompareAndSwapPath(ctx context.Context, bucket, key storage.Key, oldValue, newValue storage.Value) (err error) {
	defer mon.Task()(&ctx)(&err)
	if key.IsZero() {
		return storage.ErrEmptyKey.New("")
	}

	if oldValue == nil && newValue == nil {
		// Pure existence check: succeed only when no row is present.
		q := "SELECT metadata FROM pathdata WHERE bucket = $1::BYTEA AND fullpath = $2::BYTEA"
		row := client.pgConn.QueryRow(q, []byte(bucket), []byte(key))
		var val []byte
		err = row.Scan(&val)
		if err == sql.ErrNoRows {
			// Key absent, as expected.
			return nil
		}
		if err != nil {
			return Error.Wrap(err)
		}
		// A row was found, so the "expected nil" comparison failed.
		return storage.ErrValueChanged.New("%q", key)
	}

	if oldValue == nil {
		// Insert-if-absent: ON CONFLICT DO NOTHING suppresses the insert
		// when the key already exists, in which case RETURNING yields no row.
		q := `
		INSERT INTO pathdata (bucket, fullpath, metadata) VALUES ($1::BYTEA, $2::BYTEA, $3::BYTEA)
			ON CONFLICT DO NOTHING
			RETURNING 1
		`
		row := client.pgConn.QueryRow(q, []byte(bucket), []byte(key), []byte(newValue))
		var val []byte
		err = row.Scan(&val)
		if err == sql.ErrNoRows {
			// No row returned: the key already existed, so the swap failed.
			return storage.ErrValueChanged.New("%q", key)
		}
		return Error.Wrap(err)
	}

	var row *sql.Row
	if newValue == nil {
		// Compare-and-delete: the CTEs report, in a single statement, both
		// whether the key exists and whether the delete matched oldValue.
		q := `
		WITH matching_key AS (
			SELECT * FROM pathdata WHERE bucket = $1::BYTEA AND fullpath = $2::BYTEA
		), updated AS (
			DELETE FROM pathdata
				USING matching_key mk
				WHERE pathdata.metadata = $3::BYTEA
					AND pathdata.bucket = mk.bucket
					AND pathdata.fullpath = mk.fullpath
				RETURNING 1
		)
		SELECT EXISTS(SELECT 1 FROM matching_key) AS key_present, EXISTS(SELECT 1 FROM updated) AS value_updated
		`
		row = client.pgConn.QueryRow(q, []byte(bucket), []byte(key), []byte(oldValue))
	} else {
		// Compare-and-update: same shape as the delete branch, but the
		// update only applies when the stored metadata equals oldValue.
		q := `
		WITH matching_key AS (
			SELECT * FROM pathdata WHERE bucket = $1::BYTEA AND fullpath = $2::BYTEA
		), updated AS (
			UPDATE pathdata
				SET metadata = $4::BYTEA
				FROM matching_key mk
				WHERE pathdata.metadata = $3::BYTEA
					AND pathdata.bucket = mk.bucket
					AND pathdata.fullpath = mk.fullpath
				RETURNING 1
		)
		SELECT EXISTS(SELECT 1 FROM matching_key) AS key_present, EXISTS(SELECT 1 FROM updated) AS value_updated;
		`
		row = client.pgConn.QueryRow(q, []byte(bucket), []byte(key), []byte(oldValue), []byte(newValue))
	}

	var keyPresent, valueUpdated bool
	err = row.Scan(&keyPresent, &valueUpdated)
	if err != nil {
		return Error.Wrap(err)
	}
	if !keyPresent {
		return storage.ErrKeyNotFound.New("%q", key)
	}
	if !valueUpdated {
		// Key exists but its value did not match oldValue.
		return storage.ErrValueChanged.New("%q", key)
	}
	return nil
}
|