storj/storage/boltdb/client.go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package boltdb

import (
	"bytes"
	"context"
	"sync/atomic"
	"time"

	"github.com/spacemonkeygo/monkit/v3"
	"github.com/zeebo/errs"
	"go.etcd.io/bbolt"

	"storj.io/storj/storage"
)
var mon = monkit.Package()

// Error is the default boltdb errs class.
var Error = errs.Class("boltdb error")

// Client is the entrypoint into a bolt data store.
type Client struct {
	db     *bbolt.DB
	Path   string
	Bucket []byte

	referenceCount *int32
	lookupLimit    int
}

const (
	// fileMode sets permissions so the owner can read and write.
	fileMode       = 0600
	defaultTimeout = 1 * time.Second
)

// New instantiates a new BoltDB client given a db file path and a bucket name.
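//
// A minimal usage sketch (the file path and bucket name below are illustrative):
//
//	db, err := boltdb.New("example.db", "example-bucket")
//	if err != nil {
//		return err
//	}
//	defer func() { _ = db.Close() }()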
func New(path, bucket string) (*Client, error) {
	db, err := bbolt.Open(path, fileMode, &bbolt.Options{Timeout: defaultTimeout})
	if err != nil {
		return nil, Error.Wrap(err)
	}

	err = Error.Wrap(db.Update(func(tx *bbolt.Tx) error {
		_, err = tx.CreateBucketIfNotExists([]byte(bucket))
		return err
	}))
	if err != nil {
		if closeErr := Error.Wrap(db.Close()); closeErr != nil {
			return nil, errs.Combine(err, closeErr)
		}
		return nil, err
	}

	refCount := new(int32)
	*refCount = 1

	return &Client{
		db:             db,
		referenceCount: refCount,
		Path:           path,
		Bucket:         []byte(bucket),
		lookupLimit:    storage.DefaultLookupLimit,
	}, nil
}

// SetLookupLimit sets the lookup limit.
func (client *Client) SetLookupLimit(v int) { client.lookupLimit = v }

// LookupLimit returns the maximum limit that is allowed.
func (client *Client) LookupLimit() int { return client.lookupLimit }
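// update runs fn within a read-write transaction scoped to the client's bucket.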
func (client *Client) update(fn func(*bbolt.Bucket) error) error {
return Error.Wrap(client.db.Update(func(tx *bbolt.Tx) error {
return fn(tx.Bucket(client.Bucket))
}))
}
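// batch runs fn within a batched read-write transaction; bbolt may coalesce it
// with other concurrent calls before committing.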
func (client *Client) batch(fn func(*bbolt.Bucket) error) error {
return Error.Wrap(client.db.Batch(func(tx *bbolt.Tx) error {
return fn(tx.Bucket(client.Bucket))
}))
}
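// view runs fn within a read-only transaction scoped to the client's bucket.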
func (client *Client) view(fn func(*bbolt.Bucket) error) error {
return Error.Wrap(client.db.View(func(tx *bbolt.Tx) error {
return fn(tx.Bucket(client.Bucket))
}))
}
// Put adds a key/value to boltDB in a batch, where boltDB commits the batch to disk every
// 1000 operations or 10ms, whichever comes first. MaxBatchDelay uses the default setting.
// Ref: https://github.com/boltdb/bolt/blob/master/db.go#L160
// Note: when using this method, check whether it needs to be executed asynchronously,
// since it blocks for the duration of db.MaxBatchDelay.
func (client *Client) Put(ctx context.Context, key storage.Key, value storage.Value) (err error) {
defer mon.Task()(&ctx)(&err)
start := time.Now()
if key.IsZero() {
return storage.ErrEmptyKey.New("")
}
err = client.batch(func(bucket *bbolt.Bucket) error {
return bucket.Put(key, value)
})
mon.IntVal("boltdb_batch_time_elapsed").Observe(int64(time.Since(start)))
return err
}
// PutAndCommit adds a key/value to BoltDB and writes it to disk.
func (client *Client) PutAndCommit(ctx context.Context, key storage.Key, value storage.Value) (err error) {
defer mon.Task()(&ctx)(&err)
if key.IsZero() {
return storage.ErrEmptyKey.New("")
}
return client.update(func(bucket *bbolt.Bucket) error {
return bucket.Put(key, value)
})
}
// Get looks up the provided key from boltdb returning either an error or the result.
func (client *Client) Get(ctx context.Context, key storage.Key) (_ storage.Value, err error) {
defer mon.Task()(&ctx)(&err)
if key.IsZero() {
return nil, storage.ErrEmptyKey.New("")
}
var value storage.Value
err = client.view(func(bucket *bbolt.Bucket) error {
data := bucket.Get([]byte(key))
if len(data) == 0 {
return storage.ErrKeyNotFound.New("%q", key)
}
value = storage.CloneValue(storage.Value(data))
return nil
})
return value, err
}
// Delete deletes a key/value pair from boltdb, for a given key.
func (client *Client) Delete(ctx context.Context, key storage.Key) (err error) {
defer mon.Task()(&ctx)(&err)
if key.IsZero() {
return storage.ErrEmptyKey.New("")
}
return client.update(func(bucket *bbolt.Bucket) error {
return bucket.Delete(key)
})
}
// DeleteMultiple deletes the given keys, ignoring any that are missing, and returns the deleted items.
func (client *Client) DeleteMultiple(ctx context.Context, keys []storage.Key) (_ storage.Items, err error) {
defer mon.Task()(&ctx, len(keys))(&err)
var items storage.Items
err = client.update(func(bucket *bbolt.Bucket) error {
for _, key := range keys {
value := bucket.Get(key)
if len(value) == 0 {
continue
}
			// Clone the value: memory returned by bbolt is only valid for the life of the transaction.
			items = append(items, storage.ListItem{
				Key:   key,
				Value: storage.CloneValue(storage.Value(value)),
			})
err := bucket.Delete(key)
if err != nil {
return err
}
}
return nil
})
return items, err
}
// List returns either a list of keys for which boltdb has values or an error.
func (client *Client) List(ctx context.Context, first storage.Key, limit int) (_ storage.Keys, err error) {
defer mon.Task()(&ctx)(&err)
rv, err := storage.ListKeys(ctx, client, first, limit)
return rv, Error.Wrap(err)
}
// Close closes a BoltDB client
func (client *Client) Close() (err error) {
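	// The bolt database handle may be shared by multiple clients; close it only
	// when the last reference is released.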
if atomic.AddInt32(client.referenceCount, -1) == 0 {
return Error.Wrap(client.db.Close())
}
return nil
}
// GetAll finds all values for the provided keys (up to LookupLimit).
// If more keys are provided than the maximum, an error will be returned.
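// Missing keys are returned as nil values in the corresponding positions.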
func (client *Client) GetAll(ctx context.Context, keys storage.Keys) (_ storage.Values, err error) {
defer mon.Task()(&ctx)(&err)
if len(keys) > client.lookupLimit {
return nil, storage.ErrLimitExceeded
}
vals := make(storage.Values, 0, len(keys))
err = client.view(func(bucket *bbolt.Bucket) error {
for _, key := range keys {
val := bucket.Get([]byte(key))
if val == nil {
vals = append(vals, nil)
continue
}
vals = append(vals, storage.CloneValue(storage.Value(val)))
}
return nil
})
return vals, err
}
// Iterate iterates over items based on opts
func (client *Client) Iterate(ctx context.Context, opts storage.IterateOptions, fn func(context.Context, storage.Iterator) error) (err error) {
defer mon.Task()(&ctx)(&err)
if opts.Limit <= 0 || opts.Limit > client.lookupLimit {
opts.Limit = client.lookupLimit
}
return client.view(func(bucket *bbolt.Bucket) error {
var cursor advancer = forward{bucket.Cursor()}
start := true
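		// lastPrefix remembers the most recent collapsed prefix returned to fn, and wasPrefix
		// records whether the previous item was such a prefix, so that non-recursive iteration
		// can skip the remaining keys that share it.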
lastPrefix := []byte{}
wasPrefix := false
return fn(ctx, storage.IteratorFunc(func(ctx context.Context, item *storage.ListItem) bool {
var key, value []byte
if start {
key, value = cursor.PositionToFirst(opts.Prefix, opts.First)
start = false
} else {
key, value = cursor.Advance()
}
if !opts.Recurse {
// when non-recursive skip all items that have the same prefix
if wasPrefix && bytes.HasPrefix(key, lastPrefix) {
key, value = cursor.SkipPrefix(lastPrefix)
wasPrefix = false
}
}
if len(key) == 0 || !bytes.HasPrefix(key, opts.Prefix) {
return false
}
if !opts.Recurse {
// check whether the entry is a proper prefix
if p := bytes.IndexByte(key[len(opts.Prefix):], storage.Delimiter); p >= 0 {
key = key[:len(opts.Prefix)+p+1]
lastPrefix = append(lastPrefix[:0], key...)
item.Key = append(item.Key[:0], storage.Key(lastPrefix)...)
item.Value = item.Value[:0]
item.IsPrefix = true
wasPrefix = true
return true
}
}
item.Key = append(item.Key[:0], storage.Key(key)...)
item.Value = append(item.Value[:0], storage.Value(value)...)
item.IsPrefix = false
return true
}))
})
}
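// advancer abstracts cursor movement so Iterate can position to a starting key,
// step forward, and skip over whole prefixes.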
type advancer interface {
PositionToFirst(prefix, first storage.Key) (key, value []byte)
SkipPrefix(prefix storage.Key) (key, value []byte)
Advance() (key, value []byte)
}
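// forward implements advancer on top of a bbolt cursor, walking keys in ascending order.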
type forward struct {
*bbolt.Cursor
}
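// PositionToFirst seeks to first, or to the beginning of prefix when first is empty
// or sorts before it.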
func (cursor forward) PositionToFirst(prefix, first storage.Key) (key, value []byte) {
if first.IsZero() || first.Less(prefix) {
return cursor.Seek([]byte(prefix))
}
return cursor.Seek([]byte(first))
}
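// SkipPrefix moves the cursor to the first key following all keys that start with prefix.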
func (cursor forward) SkipPrefix(prefix storage.Key) (key, value []byte) {
return cursor.Seek(storage.AfterPrefix(prefix))
}
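// Advance moves the cursor to the next key/value pair.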
func (cursor forward) Advance() (key, value []byte) {
return cursor.Next()
}
// CompareAndSwap atomically compares and swaps oldValue with newValue
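// A nil oldValue means the key is expected to be absent; a nil newValue deletes the key.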
func (client *Client) CompareAndSwap(ctx context.Context, key storage.Key, oldValue, newValue storage.Value) (err error) {
defer mon.Task()(&ctx)(&err)
if key.IsZero() {
return storage.ErrEmptyKey.New("")
}
return client.update(func(bucket *bbolt.Bucket) error {
data := bucket.Get([]byte(key))
if len(data) == 0 {
if oldValue != nil {
return storage.ErrKeyNotFound.New("%q", key)
}
if newValue == nil {
return nil
}
return Error.Wrap(bucket.Put(key, newValue))
}
if !bytes.Equal(storage.Value(data), oldValue) {
return storage.ErrValueChanged.New("%q", key)
}
if newValue == nil {
return Error.Wrap(bucket.Delete(key))
}
return Error.Wrap(bucket.Put(key, newValue))
})
}