// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package boltdb

import (
	"bytes"
	"context"
	"sync/atomic"
	"time"

	"github.com/spacemonkeygo/monkit/v3"
	"github.com/zeebo/errs"
	"go.etcd.io/bbolt"

	"storj.io/storj/storage"
)

var mon = monkit.Package()

// Error is the default boltdb errs class.
var Error = errs.Class("boltdb error")

// Client is the entrypoint into a bolt data store.
type Client struct {
	db     *bbolt.DB
	Path   string
	Bucket []byte

	referenceCount *int32
	lookupLimit    int
}

const (
	// fileMode sets permissions so owner can read and write.
	fileMode       = 0600
	defaultTimeout = 1 * time.Second
)
|
2018-04-06 17:32:34 +01:00
|
|
|
|
2020-07-16 15:18:02 +01:00
|
|
|
// New instantiates a new BoltDB client given db file path, and a bucket name.
|
2018-09-05 17:10:35 +01:00
|
|
|
func New(path, bucket string) (*Client, error) {
|
2020-03-24 10:33:34 +00:00
|
|
|
db, err := bbolt.Open(path, fileMode, &bbolt.Options{Timeout: defaultTimeout})
|
2018-04-06 17:32:34 +01:00
|
|
|
if err != nil {
|
2018-12-07 14:46:42 +00:00
|
|
|
return nil, Error.Wrap(err)
|
2018-04-06 17:32:34 +01:00
|
|
|
}
|
|
|
|
|
2020-03-24 10:33:34 +00:00
|
|
|
err = Error.Wrap(db.Update(func(tx *bbolt.Tx) error {
|
2018-08-16 15:32:28 +01:00
|
|
|
_, err = tx.CreateBucketIfNotExists([]byte(bucket))
|
|
|
|
return err
|
2018-12-07 14:46:42 +00:00
|
|
|
}))
|
2018-08-16 15:32:28 +01:00
|
|
|
if err != nil {
|
2018-12-07 14:46:42 +00:00
|
|
|
if closeErr := Error.Wrap(db.Close()); closeErr != nil {
|
2018-12-21 10:54:20 +00:00
|
|
|
return nil, errs.Combine(err, closeErr)
|
2018-09-05 17:10:35 +01:00
|
|
|
}
|
2018-08-16 15:32:28 +01:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-10-18 17:20:23 +01:00
|
|
|
refCount := new(int32)
|
|
|
|
*refCount = 1
|
|
|
|
|
2018-08-01 15:15:38 +01:00
|
|
|
return &Client{
|
2018-10-18 17:20:23 +01:00
|
|
|
db: db,
|
|
|
|
referenceCount: refCount,
|
|
|
|
Path: path,
|
|
|
|
Bucket: []byte(bucket),
|
2020-01-22 19:00:46 +00:00
|
|
|
lookupLimit: storage.DefaultLookupLimit,
|
2018-04-06 17:32:34 +01:00
|
|
|
}, nil
|
|
|
|
}
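
// A minimal usage sketch, assuming ctx is a caller-supplied context.Context;
// the path, bucket name, and key below are illustrative placeholders, not
// values used elsewhere in this package:
//
//	db, err := New("/tmp/kv.db", "bucket")
//	if err != nil { ... }
//	defer func() { _ = db.Close() }()
//
//	err = db.Put(ctx, storage.Key("hello"), storage.Value("world"))
//	value, err := db.Get(ctx, storage.Key("hello"))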

// SetLookupLimit sets the lookup limit.
func (client *Client) SetLookupLimit(v int) { client.lookupLimit = v }

// LookupLimit returns the maximum limit that is allowed.
func (client *Client) LookupLimit() int { return client.lookupLimit }
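
// The three helpers below wrap bbolt transactions scoped to the client's
// bucket: update and batch run read-write transactions (Batch additionally
// coalesces concurrent calls into fewer commits), while view runs a read-only
// transaction. Every returned error is wrapped in the package Error class.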

func (client *Client) update(fn func(*bbolt.Bucket) error) error {
	return Error.Wrap(client.db.Update(func(tx *bbolt.Tx) error {
		return fn(tx.Bucket(client.Bucket))
	}))
}

func (client *Client) batch(fn func(*bbolt.Bucket) error) error {
	return Error.Wrap(client.db.Batch(func(tx *bbolt.Tx) error {
		return fn(tx.Bucket(client.Bucket))
	}))
}

func (client *Client) view(fn func(*bbolt.Bucket) error) error {
	return Error.Wrap(client.db.View(func(tx *bbolt.Tx) error {
		return fn(tx.Bucket(client.Bucket))
	}))
}

// Put adds a key/value to boltDB in a batch, where boltDB commits the batch to disk
// every 1000 operations or 10ms, whichever comes first. MaxBatchDelay uses the
// default setting.
// Ref: https://github.com/boltdb/bolt/blob/master/db.go#L160
// Note: when using this method, check whether it needs to be executed asynchronously,
// since it blocks for the duration of db.MaxBatchDelay.
func (client *Client) Put(ctx context.Context, key storage.Key, value storage.Value) (err error) {
	defer mon.Task()(&ctx)(&err)
	start := time.Now()
	if key.IsZero() {
		return storage.ErrEmptyKey.New("")
	}

	err = client.batch(func(bucket *bbolt.Bucket) error {
		return bucket.Put(key, value)
	})
	mon.IntVal("boltdb_batch_time_elapsed").Observe(int64(time.Since(start)))
	return err
}
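
// Because Put blocks until its batch commits, it pays off mainly when several
// goroutines write at once, so many writes share a single commit. A hedged
// sketch (errgroup and the entries slice are illustrative, not part of this
// package; db is a *Client as in the sketch after New):
//
//	var group errgroup.Group
//	for _, e := range entries {
//		e := e
//		group.Go(func() error {
//			return db.Put(ctx, e.Key, e.Value)
//		})
//	}
//	err := group.Wait()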

// PutAndCommit adds a key/value to BoltDB and writes it to disk.
func (client *Client) PutAndCommit(ctx context.Context, key storage.Key, value storage.Value) (err error) {
	defer mon.Task()(&ctx)(&err)
	if key.IsZero() {
		return storage.ErrEmptyKey.New("")
	}

	return client.update(func(bucket *bbolt.Bucket) error {
		return bucket.Put(key, value)
	})
}

// Get looks up the provided key from boltdb, returning either an error or the result.
func (client *Client) Get(ctx context.Context, key storage.Key) (_ storage.Value, err error) {
	defer mon.Task()(&ctx)(&err)
	if key.IsZero() {
		return nil, storage.ErrEmptyKey.New("")
	}

	var value storage.Value
	err = client.view(func(bucket *bbolt.Bucket) error {
		data := bucket.Get([]byte(key))
		if len(data) == 0 {
			return storage.ErrKeyNotFound.New("%q", key)
		}
		value = storage.CloneValue(storage.Value(data))
		return nil
	})
	return value, err
}

// Delete deletes a key/value pair from boltdb, for a given key.
func (client *Client) Delete(ctx context.Context, key storage.Key) (err error) {
	defer mon.Task()(&ctx)(&err)
	if key.IsZero() {
		return storage.ErrEmptyKey.New("")
	}

	return client.update(func(bucket *bbolt.Bucket) error {
		return bucket.Delete(key)
	})
}

// DeleteMultiple deletes the provided keys, ignoring any that are missing,
// and returns the items that were actually deleted.
func (client *Client) DeleteMultiple(ctx context.Context, keys []storage.Key) (_ storage.Items, err error) {
	defer mon.Task()(&ctx, len(keys))(&err)

	var items storage.Items
	err = client.update(func(bucket *bbolt.Bucket) error {
		for _, key := range keys {
			value := bucket.Get(key)
			if len(value) == 0 {
				continue
			}

			items = append(items, storage.ListItem{
				Key:   key,
				Value: value,
			})

			err := bucket.Delete(key)
			if err != nil {
				return err
			}
		}
		return nil
	})

	return items, err
}
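
// A short sketch of consuming the returned items (the keys are illustrative):
//
//	deleted, err := db.DeleteMultiple(ctx, []storage.Key{
//		storage.Key("a"), storage.Key("b"),
//	})
//	if err != nil { ... }
//	for _, item := range deleted {
//		fmt.Println(item.Key) // only keys that actually existed
//	}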

// List returns either a list of keys for which boltdb has values or an error.
func (client *Client) List(ctx context.Context, first storage.Key, limit int) (_ storage.Keys, err error) {
	defer mon.Task()(&ctx)(&err)
	rv, err := storage.ListKeys(ctx, client, first, limit)
	return rv, Error.Wrap(err)
}

// Close closes a BoltDB client; the underlying database is closed only when
// the last reference to it is released.
func (client *Client) Close() (err error) {
	if atomic.AddInt32(client.referenceCount, -1) == 0 {
		return Error.Wrap(client.db.Close())
	}
	return nil
}

// GetAll finds all values for the provided keys (up to LookupLimit).
// If more keys are provided than the maximum, an error will be returned.
// Missing keys yield nil values in the returned slice.
func (client *Client) GetAll(ctx context.Context, keys storage.Keys) (_ storage.Values, err error) {
	defer mon.Task()(&ctx)(&err)
	if len(keys) > client.lookupLimit {
		return nil, storage.ErrLimitExceeded.New("lookup limit exceeded")
	}

	vals := make(storage.Values, 0, len(keys))
	err = client.view(func(bucket *bbolt.Bucket) error {
		for _, key := range keys {
			val := bucket.Get([]byte(key))
			if val == nil {
				vals = append(vals, nil)
				continue
			}
			vals = append(vals, storage.CloneValue(storage.Value(val)))
		}
		return nil
	})
	return vals, err
}

// Iterate iterates over items based on opts.
func (client *Client) Iterate(ctx context.Context, opts storage.IterateOptions, fn func(context.Context, storage.Iterator) error) (err error) {
	defer mon.Task()(&ctx)(&err)

	if opts.Limit <= 0 || opts.Limit > client.lookupLimit {
		opts.Limit = client.lookupLimit
	}

	return client.IterateWithoutLookupLimit(ctx, opts, fn)
}

// IterateWithoutLookupLimit calls the callback with an iterator over the keys,
// but doesn't enforce the default lookup limit on opts.
func (client *Client) IterateWithoutLookupLimit(ctx context.Context, opts storage.IterateOptions, fn func(context.Context, storage.Iterator) error) (err error) {
	defer mon.Task()(&ctx)(&err)

	return client.view(func(bucket *bbolt.Bucket) error {
		var cursor advancer = forward{bucket.Cursor()}

		start := true
		lastPrefix := []byte{}
		wasPrefix := false

		return fn(ctx, storage.IteratorFunc(func(ctx context.Context, item *storage.ListItem) bool {
			var key, value []byte
			if start {
				key, value = cursor.PositionToFirst(opts.Prefix, opts.First)
				start = false
			} else {
				key, value = cursor.Advance()
			}

			if !opts.Recurse {
				// when non-recursive, skip all items that have the same prefix
				if wasPrefix && bytes.HasPrefix(key, lastPrefix) {
					key, value = cursor.SkipPrefix(lastPrefix)
					wasPrefix = false
				}
			}

			if len(key) == 0 || !bytes.HasPrefix(key, opts.Prefix) {
				return false
			}

			if !opts.Recurse {
				// check whether the entry is a proper prefix
				if p := bytes.IndexByte(key[len(opts.Prefix):], storage.Delimiter); p >= 0 {
					key = key[:len(opts.Prefix)+p+1]
					lastPrefix = append(lastPrefix[:0], key...)

					item.Key = append(item.Key[:0], storage.Key(lastPrefix)...)
					item.Value = item.Value[:0]
					item.IsPrefix = true

					wasPrefix = true
					return true
				}
			}

			item.Key = append(item.Key[:0], storage.Key(key)...)
			item.Value = append(item.Value[:0], storage.Value(value)...)
			item.IsPrefix = false

			return true
		}))
	})
}
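
// A hedged sketch of iterating over a prefix (the prefix is illustrative, and
// the Next signature is assumed from the storage.Iterator interface):
//
//	err := db.Iterate(ctx, storage.IterateOptions{
//		Prefix:  storage.Key("user/"),
//		Recurse: true,
//	}, func(ctx context.Context, it storage.Iterator) error {
//		var item storage.ListItem
//		for it.Next(ctx, &item) {
//			fmt.Println(item.Key, item.IsPrefix)
//		}
//		return nil
//	})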

type advancer interface {
	PositionToFirst(prefix, first storage.Key) (key, value []byte)
	SkipPrefix(prefix storage.Key) (key, value []byte)
	Advance() (key, value []byte)
}

type forward struct {
	*bbolt.Cursor
}

func (cursor forward) PositionToFirst(prefix, first storage.Key) (key, value []byte) {
	if first.IsZero() || first.Less(prefix) {
		return cursor.Seek([]byte(prefix))
	}
	return cursor.Seek([]byte(first))
}

func (cursor forward) SkipPrefix(prefix storage.Key) (key, value []byte) {
	return cursor.Seek(storage.AfterPrefix(prefix))
}

func (cursor forward) Advance() (key, value []byte) {
	return cursor.Next()
}

// CompareAndSwap atomically compares and swaps oldValue with newValue.
func (client *Client) CompareAndSwap(ctx context.Context, key storage.Key, oldValue, newValue storage.Value) (err error) {
	defer mon.Task()(&ctx)(&err)
	if key.IsZero() {
		return storage.ErrEmptyKey.New("")
	}

	return client.update(func(bucket *bbolt.Bucket) error {
		data := bucket.Get([]byte(key))
		if len(data) == 0 {
			if oldValue != nil {
				return storage.ErrKeyNotFound.New("%q", key)
			}

			if newValue == nil {
				return nil
			}

			return Error.Wrap(bucket.Put(key, newValue))
		}

		if !bytes.Equal(storage.Value(data), oldValue) {
			return storage.ErrValueChanged.New("%q", key)
		}

		if newValue == nil {
			return Error.Wrap(bucket.Delete(key))
		}

		return Error.Wrap(bucket.Put(key, newValue))
	})
}
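
// A hedged sketch of an optimistic-concurrency update built on CompareAndSwap
// (the key, the transform function, and the use of storage.ErrValueChanged.Has
// are illustrative assumptions):
//
//	for {
//		old, err := db.Get(ctx, key)
//		if err != nil {
//			return err
//		}
//		err = db.CompareAndSwap(ctx, key, old, transform(old))
//		if storage.ErrValueChanged.Has(err) {
//			continue // another writer won the race; retry
//		}
//		return err
//	}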