2019-01-24 20:15:10 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
2018-04-17 04:50:20 +01:00
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
2018-04-06 17:32:34 +01:00
|
|
|
package boltdb
|
|
|
|
|
|
|
|
import (
|
2018-09-05 17:10:35 +01:00
|
|
|
"bytes"
|
2019-06-05 15:23:10 +01:00
|
|
|
"context"
|
2018-10-18 17:20:23 +01:00
|
|
|
"sync/atomic"
|
2018-04-10 22:46:48 +01:00
|
|
|
"time"
|
2018-04-17 04:50:20 +01:00
|
|
|
|
2018-10-16 12:43:44 +01:00
|
|
|
"github.com/boltdb/bolt"
|
2018-12-21 10:54:20 +00:00
|
|
|
"github.com/zeebo/errs"
|
2019-07-01 22:20:19 +01:00
|
|
|
"gopkg.in/spacemonkeygo/monkit.v2"
|
2018-10-16 12:43:44 +01:00
|
|
|
|
2018-06-13 19:22:32 +01:00
|
|
|
"storj.io/storj/storage"
|
2018-04-10 22:46:48 +01:00
|
|
|
)
|
2018-04-06 17:32:34 +01:00
|
|
|
|
2019-05-06 21:47:12 +01:00
|
|
|
// mon collects monkit telemetry (task timing, counters) for this package.
var mon = monkit.Package()
|
|
|
|
|
2019-02-14 12:33:41 +00:00
|
|
|
// Error is the default boltdb errs class.
var Error = errs.Class("boltdb error")
|
|
|
|
|
2018-08-01 15:15:38 +01:00
|
|
|
// Client is the entrypoint into a bolt data store
type Client struct {
	db     *bolt.DB // underlying bolt database handle, possibly shared (see NewShared)
	Path   string   // filesystem path the database was opened from
	Bucket []byte   // name of the bucket this client operates on

	// referenceCount is shared between all clients created by the same
	// NewShared call; the underlying bolt.DB is closed only when the
	// last remaining client is closed (see Close).
	referenceCount *int32
}
|
|
|
|
|
2018-04-21 00:54:18 +01:00
|
|
|
const (
	// fileMode sets permissions so owner can read and write
	fileMode = 0600
	// defaultTimeout bounds how long bolt.Open waits to acquire the
	// file lock before giving up.
	defaultTimeout = 1 * time.Second
)
|
2018-04-06 17:32:34 +01:00
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
// New instantiates a new BoltDB client given db file path, and a bucket name
|
|
|
|
func New(path, bucket string) (*Client, error) {
|
2018-04-21 00:54:18 +01:00
|
|
|
db, err := bolt.Open(path, fileMode, &bolt.Options{Timeout: defaultTimeout})
|
2018-04-06 17:32:34 +01:00
|
|
|
if err != nil {
|
2018-12-07 14:46:42 +00:00
|
|
|
return nil, Error.Wrap(err)
|
2018-04-06 17:32:34 +01:00
|
|
|
}
|
|
|
|
|
2018-12-07 14:46:42 +00:00
|
|
|
err = Error.Wrap(db.Update(func(tx *bolt.Tx) error {
|
2018-08-16 15:32:28 +01:00
|
|
|
_, err = tx.CreateBucketIfNotExists([]byte(bucket))
|
|
|
|
return err
|
2018-12-07 14:46:42 +00:00
|
|
|
}))
|
2018-08-16 15:32:28 +01:00
|
|
|
if err != nil {
|
2018-12-07 14:46:42 +00:00
|
|
|
if closeErr := Error.Wrap(db.Close()); closeErr != nil {
|
2018-12-21 10:54:20 +00:00
|
|
|
return nil, errs.Combine(err, closeErr)
|
2018-09-05 17:10:35 +01:00
|
|
|
}
|
2018-08-16 15:32:28 +01:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-10-18 17:20:23 +01:00
|
|
|
refCount := new(int32)
|
|
|
|
*refCount = 1
|
|
|
|
|
2018-08-01 15:15:38 +01:00
|
|
|
return &Client{
|
2018-10-18 17:20:23 +01:00
|
|
|
db: db,
|
|
|
|
referenceCount: refCount,
|
|
|
|
Path: path,
|
|
|
|
Bucket: []byte(bucket),
|
2018-04-06 17:32:34 +01:00
|
|
|
}, nil
|
|
|
|
}
|
2018-04-10 22:46:48 +01:00
|
|
|
|
2018-10-18 17:20:23 +01:00
|
|
|
// NewShared instantiates a new BoltDB with multiple buckets
|
|
|
|
func NewShared(path string, buckets ...string) ([]*Client, error) {
|
|
|
|
db, err := bolt.Open(path, fileMode, &bolt.Options{Timeout: defaultTimeout})
|
|
|
|
if err != nil {
|
2018-12-07 14:46:42 +00:00
|
|
|
return nil, Error.Wrap(err)
|
2018-10-18 17:20:23 +01:00
|
|
|
}
|
|
|
|
|
2018-12-07 14:46:42 +00:00
|
|
|
err = Error.Wrap(db.Update(func(tx *bolt.Tx) error {
|
2018-10-18 17:20:23 +01:00
|
|
|
for _, bucket := range buckets {
|
|
|
|
_, err := tx.CreateBucketIfNotExists([]byte(bucket))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return err
|
2018-12-07 14:46:42 +00:00
|
|
|
}))
|
2018-10-18 17:20:23 +01:00
|
|
|
if err != nil {
|
2018-12-07 14:46:42 +00:00
|
|
|
if closeErr := Error.Wrap(db.Close()); closeErr != nil {
|
2018-12-21 10:54:20 +00:00
|
|
|
return nil, errs.Combine(err, closeErr)
|
2018-10-18 17:20:23 +01:00
|
|
|
}
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
refCount := new(int32)
|
|
|
|
*refCount = int32(len(buckets))
|
|
|
|
|
|
|
|
clients := []*Client{}
|
|
|
|
for _, bucket := range buckets {
|
|
|
|
clients = append(clients, &Client{
|
|
|
|
db: db,
|
|
|
|
referenceCount: refCount,
|
|
|
|
Path: path,
|
|
|
|
Bucket: []byte(bucket),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return clients, nil
|
|
|
|
}
|
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
func (client *Client) update(fn func(*bolt.Bucket) error) error {
|
2018-12-07 14:46:42 +00:00
|
|
|
return Error.Wrap(client.db.Update(func(tx *bolt.Tx) error {
|
2018-09-05 17:10:35 +01:00
|
|
|
return fn(tx.Bucket(client.Bucket))
|
2018-12-07 14:46:42 +00:00
|
|
|
}))
|
2018-09-05 17:10:35 +01:00
|
|
|
}
|
|
|
|
|
2019-05-06 21:47:12 +01:00
|
|
|
func (client *Client) batch(fn func(*bolt.Bucket) error) error {
|
|
|
|
return Error.Wrap(client.db.Batch(func(tx *bolt.Tx) error {
|
|
|
|
return fn(tx.Bucket(client.Bucket))
|
|
|
|
}))
|
|
|
|
}
|
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
func (client *Client) view(fn func(*bolt.Bucket) error) error {
|
2018-12-07 14:46:42 +00:00
|
|
|
return Error.Wrap(client.db.View(func(tx *bolt.Tx) error {
|
2018-09-05 17:10:35 +01:00
|
|
|
return fn(tx.Bucket(client.Bucket))
|
2018-12-07 14:46:42 +00:00
|
|
|
}))
|
2018-09-05 17:10:35 +01:00
|
|
|
}
|
|
|
|
|
2019-05-06 21:47:12 +01:00
|
|
|
// Put adds a key/value to boltDB in a batch, where boltDB commits the batch to to disk every
|
|
|
|
// 1000 operations or 10ms, whichever is first. The MaxBatchDelay are using default settings.
|
|
|
|
// Ref: https://github.com/boltdb/bolt/blob/master/db.go#L160
|
|
|
|
// Note: when using this method, check if it need to be executed asynchronously
|
|
|
|
// since it blocks for the duration db.MaxBatchDelay.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) Put(ctx context.Context, key storage.Key, value storage.Value) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2019-05-06 21:47:12 +01:00
|
|
|
start := time.Now()
|
|
|
|
if key.IsZero() {
|
|
|
|
return storage.ErrEmptyKey.New("")
|
|
|
|
}
|
|
|
|
|
2019-06-05 15:23:10 +01:00
|
|
|
err = client.batch(func(bucket *bolt.Bucket) error {
|
2019-05-06 21:47:12 +01:00
|
|
|
return bucket.Put(key, value)
|
|
|
|
})
|
|
|
|
mon.IntVal("boltdb_batch_time_elapsed").Observe(int64(time.Since(start)))
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// PutAndCommit adds a key/value to BoltDB and writes it to disk.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) PutAndCommit(ctx context.Context, key storage.Key, value storage.Value) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-11-15 15:31:33 +00:00
|
|
|
if key.IsZero() {
|
|
|
|
return storage.ErrEmptyKey.New("")
|
2018-09-05 17:10:35 +01:00
|
|
|
}
|
2018-11-15 15:31:33 +00:00
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
return client.update(func(bucket *bolt.Bucket) error {
|
|
|
|
return bucket.Put(key, value)
|
2018-06-13 19:22:32 +01:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get looks up the provided key from boltdb returning either an error or the result.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) Get(ctx context.Context, key storage.Key) (_ storage.Value, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-11-15 15:31:33 +00:00
|
|
|
if key.IsZero() {
|
|
|
|
return nil, storage.ErrEmptyKey.New("")
|
|
|
|
}
|
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
var value storage.Value
|
2019-06-05 15:23:10 +01:00
|
|
|
err = client.view(func(bucket *bolt.Bucket) error {
|
2018-09-05 17:10:35 +01:00
|
|
|
data := bucket.Get([]byte(key))
|
|
|
|
if len(data) == 0 {
|
|
|
|
return storage.ErrKeyNotFound.New(key.String())
|
2018-08-14 16:22:29 +01:00
|
|
|
}
|
2018-09-05 17:10:35 +01:00
|
|
|
value = storage.CloneValue(storage.Value(data))
|
2018-06-13 19:22:32 +01:00
|
|
|
return nil
|
|
|
|
})
|
2018-09-05 17:10:35 +01:00
|
|
|
return value, err
|
|
|
|
}
|
2018-06-13 19:22:32 +01:00
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
// Delete deletes a key/value pair from boltdb, for a given the key
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) Delete(ctx context.Context, key storage.Key) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-11-15 15:31:33 +00:00
|
|
|
if key.IsZero() {
|
|
|
|
return storage.ErrEmptyKey.New("")
|
|
|
|
}
|
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
return client.update(func(bucket *bolt.Bucket) error {
|
|
|
|
return bucket.Delete(key)
|
|
|
|
})
|
2018-06-13 19:22:32 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// List returns either a list of keys for which boltdb has values or an error.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) List(ctx context.Context, first storage.Key, limit int) (_ storage.Keys, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
rv, err := storage.ListKeys(ctx, client, first, limit)
|
2018-12-07 14:46:42 +00:00
|
|
|
return rv, Error.Wrap(err)
|
2018-07-30 20:25:18 +01:00
|
|
|
}
|
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
// Close closes a BoltDB client
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) Close() (err error) {
|
2018-10-18 17:20:23 +01:00
|
|
|
if atomic.AddInt32(client.referenceCount, -1) == 0 {
|
2018-12-07 14:46:42 +00:00
|
|
|
return Error.Wrap(client.db.Close())
|
2018-10-18 17:20:23 +01:00
|
|
|
}
|
|
|
|
return nil
|
2018-09-05 17:10:35 +01:00
|
|
|
}
|
|
|
|
|
2018-10-25 18:11:28 +01:00
|
|
|
// GetAll finds all values for the provided keys (up to storage.LookupLimit).
|
|
|
|
// If more keys are provided than the maximum, an error will be returned.
|
2019-06-05 15:23:10 +01:00
|
|
|
func (client *Client) GetAll(ctx context.Context, keys storage.Keys) (_ storage.Values, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-09-07 10:00:00 +01:00
|
|
|
if len(keys) > storage.LookupLimit {
|
|
|
|
return nil, storage.ErrLimitExceeded
|
2018-09-05 17:10:35 +01:00
|
|
|
}
|
|
|
|
|
2018-09-07 10:00:00 +01:00
|
|
|
vals := make(storage.Values, 0, len(keys))
|
2019-06-05 15:23:10 +01:00
|
|
|
err = client.view(func(bucket *bolt.Bucket) error {
|
2018-09-05 17:10:35 +01:00
|
|
|
for _, key := range keys {
|
|
|
|
val := bucket.Get([]byte(key))
|
2018-09-11 05:52:14 +01:00
|
|
|
if val == nil {
|
|
|
|
vals = append(vals, nil)
|
|
|
|
continue
|
|
|
|
}
|
2018-09-05 17:10:35 +01:00
|
|
|
vals = append(vals, storage.CloneValue(storage.Value(val)))
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
return vals, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Iterate iterates over items based on opts.
// It runs fn with an iterator inside a single read-only transaction, so
// the iterator must not be used after fn returns. When opts.Recurse is
// false, keys sharing a delimiter-terminated prefix are collapsed into a
// single IsPrefix item; opts.Reverse selects descending order.
func (client *Client) Iterate(ctx context.Context, opts storage.IterateOptions, fn func(context.Context, storage.Iterator) error) (err error) {
	defer mon.Task()(&ctx)(&err)
	return client.view(func(bucket *bolt.Bucket) error {
		// choose the traversal direction for the bolt cursor
		var cursor advancer
		if !opts.Reverse {
			cursor = forward{bucket.Cursor()}
		} else {
			cursor = backward{bucket.Cursor()}
		}

		// iteration state shared across calls of the iterator func:
		// start — first call must position the cursor rather than advance it;
		// lastPrefix/wasPrefix — track the most recently emitted prefix so
		// subsequent keys under it can be skipped in non-recursive mode.
		start := true
		lastPrefix := []byte{}
		wasPrefix := false

		return fn(ctx, storage.IteratorFunc(func(ctx context.Context, item *storage.ListItem) bool {
			var key, value []byte
			if start {
				key, value = cursor.PositionToFirst(opts.Prefix, opts.First)
				start = false
			} else {
				key, value = cursor.Advance()
			}

			if !opts.Recurse {
				// when non-recursive skip all items that have the same prefix
				if wasPrefix && bytes.HasPrefix(key, lastPrefix) {
					key, value = cursor.SkipPrefix(lastPrefix)
					wasPrefix = false
				}
			}

			// stop when the cursor is exhausted or we've left opts.Prefix
			if len(key) == 0 || !bytes.HasPrefix(key, opts.Prefix) {
				return false
			}

			if !opts.Recurse {
				// check whether the entry is a proper prefix
				if p := bytes.IndexByte(key[len(opts.Prefix):], storage.Delimiter); p >= 0 {
					// truncate the key at the delimiter and remember it so
					// later entries under the same prefix can be skipped
					key = key[:len(opts.Prefix)+p+1]
					lastPrefix = append(lastPrefix[:0], key...)

					// reuse item's backing storage across calls
					item.Key = append(item.Key[:0], storage.Key(lastPrefix)...)
					item.Value = item.Value[:0]
					item.IsPrefix = true

					wasPrefix = true
					return true
				}
			}

			// plain entry: copy key/value out of the transaction's memory
			item.Key = append(item.Key[:0], storage.Key(key)...)
			item.Value = append(item.Value[:0], storage.Value(value)...)
			item.IsPrefix = false

			return true
		}))
	})
}
|
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
// advancer is a direction-agnostic view over a bolt cursor, used by Iterate
// to walk keys either forward or backward with the same code path.
type advancer interface {
	// PositionToFirst positions the cursor at the first entry to yield,
	// considering both the key prefix and the requested first key.
	PositionToFirst(prefix, first storage.Key) (key, value []byte)
	// SkipPrefix moves the cursor past all keys sharing prefix,
	// in this advancer's iteration order.
	SkipPrefix(prefix storage.Key) (key, value []byte)
	// Advance steps to the next entry in this advancer's iteration order.
	Advance() (key, value []byte)
}
|
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
// forward wraps a bolt cursor to iterate keys in ascending order.
type forward struct {
	*bolt.Cursor
}
|
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
func (cursor forward) PositionToFirst(prefix, first storage.Key) (key, value []byte) {
|
2018-09-07 15:20:15 +01:00
|
|
|
if first.IsZero() || first.Less(prefix) {
|
2018-09-05 17:10:35 +01:00
|
|
|
return cursor.Seek([]byte(prefix))
|
|
|
|
}
|
|
|
|
return cursor.Seek([]byte(first))
|
2018-08-26 04:00:49 +01:00
|
|
|
}
|
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
func (cursor forward) SkipPrefix(prefix storage.Key) (key, value []byte) {
|
|
|
|
return cursor.Seek(storage.AfterPrefix(prefix))
|
2018-06-13 19:22:32 +01:00
|
|
|
}
|
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
func (cursor forward) Advance() (key, value []byte) {
|
|
|
|
return cursor.Next()
|
2018-04-10 22:46:48 +01:00
|
|
|
}
|
2018-08-01 15:15:38 +01:00
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
// backward wraps a bolt cursor to iterate keys in descending order.
type backward struct {
	*bolt.Cursor
}
|
2018-08-03 14:15:52 +01:00
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
// PositionToFirst positions the cursor at the first entry to yield when
// iterating in descending order: the last key under prefix that is not
// after first (or simply the last matching key when first is unset).
func (cursor backward) PositionToFirst(prefix, first storage.Key) (key, value []byte) {
	if prefix.IsZero() {
		// there's no prefix
		if first.IsZero() {
			// and no first item, so start from the end
			return cursor.Last()
		}
	} else {
		// there's a prefix
		if first.IsZero() || storage.AfterPrefix(prefix).Less(first) {
			// there's no first, or it's after our prefix
			// storage.AfterPrefix("axxx/") is the next item after prefixes
			// so we position to the item before
			nextkey := storage.AfterPrefix(prefix)
			_, _ = cursor.Seek(nextkey)
			return cursor.Prev()
		}
	}

	// otherwise try to position on first or one before that
	key, value = cursor.Seek(first)
	if !bytes.Equal(key, first) {
		// Seek lands at-or-after first in ascending order; in descending
		// iteration the correct starting point is the entry before it
		key, value = cursor.Prev()
	}
	return key, value
}
|
|
|
|
|
|
|
|
func (cursor backward) SkipPrefix(prefix storage.Key) (key, value []byte) {
|
|
|
|
_, _ = cursor.Seek(prefix)
|
|
|
|
return cursor.Prev()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cursor backward) Advance() (key, value []byte) {
|
|
|
|
return cursor.Prev()
|
2018-08-01 15:15:38 +01:00
|
|
|
}
|