2019-01-24 20:15:10 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
2018-04-17 04:50:20 +01:00
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
2018-04-10 22:46:48 +01:00
|
|
|
package boltdb
|
|
|
|
|
|
|
|
import (
|
2018-10-25 18:11:28 +01:00
|
|
|
"fmt"
|
2018-04-10 22:46:48 +01:00
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
2018-09-05 17:10:35 +01:00
|
|
|
"path/filepath"
|
2019-05-06 21:47:12 +01:00
|
|
|
"sync"
|
2018-04-10 22:46:48 +01:00
|
|
|
"testing"
|
2018-04-21 00:54:18 +01:00
|
|
|
|
2018-12-21 10:54:20 +00:00
|
|
|
"github.com/zeebo/errs"
|
2019-05-06 21:47:12 +01:00
|
|
|
"storj.io/storj/internal/testcontext"
|
2018-10-25 18:11:28 +01:00
|
|
|
"storj.io/storj/storage"
|
2018-09-05 17:10:35 +01:00
|
|
|
"storj.io/storj/storage/testsuite"
|
2018-04-10 22:46:48 +01:00
|
|
|
)
|
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
func TestSuite(t *testing.T) {
|
2018-09-11 14:57:12 +01:00
|
|
|
tempdir, err := ioutil.TempDir("", "storj-bolt")
|
2018-05-07 18:01:53 +01:00
|
|
|
if err != nil {
|
2018-09-05 17:10:35 +01:00
|
|
|
t.Fatal(err)
|
2018-06-29 21:06:25 +01:00
|
|
|
}
|
2018-09-11 14:13:25 +01:00
|
|
|
defer func() { _ = os.RemoveAll(tempdir) }()
|
2018-06-29 21:06:25 +01:00
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
dbname := filepath.Join(tempdir, "bolt.db")
|
|
|
|
store, err := New(dbname, "bucket")
|
2018-04-10 22:46:48 +01:00
|
|
|
if err != nil {
|
2018-09-05 17:10:35 +01:00
|
|
|
t.Fatalf("failed to create db: %v", err)
|
2018-04-10 22:46:48 +01:00
|
|
|
}
|
2018-09-05 17:10:35 +01:00
|
|
|
defer func() {
|
|
|
|
if err := store.Close(); err != nil {
|
|
|
|
t.Fatalf("failed to close db: %v", err)
|
|
|
|
}
|
|
|
|
}()
|
2018-04-10 22:46:48 +01:00
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
testsuite.RunTests(t, store)
|
2018-06-29 21:06:25 +01:00
|
|
|
}
|
2018-04-10 22:46:48 +01:00
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
func BenchmarkSuite(b *testing.B) {
|
2018-09-11 14:57:12 +01:00
|
|
|
tempdir, err := ioutil.TempDir("", "storj-bolt")
|
2018-06-29 21:06:25 +01:00
|
|
|
if err != nil {
|
2018-09-05 17:10:35 +01:00
|
|
|
b.Fatal(err)
|
2018-04-10 22:46:48 +01:00
|
|
|
}
|
2018-09-11 14:13:25 +01:00
|
|
|
defer func() { _ = os.RemoveAll(tempdir) }()
|
2018-04-10 22:46:48 +01:00
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
dbname := filepath.Join(tempdir, "bolt.db")
|
|
|
|
store, err := New(dbname, "bucket")
|
2018-04-10 22:46:48 +01:00
|
|
|
if err != nil {
|
2018-09-05 17:10:35 +01:00
|
|
|
b.Fatalf("failed to create db: %v", err)
|
2018-04-10 22:46:48 +01:00
|
|
|
}
|
2018-09-05 17:10:35 +01:00
|
|
|
defer func() {
|
|
|
|
if err := store.Close(); err != nil {
|
|
|
|
b.Fatalf("failed to close db: %v", err)
|
2018-08-03 14:15:52 +01:00
|
|
|
}
|
2018-09-05 17:10:35 +01:00
|
|
|
}()
|
2018-08-03 14:15:52 +01:00
|
|
|
|
2018-09-05 17:10:35 +01:00
|
|
|
testsuite.RunBenchmarks(b, store)
|
2018-08-03 14:15:52 +01:00
|
|
|
}
|
2018-10-18 17:20:23 +01:00
|
|
|
|
|
|
|
func TestSuiteShared(t *testing.T) {
|
|
|
|
tempdir, err := ioutil.TempDir("", "storj-bolt")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
defer func() { _ = os.RemoveAll(tempdir) }()
|
|
|
|
|
|
|
|
dbname := filepath.Join(tempdir, "bolt.db")
|
|
|
|
stores, err := NewShared(dbname, "alpha", "beta")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create db: %v", err)
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
for _, store := range stores {
|
|
|
|
if err := store.Close(); err != nil {
|
|
|
|
t.Fatalf("failed to close db: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
for _, store := range stores {
|
|
|
|
testsuite.RunTests(t, store)
|
|
|
|
}
|
|
|
|
}
|
2018-10-25 18:11:28 +01:00
|
|
|
|
|
|
|
// boltLongBenchmarkStore wraps a boltdb Client for the long-running benchmark
// suite, adding the bulk-import/bulk-delete hooks that
// testsuite.BenchmarkPathOperationsInLargeDb uses to load fixture data.
type boltLongBenchmarkStore struct {
	*Client
	// dirPath is the temp directory holding the bolt file (set by
	// BenchmarkSuiteLong; not read by the methods visible in this file).
	dirPath string
}
|
|
|
|
|
|
|
|
func (store *boltLongBenchmarkStore) BulkImport(iter storage.Iterator) (err error) {
|
|
|
|
// turn off syncing during import
|
|
|
|
oldval := store.db.NoSync
|
|
|
|
store.db.NoSync = true
|
|
|
|
defer func() { store.db.NoSync = oldval }()
|
|
|
|
|
|
|
|
var item storage.ListItem
|
|
|
|
for iter.Next(&item) {
|
|
|
|
if err := store.Put(item.Key, item.Value); err != nil {
|
|
|
|
return fmt.Errorf("Failed to insert data (%q, %q): %v", item.Key, item.Value, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return store.db.Sync()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (store *boltLongBenchmarkStore) BulkDelete() error {
|
|
|
|
// do nothing here; everything will be cleaned up later after the test completes. it's not
|
|
|
|
// worth it to wait for BoltDB to remove every key, one by one, and we can't just
|
|
|
|
// os.RemoveAll() the whole test directory at this point because those files are still open
|
|
|
|
// and unremoveable on Windows.
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkSuiteLong(b *testing.B) {
|
|
|
|
tempdir, err := ioutil.TempDir("", "storj-bolt")
|
|
|
|
if err != nil {
|
|
|
|
b.Fatal(err)
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
if err := os.RemoveAll(tempdir); err != nil {
|
|
|
|
b.Fatal(err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
dbname := filepath.Join(tempdir, "bolt.db")
|
|
|
|
store, err := New(dbname, "bucket")
|
|
|
|
if err != nil {
|
|
|
|
b.Fatalf("failed to create db: %v", err)
|
|
|
|
}
|
|
|
|
defer func() {
|
2018-12-21 10:54:20 +00:00
|
|
|
if err := errs.Combine(store.Close(), os.RemoveAll(tempdir)); err != nil {
|
2018-10-25 18:11:28 +01:00
|
|
|
b.Fatalf("failed to close db: %v", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
longStore := &boltLongBenchmarkStore{
|
|
|
|
Client: store,
|
|
|
|
dirPath: tempdir,
|
|
|
|
}
|
|
|
|
testsuite.BenchmarkPathOperationsInLargeDb(b, longStore)
|
|
|
|
}
|
2019-05-06 21:47:12 +01:00
|
|
|
|
|
|
|
func BenchmarkClientWrite(b *testing.B) {
|
|
|
|
// setup db
|
|
|
|
ctx := testcontext.New(b)
|
|
|
|
defer ctx.Cleanup()
|
|
|
|
dbfile := ctx.File("testbolt.db")
|
|
|
|
dbs, err := NewShared(dbfile, "kbuckets", "nodes")
|
|
|
|
if err != nil {
|
|
|
|
b.Fatalf("failed to create db: %v\n", err)
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
if err := dbs[0].Close(); err != nil {
|
|
|
|
b.Fatalf("failed to close db: %v\n", err)
|
|
|
|
}
|
|
|
|
if err := dbs[1].Close(); err != nil {
|
|
|
|
b.Fatalf("failed to close db: %v\n", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
kdb := dbs[0]
|
|
|
|
|
|
|
|
// benchmark test: execute 1000 Put operations where each call to `PutAndCommit` does the following:
|
|
|
|
// 1) create a BoltDB transaction (tx), 2) execute the db operation, 3) commit the tx which writes it to disk.
|
|
|
|
for n := 0; n < b.N; n++ {
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for i := 0; i < 1000; i++ {
|
|
|
|
key := storage.Key(fmt.Sprintf("testkey%d", i))
|
|
|
|
value := storage.Value("testvalue")
|
|
|
|
|
|
|
|
wg.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer wg.Done()
|
|
|
|
err := kdb.PutAndCommit(key, value)
|
|
|
|
if err != nil {
|
|
|
|
b.Fatal("Put err:", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkClientNoSyncWrite(b *testing.B) {
|
|
|
|
// setup db
|
|
|
|
ctx := testcontext.New(b)
|
|
|
|
defer ctx.Cleanup()
|
|
|
|
dbfile := ctx.File("testbolt.db")
|
|
|
|
dbs, err := NewShared(dbfile, "kbuckets", "nodes")
|
|
|
|
if err != nil {
|
|
|
|
b.Fatalf("failed to create db: %v\n", err)
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
if err := dbs[0].Close(); err != nil {
|
|
|
|
b.Fatalf("failed to close db: %v\n", err)
|
|
|
|
}
|
|
|
|
if err := dbs[1].Close(); err != nil {
|
|
|
|
b.Fatalf("failed to close db: %v\n", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
kdb := dbs[0]
|
|
|
|
|
|
|
|
// benchmark test: execute 1000 Put operations with fsync turned off.
|
|
|
|
// Each call to `PutAndCommit` does the following: 1) creates a BoltDB transaction (tx),
|
|
|
|
// 2) executes the db operation, and 3) commits the tx which does NOT write it to disk.
|
|
|
|
kdb.db.NoSync = true
|
|
|
|
for n := 0; n < b.N; n++ {
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for i := 0; i < 1000; i++ {
|
|
|
|
key := storage.Key(fmt.Sprintf("testkey%d", i))
|
|
|
|
value := storage.Value("testvalue")
|
|
|
|
|
|
|
|
wg.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer wg.Done()
|
|
|
|
err := kdb.PutAndCommit(key, value)
|
|
|
|
if err != nil {
|
|
|
|
b.Fatal("PutAndCommit Nosync err:", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
}
|
|
|
|
err = kdb.db.Sync()
|
|
|
|
if err != nil {
|
|
|
|
b.Fatalf("boltDB sync err: %v\n", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkClientBatchWrite(b *testing.B) {
|
|
|
|
// setup db
|
|
|
|
ctx := testcontext.New(b)
|
|
|
|
defer ctx.Cleanup()
|
|
|
|
dbfile := ctx.File("testbolt.db")
|
|
|
|
dbs, err := NewShared(dbfile, "kbuckets", "nodes")
|
|
|
|
if err != nil {
|
|
|
|
b.Fatalf("failed to create db: %v\n", err)
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
if err := dbs[0].Close(); err != nil {
|
|
|
|
b.Fatalf("failed to close db: %v\n", err)
|
|
|
|
}
|
|
|
|
if err := dbs[1].Close(); err != nil {
|
|
|
|
b.Fatalf("failed to close db: %v\n", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
kdb := dbs[0]
|
|
|
|
|
|
|
|
// benchmark test: batch 1000 Put operations.
|
|
|
|
// Each call to `Put` does the following: 1) adds the db operation to a queue in boltDB,
|
|
|
|
// 2) every 1000 operations or 10ms, whichever is first, BoltDB creates a single
|
|
|
|
// transaction for all operations currently in the batch, executes the operations,
|
|
|
|
// commits, and writes them to disk
|
|
|
|
for n := 0; n < b.N; n++ {
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for i := 0; i < 1000; i++ {
|
|
|
|
key := storage.Key(fmt.Sprintf("testkey%d", i))
|
|
|
|
value := storage.Value("testvalue")
|
|
|
|
|
|
|
|
wg.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer wg.Done()
|
|
|
|
err := kdb.Put(key, value)
|
|
|
|
if err != nil {
|
|
|
|
b.Fatalf("boltDB put: %v\n", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
b.Fatalf("boltDB put: %v\n", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkClientBatchNoSyncWrite(b *testing.B) {
|
|
|
|
// setup db
|
|
|
|
ctx := testcontext.New(b)
|
|
|
|
defer ctx.Cleanup()
|
|
|
|
dbfile := ctx.File("testbolt.db")
|
|
|
|
dbs, err := NewShared(dbfile, "kbuckets", "nodes")
|
|
|
|
if err != nil {
|
|
|
|
b.Fatalf("failed to create db: %v\n", err)
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
if err := dbs[0].Close(); err != nil {
|
|
|
|
b.Fatalf("failed to close db: %v\n", err)
|
|
|
|
}
|
|
|
|
if err := dbs[1].Close(); err != nil {
|
|
|
|
b.Fatalf("failed to close db: %v\n", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
kdb := dbs[0]
|
|
|
|
|
|
|
|
// benchmark test: batch 1000 Put operations with fsync turned off.
|
|
|
|
// Each call to `Put` does the following: 1) adds the db operation to a queue in boltDB,
|
|
|
|
// 2) every 1000 operations or 2 ms, whichever is first, BoltDB creates a single
|
|
|
|
// transaction for all operations currently in the batch, executes the operations,
|
|
|
|
// commits, but does NOT write them to disk
|
|
|
|
kdb.db.NoSync = true
|
|
|
|
for n := 0; n < b.N; n++ {
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for i := 0; i < 1000; i++ {
|
|
|
|
key := storage.Key(fmt.Sprintf("testkey%d", i))
|
|
|
|
value := storage.Value("testvalue")
|
|
|
|
|
|
|
|
wg.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer wg.Done()
|
|
|
|
err := kdb.Put(key, value)
|
|
|
|
if err != nil {
|
|
|
|
b.Fatalf("boltDB put: %v\n", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
b.Fatalf("boltDB put: %v\n", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
err := kdb.db.Sync()
|
|
|
|
if err != nil {
|
|
|
|
b.Fatalf("boltDB sync err: %v\n", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|