2019-01-24 20:15:10 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
2018-10-25 18:11:28 +01:00
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package postgreskv
|
|
|
|
|
|
|
|
import (
|
2019-06-05 15:23:10 +01:00
|
|
|
"context"
|
2019-11-13 23:52:14 +00:00
|
|
|
"strings"
|
2018-10-25 18:11:28 +01:00
|
|
|
"testing"
|
|
|
|
|
|
|
|
"github.com/lib/pq"
|
|
|
|
"github.com/zeebo/errs"
|
|
|
|
|
2020-01-17 20:07:00 +00:00
|
|
|
"storj.io/common/testcontext"
|
2019-11-14 19:46:15 +00:00
|
|
|
"storj.io/storj/private/dbutil/pgutil/pgtest"
|
2019-12-19 09:27:27 +00:00
|
|
|
"storj.io/storj/private/dbutil/txutil"
|
2020-01-17 20:07:00 +00:00
|
|
|
"storj.io/storj/private/tagsql"
|
2018-10-25 18:11:28 +01:00
|
|
|
"storj.io/storj/storage"
|
|
|
|
"storj.io/storj/storage/testsuite"
|
|
|
|
)
|
|
|
|
|
|
|
|
func newTestPostgres(t testing.TB) (store *Client, cleanup func()) {
|
2019-04-26 14:39:11 +01:00
|
|
|
if *pgtest.ConnStr == "" {
|
|
|
|
t.Skipf("postgres flag missing, example:\n-postgres-test-db=%s", pgtest.DefaultConnStr)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
2019-04-26 14:39:11 +01:00
|
|
|
pgdb, err := New(*pgtest.ConnStr)
|
2018-10-25 18:11:28 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("init: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return pgdb, func() {
|
|
|
|
if err := pgdb.Close(); err != nil {
|
|
|
|
t.Fatalf("failed to close db: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestSuite(t *testing.T) {
|
|
|
|
store, cleanup := newTestPostgres(t)
|
|
|
|
defer cleanup()
|
|
|
|
|
2019-04-22 14:45:53 +01:00
|
|
|
// zap := zaptest.NewLogger(t)
|
|
|
|
// loggedStore := storelogger.New(zap, store)
|
2020-01-22 19:00:46 +00:00
|
|
|
store.SetLookupLimit(500)
|
2019-04-22 14:45:53 +01:00
|
|
|
testsuite.RunTests(t, store)
|
2018-10-25 18:11:28 +01:00
|
|
|
}
|
|
|
|
|
2019-11-13 23:52:14 +00:00
|
|
|
func TestThatMigrationActuallyHappened(t *testing.T) {
|
2020-01-16 23:48:59 +00:00
|
|
|
t.Skip()
|
2020-01-17 20:07:00 +00:00
|
|
|
ctx := testcontext.New(t)
|
|
|
|
defer ctx.Cleanup()
|
|
|
|
|
2019-11-13 23:52:14 +00:00
|
|
|
store, cleanup := newTestPostgres(t)
|
|
|
|
defer cleanup()
|
|
|
|
|
2020-01-17 20:07:00 +00:00
|
|
|
rows, err := store.db.Query(ctx, `
|
2019-11-13 23:52:14 +00:00
|
|
|
SELECT prosrc
|
|
|
|
FROM pg_catalog.pg_proc p,
|
|
|
|
pg_catalog.pg_namespace n
|
|
|
|
WHERE p.pronamespace = n.oid
|
|
|
|
AND p.proname = 'list_directory'
|
|
|
|
AND n.nspname = ANY(current_schemas(true))
|
|
|
|
AND p.pronargs = 4
|
|
|
|
`)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to get list_directory source: %v", err)
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
if err := rows.Close(); err != nil {
|
|
|
|
t.Fatalf("failed to close rows: %v", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
numFound := 0
|
|
|
|
for rows.Next() {
|
|
|
|
numFound++
|
|
|
|
if numFound > 1 {
|
|
|
|
t.Fatal("there are multiple eligible list_directory() functions??")
|
|
|
|
}
|
|
|
|
var source string
|
|
|
|
if err := rows.Scan(&source); err != nil {
|
|
|
|
t.Fatalf("failed to read list_directory source: %v", err)
|
|
|
|
}
|
|
|
|
if strings.Contains(source, "distinct_prefix (truncatedpath)") {
|
|
|
|
t.Fatal("list_directory() function in pg appears to be the oldnbusted one")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-25 18:11:28 +01:00
|
|
|
func BenchmarkSuite(b *testing.B) {
|
|
|
|
store, cleanup := newTestPostgres(b)
|
|
|
|
defer cleanup()
|
|
|
|
|
|
|
|
testsuite.RunBenchmarks(b, store)
|
|
|
|
}
|
|
|
|
|
2020-01-17 20:07:00 +00:00
|
|
|
// bulkImport streams every item produced by iter into the pathdata table
// using a postgres COPY FROM, all inside a single transaction managed by
// txutil.WithTx.
func bulkImport(ctx context.Context, db tagsql.DB, iter storage.Iterator) error {
	return txutil.WithTx(ctx, db, nil, func(ctx context.Context, txn tagsql.Tx) (err error) {
		// pq.CopyIn builds the COPY FROM statement text for the given
		// table and columns.
		stmt, err := txn.Prepare(ctx, pq.CopyIn("pathdata", "fullpath", "metadata"))
		if err != nil {
			return errs.New("Failed to initialize COPY FROM: %v", err)
		}
		// Closing the statement can itself fail; combine that error into
		// the named return value so it is never silently dropped.
		defer func() {
			err2 := stmt.Close()
			if err2 != nil {
				err = errs.Combine(err, errs.New("Failed to close COPY FROM statement: %v", err2))
			}
		}()

		// Reuse one ListItem and queue one COPY row per iterator element.
		var item storage.ListItem
		for iter.Next(ctx, &item) {
			if _, err := stmt.Exec(ctx, []byte(item.Key), []byte(item.Value)); err != nil {
				return err
			}
		}
		// An Exec with no arguments flushes the buffered rows and finishes
		// the COPY FROM operation (lib/pq convention).
		if _, err = stmt.Exec(ctx); err != nil {
			return errs.New("Failed to complete COPY FROM: %v", err)
		}
		return nil
	})
}
|
|
|
|
|
2020-01-17 20:07:00 +00:00
|
|
|
func bulkDeleteAll(ctx context.Context, db tagsql.DB) error {
|
|
|
|
_, err := db.Exec(ctx, "TRUNCATE pathdata")
|
2018-10-25 18:11:28 +01:00
|
|
|
if err != nil {
|
|
|
|
return errs.New("Failed to TRUNCATE pathdata table: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// pgLongBenchmarkStore embeds Client and adds BulkImport/BulkDeleteAll
// methods, as required by testsuite.BenchmarkPathOperationsInLargeDb.
type pgLongBenchmarkStore struct {
	*Client
}
|
|
|
|
|
2020-01-08 11:42:11 +00:00
|
|
|
// BulkImport loads every item from iter into the store's pathdata table
// by delegating to bulkImport.
func (store *pgLongBenchmarkStore) BulkImport(ctx context.Context, iter storage.Iterator) error {
	return bulkImport(ctx, store.db, iter)
}
|
|
|
|
|
2020-01-17 20:07:00 +00:00
|
|
|
// BulkDeleteAll wipes the pathdata table by delegating to bulkDeleteAll.
func (store *pgLongBenchmarkStore) BulkDeleteAll(ctx context.Context) error {
	return bulkDeleteAll(ctx, store.db)
}
|
|
|
|
|
|
|
|
func BenchmarkSuiteLong(b *testing.B) {
|
|
|
|
store, cleanup := newTestPostgres(b)
|
|
|
|
defer cleanup()
|
|
|
|
|
|
|
|
testsuite.BenchmarkPathOperationsInLargeDb(b, &pgLongBenchmarkStore{store})
|
|
|
|
}
|