private/dbutil: use dbutil and tagsql from storj.io/private

Initially we duplicated the code to avoid large-scale changes to
the packages. Now that we are past the metainfo refactor, we can
remove the duplication.

Change-Id: I9d0b2756cc6e2a2f4d576afa408a15273a7e1cef
Egon Elbre 2021-04-23 12:52:40 +03:00
parent dd5eb2616b
commit a2e20c93ae
113 changed files with 160 additions and 5190 deletions
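
For illustration (not part of the recorded diff), most of the change is a mechanical import-path swap in each affected file, assuming a typical import block:

// before: duplicated helpers vendored under storj.io/storj
import (
	"storj.io/storj/private/dbutil"
	"storj.io/storj/private/tagsql"
)

// after: shared helpers from storj.io/private
import (
	"storj.io/private/dbutil"
	"storj.io/private/tagsql"
)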

View File

@ -12,7 +12,7 @@ import (
"storj.io/common/identity"
"storj.io/common/peertls/extensions"
"storj.io/storj/private/dbutil"
"storj.io/private/dbutil"
"storj.io/storj/storage"
"storj.io/storj/storage/boltdb"
"storj.io/storj/storage/redis"

View File

@ -16,9 +16,9 @@ import (
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/common/uuid"
"storj.io/private/dbutil"
"storj.io/private/dbutil/tempdb"
migrator "storj.io/storj/cmd/metabase-createdat-migration"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/tempdb"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/satellitedb/satellitedbtest"

View File

@ -14,7 +14,7 @@ import (
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/private/dbutil"
"storj.io/private/dbutil"
"storj.io/storj/satellite"
"storj.io/storj/satellite/payments/stripecoinpayments"
"storj.io/storj/satellite/satellitedb"

View File

@ -30,8 +30,8 @@ import (
"storj.io/common/pb"
"storj.io/common/processgroup"
"storj.io/common/storj"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/private/dbutil"
"storj.io/private/dbutil/pgutil"
"storj.io/uplink"
)

go.mod
View File

@ -30,7 +30,7 @@ require (
github.com/segmentio/backo-go v0.0.0-20200129164019-23eae7c10bd3 // indirect
github.com/shopspring/decimal v1.2.0
github.com/spacemonkeygo/monkit/v3 v3.0.10
github.com/spf13/cobra v1.0.0
github.com/spf13/cobra v1.1.3
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.7.1
github.com/stretchr/testify v1.7.0
@ -43,7 +43,7 @@ require (
go.uber.org/zap v1.16.0
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
golang.org/x/sys v0.0.0-20210415045647-66c3f260301c
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
google.golang.org/api v0.20.0 // indirect
@ -51,6 +51,6 @@ require (
storj.io/common v0.0.0-20210419115916-eabb53ea1332
storj.io/drpc v0.0.20
storj.io/monkit-jaeger v0.0.0-20210225162224-66fb37637bf6
storj.io/private v0.0.0-20210403210935-5fd57695864c
storj.io/private v0.0.0-20210423085237-5af81f2a2b21
storj.io/uplink v1.4.7-0.20210421171443-53fab7d9387c
)

go.sum
View File

@ -51,7 +51,6 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU
github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
@ -102,9 +101,7 @@ github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1
github.com/containerd/containerd v1.2.7 h1:8lqLbl7u1j3MmiL9cJ/O275crSq7bfwUayvvatEupQk=
github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@ -241,7 +238,6 @@ github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/schema v1.2.0 h1:YufUaxZYCKGFuAq3c96BOhjgd5nmXiOY9NGzF247Tsc=
github.com/gorilla/schema v1.2.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graphql-go/graphql v0.7.9 h1:5Va/Rt4l5g3YjwDnid3vFfn43faaQBq7rMcIZ0VnV34=
github.com/graphql-go/graphql v0.7.9/go.mod h1:k6yrAYQaSP59DC5UVxbgxESlmVyojThKdORUqGDGmrI=
@ -359,6 +355,7 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@ -378,9 +375,9 @@ github.com/loov/hrtime v1.0.3/go.mod h1:yDY3Pwv2izeY4sq7YcPX/dtLwzg5NU1AxWuWxKwd
github.com/lucas-clemente/quic-go v0.20.1 h1:hb5m76V8QS/8Nw/suHvXqo3BMHAozvIkcnzpJdpanSk=
github.com/lucas-clemente/quic-go v0.20.1/go.mod h1:fZq/HUDIM+mW6X6wtzORjC0E/WDBMKe5Hf9bgjISwLk=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
github.com/marten-seemann/qtls-go1-15 v0.1.4 h1:RehYMOyRW8hPVEja1KBVsFVNSm35Jj9Mvs5yNoZZ28A=
@ -418,8 +415,9 @@ github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eI
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
@ -454,13 +452,15 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.0 h1:NOd0BRdOKpPf0SxkL3HxSQOG7rNh+4kl6PHcBPFs7Q0=
github.com/pelletier/go-toml v1.9.0/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
@ -544,19 +544,21 @@ github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:u
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@ -576,7 +578,6 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k=
@ -585,7 +586,6 @@ github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVT
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g=
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@ -714,7 +714,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@ -789,15 +788,16 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201231184435-2d18734c6014/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210415045647-66c3f260301c h1:6L+uOeS3OQt/f4eFHXZcTxeZrGCuz+CLElgEBjbcTA4=
golang.org/x/sys v0.0.0-20210415045647-66c3f260301c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -885,7 +885,6 @@ google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9M
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@ -910,8 +909,9 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/segmentio/analytics-go.v3 v3.1.0 h1:UzxH1uaGZRpMKDhJyBz0pexz6yUoBU3x8bJsRk/HV6U=
gopkg.in/segmentio/analytics-go.v3 v3.1.0/go.mod h1:4QqqlTlSSpVlWA9/9nDcPw+FkM2yv1NQoYjUbL9/JAw=
@ -921,8 +921,9 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
@ -949,7 +950,7 @@ storj.io/drpc v0.0.20 h1:nzOxsetLi0fJ8xCL92LPlYL0B6iYdDDk1Cpdbn0/r9Y=
storj.io/drpc v0.0.20/go.mod h1:eAxUDk8HWvGl9iqznpuphtZ+WIjIGPJFqNXuKHgRiMM=
storj.io/monkit-jaeger v0.0.0-20210225162224-66fb37637bf6 h1:LTDmeZDrFWD9byqNOf/Bc1VmMNKvja/9Cs52d1V5aTk=
storj.io/monkit-jaeger v0.0.0-20210225162224-66fb37637bf6/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
storj.io/private v0.0.0-20210403210935-5fd57695864c h1:uGbSOfuEA92Mo37KKjSrqq2ScUJcOHTzBipiXC7ltHc=
storj.io/private v0.0.0-20210403210935-5fd57695864c/go.mod h1:wxiajovTC2nUfuYnGVgkf0JkjSJs8Ei9lQ1CdC3H8cg=
storj.io/private v0.0.0-20210423085237-5af81f2a2b21 h1:0ZX6agMxxGMj9jcBl9SYcDPPZqpG+cbi56DV/1Btg6s=
storj.io/private v0.0.0-20210423085237-5af81f2a2b21/go.mod h1:iAc+LGwXYCe+YRRTlkfkg95ZBEL8pWHLVZ508/KQjOs=
storj.io/uplink v1.4.7-0.20210421171443-53fab7d9387c h1:LhZcZ2+UXd6rwhDO05RplEVdkAkqig8tA3xm2DfZclY=
storj.io/uplink v1.4.7-0.20210421171443-53fab7d9387c/go.mod h1:CroFLtFtcKj9B0AigacRHuxjNd+jOm9DG45257fTJo0=

View File

@ -11,12 +11,12 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/private/dbutil"
"storj.io/private/dbutil/pgutil"
"storj.io/storj/multinode"
"storj.io/storj/multinode/console"
"storj.io/storj/multinode/multinodedb/dbx"
"storj.io/storj/multinode/nodes"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/pgutil"
)
var (

View File

@ -11,7 +11,7 @@ perl -0777 -pi \
-e 's,\t"fmt"\n,\t"fmt"\n\t"math/rand"\n,' \
multinodedb.dbx.go
perl -0777 -pi \
-e 's,\t"math/rand"\n\),\n\t"storj.io/storj/private/tagsql"\n\),' \
-e 's,\t"math/rand"\n\),\n\t"storj.io/private/tagsql"\n\),' \
multinodedb.dbx.go
perl -0777 -pi \
-e 's/type DB struct \{\n\t\*sql\.DB/type DB struct \{\n\ttagsql.DB/' \

View File

@ -22,7 +22,7 @@ import (
_ "github.com/jackc/pgx/v4/stdlib"
"github.com/mattn/go-sqlite3"
"storj.io/storj/private/tagsql"
"storj.io/private/tagsql"
)
// Prevent conditional imports from causing build failures

View File

@ -15,13 +15,13 @@ import (
"go.uber.org/zap/zaptest"
"storj.io/common/testcontext"
"storj.io/private/dbutil"
"storj.io/private/dbutil/pgtest"
"storj.io/private/dbutil/pgutil"
"storj.io/private/dbutil/tempdb"
"storj.io/storj/multinode"
"storj.io/storj/multinode/multinodedb"
"storj.io/storj/multinode/multinodedb/dbx"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/dbutil/tempdb"
)
// Database describes a test database.

View File

@ -8,7 +8,7 @@ import (
"storj.io/common/peertls/extensions"
"storj.io/common/peertls/tlsopts"
"storj.io/storj/private/dbutil"
"storj.io/private/dbutil"
"storj.io/storj/storage/boltdb"
"storj.io/storj/storage/redis"
)

View File

@ -1,93 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cockroachutil
import (
"context"
"crypto/rand"
"encoding/hex"
"net/url"
"strings"
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"
"storj.io/common/context2"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/tagsql"
)
var mon = monkit.Package()
// CreateRandomTestingSchemaName creates a random schema name string.
func CreateRandomTestingSchemaName(n int) string {
data := make([]byte, n)
_, _ = rand.Read(data)
return hex.EncodeToString(data)
}
// OpenUnique opens a temporary unique CockroachDB database that will be cleaned up when closed.
// It is expected that this should normally be used by way of
// "storj.io/storj/private/dbutil/tempdb".OpenUnique() instead of calling it directly.
func OpenUnique(ctx context.Context, connStr string, schemaPrefix string) (db *dbutil.TempDatabase, err error) {
if !strings.HasPrefix(connStr, "cockroach://") {
return nil, errs.New("expected a cockroachDB URI, but got %q", connStr)
}
schemaName := schemaPrefix + "-" + CreateRandomTestingSchemaName(8)
masterDB, err := tagsql.Open(ctx, "cockroach", connStr)
if err != nil {
return nil, errs.Wrap(err)
}
defer func() {
err = errs.Combine(err, masterDB.Close())
}()
err = masterDB.PingContext(ctx)
if err != nil {
return nil, errs.New("Could not open masterDB at conn %q: %w", connStr, err)
}
_, err = masterDB.Exec(ctx, "CREATE DATABASE "+pgutil.QuoteIdentifier(schemaName))
if err != nil {
return nil, errs.Wrap(err)
}
cleanup := func(cleanupDB tagsql.DB) error {
ctx := context2.WithoutCancellation(ctx)
_, err := cleanupDB.Exec(ctx, "DROP DATABASE "+pgutil.QuoteIdentifier(schemaName))
return errs.Wrap(err)
}
modifiedConnStr, err := changeDBTargetInConnStr(connStr, schemaName)
if err != nil {
return nil, errs.Combine(err, cleanup(masterDB))
}
sqlDB, err := tagsql.Open(ctx, "cockroach", modifiedConnStr)
if err != nil {
return nil, errs.Combine(errs.Wrap(err), cleanup(masterDB))
}
dbutil.Configure(ctx, sqlDB, "tmp_cockroach", mon)
return &dbutil.TempDatabase{
DB: sqlDB,
ConnStr: modifiedConnStr,
Schema: schemaName,
Driver: "cockroach",
Implementation: dbutil.Cockroach,
Cleanup: cleanup,
}, nil
}
func changeDBTargetInConnStr(connStr string, newDBName string) (string, error) {
connURL, err := url.Parse(connStr)
if err != nil {
return "", errs.Wrap(err)
}
connURL.Path = newDBName
return connURL.String(), nil
}
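
For reference, the OpenUnique helper removed above (now provided by storj.io/private) is normally reached through tempdb.OpenUnique, as the tests later in this commit do; a rough usage sketch inside a test with t *testing.T in scope, using a placeholder connection string and prefix:

ctx := testcontext.New(t)
defer ctx.Cleanup()

// tempdb.OpenUnique dispatches to cockroachutil.OpenUnique for cockroach:// URLs
// and returns a *dbutil.TempDatabase backed by a freshly created database.
testDB, err := tempdb.OpenUnique(ctx, "cockroach://root@localhost:26257/defaultdb?sslmode=disable", "example-test")
require.NoError(t, err)
// Close also runs the Cleanup function, which drops the temporary database.
defer ctx.Check(testDB.Close)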

View File

@ -1,372 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cockroachutil
import (
"context"
"database/sql"
"database/sql/driver"
"errors"
"io"
"net"
"strings"
"syscall"
"github.com/jackc/pgx/v4/stdlib"
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil/pgutil/pgerrcode"
)
// Driver is the type for the "cockroach" sql/database driver. It uses
// github.com/jackc/pgx/v4/stdlib under the covers because of Cockroach's
// PostgreSQL compatibility, but allows differentiation between pg and crdb
// connections.
type Driver struct {
pgxDriver stdlib.Driver
}
// Open opens a new cockroachDB connection.
func (cd *Driver) Open(name string) (driver.Conn, error) {
name = translateName(name)
conn, err := cd.pgxDriver.Open(name)
if err != nil {
return nil, err
}
pgxStdlibConn, ok := conn.(*stdlib.Conn)
if !ok {
return nil, errs.New("Conn from pgx is not a *stdlib.Conn??? T: %T", conn)
}
return &cockroachConn{pgxStdlibConn}, nil
}
// OpenConnector obtains a new db Connector, which sql.DB can use to
// obtain each needed connection at the appropriate time.
func (cd *Driver) OpenConnector(name string) (driver.Connector, error) {
name = translateName(name)
pgxConnector, err := cd.pgxDriver.OpenConnector(name)
if err != nil {
return nil, err
}
return &cockroachConnector{driver: cd, pgxConnector: pgxConnector}, nil
}
// cockroachConnector is a thin wrapper around a pq-based connector. This allows
// Driver to supply our custom cockroachConn type for connections.
type cockroachConnector struct {
driver *Driver
pgxConnector driver.Connector
}
// Driver returns the driver being used for this connector.
func (c *cockroachConnector) Driver() driver.Driver {
return c.driver
}
// Connect creates a new connection using the connector.
func (c *cockroachConnector) Connect(ctx context.Context) (driver.Conn, error) {
pgxConn, err := c.pgxConnector.Connect(ctx)
if err != nil {
return nil, err
}
pgxStdlibConn, ok := pgxConn.(*stdlib.Conn)
if !ok {
return nil, errs.New("Conn from pgx is not a *stdlib.Conn??? T: %T", pgxConn)
}
return &cockroachConn{pgxStdlibConn}, nil
}
type connAll interface {
driver.Conn
driver.ConnBeginTx
driver.ExecerContext
driver.QueryerContext
}
// cockroachConn is a connection to a database. It is not used concurrently by multiple goroutines.
type cockroachConn struct {
underlying *stdlib.Conn
}
// Assert that cockroachConn fulfills connAll.
var _ connAll = (*cockroachConn)(nil)
// StdlibConn returns the underlying pgx std connection.
func (c *cockroachConn) StdlibConn() *stdlib.Conn { return c.underlying }
// Close closes the cockroachConn.
func (c *cockroachConn) Close() error {
return c.underlying.Close()
}
// ExecContext (when implemented by a driver.Conn) provides ExecContext
// functionality to a sql.DB instance. This implementation provides
// retry semantics for single statements.
func (c *cockroachConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
result, err := c.underlying.ExecContext(ctx, query, args)
for err != nil && !c.isInTransaction() && NeedsRetry(err) {
mon.Event("needed_retry")
result, err = c.underlying.ExecContext(ctx, query, args)
}
return result, err
}
type cockroachRows struct {
rows driver.Rows
firstResults []driver.Value
eof bool
}
// Columns returns the names of the columns.
func (rows *cockroachRows) Columns() []string {
return rows.rows.Columns()
}
// Close closes the rows iterator.
func (rows *cockroachRows) Close() error {
return rows.rows.Close()
}
// Next implements the Next method on driver.Rows.
func (rows *cockroachRows) Next(dest []driver.Value) error {
if rows.eof {
return io.EOF
}
if rows.firstResults == nil {
return rows.rows.Next(dest)
}
copy(dest, rows.firstResults)
rows.firstResults = nil
return nil
}
func wrapRows(rows driver.Rows) (crdbRows *cockroachRows, err error) {
columns := rows.Columns()
dest := make([]driver.Value, len(columns))
err = rows.Next(dest)
if err != nil {
if errors.Is(err, io.EOF) {
return &cockroachRows{rows: rows, firstResults: nil, eof: true}, nil
}
return nil, err
}
return &cockroachRows{rows: rows, firstResults: dest}, nil
}
// QueryContext (when implemented by a driver.Conn) provides QueryContext
// functionality to a sql.DB instance. This implementation provides
// retry semantics for single statements.
func (c *cockroachConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (_ driver.Rows, err error) {
defer mon.Task()(&ctx)(&err)
for {
result, err := c.underlying.QueryContext(ctx, query, args)
if err != nil {
if NeedsRetry(err) {
if c.isInTransaction() {
return nil, err
}
mon.Event("needed_retry")
continue
}
return nil, err
}
wrappedResult, err := wrapRows(result)
if err != nil {
// If this returns an error it's probably the same error
// we got from calling Next inside wrapRows.
_ = result.Close()
if NeedsRetry(err) {
if c.isInTransaction() {
return nil, err
}
mon.Event("needed_retry")
continue
}
return nil, err
}
return wrappedResult, nil
}
}
// Begin starts a new transaction.
func (c *cockroachConn) Begin() (driver.Tx, error) {
return c.BeginTx(context.Background(), driver.TxOptions{})
}
// BeginTx begins a new transaction using the specified context and with the specified options.
func (c *cockroachConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
return c.underlying.BeginTx(ctx, opts)
}
// Prepare prepares a statement for future execution.
func (c *cockroachConn) Prepare(query string) (driver.Stmt, error) {
pqStmt, err := c.underlying.Prepare(query)
if err != nil {
return nil, err
}
adapted, ok := pqStmt.(stmtAll)
if !ok {
return nil, errs.New("Stmt type %T does not provide stmtAll?!", adapted)
}
return &cockroachStmt{underlyingStmt: adapted, conn: c}, nil
}
type transactionStatus byte
const (
txnStatusIdle transactionStatus = 'I'
txnStatusIdleInTransaction transactionStatus = 'T'
txnStatusInFailedTransaction transactionStatus = 'E'
)
func (c *cockroachConn) txnStatus() transactionStatus {
pgConn := c.underlying.Conn().PgConn()
return transactionStatus(pgConn.TxStatus())
}
func (c *cockroachConn) isInTransaction() bool {
txnStatus := c.txnStatus()
return txnStatus == txnStatusIdleInTransaction || txnStatus == txnStatusInFailedTransaction
}
type stmtAll interface {
driver.Stmt
driver.StmtExecContext
driver.StmtQueryContext
}
type cockroachStmt struct {
underlyingStmt stmtAll
conn *cockroachConn
}
// Assert that cockroachStmt satisfies StmtExecContext and StmtQueryContext.
var _ stmtAll = (*cockroachStmt)(nil)
// Close closes a prepared statement.
func (stmt *cockroachStmt) Close() error {
return stmt.underlyingStmt.Close()
}
// NumInput returns the number of placeholder parameters.
func (stmt *cockroachStmt) NumInput() int {
return stmt.underlyingStmt.NumInput()
}
// Exec executes a SQL statement in the background context.
func (stmt *cockroachStmt) Exec(args []driver.Value) (driver.Result, error) {
// since (driver.Stmt).Exec() is deprecated, we translate our Value args to NamedValue args
// and pass in background context to ExecContext instead.
namedArgs := make([]driver.NamedValue, len(args))
for i, arg := range args {
namedArgs[i] = driver.NamedValue{Ordinal: i + 1, Value: arg}
}
result, err := stmt.underlyingStmt.ExecContext(context.Background(), namedArgs)
for err != nil && !stmt.conn.isInTransaction() && NeedsRetry(err) {
mon.Event("needed_retry")
result, err = stmt.underlyingStmt.ExecContext(context.Background(), namedArgs)
}
return result, err
}
// Query executes a query in the background context.
func (stmt *cockroachStmt) Query(args []driver.Value) (driver.Rows, error) {
// since (driver.Stmt).Query() is deprecated, we translate our Value args to NamedValue args
// and pass in background context to QueryContext instead.
namedArgs := make([]driver.NamedValue, len(args))
for i, arg := range args {
namedArgs[i] = driver.NamedValue{Ordinal: i + 1, Value: arg}
}
return stmt.QueryContext(context.Background(), namedArgs)
}
// ExecContext executes SQL statements in the specified context.
func (stmt *cockroachStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
result, err := stmt.underlyingStmt.ExecContext(ctx, args)
for err != nil && !stmt.conn.isInTransaction() && NeedsRetry(err) {
mon.Event("needed_retry")
result, err = stmt.underlyingStmt.ExecContext(ctx, args)
}
return result, err
}
// QueryContext executes a query in the specified context.
func (stmt *cockroachStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (_ driver.Rows, err error) {
defer mon.Task()(&ctx)(&err)
for {
result, err := stmt.underlyingStmt.QueryContext(ctx, args)
if err != nil {
if NeedsRetry(err) {
if stmt.conn.isInTransaction() {
return nil, err
}
mon.Event("needed_retry")
continue
}
return nil, err
}
wrappedResult, err := wrapRows(result)
if err != nil {
// If this returns an error it's probably the same error
// we got from calling Next inside wrapRows.
_ = result.Close()
if NeedsRetry(err) {
if stmt.conn.isInTransaction() {
return nil, err
}
mon.Event("needed_retry")
continue
}
return nil, err
}
return wrappedResult, nil
}
}
// translateName changes the scheme name in a `cockroach://` URL to
// `postgres://`, as that is what jackc/pgx will expect.
func translateName(name string) string {
if strings.HasPrefix(name, "cockroach://") {
name = "postgres://" + name[12:]
}
return name
}
// NeedsRetry checks if the error code means a retry is needed,
// borrowed from code in crdb.
func NeedsRetry(err error) bool {
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
mon.Event("crdb_error_eof")
// Currently we don't retry with EOF because it's unclear if
// a query succeeded or failed.
return false
}
if errors.Is(err, syscall.ECONNRESET) {
mon.Event("crdb_error_conn_reset_needed_retry")
return true
}
if errors.Is(err, syscall.ECONNREFUSED) {
mon.Event("crdb_error_conn_refused_needed_retry")
return true
}
var netErr net.Error
if errors.As(err, &netErr) {
mon.Event("crdb_net_error_needed_retry")
return true
}
code := pgerrcode.FromError(err)
// 57P01 occurs when a CRDB node rejoins the cluster but is not ready to accept connections
// CRDB support recommended a retry at this point
// Support ticket: https://support.cockroachlabs.com/hc/en-us/requests/5510
// TODO re-evaluate this if support provides a better solution
return code == "40001" || code == "CR000" || code == "57P01"
}
var defaultDriver = &Driver{}
func init() {
sql.Register("cockroach", defaultDriver)
}
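
Because the init function above registers the driver, the removed package was used through the standard "cockroach" driver name; a minimal sketch (connection string is a placeholder):

db, err := tagsql.Open(ctx, "cockroach", "cockroach://root@localhost:26257/defaultdb?sslmode=disable")
if err != nil {
	return errs.Wrap(err)
}
defer func() { _ = db.Close() }()

// Exec and Query issued through this driver transparently retry on the
// error codes recognized by NeedsRetry, as long as no transaction is open.
_, err = db.Exec(ctx, `SELECT 1`)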

View File

@ -1,109 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package cockroachutil
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"github.com/zeebo/errs"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/private/tagsql"
)
func TestLibPqCompatibility(t *testing.T) {
connstr := pgtest.PickCockroach(t)
ctx := testcontext.New(t)
defer ctx.Cleanup()
testDB, err := OpenUnique(ctx, connstr, "TestLibPqCompatibility")
require.NoError(t, err)
defer ctx.Check(testDB.Close)
// use a single dedicated conn for testing
conn, err := testDB.Conn(ctx)
require.NoError(t, err)
defer ctx.Check(conn.Close)
// should be in idle status, no transaction, initially
require.Equal(t, txnStatusIdle, getTxnStatus(ctx, t, conn))
require.False(t, checkIsInTx(ctx, t, conn))
// start a transaction
tx, err := conn.BeginTx(ctx, nil)
require.NoError(t, err)
func() {
defer func() { err = tx.Rollback() }()
// should be idle in transaction now
require.Equal(t, txnStatusIdleInTransaction, getTxnStatus(ctx, t, conn))
require.True(t, checkIsInTx(ctx, t, conn))
// issue successful query
rows, err := tx.QueryContext(ctx, `SELECT 1`)
require.NoError(t, err)
require.True(t, rows.Next())
var n int
err = rows.Scan(&n)
require.NoError(t, err)
require.False(t, rows.Next())
err = rows.Err()
require.NoError(t, err)
err = rows.Close()
require.NoError(t, err)
// should still be idle in transaction
require.Equal(t, txnStatusIdleInTransaction, getTxnStatus(ctx, t, conn))
require.True(t, checkIsInTx(ctx, t, conn))
// issue bad query
_, err = tx.QueryContext(ctx, `SELECT BALONEY SANDWICHES`)
require.Error(t, err)
// should be in a failed transaction now
require.Equal(t, txnStatusInFailedTransaction, getTxnStatus(ctx, t, conn))
require.True(t, checkIsInTx(ctx, t, conn))
}()
// check rollback error
require.NoError(t, err)
// should be back out of any transaction
require.Equal(t, txnStatusIdle, getTxnStatus(ctx, t, conn))
require.False(t, checkIsInTx(ctx, t, conn))
}
func withCockroachConn(ctx context.Context, sqlConn tagsql.Conn, fn func(conn *cockroachConn) error) error {
return sqlConn.Raw(ctx, func(rawConn interface{}) error {
crConn, ok := rawConn.(*cockroachConn)
if !ok {
return errs.New("conn object is %T, not *cockroachConn", crConn)
}
return fn(crConn)
})
}
func getTxnStatus(ctx context.Context, t *testing.T, sqlConn tagsql.Conn) (txnStatus transactionStatus) {
err := withCockroachConn(ctx, sqlConn, func(crConn *cockroachConn) error {
txnStatus = crConn.txnStatus()
return nil
})
require.NoError(t, err)
return txnStatus
}
func checkIsInTx(ctx context.Context, t *testing.T, sqlConn tagsql.Conn) (isInTx bool) {
err := withCockroachConn(ctx, sqlConn, func(crConn *cockroachConn) error {
isInTx = crConn.isInTransaction()
return nil
})
require.NoError(t, err)
return isInTx
}

View File

@ -1,74 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cockroachutil_test
import (
"strings"
"testing"
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/cockroachutil"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/private/dbutil/tempdb"
"storj.io/storj/private/tagsql"
)
func TestTempCockroachDB(t *testing.T) {
connstr := pgtest.PickCockroach(t)
ctx := testcontext.New(t)
defer ctx.Cleanup()
prefix := "name#spaced/Test/DB"
testDB, err := tempdb.OpenUnique(ctx, connstr, prefix)
require.NoError(t, err)
require.Equal(t, "cockroach", testDB.Driver)
require.Equal(t, dbutil.Cockroach, testDB.Implementation)
require.IsType(t, &cockroachutil.Driver{}, testDB.DB.Driver())
// save these so we can close testDB down below and then still try connecting to the same place
// (without requiring that the values stay intact in the testDB struct when we close it)
driverCopy := testDB.Driver
connStrCopy := testDB.ConnStr
// assert new test db exists and can be connected to again
otherConn, err := tagsql.Open(ctx, driverCopy, connStrCopy)
require.NoError(t, err)
defer ctx.Check(otherConn.Close)
// verify the name matches expectation
var dbName string
row := otherConn.QueryRowContext(ctx, `SELECT current_database()`)
err = row.Scan(&dbName)
require.NoError(t, err)
require.Truef(t, strings.HasPrefix(dbName, prefix), "Expected prefix of %q for current db name, but found %q", prefix, dbName)
// verify there is a db with such a name
var count int
row = otherConn.QueryRowContext(ctx, `SELECT COUNT(*) FROM pg_database WHERE datname = current_database()`)
err = row.Scan(&count)
require.NoError(t, err)
require.Equalf(t, 1, count, "Expected 1 DB with matching name, but counted %d", count)
// close testDB
err = testDB.Close()
require.NoError(t, err)
// make a new connection back to the master connstr just to check that our temp db
// really was dropped
plainDBConn, err := tagsql.Open(ctx, "cockroach", connstr)
require.NoError(t, err)
defer ctx.Check(plainDBConn.Close)
// assert new test db was deleted (we expect this connection to keep working, even though its
// database was deleted out from under it!)
row = plainDBConn.QueryRowContext(ctx, `SELECT COUNT(*) FROM pg_database WHERE datname = $1`, dbName)
err = row.Scan(&count)
require.NoError(t, err)
require.Equalf(t, 0, count, "Expected 0 DB with matching name, but counted %d (deletion failure?)", count)
}

View File

@ -1,60 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package dbutil
// Implementation type of valid DBs.
type Implementation int
const (
// Unknown is an unknown db type.
Unknown Implementation = iota
// Postgres is a PostgreSQL database type.
Postgres
// Cockroach is a CockroachDB database type.
Cockroach
// Bolt is a Bolt kv store.
Bolt
// Redis is a Redis kv store.
Redis
// SQLite3 is a sqlite3 database.
SQLite3
)
// ImplementationForScheme returns the Implementation that is used for
// the url with the provided scheme.
func ImplementationForScheme(scheme string) Implementation {
switch scheme {
case "pgx", "postgres", "postgresql":
return Postgres
case "cockroach":
return Cockroach
case "bolt":
return Bolt
case "redis":
return Redis
case "sqlite", "sqlite3":
return SQLite3
default:
return Unknown
}
}
// SchemeForImplementation returns the scheme that is used for URLs
// that use the given Implementation.
func SchemeForImplementation(implementation Implementation) string {
switch implementation {
case Postgres:
return "postgres"
case Cockroach:
return "cockroach"
case Bolt:
return "bolt"
case Redis:
return "redis"
case SQLite3:
return "sqlite3"
default:
return "<unknown>"
}
}
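
For illustration, the mapping above is typically consulted when deciding how to open a database from a configuration URL; a hypothetical call site (the URL and error handling are placeholders):

u, err := url.Parse("cockroach://user@localhost:26257/mydb")
if err != nil {
	return err
}
switch dbutil.ImplementationForScheme(u.Scheme) {
case dbutil.Postgres, dbutil.Cockroach:
	// open through the SQL drivers (tagsql)
case dbutil.Bolt, dbutil.Redis:
	// open the corresponding key-value store
default:
	return errs.New("unsupported database scheme: %q", u.Scheme)
}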

View File

@ -1,211 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package dbschema
import (
"context"
"fmt"
"regexp"
"sort"
"strings"
"github.com/zeebo/errs"
)
// Data is the database content formatted as strings.
type Data struct {
Tables []*TableData
}
// TableData is content of a sql table.
type TableData struct {
Name string
Columns []string
Rows []RowData
}
// ColumnData is a value of a column within a row.
type ColumnData struct {
Column string
Value string
}
// String returns a string representation of the column.
func (c ColumnData) String() string {
return fmt.Sprintf("%s:%s", c.Column, c.Value)
}
// RowData is content of a single row.
type RowData []ColumnData
// Less returns true if one row is less than the other.
func (row RowData) Less(b RowData) bool {
n := len(row)
if len(b) < n {
n = len(b)
}
for k := 0; k < n; k++ {
if row[k].Value < b[k].Value {
return true
} else if row[k].Value > b[k].Value {
return false
}
}
return len(row) < len(b)
}
// AddTable adds a new table.
func (data *Data) AddTable(table *TableData) {
data.Tables = append(data.Tables, table)
}
// DropTable removes the specified table.
func (data *Data) DropTable(tableName string) {
for i, table := range data.Tables {
if table.Name == tableName {
data.Tables = append(data.Tables[:i], data.Tables[i+1:]...)
break
}
}
}
// AddRow adds a new row.
func (table *TableData) AddRow(row RowData) error {
if len(row) != len(table.Columns) {
return errs.New("inconsistent row added to table")
}
for i, cdata := range row {
if cdata.Column != table.Columns[i] {
return errs.New("inconsistent row added to table")
}
}
table.Rows = append(table.Rows, row)
return nil
}
// FindTable finds a table by name.
func (data *Data) FindTable(tableName string) (*TableData, bool) {
for _, table := range data.Tables {
if table.Name == tableName {
return table, true
}
}
return nil, false
}
// Sort sorts all tables.
func (data *Data) Sort() {
for _, table := range data.Tables {
table.Sort()
}
}
// Sort sorts all rows.
func (table *TableData) Sort() {
sort.Slice(table.Rows, func(i, k int) bool {
return table.Rows[i].Less(table.Rows[k])
})
}
// Clone returns a clone of row data.
func (row RowData) Clone() RowData {
return append(RowData{}, row...)
}
// QueryData loads all data from tables.
func QueryData(ctx context.Context, db Queryer, schema *Schema, quoteColumn func(string) string) (*Data, error) {
data := &Data{}
for _, tableSchema := range schema.Tables {
if err := ValidateTableName(tableSchema.Name); err != nil {
return nil, err
}
columnNames := tableSchema.ColumnNames()
// quote column names
quotedColumns := make([]string, len(columnNames))
for i, columnName := range columnNames {
if err := ValidateColumnName(columnName); err != nil {
return nil, err
}
quotedColumns[i] = quoteColumn(columnName)
}
table := &TableData{
Name: tableSchema.Name,
Columns: columnNames,
}
data.AddTable(table)
/* #nosec G202 */ // The column names and the table name are validated above
query := `SELECT ` + strings.Join(quotedColumns, ", ") + ` FROM ` + table.Name
err := func() (err error) {
rows, err := db.QueryContext(ctx, query)
if err != nil {
return err
}
defer func() { err = errs.Combine(err, rows.Close()) }()
row := make(RowData, len(columnNames))
rowargs := make([]interface{}, len(columnNames))
for i := range row {
row[i].Column = columnNames[i]
rowargs[i] = &row[i].Value
}
for rows.Next() {
err := rows.Scan(rowargs...)
if err != nil {
return err
}
if err := table.AddRow(row.Clone()); err != nil {
return err
}
}
return rows.Err()
}()
if err != nil {
return nil, err
}
}
data.Sort()
return data, nil
}
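
For illustration, QueryData is driven by a previously loaded Schema plus a driver-specific identifier quoter; a rough call site, where the schema value and the use of pgutil.QuoteIdentifier as the quoter are assumptions:

data, err := dbschema.QueryData(ctx, db, schema, pgutil.QuoteIdentifier)
if err != nil {
	return errs.Wrap(err)
}
// data now holds every table's rows rendered as strings, sorted for stable comparison.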
var columnNameWhiteList = regexp.MustCompile(`^(?:[a-zA-Z0-9_](?:-[a-zA-Z0-9_]|[a-zA-Z0-9_])?)+$`)
// ValidateColumnName checks that the column name has at least 1 character and is
// formed only of lower and upper case letters, numbers, underscores, or dashes,
// where dashes cannot appear at the beginning or the end, nor two in a row.
func ValidateColumnName(column string) error {
if !columnNameWhiteList.MatchString(column) {
return errs.New(
"forbidden column name, it can only contains letters, numbers, underscores and dashes not in a row. Got: %s",
column,
)
}
return nil
}
var tableNameWhiteList = regexp.MustCompile(`^(?:[a-zA-Z0-9_](?:-[a-zA-Z0-9_]|[a-zA-Z0-9_])?)+(?:\.(?:[a-zA-Z0-9_](?:-[a-zA-Z0-9_]|[a-zA-Z0-9_])?)+)?$`)
// ValidateTableName checks that the table name has at least 1 character and is
// formed only of lower and upper case letters, numbers, underscores, or dashes,
// where dashes cannot appear at the beginning or the end, nor two in a row.
// One dot is allowed for scoping tables in a schema (e.g. public.my_table).
func ValidateTableName(table string) error {
if !tableNameWhiteList.MatchString(table) {
return errs.New(
"forbidden table name, it can only contains letters, numbers, underscores and dashes not in a row. Got: %s",
table,
)
}
return nil
}

View File

@ -1,82 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package dbschema_test
import (
"testing"
"github.com/stretchr/testify/assert"
"storj.io/storj/private/dbutil/dbschema"
)
func TestValidateColumnName(t *testing.T) {
tcases := []struct {
desc string
column string
isErr bool
}{
{desc: "valid column: all lowercase letters", column: "acolumn", isErr: false},
{desc: "valid column: all uppercase letters", column: "ACOLUMN", isErr: false},
{desc: "valid column: all lower and upper case letters", column: "aColumn", isErr: false},
{desc: "valid column: all letters and numbers", column: "1Column", isErr: false},
{desc: "valid column: with underscores", column: "a_column_2", isErr: false},
{desc: "valid column: with dashes", column: "a-col_umn-2", isErr: false},
{desc: "valid column: single lowercase letter", column: "e", isErr: false},
{desc: "valid column: single uppercase letter", column: "Z", isErr: false},
{desc: "valid column: single number", column: "7", isErr: false},
{desc: "valid column: single underscore", column: "_", isErr: false},
{desc: "invalid column: beginning with dash", column: "-col_umn2", isErr: true},
{desc: "invalid column: ending with dash", column: "Column-", isErr: true},
{desc: "invalid column: 2 dashes in a row", column: "Col--umn2", isErr: true},
{desc: "invalid column: containing forbidden chars (?)", column: "Col?umn", isErr: true},
{desc: "invalid column: containing forbidden chars (*)", column: "Column*", isErr: true},
{desc: "invalid column: containing forbidden chars (blank space)", column: "a Column", isErr: true},
}
for _, tc := range tcases {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
err := dbschema.ValidateColumnName(tc.column)
isErr := err != nil
assert.Equal(t, tc.isErr, isErr, "returns error")
})
}
}
func TestValidateTableName(t *testing.T) {
tcases := []struct {
desc string
table string
isErr bool
}{
{desc: "valid table: all lowercase letters", table: "atable", isErr: false},
{desc: "valid table: all uppercase letters", table: "ATABLE", isErr: false},
{desc: "valid table: all lower and upper case letters", table: "aTable", isErr: false},
{desc: "valid table: all letters and numbers", table: "1Table", isErr: false},
{desc: "valid table: with underscores", table: "a_table_2", isErr: false},
{desc: "valid table: with dashes", table: "a-tab_le-2", isErr: false},
{desc: "valid table: single lowercase letter", table: "e", isErr: false},
{desc: "valid table: single uppercase letter", table: "Z", isErr: false},
{desc: "valid table: table with schema", table: "a.Table", isErr: false},
{desc: "valid table: single number", table: "7", isErr: false},
{desc: "valid table: single underscore", table: "_", isErr: false},
{desc: "invalid table: beginning with dash", table: "-tab_le2", isErr: true},
{desc: "invalid table: ending with dash", table: "Table-", isErr: true},
{desc: "invalid table: 2 dashes in a row", table: "Table--2", isErr: true},
{desc: "invalid table: containing forbidden chars (?)", table: "Tab?e", isErr: true},
{desc: "invalid table: containing forbidden chars (*)", table: "*table", isErr: true},
{desc: "invalid table: containing forbidden chars (blank space)", table: "a Table", isErr: true},
{desc: "invalid table: more than one dot)", table: "public.t1.t2", isErr: true},
}
for _, tc := range tcases {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
err := dbschema.ValidateTableName(tc.table)
isErr := err != nil
assert.Equal(t, tc.isErr, isErr, "returns error")
})
}
}

View File

@ -1,275 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
// Package dbschema implements querying and comparing schemas for testing.
package dbschema
import (
"context"
"database/sql"
"fmt"
"sort"
"strings"
"storj.io/storj/private/tagsql"
)
// Queryer is a representation for something that can query.
type Queryer interface {
// QueryRowContext executes a query that returns a single row.
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
// QueryContext executes a query that returns rows, typically a SELECT.
QueryContext(ctx context.Context, query string, args ...interface{}) (tagsql.Rows, error)
}
// Schema is the database structure.
type Schema struct {
Tables []*Table
Indexes []*Index
}
func (schema Schema) String() string {
var tables []string
for _, table := range schema.Tables {
tables = append(tables, table.String())
}
var indexes []string
for _, index := range schema.Indexes {
indexes = append(indexes, index.String())
}
return fmt.Sprintf("Tables:\n\t%s\nIndexes:\n\t%s",
indent(strings.Join(tables, "\n")),
indent(strings.Join(indexes, "\n")))
}
// Table is a sql table.
type Table struct {
Name string
Columns []*Column
PrimaryKey []string
Unique [][]string
}
func (table Table) String() string {
var columns []string
for _, column := range table.Columns {
columns = append(columns, column.String())
}
var uniques []string
for _, unique := range table.Unique {
uniques = append(uniques, strings.Join(unique, " "))
}
return fmt.Sprintf("Name: %s\nColumns:\n\t%s\nPrimaryKey: %s\nUniques:\n\t%s",
table.Name,
indent(strings.Join(columns, "\n")),
strings.Join(table.PrimaryKey, " "),
indent(strings.Join(uniques, "\n")))
}
// Column is a sql column.
type Column struct {
Name string
Type string
IsNullable bool
Default string
Reference *Reference
}
func (column Column) String() string {
return fmt.Sprintf("Name: %s\nType: %s\nNullable: %t\nDefault: %q\nReference: %s",
column.Name,
column.Type,
column.IsNullable,
column.Default,
column.Reference)
}
// Reference is a column foreign key.
type Reference struct {
Table string
Column string
OnDelete string
OnUpdate string
}
func (reference *Reference) String() string {
if reference == nil {
return "nil"
}
return fmt.Sprintf("Reference<Table: %s, Column: %s, OnDelete: %s, OnUpdate: %s>",
reference.Table,
reference.Column,
reference.OnDelete,
reference.OnUpdate)
}
// Index is an index for a table.
type Index struct {
Name string
Table string
Columns []string
Unique bool
Partial string // partial expression
}
func (index Index) String() string {
return fmt.Sprintf("Index<Table: %s, Name: %s, Columns: %s, Unique: %t, Partial: %q>",
index.Table,
index.Name,
indent(strings.Join(index.Columns, " ")),
index.Unique,
index.Partial)
}
// EnsureTable returns the table with the specified name and creates one if needed.
func (schema *Schema) EnsureTable(tableName string) *Table {
for _, table := range schema.Tables {
if table.Name == tableName {
return table
}
}
table := &Table{Name: tableName}
schema.Tables = append(schema.Tables, table)
return table
}
// DropTable removes the specified table.
func (schema *Schema) DropTable(tableName string) {
for i, table := range schema.Tables {
if table.Name == tableName {
schema.Tables = append(schema.Tables[:i], schema.Tables[i+1:]...)
break
}
}
j := 0
for _, index := range schema.Indexes {
if index.Table == tableName {
continue
}
schema.Indexes[j] = index
j++
}
schema.Indexes = schema.Indexes[:j:j]
}
// FindTable returns the specified table.
func (schema *Schema) FindTable(tableName string) (*Table, bool) {
for _, table := range schema.Tables {
if table.Name == tableName {
return table, true
}
}
return nil, false
}
// FindIndex finds index in the schema.
func (schema *Schema) FindIndex(name string) (*Index, bool) {
for _, idx := range schema.Indexes {
if idx.Name == name {
return idx, true
}
}
return nil, false
}
// DropIndex removes the specified index.
func (schema *Schema) DropIndex(name string) {
for i, idx := range schema.Indexes {
if idx.Name == name {
schema.Indexes = append(schema.Indexes[:i], schema.Indexes[i+1:]...)
return
}
}
}
// AddColumn adds the column to the table.
func (table *Table) AddColumn(column *Column) {
table.Columns = append(table.Columns, column)
}
// RemoveColumn removes the column from the table.
func (table *Table) RemoveColumn(columnName string) {
for i, column := range table.Columns {
if column.Name == columnName {
table.Columns = append(table.Columns[:i], table.Columns[i+1:]...)
return
}
}
}
// FindColumn finds a column in the table.
func (table *Table) FindColumn(columnName string) (*Column, bool) {
for _, column := range table.Columns {
if column.Name == columnName {
return column, true
}
}
return nil, false
}
// ColumnNames returns column names.
func (table *Table) ColumnNames() []string {
columns := make([]string, len(table.Columns))
for i, column := range table.Columns {
columns[i] = column.Name
}
return columns
}
// Sort sorts tables and indexes.
func (schema *Schema) Sort() {
sort.Slice(schema.Tables, func(i, k int) bool {
return schema.Tables[i].Name < schema.Tables[k].Name
})
for _, table := range schema.Tables {
table.Sort()
}
sort.Slice(schema.Indexes, func(i, k int) bool {
switch {
case schema.Indexes[i].Table < schema.Indexes[k].Table:
return true
case schema.Indexes[i].Table > schema.Indexes[k].Table:
return false
default:
return schema.Indexes[i].Name < schema.Indexes[k].Name
}
})
}
// Sort sorts columns, primary keys and unique.
func (table *Table) Sort() {
sort.Slice(table.Columns, func(i, k int) bool {
return table.Columns[i].Name < table.Columns[k].Name
})
sort.Strings(table.PrimaryKey)
sort.Slice(table.Unique, func(i, k int) bool {
return lessStrings(table.Unique[i], table.Unique[k])
})
}
func lessStrings(a, b []string) bool {
n := len(a)
if len(b) < n {
n = len(b)
}
for k := 0; k < n; k++ {
if a[k] < b[k] {
return true
} else if a[k] > b[k] {
return false
}
}
return len(a) < len(b)
}
func indent(lines string) string {
return strings.TrimSpace(strings.ReplaceAll(lines, "\n", "\n\t"))
}

View File

@ -1,98 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package dbschema
import (
"bufio"
"sort"
"strings"
)
// Snapshots defines a collection of snapshot.
type Snapshots struct {
List []*Snapshot
}
// Snapshot defines a particular snapshot of schema and data.
type Snapshot struct {
Version int
Sections
*Schema
*Data
}
// DropTable removes the specified table.
func (snapshot *Snapshot) DropTable(tableName string) {
snapshot.Schema.DropTable(tableName)
snapshot.Data.DropTable(tableName)
}
// Add adds a new snapshot.
func (snapshots *Snapshots) Add(snap *Snapshot) {
snapshots.List = append(snapshots.List, snap)
}
// FindVersion finds a snapshot with the specified version.
func (snapshots *Snapshots) FindVersion(version int) (*Snapshot, bool) {
for _, snap := range snapshots.List {
if snap.Version == version {
return snap, true
}
}
return nil, false
}
// Sort sorts the snapshots by version.
func (snapshots *Snapshots) Sort() {
sort.Slice(snapshots.List, func(i, k int) bool {
return snapshots.List[i].Version < snapshots.List[k].Version
})
}
// Sections is a type to keep track of the sections inside of a sql script.
type Sections struct {
Script string
Sections map[string]string
}
// These consts are the names of the sections that are typical in our scripts.
const (
NewData = "NEW DATA"
OldData = "OLD DATA"
Main = "MAIN"
MainData = "MAIN DATA"
)
// NewSections constructs a Sections from a sql script.
func NewSections(script string) Sections {
sections := make(map[string]string)
var buf strings.Builder
section := "MAIN"
scanner := bufio.NewScanner(strings.NewReader(script))
for scanner.Scan() {
line := scanner.Text()
if len(line) > 6 && line[:3] == "-- " && line[len(line)-3:] == " --" {
sections[section] += buf.String()
buf.Reset()
section = line[3 : len(line)-3]
}
_, _ = buf.WriteString(line)
_ = buf.WriteByte('\n')
}
if buf.Len() > 0 {
sections[section] += buf.String()
}
return Sections{
Script: script,
Sections: sections,
}
}
// LookupSection finds the named section in the script or returns an empty string.
func (s Sections) LookupSection(section string) string {
return s.Sections[section]
}
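// Illustrative sketch (not part of the original file): how a migration script
// is split into sections by "-- NAME --" marker lines. The SQL is made up.
func exampleSections() {
	sections := NewSections(`CREATE TABLE example_users ( id bigint );
-- NEW DATA --
INSERT INTO example_users VALUES (1);`)
	_ = sections.LookupSection(Main)    // everything before the first marker
	_ = sections.LookupSection(NewData) // everything from the "-- NEW DATA --" marker on
}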


@ -1,44 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package dbutil
import (
"context"
"database/sql"
"flag"
"time"
"github.com/spacemonkeygo/monkit/v3"
)
var (
maxIdleConns = flag.Int("db.max_idle_conns", 1, "Maximum Amount of Idle Database connections, -1 means the stdlib default")
maxOpenConns = flag.Int("db.max_open_conns", 5, "Maximum Amount of Open Database connections, -1 means the stdlib default")
connMaxLifetime = flag.Duration("db.conn_max_lifetime", 30*time.Minute, "Maximum Database Connection Lifetime, -1ns means the stdlib default")
)
// ConfigurableDB contains methods for configuring a database.
type ConfigurableDB interface {
SetMaxIdleConns(int)
SetMaxOpenConns(int)
SetConnMaxLifetime(time.Duration)
Stats() sql.DBStats
}
// Configure sets connection boundaries and adds db_stats monitoring to monkit.
func Configure(ctx context.Context, db ConfigurableDB, dbName string, mon *monkit.Scope) {
if *maxIdleConns >= 0 {
db.SetMaxIdleConns(*maxIdleConns)
}
if *maxOpenConns >= 0 {
db.SetMaxOpenConns(*maxOpenConns)
}
if *connMaxLifetime >= 0 {
db.SetConnMaxLifetime(*connMaxLifetime)
}
mon.Chain(monkit.StatSourceFunc(
func(cb func(key monkit.SeriesKey, field string, val float64)) {
monkit.StatSourceFromStruct(monkit.NewSeriesKey("db_stats").WithTag("db_name", dbName), db.Stats()).Stats(cb)
}))
}
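// Illustrative sketch (not part of the original file): applying Configure right
// after opening a database. Assumes "storj.io/storj/private/tagsql" is imported;
// the connection string and the metric name "exampledb" are placeholders.
func exampleConfigure(ctx context.Context, connstr string) error {
	db, err := tagsql.Open(ctx, "pgx", connstr)
	if err != nil {
		return err
	}
	// Applies the -db.max_idle_conns, -db.max_open_conns and -db.conn_max_lifetime
	// flag values and registers db_stats metrics under the given name.
	Configure(ctx, db, "exampledb", monkit.Package())
	return db.Close()
}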


@ -1,54 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package dbutil
import (
"fmt"
"strings"
)
// EscapableCommaSplit is like strings.Split(x, ","), but if
// it sees two ','s in a row, it will treat them like one
// unsplit comma. So "hello,there,,friend" will result in
// ["hello", "there,friend"].
func EscapableCommaSplit(val string) []string {
bytes := []byte(val)
var vals []string
current := make([]byte, 0, len(bytes))
for i := 0; i < len(bytes); i++ {
char := bytes[i]
if char == ',' {
if i < len(bytes)-1 && bytes[i+1] == ',' {
current = append(current, ',')
i++
} else {
vals = append(vals, string(current))
current = nil
}
} else {
current = append(current, char)
}
}
vals = append(vals, string(current))
return vals
}
// ParseDBMapping parses a mapping of database connection URLs, preceded
// by the default URL. An example that overrides the repairqueue looks like:
// cockroach://user:pw@host/database,repairqueue:postgres://user:pw@host/database.
// The default is stored in "".
func ParseDBMapping(urlSpec string) (map[string]string, error) {
parts := EscapableCommaSplit(urlSpec)
rv := map[string]string{
"": parts[0],
}
for _, other := range parts[1:] {
override := strings.SplitN(other, ":", 2)
if len(override) != 2 || strings.HasPrefix(override[1], "/") {
return nil, fmt.Errorf("invalid db mapping spec: %q", urlSpec)
}
rv[override[0]] = override[1]
}
return rv, nil
}
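// Illustrative sketch (not part of the original file): overriding a single
// database in a mapping. The connection URLs are placeholders.
func exampleParseDBMapping() {
	mapping, err := ParseDBMapping(
		"cockroach://user:pw@host/master,repairqueue:postgres://user:pw@host/queue")
	if err != nil {
		return
	}
	_ = mapping[""]            // "cockroach://user:pw@host/master" (the default)
	_ = mapping["repairqueue"] // "postgres://user:pw@host/queue"
}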


@ -1,52 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package dbutil
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
func TestEscapableCommaSplit(t *testing.T) {
for _, testcase := range []struct {
input string
expected []string
}{
{"", []string{""}},
{",", []string{"", ""}},
{",hello", []string{"", "hello"}},
{"hello,", []string{"hello", ""}},
{"hello,there", []string{"hello", "there"}},
{"hello,,there", []string{"hello,there"}},
{",,hello", []string{",hello"}},
{"hello,,", []string{"hello,"}},
{"hello,,,there", []string{"hello,", "there"}},
{"hello,,,,there", []string{"hello,,there"}},
} {
require.Equal(t, testcase.expected, EscapableCommaSplit(testcase.input))
}
}
func TestParseDBMapping(t *testing.T) {
for _, testcase := range []struct {
input string
expected map[string]string
err error
}{
{"db://host", map[string]string{"": "db://host"}, nil},
{"db://host,override:db2://host2/db,,name",
map[string]string{"": "db://host", "override": "db2://host2/db,name"}, nil},
{"db://host,db2://host2", nil,
fmt.Errorf("invalid db mapping spec: %q", "db://host,db2://host2")},
} {
actual, err := ParseDBMapping(testcase.input)
if testcase.err != nil {
require.Equal(t, testcase.err, err)
} else {
require.Equal(t, testcase.expected, actual)
}
}
}


@ -1,96 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package dbutil
import (
"database/sql"
"database/sql/driver"
"time"
"github.com/zeebo/errs"
)
const (
sqliteTimeLayout = "2006-01-02 15:04:05-07:00"
sqliteTimeLayoutNoTimeZone = "2006-01-02 15:04:05"
sqliteTimeLayoutDate = "2006-01-02"
)
// ErrNullTime defines error class for NullTime.
var ErrNullTime = errs.Class("null time error")
// NullTime helps convert nil to time.Time.
type NullTime struct {
time.Time
Valid bool
}
// Scan implements the Scanner interface.
func (nt *NullTime) Scan(value interface{}) error {
nt.Time, nt.Valid = time.Time{}, false
if value == nil {
return nil
}
switch v := value.(type) {
// Postgres could return this for lagged time values.
case time.Time:
nt.Time, nt.Valid = v, true
// Database could return this for nullable time values.
case sql.NullTime:
nt.Time, nt.Valid = v.Time, v.Valid
// Sqlite may return this.
case string:
parsed, err := parseSqliteTimeString(v)
if err != nil {
return ErrNullTime.Wrap(err)
}
nt.Time, nt.Valid = parsed, true
// Sqlite may return this.
case []byte:
parsed, err := parseSqliteTimeString(string(v))
if err != nil {
return ErrNullTime.Wrap(err)
}
nt.Time, nt.Valid = parsed, true
default:
return ErrNullTime.New("sql null time: scan received unsupported value %T", value)
}
return nil
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}
// parseSqliteTimeString parses a sqlite time string.
// It tries to parse the value as a string with a timezone first,
// then falls back to parsing it as a string without a timezone and
// finally to parsing the value as a date.
func parseSqliteTimeString(val string) (time.Time, error) {
var times time.Time
var err error
times, err = time.Parse(sqliteTimeLayout, val)
if err == nil {
return times, nil
}
times, err = time.Parse(sqliteTimeLayoutNoTimeZone, val)
if err == nil {
return times, nil
}
return time.Parse(sqliteTimeLayoutDate, val)
}
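// Illustrative sketch (not part of the original file): scanning a nullable
// timestamp column through NullTime. Assumes "context" is imported; the query
// and table name are made up.
func exampleNullTime(ctx context.Context, db *sql.DB) (time.Time, bool, error) {
	var expiresAt NullTime
	row := db.QueryRowContext(ctx, `SELECT expires_at FROM example_table LIMIT 1`)
	if err := row.Scan(&expiresAt); err != nil {
		return time.Time{}, false, err
	}
	// Valid is false when the column was NULL; otherwise Time holds the value,
	// whether the driver produced time.Time, sql.NullTime or a sqlite time string.
	return expiresAt.Time, expiresAt.Valid, nil
}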


@ -1,120 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package pgtest
import (
"flag"
"os"
"strings"
"sync/atomic"
"testing"
"storj.io/common/testcontext"
)
// We need to define this in a separate package due to https://golang.org/issue/23910.
func getenv(priority ...string) string {
for _, p := range priority {
v := os.Getenv(p)
if v != "" {
return v
}
}
return ""
}
// postgres is the test database connection string.
var postgres = flag.String("postgres-test-db", getenv("STORJ_TEST_POSTGRES", "STORJ_POSTGRES_TEST"), "PostgreSQL test database connection string (semicolon delimited for multiple), \"omit\" is used to omit the tests from output")
// cockroach is the test database connection string for CockroachDB.
var cockroach = flag.String("cockroach-test-db", getenv("STORJ_TEST_COCKROACH", "STORJ_COCKROACH_TEST"), "CockroachDB test database connection string (semicolon delimited for multiple), \"omit\" is used to omit the tests from output")
var cockroachAlt = flag.String("cockroach-test-alt-db", getenv("STORJ_TEST_COCKROACH_ALT"), "CockroachDB test database connection alternate string (semicolon delimited for multiple), \"omit\" is used to omit the tests from output")
// DefaultPostgres is expected to work under the storj-test docker-compose instance.
const DefaultPostgres = "postgres://storj:storj-pass@test-postgres/teststorj?sslmode=disable"
// DefaultCockroach is expected to work when a local cockroachDB instance is running.
const DefaultCockroach = "cockroach://root@localhost:26257/master?sslmode=disable"
// Database defines a postgres compatible database.
type Database struct {
Name string
// Pick picks a connection string for the database and skips when it's missing.
Pick func(t TB) string
}
// TB defines minimal interface required for Pick.
type TB interface {
Skip(...interface{})
}
// Databases returns list of postgres compatible databases.
func Databases() []Database {
return []Database{
{Name: "Postgres", Pick: PickPostgres},
{Name: "Cockroach", Pick: PickCockroach},
}
}
// Run runs tests with all postgres compatible databases.
func Run(t *testing.T, test func(ctx *testcontext.Context, t *testing.T, connstr string)) {
for _, db := range Databases() {
db := db
connstr := db.Pick(t)
if strings.EqualFold(connstr, "omit") {
continue
}
t.Run(db.Name, func(t *testing.T) {
t.Parallel()
ctx := testcontext.New(t)
defer ctx.Cleanup()
test(ctx, t, connstr)
})
}
}
// PickPostgres picks one postgres database from flag.
func PickPostgres(t TB) string {
if *postgres == "" || strings.EqualFold(*postgres, "omit") {
t.Skip("Postgres flag missing, example: -postgres-test-db=" + DefaultPostgres)
}
return pickNext(*postgres, &pickPostgres)
}
// PickCockroach picks one cockroach database from flag.
func PickCockroach(t TB) string {
if *cockroach == "" || strings.EqualFold(*cockroach, "omit") {
t.Skip("Cockroach flag missing, example: -cockroach-test-db=" + DefaultCockroach)
}
return pickNext(*cockroach, &pickCockroach)
}
// PickCockroachAlt picks an alternate cockroach database from flag.
//
// This is used for high-load tests to ensure that other tests do not timeout.
func PickCockroachAlt(t TB) string {
if *cockroachAlt == "" {
return PickCockroach(t)
}
if strings.EqualFold(*cockroachAlt, "omit") {
t.Skip("Cockroach alt flag omitted.")
}
return pickNext(*cockroachAlt, &pickCockroach)
}
var pickPostgres uint64
var pickCockroach uint64
func pickNext(dbstr string, counter *uint64) string {
values := strings.Split(dbstr, ";")
if len(values) <= 1 {
return dbstr
}
v := atomic.AddUint64(counter, 1)
return values[v%uint64(len(values))]
}
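// Illustrative sketch (not part of the original file): a test that exercises
// every configured postgres-compatible database. The test body is a placeholder.
func exampleRunUsage(t *testing.T) {
	Run(t, func(ctx *testcontext.Context, t *testing.T, connstr string) {
		// connstr comes from -postgres-test-db or -cockroach-test-db; the test is
		// skipped when a flag is missing and a database is left out when set to "omit".
		t.Log("connecting to", connstr)
	})
}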


@ -1,19 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package pgutil
import (
"context"
"strconv"
"storj.io/storj/private/dbutil/dbschema"
)
// QueryData loads all data from tables.
func QueryData(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema) (*dbschema.Data, error) {
return dbschema.QueryData(ctx, db, schema, func(columnName string) string {
quoted := strconv.Quote(columnName)
return `quote_nullable(` + quoted + `) as ` + quoted
})
}


@ -1,126 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package pgutil
import (
"context"
"strings"
"github.com/jackc/pgx/v4"
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/dbschema"
"storj.io/storj/private/dbutil/pgutil/pgerrcode"
"storj.io/storj/private/tagsql"
)
var (
mon = monkit.Package()
)
const (
// pgErrorClassConstraintViolation is the class of PostgreSQL errors indicating
// integrity constraint violations.
pgErrorClassConstraintViolation = "23"
)
// OpenUnique opens a postgres database with a temporary unique schema, which will be cleaned up
// when closed. It is expected that this should normally be used by way of
// "storj.io/storj/private/dbutil/tempdb".OpenUnique() instead of calling it directly.
func OpenUnique(ctx context.Context, connstr string, schemaPrefix string) (*dbutil.TempDatabase, error) {
// sanity check, because you get an unhelpful error message when this happens
if strings.HasPrefix(connstr, "cockroach://") {
return nil, errs.New("can't connect to cockroach using pgutil.OpenUnique()! connstr=%q. try tempdb.OpenUnique() instead?", connstr)
}
schemaName := schemaPrefix + "-" + CreateRandomTestingSchemaName(8)
connStrWithSchema := ConnstrWithSchema(connstr, schemaName)
db, err := tagsql.Open(ctx, "pgx", connStrWithSchema)
if err == nil {
// check that connection actually worked before trying CreateSchema, to make
// troubleshooting (lots) easier
err = db.PingContext(ctx)
}
if err != nil {
return nil, errs.New("failed to connect to %q with driver pgx: %w", connStrWithSchema, err)
}
err = CreateSchema(ctx, db, schemaName)
if err != nil {
return nil, errs.Combine(err, db.Close())
}
cleanup := func(cleanupDB tagsql.DB) error {
return DropSchema(ctx, cleanupDB, schemaName)
}
dbutil.Configure(ctx, db, "tmp_postgres", mon)
return &dbutil.TempDatabase{
DB: db,
ConnStr: connStrWithSchema,
Schema: schemaName,
Driver: "pgx",
Implementation: dbutil.Postgres,
Cleanup: cleanup,
}, nil
}
// QuerySnapshot loads snapshot from database.
func QuerySnapshot(ctx context.Context, db dbschema.Queryer) (*dbschema.Snapshot, error) {
schema, err := QuerySchema(ctx, db)
if err != nil {
return nil, err
}
data, err := QueryData(ctx, db, schema)
if err != nil {
return nil, err
}
return &dbschema.Snapshot{
Version: -1,
Schema: schema,
Data: data,
}, err
}
// CheckApplicationName ensures that the connection string contains an application name.
func CheckApplicationName(s string, app string) (string, error) {
if !strings.Contains(s, "application_name") {
if strings.TrimSpace(app) == "" {
return s, errs.New("application name cannot be empty")
}
if !strings.Contains(s, "?") {
return s + "?application_name=" + app, nil
}
return s + "&application_name=" + app, nil
}
// return source as is if application_name is set
return s, nil
}
// IsConstraintError checks if given error is about constraint violation.
func IsConstraintError(err error) bool {
errCode := pgerrcode.FromError(err)
return strings.HasPrefix(errCode, pgErrorClassConstraintViolation)
}
// QuoteIdentifier quotes an identifier for use in an interpolated SQL string.
func QuoteIdentifier(ident string) string {
return pgx.Identifier{ident}.Sanitize()
}
// UnquoteIdentifier is the analog of QuoteIdentifier.
func UnquoteIdentifier(quotedIdent string) string {
if len(quotedIdent) >= 2 && quotedIdent[0] == '"' && quotedIdent[len(quotedIdent)-1] == '"' {
quotedIdent = strings.ReplaceAll(quotedIdent[1:len(quotedIdent)-1], "\"\"", "\"")
}
return quotedIdent
}
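// Illustrative sketch (not part of the original file): tagging a connection
// string with an application name and quoting an identifier for interpolation.
// The values are placeholders.
func exampleConnstrHelpers() error {
	connstr, err := CheckApplicationName("postgres://user:pw@host/db", "satellite-api")
	if err != nil {
		return err
	}
	_ = connstr // "postgres://user:pw@host/db?application_name=satellite-api"
	_ = QuoteIdentifier("some table") // "\"some table\"", safe to splice into SQL text
	return nil
}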


@ -1,57 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package pgutil_test
import (
"strings"
"testing"
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/private/dbutil/tempdb"
"storj.io/storj/private/tagsql"
)
func TestTempPostgresDB(t *testing.T) {
connstr := pgtest.PickPostgres(t)
ctx := testcontext.New(t)
defer ctx.Cleanup()
prefix := "name#spaced/Test/DB"
testDB, err := tempdb.OpenUnique(ctx, connstr, prefix)
require.NoError(t, err)
// assert new test db exists and can be connected to again
otherConn, err := tagsql.Open(ctx, testDB.Driver, testDB.ConnStr)
require.NoError(t, err)
defer ctx.Check(otherConn.Close)
// verify the name matches expectation
var name *string
row := otherConn.QueryRowContext(ctx, `SELECT current_schema()`)
err = row.Scan(&name)
require.NoErrorf(t, err, "connStr=%q", testDB.ConnStr)
require.NotNilf(t, name, "PG has no current_schema, which means the one we asked for doesn't exist. connStr=%q", testDB.ConnStr)
require.Truef(t, strings.HasPrefix(*name, prefix), "Expected prefix of %q for current db name, but found %q", prefix, name)
// verify there is an entry in pg_namespace with such a name
var count int
row = otherConn.QueryRowContext(ctx, `SELECT COUNT(*) FROM pg_namespace WHERE nspname = current_schema`)
err = row.Scan(&count)
require.NoError(t, err)
require.Equalf(t, 1, count, "Expected 1 schema with matching name, but counted %d", count)
// close testDB but leave otherConn open
err = testDB.Close()
require.NoError(t, err)
// assert new test schema was deleted
row = otherConn.QueryRowContext(ctx, `SELECT COUNT(*) FROM pg_namespace WHERE nspname = current_schema`)
err = row.Scan(&count)
require.NoError(t, err)
require.Equalf(t, 0, count, "Expected 0 schemas with matching name, but counted %d (deletion failure?)", count)
}


@ -1,26 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
// Package pgerrcode implements postgres error extraction without depending on a postgres
// library.
package pgerrcode
import "errors"
// FromError returns the 5-character PostgreSQL error code string associated
// with the given error, if any.
func FromError(err error) string {
var sqlStateErr errWithSQLState
if errors.As(err, &sqlStateErr) {
return sqlStateErr.SQLState()
}
return ""
}
// errWithSQLState is an interface supported by error classes corresponding
// to PostgreSQL errors from certain drivers. This is satisfied, in particular,
// by pgx (*pgconn.PgError) and may be adopted by other types. An effort is
// apparently underway to get lib/pq to add this interface.
type errWithSQLState interface {
SQLState() string
}
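// Illustrative sketch (not part of the original file): how a driver error is
// inspected. fakePGError stands in for something like pgx's *pgconn.PgError.
type fakePGError struct{ code string }

func (e *fakePGError) Error() string    { return "pg error " + e.code }
func (e *fakePGError) SQLState() string { return e.code }

func exampleFromError() {
	_ = FromError(&fakePGError{code: "23505"})  // "23505" (unique_violation)
	_ = FromError(errors.New("something else")) // "" when no SQLState is available
}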


@ -1,297 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package pgutil
import (
"context"
"fmt"
"regexp"
"strings"
"github.com/jackc/pgtype"
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil/dbschema"
)
// QuerySchema loads the schema from a postgres database.
func QuerySchema(ctx context.Context, db dbschema.Queryer) (*dbschema.Schema, error) {
schema := &dbschema.Schema{}
// get version string to do efficient queries
var version string
row := db.QueryRowContext(ctx, `SELECT version()`)
if err := row.Scan(&version); err != nil {
return nil, errs.Wrap(err)
}
// find tables
err := func() (err error) {
rows, err := db.QueryContext(ctx, `
SELECT table_name, column_name, is_nullable, coalesce(column_default, ''), data_type
FROM information_schema.columns
WHERE table_schema = CURRENT_SCHEMA
`)
if err != nil {
return err
}
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var tableName, columnName, isNullable, columnDefault, dataType string
err := rows.Scan(&tableName, &columnName, &isNullable, &columnDefault, &dataType)
if err != nil {
return err
}
table := schema.EnsureTable(tableName)
table.AddColumn(&dbschema.Column{
Name: columnName,
Type: dataType,
IsNullable: isNullable == "YES",
Default: parseColumnDefault(columnDefault),
})
}
return rows.Err()
}()
if err != nil {
return nil, err
}
// find constraints
err = func() (err error) {
// cockroach has a .condef field and it's way faster than the function call
var definitionClause string
if strings.Contains(version, "CockroachDB") {
definitionClause = `pg_constraint.condef AS definition`
} else {
definitionClause = `pg_get_constraintdef(pg_constraint.oid) AS definition`
}
rows, err := db.QueryContext(ctx, `
SELECT
pg_class.relname AS table_name,
pg_constraint.conname AS constraint_name,
pg_constraint.contype AS constraint_type,
(
SELECT
ARRAY_AGG(pg_attribute.attname ORDER BY u.pos)
FROM
pg_attribute
JOIN UNNEST(pg_constraint.conkey) WITH ORDINALITY AS u(attnum, pos) ON u.attnum = pg_attribute.attnum
WHERE
pg_attribute.attrelid = pg_class.oid
) AS columns, `+definitionClause+`
FROM
pg_constraint
JOIN pg_class ON pg_class.oid = pg_constraint.conrelid
JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace
WHERE pg_namespace.nspname = CURRENT_SCHEMA
`)
if err != nil {
return err
}
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var tableName, constraintName, constraintType string
var columnsArray pgtype.VarcharArray
var columns []string
var definition string
err := rows.Scan(&tableName, &constraintName, &constraintType, &columnsArray, &definition)
if err != nil {
return err
}
err = columnsArray.AssignTo(&columns)
if err != nil {
return err
}
switch constraintType {
case "p": // primary key
table := schema.EnsureTable(tableName)
table.PrimaryKey = columns
case "f": // foreign key
if len(columns) != 1 {
return fmt.Errorf("expected one column, got: %q", columns)
}
table := schema.EnsureTable(tableName)
column, ok := table.FindColumn(columns[0])
if !ok {
return fmt.Errorf("did not find column %q", columns[0])
}
matches := rxPostgresForeignKey.FindStringSubmatch(definition)
if len(matches) == 0 {
return fmt.Errorf("unable to parse constraint %q", definition)
}
column.Reference = &dbschema.Reference{
Table: matches[1],
Column: matches[2],
OnUpdate: matches[3],
OnDelete: matches[4],
}
case "u": // unique
table := schema.EnsureTable(tableName)
table.Unique = append(table.Unique, columns)
default:
return fmt.Errorf("unhandled constraint type %q", constraintType)
}
}
return rows.Err()
}()
if err != nil {
return nil, err
}
// find indexes
err = func() (err error) {
rows, err := db.QueryContext(ctx, `SELECT indexdef FROM pg_indexes WHERE schemaname = CURRENT_SCHEMA`)
if err != nil {
return errs.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var indexdef string
err := rows.Scan(&indexdef)
if err != nil {
return errs.Wrap(err)
}
index, err := parseIndexDefinition(indexdef)
if err != nil {
return errs.Wrap(err)
}
if isAutogeneratedCockroachIndex(index) {
continue
}
schema.Indexes = append(schema.Indexes, index)
}
return errs.Wrap(rows.Err())
}()
if err != nil {
return nil, err
}
schema.Sort()
return schema, nil
}
// matches FOREIGN KEY (project_id) REFERENCES projects(id) ON UPDATE CASCADE ON DELETE CASCADE.
var rxPostgresForeignKey = regexp.MustCompile(
`^FOREIGN KEY \([[:word:]]+\) ` +
`REFERENCES ([[:word:]]+)\(([[:word:]]+)\)` +
`(?:\s*ON UPDATE (CASCADE|RESTRICT|SET NULL|SET DEFAULT|NO ACTION))?` +
`(?:\s*ON DELETE (CASCADE|RESTRICT|SET NULL|SET DEFAULT|NO ACTION))?$`,
)
var (
rxIndex = regexp.MustCompile(`^CREATE( UNIQUE)? INDEX (.*) ON .*\.(.*) USING btree \((.*)\)$`)
indexDirRemove = strings.NewReplacer(" ASC", "", " DESC", "")
)
func parseColumnDefault(columnDefault string) string {
// hackity hack: See the comments in parseIndexDefinition for why we do this.
if columnDefault == "nextval('storagenode_storage_tallies_id_seq'::regclass)" {
return "nextval('accounting_raws_id_seq'::regclass)"
}
// hackity hack: cockroach sometimes adds type descriptors to the default. ignore em!
if idx := strings.Index(columnDefault, ":::"); idx >= 0 {
columnDefault = columnDefault[:idx]
}
return columnDefault
}
func parseIndexDefinition(indexdef string) (*dbschema.Index, error) {
matches := rxIndex.FindStringSubmatch(indexdef)
if matches == nil {
return nil, errs.New("unable to parse index (you should go make the parser better): %q", indexdef)
}
// hackity hack: cockroach returns all primary key index names as `"primary"`, but sometimes
// our migrations create them with explicit names. so let's change all of them.
name := matches[2]
if name == `"primary"` {
name = matches[3] + "_pkey"
}
// hackity hack: sometimes they end with _pk, sometimes they end with _pkey. let's make them
// all end with _pkey.
if strings.HasSuffix(name, "_pk") {
name = name[:len(name)-3] + "_pkey"
}
// biggest hackity hack of all: we apparently did
//
// CREATE TABLE accounting_raws ( ... )
// ALTER TABLE accounting_raws RENAME TO storagenode_storage_tallies
//
// but that means the primary key index is still named accounting_raws_pkey and not
// the expected storagenode_storage_tallies_pkey.
//
// "No big deal", you might say, "just add an ALTER INDEX". Ah, but recall: cockroach
// does not name their primary key indexes. They are instead all named `"primary"`.
// Now, at this point, a clever person might suggest ALTER INDEX IF EXISTS so that
// it renames it on postgres but not cockroach. Surely if the index does not exist
// it will happily succeed. You'd like to think that, wouldn't you! Alas, cockroach
// will error on ALTER INDEX IF EXISTS even if the index does not exist. Basic
// conditionals are apparently too hard for it.
//
// Undaunted, I searched their bug tracker and found this comment within this issue:
// https://github.com/cockroachdb/cockroach/issues/42399#issuecomment-558377915
// It turns out, you apparently need to specify the index with some sort of `@` sigil
// or it just errors with an unhelpful message. But only a great fool would think that
// the query would remain valid for postgres!
//
// In summary, because cockroach errors even if the index does not exist, I can clearly
// not use cockroach. But because postgres will error if the sigil is included, I can
// clearly not use postgres.
//
// As a last resort, one may suggest changing the postgres.N.sql file to ALSO create
// the wrong table name and rename it. Truly, they have a dizzying intellect. But prepare
// yourself for the final killing blow: if we do that, then the final output does not match
// the dbx schema that is autogenerated, and the test still fails.
//
// The lesson? Never go in against a database when death is on the line. HA HA HA HA...
//
// Bleh.
if name == "accounting_raws_pkey" {
name = "storagenode_storage_tallies_pkey"
}
return &dbschema.Index{
Name: name,
Table: matches[3],
Unique: matches[1] != "",
Columns: strings.Split(indexDirRemove.Replace(matches[4]), ", "),
}, nil
}
// hackity hack:
//
// Cockroach sometimes creates automatic indexes to enforce foreign key
// relationships, if it doesn't think the need is already met by other
// indexes. If you then add the other indexes after creating the table,
// the auto-generated index does not go away. So you get different results
// when establishing one table with a set of constraints over multiple
// steps, versus creating that same table with the same set of constraints
// all at once. Unfortunately, our system wants very much for those two
// paths to produce exactly the same result.
//
// This should make it so that we disregard the difference in the cases
// that it arises.
//
// See above for an important lesson about going in against a database when
// death is on the line.
func isAutogeneratedCockroachIndex(index *dbschema.Index) bool {
return strings.Contains(index.Name, "_auto_index_fk_")
}
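// Illustrative sketch (not part of the original file): what parseIndexDefinition
// recovers from a pg_indexes.indexdef row. The index definition is made up.
func exampleParseIndexDefinition() {
	index, err := parseIndexDefinition(
		`CREATE UNIQUE INDEX users_email_key ON public.users USING btree (email, created_at DESC)`)
	if err != nil {
		return
	}
	_ = index.Unique  // true
	_ = index.Table   // "users"
	_ = index.Columns // []string{"email", "created_at"}; ASC/DESC markers are stripped
}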


@ -1,100 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package pgutil_test
import (
"testing"
_ "github.com/jackc/pgx/v4/stdlib"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/dbschema"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/dbutil/tempdb"
)
func TestQuery(t *testing.T) {
pgtest.Run(t, func(ctx *testcontext.Context, t *testing.T, connstr string) {
db, err := tempdb.OpenUnique(ctx, connstr, "pgutil-query")
require.NoError(t, err)
defer ctx.Check(db.Close)
emptySchema, err := pgutil.QuerySchema(ctx, db)
require.NoError(t, err)
assert.Equal(t, &dbschema.Schema{}, emptySchema)
_, err = db.ExecContext(ctx, `
CREATE TABLE users (
a bigint NOT NULL,
b bigint NOT NULL,
c text,
UNIQUE (c),
PRIMARY KEY (a)
);
CREATE TABLE names (
users_a bigint REFERENCES users( a ) ON DELETE CASCADE,
a text NOT NULL,
x text,
b text,
PRIMARY KEY (a, x),
UNIQUE ( x ),
UNIQUE ( a, b )
);
`)
require.NoError(t, err)
schema, err := pgutil.QuerySchema(ctx, db)
require.NoError(t, err)
expected := &dbschema.Schema{
Tables: []*dbschema.Table{
{
Name: "users",
Columns: []*dbschema.Column{
{Name: "a", Type: "bigint", IsNullable: false, Reference: nil},
{Name: "b", Type: "bigint", IsNullable: false, Reference: nil},
{Name: "c", Type: "text", IsNullable: true, Reference: nil},
},
PrimaryKey: []string{"a"},
Unique: [][]string{
{"c"},
},
},
{
Name: "names",
Columns: []*dbschema.Column{
{Name: "users_a", Type: "bigint", IsNullable: true,
Reference: &dbschema.Reference{
Table: "users",
Column: "a",
OnDelete: "CASCADE",
}},
{Name: "a", Type: "text", IsNullable: false, Reference: nil},
{Name: "x", Type: "text", IsNullable: false, Reference: nil}, // not null, because primary key
{Name: "b", Type: "text", IsNullable: true, Reference: nil},
},
PrimaryKey: []string{"a", "x"},
Unique: [][]string{
{"a", "b"},
{"x"},
},
},
},
Indexes: []*dbschema.Index{
{Name: "names_a_b_key", Table: "names", Columns: []string{"a", "b"}, Unique: true, Partial: ""},
{Name: "names_pkey", Table: "names", Columns: []string{"a", "x"}, Unique: true, Partial: ""},
{Name: "names_x_key", Table: "names", Columns: []string{"x"}, Unique: true, Partial: ""},
{Name: "users_c_key", Table: "users", Columns: []string{"c"}, Unique: true, Partial: ""},
{Name: "users_pkey", Table: "users", Columns: []string{"a"}, Unique: true, Partial: ""},
},
}
expected.Sort()
schema.Sort()
assert.Equal(t, expected, schema)
})
}


@ -1,91 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
// Package pgutil contains utilities for postgres
package pgutil
import (
"context"
"crypto/rand"
"database/sql"
"encoding/hex"
"net/url"
"strings"
)
// CreateRandomTestingSchemaName creates a random schema name string.
func CreateRandomTestingSchemaName(n int) string {
data := make([]byte, n)
_, err := rand.Read(data)
if err != nil {
panic(err)
}
return hex.EncodeToString(data)
}
// ConnstrWithSchema adds schema to a connection string.
func ConnstrWithSchema(connstr, schema string) string {
if strings.Contains(connstr, "?") {
connstr += "&options="
} else {
connstr += "?options="
}
return connstr + url.QueryEscape("--search_path="+QuoteIdentifier(schema))
}
// ParseSchemaFromConnstr returns the name of the schema parsed from the
// connection string if one is provided.
func ParseSchemaFromConnstr(connstr string) (string, error) {
url, err := url.Parse(connstr)
if err != nil {
return "", err
}
queryValues := url.Query()
// this is the Proper™ way to encode search_path in a pg connection string
options := queryValues["options"]
for _, option := range options {
if strings.HasPrefix(option, "--search_path=") {
return UnquoteIdentifier(option[len("--search_path="):]), nil
}
}
// this is another way we've used before; supported brokenly as a kludge in github.com/lib/pq
schema := queryValues["search_path"]
if len(schema) > 0 {
return UnquoteIdentifier(schema[0]), nil
}
return "", nil
}
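// Illustrative sketch (not part of the original file): the schema name
// round-trips through the connection string. The URL is a placeholder.
func exampleSchemaRoundTrip() (string, error) {
	connstr := ConnstrWithSchema("postgres://user:pw@host/db", "testschema")
	// connstr now ends with "?options=--search_path%3D%22testschema%22".
	return ParseSchemaFromConnstr(connstr) // "testschema", nil
}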
// QuoteSchema quotes a schema name for use in SQL.
func QuoteSchema(schema string) string {
return QuoteIdentifier(schema)
}
// Execer is for executing sql.
type Execer interface {
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
}
// CreateSchema creates a schema if it doesn't exist.
func CreateSchema(ctx context.Context, db Execer, schema string) (err error) {
for try := 0; try < 5; try++ {
_, err = db.ExecContext(ctx, `CREATE SCHEMA IF NOT EXISTS `+QuoteSchema(schema)+`;`)
// Postgres `CREATE SCHEMA IF NOT EXISTS` may return "duplicate key value violates unique constraint".
// In that case, we will retry rather than doing anything more complicated.
//
// See more in: https://stackoverflow.com/a/29908840/192220
if IsConstraintError(err) {
continue
}
return err
}
return err
}
// DropSchema drops the named schema.
func DropSchema(ctx context.Context, db Execer, schema string) error {
_, err := db.ExecContext(ctx, `DROP SCHEMA `+QuoteSchema(schema)+` CASCADE;`)
return err
}


@ -1,156 +0,0 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package pgutil
import (
"time"
"github.com/jackc/pgtype"
"storj.io/common/storj"
"storj.io/common/uuid"
)
// The following XArray() helper methods exist alongside similar methods in the
// jackc/pgtype library. The difference with the methods in pgtype is that they
// will accept any of a wide range of types. That is nice, but it comes with
// the potential that someone might pass in an invalid type; thus, those
// methods have to return (*pgtype.XArray, error).
//
// The methods here do not need to return an error because they require passing
// in the correct type to begin with.
//
// An alternative implementation for the following methods might look like
// calls to pgtype.ByteaArray() followed by `if err != nil { panic }` blocks.
// That would probably be ok, but we decided on this approach, as it ought to
// require fewer allocations and less time, in addition to having no error
// return.
// ByteaArray returns an object usable by pg drivers for passing a [][]byte slice
// into a database as type BYTEA[].
func ByteaArray(bytesArray [][]byte) *pgtype.ByteaArray {
pgtypeByteaArray := make([]pgtype.Bytea, len(bytesArray))
for i, byteSlice := range bytesArray {
pgtypeByteaArray[i].Bytes = byteSlice
pgtypeByteaArray[i].Status = pgtype.Present
}
return &pgtype.ByteaArray{
Elements: pgtypeByteaArray,
Dimensions: []pgtype.ArrayDimension{{Length: int32(len(bytesArray)), LowerBound: 1}},
Status: pgtype.Present,
}
}
// TextArray returns an object usable by pg drivers for passing a []string slice
// into a database as type TEXT[].
func TextArray(stringSlice []string) *pgtype.TextArray {
pgtypeTextArray := make([]pgtype.Text, len(stringSlice))
for i, s := range stringSlice {
pgtypeTextArray[i].String = s
pgtypeTextArray[i].Status = pgtype.Present
}
return &pgtype.TextArray{
Elements: pgtypeTextArray,
Dimensions: []pgtype.ArrayDimension{{Length: int32(len(stringSlice)), LowerBound: 1}},
Status: pgtype.Present,
}
}
// TimestampTZArray returns an object usable by pg drivers for passing a []time.Time
// slice into a database as type TIMESTAMPTZ[].
func TimestampTZArray(timeSlice []time.Time) *pgtype.TimestamptzArray {
pgtypeTimestamptzArray := make([]pgtype.Timestamptz, len(timeSlice))
for i, t := range timeSlice {
pgtypeTimestamptzArray[i].Time = t
pgtypeTimestamptzArray[i].Status = pgtype.Present
}
return &pgtype.TimestamptzArray{
Elements: pgtypeTimestamptzArray,
Dimensions: []pgtype.ArrayDimension{{Length: int32(len(timeSlice)), LowerBound: 1}},
Status: pgtype.Present,
}
}
// Int4Array returns an object usable by pg drivers for passing a []int32 slice
// into a database as type INT4[].
func Int4Array(ints []int32) *pgtype.Int4Array {
pgtypeInt4Array := make([]pgtype.Int4, len(ints))
for i, someInt := range ints {
pgtypeInt4Array[i].Int = someInt
pgtypeInt4Array[i].Status = pgtype.Present
}
return &pgtype.Int4Array{
Elements: pgtypeInt4Array,
Dimensions: []pgtype.ArrayDimension{{Length: int32(len(ints)), LowerBound: 1}},
Status: pgtype.Present,
}
}
// Int8Array returns an object usable by pg drivers for passing a []int64 slice
// into a database as type INT8[].
func Int8Array(bigInts []int64) *pgtype.Int8Array {
pgtypeInt8Array := make([]pgtype.Int8, len(bigInts))
for i, bigInt := range bigInts {
pgtypeInt8Array[i].Int = bigInt
pgtypeInt8Array[i].Status = pgtype.Present
}
return &pgtype.Int8Array{
Elements: pgtypeInt8Array,
Dimensions: []pgtype.ArrayDimension{{Length: int32(len(bigInts)), LowerBound: 1}},
Status: pgtype.Present,
}
}
// Float8Array returns an object usable by pg drivers for passing a []float64 slice
// into a database as type FLOAT8[].
func Float8Array(floats []float64) *pgtype.Float8Array {
pgtypeFloat8Array := make([]pgtype.Float8, len(floats))
for i, someFloat := range floats {
pgtypeFloat8Array[i].Float = someFloat
pgtypeFloat8Array[i].Status = pgtype.Present
}
return &pgtype.Float8Array{
Elements: pgtypeFloat8Array,
Dimensions: []pgtype.ArrayDimension{{Length: int32(len(floats)), LowerBound: 1}},
Status: pgtype.Present,
}
}
// NodeIDArray returns an object usable by pg drivers for passing a []storj.NodeID
// slice into a database as type BYTEA[].
func NodeIDArray(nodeIDs []storj.NodeID) *pgtype.ByteaArray {
if nodeIDs == nil {
return &pgtype.ByteaArray{Status: pgtype.Null}
}
pgtypeByteaArray := make([]pgtype.Bytea, len(nodeIDs))
for i, nodeID := range nodeIDs {
nodeIDCopy := nodeID
pgtypeByteaArray[i].Bytes = nodeIDCopy[:]
pgtypeByteaArray[i].Status = pgtype.Present
}
return &pgtype.ByteaArray{
Elements: pgtypeByteaArray,
Dimensions: []pgtype.ArrayDimension{{Length: int32(len(nodeIDs)), LowerBound: 1}},
Status: pgtype.Present,
}
}
// UUIDArray returns an object usable by pg drivers for passing a []uuid.UUID
// slice into a database as type BYTEA[].
func UUIDArray(uuids []uuid.UUID) *pgtype.ByteaArray {
if uuids == nil {
return &pgtype.ByteaArray{Status: pgtype.Null}
}
pgtypeByteaArray := make([]pgtype.Bytea, len(uuids))
for i, uuid := range uuids {
uuidCopy := uuid
pgtypeByteaArray[i].Bytes = uuidCopy[:]
pgtypeByteaArray[i].Status = pgtype.Present
}
return &pgtype.ByteaArray{
Elements: pgtypeByteaArray,
Dimensions: []pgtype.ArrayDimension{{Length: int32(len(uuids)), LowerBound: 1}},
Status: pgtype.Present,
}
}
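// Illustrative sketch (not part of the original file): passing a Go slice into a
// query as a postgres array. Assumes "context" and
// "storj.io/storj/private/tagsql" are imported; the table and column are made up.
func exampleUUIDArray(ctx context.Context, db tagsql.DB, streamIDs []uuid.UUID) error {
	_, err := db.ExecContext(ctx,
		`DELETE FROM example_segments WHERE stream_id = ANY($1::BYTEA[])`,
		UUIDArray(streamIDs),
	)
	return err
}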


@ -1,29 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package pgutil
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/storj"
"storj.io/common/testrand"
)
func TestPostgresNodeIDsArray(t *testing.T) {
ids := make(storj.NodeIDList, 10)
for i := range ids {
ids[i] = testrand.NodeID()
}
got, err := NodeIDArray(ids).Value() // returns a string
require.NoError(t, err)
expected, err := ByteaArray(ids.Bytes()).Value() // returns a string
require.NoError(t, err)
assert.Equal(t, expected.(string), got.(string))
}


@ -1,31 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package dbutil
import (
"fmt"
"strings"
)
// SplitConnStr returns the driver and DSN portions of a URL, along with the db implementation.
func SplitConnStr(s string) (driver string, source string, implementation Implementation, err error) {
// consider https://github.com/xo/dburl if this ends up lacking
parts := strings.SplitN(s, "://", 2)
if len(parts) != 2 {
return "", "", Unknown, fmt.Errorf("could not parse DB URL %s", s)
}
driver = parts[0]
source = parts[1]
implementation = ImplementationForScheme(parts[0])
switch implementation {
case Postgres:
source = s // postgres wants full URLS for its DSN
driver = "pgx"
case Cockroach:
source = s // cockroach wants full URLS for its DSN
driver = "pgxcockroach"
}
return driver, source, implementation, nil
}
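// Illustrative sketch (not part of the original file): how a connection URL is
// split. The URL is a placeholder.
func exampleSplitConnStr() {
	driver, source, impl, err := SplitConnStr("cockroach://root@localhost:26257/master?sslmode=disable")
	if err != nil {
		return
	}
	_ = driver // "pgxcockroach"
	_ = source // the full URL, since the cockroach and postgres drivers want a complete DSN
	_ = impl   // Cockroach
}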


@ -1,93 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package sqliteutil
import (
"context"
"strconv"
sqlite3 "github.com/mattn/go-sqlite3"
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil/dbschema"
"storj.io/storj/private/tagsql"
)
// LoadSchemaFromSQL loads a schema from the given script by executing it against an in-memory sqlite database.
func LoadSchemaFromSQL(ctx context.Context, script string) (_ *dbschema.Schema, err error) {
db, err := tagsql.Open(ctx, "sqlite3", ":memory:")
if err != nil {
return nil, errs.Wrap(err)
}
defer func() { err = errs.Combine(err, db.Close()) }()
_, err = db.ExecContext(ctx, script)
if err != nil {
return nil, errs.Wrap(err)
}
return QuerySchema(ctx, db)
}
// LoadSnapshotFromSQL loads a snapshot from the given script by executing it against an in-memory sqlite database.
func LoadSnapshotFromSQL(ctx context.Context, script string) (_ *dbschema.Snapshot, err error) {
db, err := tagsql.Open(ctx, "sqlite3", ":memory:")
if err != nil {
return nil, errs.Wrap(err)
}
defer func() { err = errs.Combine(err, db.Close()) }()
_, err = db.ExecContext(ctx, script)
if err != nil {
return nil, errs.Wrap(err)
}
snapshot, err := QuerySnapshot(ctx, db)
if err != nil {
return nil, errs.Wrap(err)
}
snapshot.Sections = dbschema.NewSections(script)
return snapshot, nil
}
// QuerySnapshot loads snapshot from database.
func QuerySnapshot(ctx context.Context, db dbschema.Queryer) (*dbschema.Snapshot, error) {
schema, err := QuerySchema(ctx, db)
if err != nil {
return nil, errs.Wrap(err)
}
data, err := QueryData(ctx, db, schema)
if err != nil {
return nil, errs.Wrap(err)
}
return &dbschema.Snapshot{
Version: -1,
Schema: schema,
Data: data,
}, err
}
// QueryData loads all data from tables.
func QueryData(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema) (*dbschema.Data, error) {
return dbschema.QueryData(ctx, db, schema, func(columnName string) string {
quoted := strconv.Quote(columnName)
return `quote(` + quoted + `) as ` + quoted
})
}
// IsConstraintError checks if given error is about constraint violation.
func IsConstraintError(err error) bool {
return errs.IsFunc(err, func(err error) bool {
if e, ok := err.(sqlite3.Error); ok {
if e.Code == sqlite3.ErrConstraint {
return true
}
}
return false
})
}
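// Illustrative sketch (not part of the original file): loading a schema directly
// from SQL, for example to compare a migrated database against the expected
// state. The SQL is made up.
func exampleLoadSchema(ctx context.Context) (bool, error) {
	schema, err := LoadSchemaFromSQL(ctx, `
		CREATE TABLE example_nodes ( id BLOB NOT NULL, PRIMARY KEY (id) );
	`)
	if err != nil {
		return false, err
	}
	_, found := schema.FindTable("example_nodes")
	return found, nil // true for this script
}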


@ -1,224 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package sqliteutil
import (
"context"
"database/sql/driver"
"fmt"
"github.com/mattn/go-sqlite3"
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil/txutil"
"storj.io/storj/private/tagsql"
)
var (
// ErrMigrateTables is the error class for MigrateTables.
ErrMigrateTables = errs.Class("migrate tables")
// ErrKeepTables is the error class for KeepTables.
ErrKeepTables = errs.Class("keep tables")
)
// getSqlite3Conn attempts to get a *sqlite3.SQLiteConn from the connection.
func getSqlite3Conn(conn interface{}) (*sqlite3.SQLiteConn, error) {
for {
switch c := conn.(type) {
case *sqlite3.SQLiteConn:
return c, nil
case interface{ Unwrap() driver.Conn }:
conn = c.Unwrap()
default:
return nil, ErrMigrateTables.New("unable to get raw database connection")
}
}
}
// MigrateTablesToDatabase copies the specified tables from srcDB into destDB.
// All tables in destDB will be dropped other than those specified in
// tablesToKeep.
func MigrateTablesToDatabase(ctx context.Context, srcDB, destDB tagsql.DB, tablesToKeep ...string) error {
err := backupDBs(ctx, srcDB, destDB)
if err != nil {
return ErrMigrateTables.Wrap(err)
}
// Remove tables we don't want to keep from the cloned destination database.
return ErrMigrateTables.Wrap(KeepTables(ctx, destDB, tablesToKeep...))
}
func backupDBs(ctx context.Context, srcDB, destDB tagsql.DB) error {
// Retrieve the raw Sqlite3 driver connections for the src and dest so that
// we can execute the backup API for a corruption safe clone.
srcConn, err := srcDB.Conn(ctx)
if err != nil {
return ErrMigrateTables.Wrap(err)
}
defer func() {
err = errs.Combine(err, ErrMigrateTables.Wrap(srcConn.Close()))
}()
destConn, err := destDB.Conn(ctx)
if err != nil {
return ErrMigrateTables.Wrap(err)
}
defer func() {
err = errs.Combine(err, ErrMigrateTables.Wrap(destConn.Close()))
}()
// The references to the driver connections are only guaranteed to be valid
// for the life of the callback so we must do the work within both callbacks.
err = srcConn.Raw(ctx, func(srcDriverConn interface{}) error {
srcSqliteConn, err := getSqlite3Conn(srcDriverConn)
if err != nil {
return err
}
err = destConn.Raw(ctx, func(destDriverConn interface{}) error {
destSqliteConn, err := getSqlite3Conn(destDriverConn)
if err != nil {
return err
}
return ErrMigrateTables.Wrap(backupConns(ctx, srcSqliteConn, destSqliteConn))
})
if err != nil {
return ErrMigrateTables.Wrap(err)
}
return nil
})
return ErrMigrateTables.Wrap(err)
}
// backupConns executes the sqlite3 backup process that safely ensures that no other
// connections to the database accidentally corrupt the source or destination.
func backupConns(ctx context.Context, sourceDB *sqlite3.SQLiteConn, destDB *sqlite3.SQLiteConn) error {
// "main" represents the main (ie not "temp") database in sqlite3, which is
// the database we want to backup, and the appropriate dest in the destDB
backup, err := destDB.Backup("main", sourceDB, "main")
if err != nil {
return ErrMigrateTables.Wrap(err)
}
isDone, err := backup.Step(0)
if err != nil {
return ErrMigrateTables.Wrap(err)
}
if isDone {
return ErrMigrateTables.New("Backup is done")
}
// Check that the page count and remaining values are reasonable.
initialPageCount := backup.PageCount()
if initialPageCount <= 0 {
return ErrMigrateTables.New("initialPageCount invalid")
}
initialRemaining := backup.Remaining()
if initialRemaining <= 0 {
return ErrMigrateTables.New("initialRemaining invalid")
}
if initialRemaining != initialPageCount {
return ErrMigrateTables.New("initialRemaining != initialPageCount")
}
// Step -1 is used to copy the entire source database to the destination.
isDone, err = backup.Step(-1)
if err != nil {
return ErrMigrateTables.Wrap(err)
}
if !isDone {
return ErrMigrateTables.New("Backup not done")
}
// Check that the page count and remaining values are reasonable.
finalPageCount := backup.PageCount()
if finalPageCount != initialPageCount {
return ErrMigrateTables.New("finalPageCount != initialPageCount")
}
finalRemaining := backup.Remaining()
if finalRemaining != 0 {
return ErrMigrateTables.New("finalRemaining invalid")
}
// Finish the backup.
err = backup.Finish()
if err != nil {
return ErrMigrateTables.Wrap(err)
}
return nil
}
// KeepTables drops all the tables except the specified tables to keep.
func KeepTables(ctx context.Context, db tagsql.DB, tablesToKeep ...string) (err error) {
err = dropTables(ctx, db, tablesToKeep...)
if err != nil {
return ErrKeepTables.Wrap(err)
}
// VACUUM the database to reclaim the space used by the dropped tables. The
// data will not actually be reclaimed until the db has been closed.
// We don't include this in the above transaction because
// you can't VACUUM within a transaction with SQLite3.
_, err = db.ExecContext(ctx, "VACUUM;")
if err != nil {
return ErrKeepTables.Wrap(err)
}
return err
}
// dropTables performs the table drops in a single transaction.
func dropTables(ctx context.Context, db tagsql.DB, tablesToKeep ...string) (err error) {
err = txutil.WithTx(ctx, db, nil, func(ctx context.Context, tx tagsql.Tx) error {
// Get a list of tables excluding sqlite3 system tables.
rows, err := tx.QueryContext(ctx, "SELECT name FROM sqlite_master WHERE type ='table' AND name NOT LIKE 'sqlite_%';")
if err != nil {
return err
}
// Collect a list of the tables. We must do this because we can't do DDL
// statements like drop tables while a query result is open.
var tables []string
for rows.Next() {
var tableName string
err = rows.Scan(&tableName)
if err != nil {
return errs.Combine(err, rows.Close())
}
tables = append(tables, tableName)
}
err = errs.Combine(rows.Err(), rows.Close())
if err != nil {
return err
}
// Loop over the list of tables and decide which ones to keep and which to drop.
for _, tableName := range tables {
if !tableToKeep(tableName, tablesToKeep) {
// Drop tables we aren't told to keep in the destination database.
_, err = tx.ExecContext(ctx, fmt.Sprintf("DROP TABLE %s;", tableName))
if err != nil {
return err
}
}
}
return nil
})
return ErrKeepTables.Wrap(err)
}
func tableToKeep(table string, tables []string) bool {
for _, t := range tables {
if t == table {
return true
}
}
return false
}


@ -1,97 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package sqliteutil_test
import (
"context"
"testing"
_ "github.com/mattn/go-sqlite3"
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/sqliteutil"
"storj.io/storj/private/tagsql"
)
func TestMigrateTablesToDatabase(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
srcDB := openMemDB(ctx, t)
defer ctx.Check(srcDB.Close)
destDB := openMemDB(ctx, t)
defer ctx.Check(destDB.Close)
query := `
CREATE TABLE bobby_jones(I Int);
INSERT INTO bobby_jones VALUES (1);
`
execSQL(ctx, t, srcDB, query)
// This table should be removed after migration
execSQL(ctx, t, srcDB, "CREATE TABLE what(I Int);")
err := sqliteutil.MigrateTablesToDatabase(ctx, srcDB, destDB, "bobby_jones")
require.NoError(t, err)
destSchema, err := sqliteutil.QuerySchema(ctx, destDB)
require.NoError(t, err)
destData, err := sqliteutil.QueryData(ctx, destDB, destSchema)
require.NoError(t, err)
snapshot, err := sqliteutil.LoadSnapshotFromSQL(ctx, query)
require.NoError(t, err)
require.Equal(t, snapshot.Schema, destSchema)
require.Equal(t, snapshot.Data, destData)
}
func TestKeepTables(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
db := openMemDB(ctx, t)
defer ctx.Check(db.Close)
table1SQL := `
CREATE TABLE table_one(I int);
INSERT INTO table_one VALUES(1);
`
table2SQL := `
CREATE TABLE table_two(I int);
INSERT INTO table_two VALUES(2);
`
execSQL(ctx, t, db, table1SQL)
execSQL(ctx, t, db, table2SQL)
err := sqliteutil.KeepTables(ctx, db, "table_one")
require.NoError(t, err)
schema, err := sqliteutil.QuerySchema(ctx, db)
require.NoError(t, err)
data, err := sqliteutil.QueryData(ctx, db, schema)
require.NoError(t, err)
snapshot, err := sqliteutil.LoadSnapshotFromSQL(ctx, table1SQL)
require.NoError(t, err)
require.Equal(t, snapshot.Schema, schema)
require.Equal(t, snapshot.Data, data)
}
func execSQL(ctx context.Context, t *testing.T, db tagsql.DB, query string, args ...interface{}) {
_, err := db.ExecContext(ctx, query, args...)
require.NoError(t, err)
}
func openMemDB(ctx context.Context, t *testing.T) tagsql.DB {
db, err := tagsql.Open(ctx, "sqlite3", ":memory:")
require.NoError(t, err)
return db
}


@ -1,254 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package sqliteutil
import (
"context"
"database/sql"
"regexp"
"sort"
"strings"
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil/dbschema"
)
type definition struct {
name string
table string
sql string
}
// QuerySchema loads the schema from a sqlite database.
func QuerySchema(ctx context.Context, db dbschema.Queryer) (*dbschema.Schema, error) {
schema := &dbschema.Schema{}
tableDefinitions := make([]*definition, 0)
indexDefinitions := make([]*definition, 0)
// find tables and indexes
err := func() error {
rows, err := db.QueryContext(ctx, `
SELECT name, tbl_name, type, sql FROM sqlite_master WHERE sql NOT NULL AND name NOT LIKE 'sqlite_%'
`)
if err != nil {
return errs.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var defName, defTblName, defType, defSQL string
err := rows.Scan(&defName, &defTblName, &defType, &defSQL)
if err != nil {
return errs.Wrap(err)
}
if defType == "table" {
tableDefinitions = append(tableDefinitions, &definition{name: defName, sql: defSQL})
} else if defType == "index" {
indexDefinitions = append(indexDefinitions, &definition{name: defName, sql: defSQL, table: defTblName})
}
}
return rows.Err()
}()
if err != nil {
return nil, err
}
err = discoverTables(ctx, db, schema, tableDefinitions)
if err != nil {
return nil, err
}
err = discoverIndexes(ctx, db, schema, indexDefinitions)
if err != nil {
return nil, err
}
schema.Sort()
return schema, nil
}
func discoverTables(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema, tableDefinitions []*definition) (err error) {
for _, definition := range tableDefinitions {
if err := discoverTable(ctx, db, schema, definition); err != nil {
return err
}
}
return errs.Wrap(err)
}
func discoverTable(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema, definition *definition) (err error) {
table := schema.EnsureTable(definition.name)
tableRows, err := db.QueryContext(ctx, `PRAGMA table_info(`+definition.name+`)`)
if err != nil {
return errs.Wrap(err)
}
for tableRows.Next() {
var defaultValue sql.NullString
var index, name, columnType string
var pk int
var notNull bool
err := tableRows.Scan(&index, &name, &columnType, &notNull, &defaultValue, &pk)
if err != nil {
return errs.Wrap(errs.Combine(tableRows.Err(), tableRows.Close(), err))
}
column := &dbschema.Column{
Name: name,
Type: columnType,
IsNullable: !notNull && pk == 0,
}
table.AddColumn(column)
if pk > 0 {
if table.PrimaryKey == nil {
table.PrimaryKey = make([]string, 0)
}
table.PrimaryKey = append(table.PrimaryKey, name)
}
}
err = errs.Combine(tableRows.Err(), tableRows.Close())
if err != nil {
return errs.Wrap(err)
}
matches := rxUnique.FindAllStringSubmatch(definition.sql, -1)
for _, match := range matches {
// TODO: this could probably be done more simply
var columns []string
for _, name := range strings.Split(match[1], ",") {
columns = append(columns, strings.TrimSpace(name))
}
table.Unique = append(table.Unique, columns)
}
keysRows, err := db.QueryContext(ctx, `PRAGMA foreign_key_list(`+definition.name+`)`)
if err != nil {
return errs.Wrap(err)
}
for keysRows.Next() {
var id, sec int
var tableName, from, to, onUpdate, onDelete, match string
err := keysRows.Scan(&id, &sec, &tableName, &from, &to, &onUpdate, &onDelete, &match)
if err != nil {
return errs.Wrap(errs.Combine(keysRows.Err(), keysRows.Close(), err))
}
column, found := table.FindColumn(from)
if found {
if onDelete == "NO ACTION" {
onDelete = ""
}
if onUpdate == "NO ACTION" {
onUpdate = ""
}
column.Reference = &dbschema.Reference{
Table: tableName,
Column: to,
OnUpdate: onUpdate,
OnDelete: onDelete,
}
}
}
err = errs.Combine(keysRows.Err(), keysRows.Close())
if err != nil {
return errs.Wrap(err)
}
return nil
}
func discoverIndexes(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema, indexDefinitions []*definition) (err error) {
// TODO improve indexes discovery
for _, definition := range indexDefinitions {
index := &dbschema.Index{
Name: definition.name,
Table: definition.table,
}
schema.Indexes = append(schema.Indexes, index)
indexRows, err := db.QueryContext(ctx, `PRAGMA index_info(`+definition.name+`)`)
if err != nil {
return errs.Wrap(err)
}
defer func() { err = errs.Combine(err, indexRows.Close()) }()
type indexInfo struct {
name *string
seqno int
cid int
}
var indexInfos []indexInfo
for indexRows.Next() {
var info indexInfo
err := indexRows.Scan(&info.seqno, &info.cid, &info.name)
if err != nil {
return errs.Wrap(err)
}
indexInfos = append(indexInfos, info)
}
sort.SliceStable(indexInfos, func(i, j int) bool {
return indexInfos[i].seqno < indexInfos[j].seqno
})
sqlDef := definition.sql
var parsedColumns []string
parseColumns := func() []string {
if parsedColumns != nil {
return parsedColumns
}
var base string
if matches := rxIndexExpr.FindStringSubmatchIndex(strings.ToUpper(sqlDef)); len(matches) > 0 {
base = sqlDef[matches[2]:matches[3]]
}
parsedColumns = strings.Split(base, ",")
return parsedColumns
}
for _, info := range indexInfos {
if info.name != nil {
index.Columns = append(index.Columns, *info.name)
continue
}
if info.cid == -1 {
index.Columns = append(index.Columns, "rowid")
} else if info.cid == -2 {
index.Columns = append(index.Columns, parseColumns()[info.seqno])
}
}
// unique
if strings.Contains(definition.sql, "CREATE UNIQUE INDEX") {
index.Unique = true
}
// partial
if matches := rxIndexPartial.FindStringSubmatch(definition.sql); len(matches) > 0 {
index.Partial = strings.TrimSpace(matches[1])
}
}
return errs.Wrap(err)
}
var (
// matches "UNIQUE (a,b)".
rxUnique = regexp.MustCompile(`UNIQUE\s*\((.*?)\)`)
// matches "ON table(expr)".
rxIndexExpr = regexp.MustCompile(`ON\s*[^(]*\((.*)\)`)
// matches "WHERE (partial expression)".
rxIndexPartial = regexp.MustCompile(`WHERE (.*)$`)
)


@ -1,103 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package sqliteutil_test
import (
"testing"
_ "github.com/mattn/go-sqlite3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/dbschema"
"storj.io/storj/private/dbutil/sqliteutil"
"storj.io/storj/private/tagsql"
)
func TestQuery(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
db, err := tagsql.Open(ctx, "sqlite3", ":memory:")
require.NoError(t, err)
defer ctx.Check(db.Close)
emptySchema, err := sqliteutil.QuerySchema(ctx, db)
assert.NoError(t, err)
assert.Equal(t, &dbschema.Schema{}, emptySchema)
_, err = db.ExecContext(ctx, `
CREATE TABLE users (
a integer NOT NULL,
b integer NOT NULL,
c text,
UNIQUE (c),
PRIMARY KEY (a)
);
CREATE TABLE names (
users_a integer REFERENCES users( a ) ON DELETE CASCADE,
a text NOT NULL,
x text,
b text,
PRIMARY KEY (a, x),
UNIQUE ( x ),
UNIQUE ( a, b )
);
CREATE INDEX names_a ON names (a, b);
`)
require.NoError(t, err)
schema, err := sqliteutil.QuerySchema(ctx, db)
assert.NoError(t, err)
expected := &dbschema.Schema{
Tables: []*dbschema.Table{
{
Name: "users",
Columns: []*dbschema.Column{
{Name: "a", Type: "integer", IsNullable: false, Reference: nil},
{Name: "b", Type: "integer", IsNullable: false, Reference: nil},
{Name: "c", Type: "text", IsNullable: true, Reference: nil},
},
PrimaryKey: []string{"a"},
Unique: [][]string{
{"c"},
},
},
{
Name: "names",
Columns: []*dbschema.Column{
{Name: "users_a", Type: "integer", IsNullable: true,
Reference: &dbschema.Reference{
Table: "users",
Column: "a",
OnDelete: "CASCADE",
}},
{Name: "a", Type: "text", IsNullable: false, Reference: nil},
{Name: "x", Type: "text", IsNullable: false, Reference: nil}, // not null, because primary key
{Name: "b", Type: "text", IsNullable: true, Reference: nil},
},
PrimaryKey: []string{"a", "x"},
Unique: [][]string{
{"a", "b"},
{"x"},
},
},
},
Indexes: []*dbschema.Index{
{
Name: "names_a",
Table: "names",
Columns: []string{"a", "b"},
},
},
}
expected.Sort()
schema.Sort()
assert.Equal(t, expected, schema)
}

View File

@ -1,27 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package tempdb
import (
"context"
"strings"
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/cockroachutil"
"storj.io/storj/private/dbutil/pgutil"
)
// OpenUnique opens a temporary, uniquely named database (or isolated database schema)
// for scratch work. When closed, this database or schema will be cleaned up and destroyed.
func OpenUnique(ctx context.Context, connURL string, namePrefix string) (*dbutil.TempDatabase, error) {
if strings.HasPrefix(connURL, "postgres://") || strings.HasPrefix(connURL, "postgresql://") {
return pgutil.OpenUnique(ctx, connURL, namePrefix)
}
if strings.HasPrefix(connURL, "cockroach://") {
return cockroachutil.OpenUnique(ctx, connURL, namePrefix)
}
return nil, errs.New("OpenUnique does not yet support the db type for %q", connURL)
}
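A minimal usage sketch of OpenUnique, for orientation (not part of this change; it assumes the storj.io/private copy keeps the same signature and that connURL points at a reachable Postgres or CockroachDB instance):
package example

import (
	"context"
	"log"

	"storj.io/private/dbutil/tempdb"
)

func scratch(ctx context.Context, connURL string) {
	// OpenUnique creates a uniquely named scratch database or schema;
	// Close drops it again.
	db, err := tempdb.OpenUnique(ctx, connURL, "example")
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = db.Close() }()

	if _, err := db.ExecContext(ctx, `CREATE TABLE example (id INT)`); err != nil {
		log.Fatal(err)
	}
}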

View File

@ -1,30 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package dbutil
import (
"github.com/zeebo/errs"
"storj.io/storj/private/tagsql"
)
// TempDatabase is a database (or something that works like an isolated database,
// such as a PostgreSQL schema) with a semi-unique name which will be cleaned up
// when closed. Mainly useful for testing purposes.
type TempDatabase struct {
tagsql.DB
ConnStr string
Schema string
Driver string
Implementation Implementation
Cleanup func(tagsql.DB) error
}
// Close closes the database and deletes the schema.
func (db *TempDatabase) Close() error {
return errs.Combine(
db.Cleanup(db.DB),
db.DB.Close(),
)
}

View File

@ -1,76 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
// Package txutil provides safe transaction-encapsulation functions which have retry
// semantics as necessary.
package txutil
import (
"context"
"database/sql"
"time"
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil/pgutil/pgerrcode"
"storj.io/storj/private/tagsql"
)
var mon = monkit.Package()
// WithTx starts a transaction on the given sql.DB. The transaction is started in the appropriate
// manner and is restarted when the error indicates a retryable serialization failure. While in the
// transaction, fn is called with a handle to the transaction in order to make use of it. If fn
// returns an error, the transaction is rolled back. If fn returns nil, the transaction is committed.
//
// If fn has any side effects outside of changes to the database, they must be idempotent! fn may
// be called more than once.
func WithTx(ctx context.Context, db tagsql.DB, txOpts *sql.TxOptions, fn func(context.Context, tagsql.Tx) error) (err error) {
defer mon.Task()(&ctx)(&err)
start := time.Now()
for i := 0; ; i++ {
var retryErr error
err, rollbackErr := withTxOnce(ctx, db, txOpts, fn)
// if we had any error, check to see if we should retry.
if err != nil || rollbackErr != nil {
// we will only retry if we have enough resources (duration and count).
if dur := time.Since(start); dur < 5*time.Minute && i < 10 {
// even though the resources (duration and count) allow us to issue a retry,
// we should only do so if the error indicates that a retry will help.
if code := pgerrcode.FromError(err); code == "CR000" || code == "40001" {
continue
}
} else {
// we aren't issuing a retry due to resources (duration and count), so
// include a retry error in the output so that we know something is wrong.
retryErr = errs.New("unable to retry: duration:%v attempts:%d", dur, i)
}
}
mon.IntVal("transaction_retries").Observe(int64(i))
return errs.Wrap(errs.Combine(err, rollbackErr, retryErr))
}
}
// withTxOnce creates a transaction, ensures that it is eventually released (commit or rollback)
// and passes it to the provided callback. It does not handle retries or anything, delegating
// that to callers.
func withTxOnce(ctx context.Context, db tagsql.DB, txOpts *sql.TxOptions, fn func(context.Context, tagsql.Tx) error) (err, rollbackErr error) {
defer mon.Task()(&ctx)(&err)
tx, err := db.BeginTx(ctx, txOpts)
if err != nil {
return errs.Wrap(err), nil
}
defer func() {
if err == nil {
err = tx.Commit()
} else {
rollbackErr = tx.Rollback()
}
}()
return fn(ctx, tx), nil
}
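To illustrate the retry contract described above, a hedged usage sketch (the table and column names are assumptions; the callback must only have idempotent side effects because it can run more than once):
package example

import (
	"context"

	"storj.io/private/dbutil/txutil"
	"storj.io/private/tagsql"
)

func moveCredit(ctx context.Context, db tagsql.DB) error {
	// fn is retried on serialization failures (codes CR000 / 40001),
	// so everything it does must be safe to repeat.
	return txutil.WithTx(ctx, db, nil, func(ctx context.Context, tx tagsql.Tx) error {
		if _, err := tx.ExecContext(ctx, `UPDATE accounts SET balance = balance - 1 WHERE id = $1`, 1); err != nil {
			return err
		}
		_, err := tx.ExecContext(ctx, `UPDATE accounts SET balance = balance + 1 WHERE id = $1`, 2)
		return err
	})
}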

View File

@ -1,299 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package utccheck
import (
"context"
"database/sql/driver"
"time"
"github.com/zeebo/errs"
)
// Connector wraps a driver.Connector with utc checks.
type Connector struct {
connector driver.Connector
}
// WrapConnector wraps a driver.Connector with utc checks.
func WrapConnector(connector driver.Connector) *Connector {
return &Connector{connector: connector}
}
// Unwrap returns the underlying driver.Connector.
func (c *Connector) Unwrap() driver.Connector { return c.connector }
// Connect returns a wrapped driver.Conn with utc checks.
func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) {
conn, err := c.connector.Connect(ctx)
if err != nil {
return nil, errs.Wrap(err)
}
return WrapConn(conn), nil
}
// Driver returns a wrapped driver.Driver with utc checks.
func (c *Connector) Driver() driver.Driver {
return WrapDriver(c.connector.Driver())
}
//
// driver
//
// Driver wraps a driver.Driver with utc checks.
type Driver struct {
driver driver.Driver
}
// WrapDriver wraps a driver.Driver with utc checks.
func WrapDriver(driver driver.Driver) *Driver {
return &Driver{driver: driver}
}
// Unwrap returns the underlying driver.Driver.
func (d *Driver) Unwrap() driver.Driver { return d.driver }
// Open returns a wrapped driver.Conn with utc checks.
func (d *Driver) Open(name string) (driver.Conn, error) {
conn, err := d.driver.Open(name)
if err != nil {
return nil, errs.Wrap(err)
}
return WrapConn(conn), nil
}
//
// conn
//
// Conn wraps a driver.Conn with utc checks.
type Conn struct {
conn driver.Conn
}
// WrapConn wraps a driver.Conn with utc checks.
func WrapConn(conn driver.Conn) *Conn {
return &Conn{conn: conn}
}
// Unwrap returns the underlying driver.Conn.
func (c *Conn) Unwrap() driver.Conn { return c.conn }
// Close closes the conn.
func (c *Conn) Close() error {
return c.conn.Close()
}
// Ping implements driver.Pinger.
func (c *Conn) Ping(ctx context.Context) error {
// sqlite3 implements this
return c.conn.(driver.Pinger).Ping(ctx)
}
// Begin returns a wrapped driver.Tx with utc checks.
func (c *Conn) Begin() (driver.Tx, error) {
//lint:ignore SA1019 deprecated is fine. this is a wrapper.
//nolint
tx, err := c.conn.Begin()
if err != nil {
return nil, errs.Wrap(err)
}
return WrapTx(tx), nil
}
// BeginTx returns a wrapped driver.Tx with utc checks.
func (c *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
// sqlite3 implements this
tx, err := c.conn.(driver.ConnBeginTx).BeginTx(ctx, opts)
if err != nil {
return nil, errs.Wrap(err)
}
return WrapTx(tx), nil
}
// Query checks the arguments for non-utc timestamps and returns the result.
func (c *Conn) Query(query string, args []driver.Value) (driver.Rows, error) {
if err := utcCheckArgs(args); err != nil {
return nil, err
}
// sqlite3 implements this
//
//lint:ignore SA1019 deprecated is fine. this is a wrapper.
//nolint
return c.conn.(driver.Queryer).Query(query, args)
}
// QueryContext checks the arguments for non-utc timestamps and returns the result.
func (c *Conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
if err := utcCheckNamedArgs(args); err != nil {
return nil, err
}
// sqlite3 implements this
return c.conn.(driver.QueryerContext).QueryContext(ctx, query, args)
}
// Exec checks the arguments for non-utc timestamps and returns the result.
func (c *Conn) Exec(query string, args []driver.Value) (driver.Result, error) {
if err := utcCheckArgs(args); err != nil {
return nil, err
}
// sqlite3 implements this
//
//lint:ignore SA1019 deprecated is fine. this is a wrapper.
//nolint
return c.conn.(driver.Execer).Exec(query, args)
}
// ExecContext checks the arguments for non-utc timestamps and returns the result.
func (c *Conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
if err := utcCheckNamedArgs(args); err != nil {
return nil, err
}
// sqlite3 implements this
return c.conn.(driver.ExecerContext).ExecContext(ctx, query, args)
}
// Prepare returns a wrapped driver.Stmt with utc checks.
func (c *Conn) Prepare(query string) (driver.Stmt, error) {
stmt, err := c.conn.Prepare(query)
if err != nil {
return nil, errs.Wrap(err)
}
return WrapStmt(stmt), nil
}
// PrepareContext checks the arguments for non-utc timestamps and returns the result.
func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
// sqlite3 implements this
stmt, err := c.conn.(driver.ConnPrepareContext).PrepareContext(ctx, query)
if err != nil {
return nil, errs.Wrap(err)
}
return WrapStmt(stmt), nil
}
//
// stmt
//
// Stmt wraps a driver.Stmt with utc checks.
type Stmt struct {
stmt driver.Stmt
}
// WrapStmt wraps a driver.Stmt with utc checks.
func WrapStmt(stmt driver.Stmt) *Stmt {
return &Stmt{stmt: stmt}
}
// Unwrap returns the underlying driver.Stmt.
func (s *Stmt) Unwrap() driver.Stmt { return s.stmt }
// Close closes the stmt.
func (s *Stmt) Close() error {
return s.stmt.Close()
}
// NumInput returns the number of inputs to the stmt.
func (s *Stmt) NumInput() int {
return s.stmt.NumInput()
}
// Exec checks the arguments for non-utc timestamps and returns the result.
func (s *Stmt) Exec(args []driver.Value) (driver.Result, error) {
if err := utcCheckArgs(args); err != nil {
return nil, errs.Wrap(err)
}
//lint:ignore SA1019 deprecated is fine. this is a wrapper.
//nolint
return s.stmt.Exec(args)
}
// Query checks the arguments for non-utc timestamps and returns the result.
func (s *Stmt) Query(args []driver.Value) (driver.Rows, error) {
if err := utcCheckArgs(args); err != nil {
return nil, errs.Wrap(err)
}
//lint:ignore SA1019 deprecated is fine. this is a wrapper.
//nolint
return s.stmt.Query(args)
}
//
// tx
//
// Tx wraps a driver.Tx with utc checks.
type Tx struct {
tx driver.Tx
}
// WrapTx wraps a driver.Tx with utc checks.
func WrapTx(tx driver.Tx) *Tx {
return &Tx{tx: tx}
}
// Unwrap returns the underlying driver.Tx.
func (t *Tx) Unwrap() driver.Tx { return t.tx }
// Commit commits the tx.
func (t *Tx) Commit() error {
return t.tx.Commit()
}
// Rollback rolls the tx back.
func (t *Tx) Rollback() error {
return t.tx.Rollback()
}
//
// helpers
//
func utcCheckArg(n int, arg interface{}) error {
var t time.Time
var ok bool
switch a := arg.(type) {
case time.Time:
t, ok = a, true
case *time.Time:
if a != nil {
t, ok = *a, true
}
}
if !ok {
return nil
} else if loc := t.Location(); loc != time.UTC {
return errs.New("invalid timezone on argument %d: %v", n, loc)
} else {
return nil
}
}
func utcCheckNamedArgs(args []driver.NamedValue) error {
for n, arg := range args {
if err := utcCheckArg(n, arg.Value); err != nil {
return err
}
}
return nil
}
func utcCheckArgs(args []driver.Value) error {
for n, arg := range args {
if err := utcCheckArg(n, arg); err != nil {
return err
}
}
return nil
}

View File

@ -1,95 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package utccheck_test
import (
"context"
"database/sql"
"database/sql/driver"
"testing"
"time"
"github.com/stretchr/testify/require"
"storj.io/storj/private/dbutil/utccheck"
)
func TestUTCDB(t *testing.T) {
notUTC := time.FixedZone("not utc", -1)
db := sql.OpenDB(utccheck.WrapConnector(emptyConnector{}))
{ // time.Time not in UTC
_, err := db.Exec("", time.Now().In(notUTC))
require.Error(t, err)
}
{ // *time.Time not in UTC
now := time.Now().In(notUTC)
_, err := db.Exec("", &now)
require.Error(t, err)
}
{ // time.Time in UTC
_, err := db.Exec("", time.Now().UTC())
require.NoError(t, err)
}
{ // *time.Time in UTC
now := time.Now().UTC()
_, err := db.Exec("", &now)
require.NoError(t, err)
}
{ // nil *time.Time
_, err := db.Exec("", (*time.Time)(nil))
require.NoError(t, err)
}
}
//
// empty driver
//
type emptyConnector struct{}
func (emptyConnector) Connect(context.Context) (driver.Conn, error) { return emptyConn{}, nil }
func (emptyConnector) Driver() driver.Driver { return nil }
type emptyConn struct{}
func (emptyConn) Close() error { return nil }
func (emptyConn) Prepare(query string) (driver.Stmt, error) { return emptyStmt{}, nil }
func (emptyConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
return emptyStmt{}, nil
}
func (emptyConn) Begin() (driver.Tx, error) { return emptyTx{}, nil }
func (emptyConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
return emptyTx{}, nil
}
func (emptyConn) Query(query string, args []driver.Value) (driver.Rows, error) { return nil, nil }
func (emptyConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
return nil, nil
}
func (emptyConn) Exec(query string, args []driver.Value) (driver.Result, error) { return nil, nil }
func (emptyConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
return nil, nil
}
type emptyTx struct{}
func (emptyTx) Commit() error { return nil }
func (emptyTx) Rollback() error { return nil }
type emptyStmt struct{}
func (emptyStmt) Close() error { return nil }
func (emptyStmt) Exec(args []driver.Value) (driver.Result, error) { return nil, nil }
func (emptyStmt) Query(args []driver.Value) (driver.Rows, error) { return nil, nil }
// must be 1 so that we can pass 1 argument.
func (emptyStmt) NumInput() int { return 1 }

View File

@ -10,8 +10,8 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil/txutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil/txutil"
"storj.io/private/tagsql"
)
// Error is the default migrate errs class.

View File

@ -13,10 +13,10 @@ import (
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/private/dbutil/tempdb"
"storj.io/private/dbutil/pgtest"
"storj.io/private/dbutil/tempdb"
"storj.io/private/tagsql"
"storj.io/storj/private/migrate"
"storj.io/storj/private/tagsql"
)
func TestCreate_Sqlite(t *testing.T) {

View File

@ -4,7 +4,7 @@
package migrate
import (
"storj.io/storj/private/tagsql"
"storj.io/private/tagsql"
)
// DBX contains additional methods for migrations.

View File

@ -15,8 +15,8 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/private/dbutil/txutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil/txutil"
"storj.io/private/tagsql"
)
var (

View File

@ -18,10 +18,10 @@ import (
"go.uber.org/zap"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/private/dbutil/tempdb"
"storj.io/private/dbutil/pgtest"
"storj.io/private/dbutil/tempdb"
"storj.io/private/tagsql"
"storj.io/storj/private/migrate"
"storj.io/storj/private/tagsql"
)
func TestBasicMigrationSqliteNoRebind(t *testing.T) {

View File

@ -1,104 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package tagsql_test
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
"storj.io/storj/private/tagsql"
)
func TestDetect(t *testing.T) {
run(t, func(parentctx *testcontext.Context, t *testing.T, db tagsql.DB, support tagsql.ContextSupport) {
_, err := db.ExecContext(parentctx, "CREATE TABLE example (num INT)")
require.NoError(t, err)
_, err = db.ExecContext(parentctx, "INSERT INTO example (num) values (1)")
require.NoError(t, err)
ctx, cancel := context.WithCancel(parentctx)
cancel()
var verify func(t require.TestingT, err error, msgAndArgs ...interface{})
if support.Basic() {
verify = require.Error
} else {
verify = require.NoError
}
err = db.PingContext(ctx)
verify(t, err)
_, err = db.ExecContext(ctx, "INSERT INTO example (num) values (1)")
verify(t, err)
row := db.QueryRowContext(ctx, "select num from example")
var value int64
err = row.Scan(&value)
verify(t, err)
var rows tagsql.Rows
rows, err = db.QueryContext(ctx, "select num from example")
verify(t, err)
if rows != nil {
require.NoError(t, rows.Err())
require.NoError(t, rows.Close())
}
if support.Transactions() {
var tx tagsql.Tx
tx, err = db.Begin(ctx)
require.Error(t, err)
if tx != nil {
require.NoError(t, tx.Rollback())
}
tx, err = db.BeginTx(ctx, nil)
require.Error(t, err)
if tx != nil {
require.NoError(t, tx.Rollback())
}
}
var verifyTx func(t require.TestingT, err error, msgAndArgs ...interface{})
if support.Transactions() {
verifyTx = require.Error
} else {
verifyTx = require.NoError
}
for _, alt := range []bool{false, true} {
t.Log("Transactions", alt)
var tx tagsql.Tx
if alt {
tx, err = db.Begin(parentctx)
} else {
tx, err = db.BeginTx(parentctx, nil)
}
require.NoError(t, err)
_, err = tx.ExecContext(ctx, "INSERT INTO example (num) values (1)")
verifyTx(t, err)
var rows tagsql.Rows
rows, err = tx.QueryContext(ctx, "select num from example")
verifyTx(t, err)
if rows != nil {
require.NoError(t, rows.Err())
require.NoError(t, rows.Close())
}
row := tx.QueryRowContext(ctx, "select num from example")
var value int64
// lib/pq seems to stall here for some reason?
err = row.Scan(&value)
verifyTx(t, err)
require.NoError(t, tx.Commit())
}
})
}

View File

@ -1,122 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package tagsql
import (
"context"
"database/sql"
"errors"
"github.com/zeebo/errs"
"storj.io/common/context2"
"storj.io/private/traces"
)
// Conn is an interface for *sql.Conn-like connections.
type Conn interface {
BeginTx(ctx context.Context, txOptions *sql.TxOptions) (Tx, error)
Close() error
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
PingContext(ctx context.Context) error
PrepareContext(ctx context.Context, query string) (Stmt, error)
QueryContext(ctx context.Context, query string, args ...interface{}) (Rows, error)
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
Raw(ctx context.Context, f func(driverConn interface{}) error) (err error)
}
// ConnWithoutTxContext wraps *sql.Conn.
func ConnWithoutTxContext(conn *sql.Conn) Conn {
return &sqlConn{conn: conn, useContext: true, useTxContext: false}
}
// TODO:
// Is there a way to call non-context versions on *sql.Conn?
// The pessimistic and safer assumption is that using any context may break
// lib/pq internally. It might be fine, but it is unclear how safe it actually is.
// sqlConn implements Conn, which optionally disables contexts.
type sqlConn struct {
conn *sql.Conn
useContext bool
useTxContext bool
tracker *tracker
}
func (s *sqlConn) BeginTx(ctx context.Context, txOptions *sql.TxOptions) (Tx, error) {
if txOptions != nil {
return nil, errors.New("txOptions not supported")
}
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
ctx = context2.WithoutCancellation(ctx)
}
tx, err := s.conn.BeginTx(ctx, nil)
if err != nil {
return nil, err
}
return &sqlTx{
tx: tx,
useContext: s.useContext && s.useTxContext,
tracker: s.tracker.child(1),
}, nil
}
func (s *sqlConn) Close() error {
return errs.Combine(s.tracker.close(), s.conn.Close())
}
func (s *sqlConn) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
ctx = context2.WithoutCancellation(ctx)
}
return s.conn.ExecContext(ctx, query, args...)
}
func (s *sqlConn) PingContext(ctx context.Context) error {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
ctx = context2.WithoutCancellation(ctx)
}
return s.conn.PingContext(ctx)
}
func (s *sqlConn) PrepareContext(ctx context.Context, query string) (Stmt, error) {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
ctx = context2.WithoutCancellation(ctx)
}
stmt, err := s.conn.PrepareContext(ctx, query)
if err != nil {
return nil, err
}
return &sqlStmt{
stmt: stmt,
useContext: s.useContext,
tracker: s.tracker.child(1),
}, nil
}
func (s *sqlConn) QueryContext(ctx context.Context, query string, args ...interface{}) (Rows, error) {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
ctx = context2.WithoutCancellation(ctx)
}
return s.tracker.wrapRows(s.conn.QueryContext(ctx, query, args...))
}
func (s *sqlConn) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
ctx = context2.WithoutCancellation(ctx)
}
return s.conn.QueryRowContext(ctx, query, args...)
}
func (s *sqlConn) Raw(ctx context.Context, f func(driverConn interface{}) error) (err error) {
traces.Tag(ctx, traces.TagDB)
return s.conn.Raw(f)
}

View File

@ -1,287 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
// Package tagsql implements a tagged wrapper for databases.
//
// This package also hides context cancellation from database drivers
// that don't support it.
package tagsql
import (
"context"
"database/sql"
"database/sql/driver"
"errors"
"runtime/pprof"
"time"
"github.com/zeebo/errs"
"storj.io/common/context2"
"storj.io/private/traces"
)
// Open opens *sql.DB and wraps the implementation with tagging.
func Open(ctx context.Context, driverName, dataSourceName string) (DB, error) {
var sdb *sql.DB
var err error
pprof.Do(ctx, pprof.Labels("db", driverName), func(ctx context.Context) {
sdb, err = sql.Open(driverName, dataSourceName)
})
if err != nil {
return nil, err
}
err = sdb.PingContext(ctx)
if err != nil {
return nil, err
}
return Wrap(sdb), nil
}
// Wrap turns a *sql.DB into a DB-matching interface.
func Wrap(db *sql.DB) DB {
support, err := DetectContextSupport(db)
if err != nil {
// When we reach here it is definitely a programmer error.
// Add any new database drivers into DetectContextSupport
panic(err)
}
return &sqlDB{
db: db,
useContext: support.Basic(),
useTxContext: support.Transactions(),
tracker: rootTracker(1),
}
}
// WithoutContext turns a *sql.DB into a DB-matching interface that redirects context calls to regular calls.
func WithoutContext(db *sql.DB) DB {
return &sqlDB{
db: db,
useContext: false,
useTxContext: false,
tracker: rootTracker(1),
}
}
// AllowContext turns a *sql.DB into a DB which uses context calls.
func AllowContext(db *sql.DB) DB {
return &sqlDB{
db: db,
useContext: true,
useTxContext: true,
tracker: rootTracker(1),
}
}
// DB implements a wrapper for *sql.DB-like database.
//
// The wrapper adds tracing to all calls.
// It also adds context handling compatibility for different databases.
type DB interface {
// To be deprecated: the following take ctx as an argument,
// but do not pass it forward to the underlying database.
Begin(ctx context.Context) (Tx, error)
Driver() driver.Driver
Exec(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
Ping(ctx context.Context) error
Prepare(ctx context.Context, query string) (Stmt, error)
Query(ctx context.Context, query string, args ...interface{}) (Rows, error)
QueryRow(ctx context.Context, query string, args ...interface{}) *sql.Row
BeginTx(ctx context.Context, txOptions *sql.TxOptions) (Tx, error)
Conn(ctx context.Context) (Conn, error)
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
PingContext(ctx context.Context) error
PrepareContext(ctx context.Context, query string) (Stmt, error)
QueryContext(ctx context.Context, query string, args ...interface{}) (Rows, error)
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
Close() error
SetConnMaxLifetime(d time.Duration)
SetMaxIdleConns(n int)
SetMaxOpenConns(n int)
Stats() sql.DBStats
Internal() *sql.DB
}
// sqlDB implements DB, which optionally disables contexts.
type sqlDB struct {
db *sql.DB
useContext bool
useTxContext bool
tracker *tracker
}
func (s *sqlDB) Internal() *sql.DB { return s.db }
func (s *sqlDB) Begin(ctx context.Context) (Tx, error) {
traces.Tag(ctx, traces.TagDB)
tx, err := s.db.Begin()
if err != nil {
return nil, err
}
return &sqlTx{
tx: tx,
useContext: s.useContext && s.useTxContext,
tracker: s.tracker.child(1),
}, err
}
func (s *sqlDB) BeginTx(ctx context.Context, txOptions *sql.TxOptions) (Tx, error) {
if txOptions != nil {
return nil, errors.New("txOptions not supported")
}
traces.Tag(ctx, traces.TagDB)
var tx *sql.Tx
var err error
if !s.useContext {
tx, err = s.db.Begin()
} else {
tx, err = s.db.BeginTx(ctx, nil)
}
if err != nil {
return nil, err
}
return &sqlTx{
tx: tx,
useContext: s.useContext && s.useTxContext,
tracker: s.tracker.child(1),
}, err
}
func (s *sqlDB) Close() error {
return errs.Combine(s.tracker.close(), s.db.Close())
}
func (s *sqlDB) Conn(ctx context.Context) (Conn, error) {
traces.Tag(ctx, traces.TagDB)
var conn *sql.Conn
var err error
if !s.useContext {
// Uses WithoutCancellation, because there isn't an underlying call that doesn't take a context.
conn, err = s.db.Conn(context2.WithoutCancellation(ctx))
} else {
conn, err = s.db.Conn(ctx)
}
if err != nil {
return nil, err
}
return &sqlConn{
conn: conn,
useContext: s.useContext,
useTxContext: s.useTxContext,
tracker: s.tracker.child(1),
}, nil
}
func (s *sqlDB) Driver() driver.Driver {
return s.db.Driver()
}
func (s *sqlDB) Exec(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
traces.Tag(ctx, traces.TagDB)
return s.db.Exec(query, args...)
}
func (s *sqlDB) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
return s.db.Exec(query, args...)
}
return s.db.ExecContext(ctx, query, args...)
}
func (s *sqlDB) Ping(ctx context.Context) error {
traces.Tag(ctx, traces.TagDB)
return s.db.Ping()
}
func (s *sqlDB) PingContext(ctx context.Context) error {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
return s.db.Ping()
}
return s.db.PingContext(ctx)
}
func (s *sqlDB) Prepare(ctx context.Context, query string) (Stmt, error) {
traces.Tag(ctx, traces.TagDB)
stmt, err := s.db.Prepare(query)
if err != nil {
return nil, err
}
return &sqlStmt{stmt: stmt, useContext: s.useContext}, nil
}
func (s *sqlDB) PrepareContext(ctx context.Context, query string) (Stmt, error) {
traces.Tag(ctx, traces.TagDB)
var stmt *sql.Stmt
var err error
if !s.useContext {
stmt, err = s.db.Prepare(query)
if err != nil {
return nil, err
}
} else {
stmt, err = s.db.PrepareContext(ctx, query)
if err != nil {
return nil, err
}
}
return &sqlStmt{
stmt: stmt,
useContext: s.useContext,
tracker: s.tracker.child(1),
}, nil
}
func (s *sqlDB) Query(ctx context.Context, query string, args ...interface{}) (Rows, error) {
traces.Tag(ctx, traces.TagDB)
return s.tracker.wrapRows(s.db.Query(query, args...))
}
func (s *sqlDB) QueryContext(ctx context.Context, query string, args ...interface{}) (Rows, error) {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
return s.tracker.wrapRows(s.db.Query(query, args...))
}
return s.tracker.wrapRows(s.db.QueryContext(ctx, query, args...))
}
func (s *sqlDB) QueryRow(ctx context.Context, query string, args ...interface{}) *sql.Row {
traces.Tag(ctx, traces.TagDB)
return s.db.QueryRow(query, args...)
}
func (s *sqlDB) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
return s.db.QueryRow(query, args...)
}
return s.db.QueryRowContext(ctx, query, args...)
}
func (s *sqlDB) SetConnMaxLifetime(d time.Duration) {
s.db.SetConnMaxLifetime(d)
}
func (s *sqlDB) SetMaxIdleConns(n int) {
s.db.SetMaxIdleConns(n)
}
func (s *sqlDB) SetMaxOpenConns(n int) {
s.db.SetMaxOpenConns(n)
}
func (s *sqlDB) Stats() sql.DBStats {
return s.db.Stats()
}
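For orientation, a small sketch of opening a database through tagsql (not part of this diff; it assumes the storj.io/private/tagsql copy keeps the Open signature shown above and uses sqlite purely as an example driver):
package example

import (
	"context"
	"log"

	_ "github.com/mattn/go-sqlite3"

	"storj.io/private/tagsql"
)

func open(ctx context.Context) {
	// Open pings the database and wraps it so that per-driver context
	// support is detected automatically.
	db, err := tagsql.Open(ctx, "sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = db.Close() }()

	if _, err := db.ExecContext(ctx, `CREATE TABLE example (num INT)`); err != nil {
		log.Fatal(err)
	}
}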

View File

@ -1,67 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package tagsql_test
import (
"testing"
_ "github.com/jackc/pgx/v4/stdlib"
_ "github.com/mattn/go-sqlite3"
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/cockroachutil"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/tagsql"
)
func run(t *testing.T, fn func(*testcontext.Context, *testing.T, tagsql.DB, tagsql.ContextSupport)) {
t.Helper()
t.Run("mattn-sqlite3", func(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
db, err := tagsql.Open(ctx, "sqlite3", ":memory:")
if err != nil {
t.Fatal(err)
}
defer ctx.Check(db.Close)
fn(ctx, t, db, tagsql.SupportBasic)
})
t.Run("jackc-pgx-postgres", func(t *testing.T) {
connstr := pgtest.PickPostgres(t)
ctx := testcontext.New(t)
defer ctx.Cleanup()
db, err := pgutil.OpenUnique(ctx, connstr, "detect")
require.NoError(t, err)
defer ctx.Check(db.Close)
db.SetMaxOpenConns(100)
db.SetMaxIdleConns(100)
fn(ctx, t, db.DB, tagsql.SupportNone)
})
t.Run("jackc-pgx-cockroach", func(t *testing.T) {
connstr := pgtest.PickCockroach(t)
ctx := testcontext.New(t)
defer ctx.Cleanup()
db, err := cockroachutil.OpenUnique(ctx, connstr, "detect")
require.NoError(t, err)
defer ctx.Check(db.Close)
db.SetMaxOpenConns(100)
db.SetMaxIdleConns(100)
fn(ctx, t, db.DB, tagsql.SupportNone)
})
}

View File

@ -1,73 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package tagsql
import (
"database/sql"
"reflect"
"github.com/zeebo/errs"
)
// Currently lib/pq has known issues with contexts in general.
// For lib/pq, context methods will be completely disabled.
//
// A few issues:
// https://github.com/lib/pq/issues/874
// https://github.com/lib/pq/issues/908
// https://github.com/lib/pq/issues/731
//
// mattn/go-sqlite3 seems to work with contexts for the most part,
// except in transactions, where context support needs to be disabled.
// https://github.com/mattn/go-sqlite3/issues/769
//
// So far, we believe that github.com/jackc/pgx supports contexts
// and cancellations properly.
// ContextSupport returns the level of context support a driver has.
type ContextSupport byte
// Constants for defining context level support.
const (
SupportBasic ContextSupport = 1 << 0
SupportTransactions ContextSupport = 1 << 1
SupportNone ContextSupport = 0
SupportAll ContextSupport = SupportBasic | SupportTransactions
)
// Basic returns true when driver supports basic contexts.
func (v ContextSupport) Basic() bool {
return v&SupportBasic == SupportBasic
}
// Transactions returns true when driver supports contexts inside transactions.
func (v ContextSupport) Transactions() bool {
return v&SupportTransactions == SupportTransactions
}
// DetectContextSupport detects the context support of a *sql.DB driver without importing the specific driver packages.
func DetectContextSupport(db *sql.DB) (ContextSupport, error) {
// We're using reflect so we don't have to import these packages
// into the binary.
typ := reflect.TypeOf(db.Driver())
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
switch {
case typ.PkgPath() == "github.com/mattn/go-sqlite3" && typ.Name() == "SQLiteDriver" ||
// wrapper for sqlite
typ.PkgPath() == "storj.io/storj/private/dbutil/utccheck" && typ.Name() == "Driver":
return SupportBasic, nil
case typ.PkgPath() == "github.com/lib/pq" && typ.Name() == "Driver" ||
// internally uses lib/pq
typ.PkgPath() == "storj.io/storj/private/dbutil/cockroachutil" && typ.Name() == "Driver":
return SupportNone, nil
case typ.PkgPath() == "github.com/jackc/pgx/v4/stdlib" && typ.Name() == "Driver":
return SupportTransactions, nil
default:
return SupportNone, errs.New("sql driver %q %q unsupported", typ.PkgPath(), typ.Name())
}
}
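A small sketch of querying the detected support level directly (names are assumptions; normally Wrap does this for you):
package example

import (
	"database/sql"
	"fmt"

	"storj.io/private/tagsql"
)

func report(db *sql.DB) {
	support, err := tagsql.DetectContextSupport(db)
	if err != nil {
		fmt.Println("unknown driver:", err)
		return
	}
	// Basic reports plain context support, Transactions reports
	// whether contexts are safe inside transactions.
	fmt.Println("basic:", support.Basic(), "transactions:", support.Transactions())
}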

View File

@ -1,84 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package tagsql
import (
"database/sql"
"errors"
"fmt"
"runtime/debug"
"strings"
"github.com/zeebo/errs"
)
// Rows implements a wrapper for *sql.Rows.
type Rows interface {
Close() error
ColumnTypes() ([]*sql.ColumnType, error)
Columns() ([]string, error)
Err() error
Next() bool
NextResultSet() bool
Scan(dest ...interface{}) error
}
func (t *tracker) wrapRows(rows *sql.Rows, err error) (Rows, error) {
if rows == nil || err != nil {
return nil, err
}
return &sqlRows{
rows: rows,
tracker: t.child(2),
}, err
}
type sqlRows struct {
rows *sql.Rows
tracker *tracker
errcalled bool
}
func (s *sqlRows) Close() error {
var errCalling error
if !s.errcalled {
var x strings.Builder
fmt.Fprintf(&x, "--- rows.Err() was not called, for rows started at ---\n")
fmt.Fprintf(&x, "%s", s.tracker.formatStack())
fmt.Fprintf(&x, "--- Closing the rows at ---\n")
fmt.Fprintf(&x, "%s", string(debug.Stack()))
errCalling = errors.New(x.String())
}
return errs.Combine(errCalling, s.tracker.close(), s.rows.Close())
}
func (s *sqlRows) ColumnTypes() ([]*sql.ColumnType, error) {
return s.rows.ColumnTypes()
}
func (s *sqlRows) Columns() ([]string, error) {
return s.rows.Columns()
}
func (s *sqlRows) Err() error {
s.errcalled = true
return s.rows.Err()
}
func (s *sqlRows) Next() bool {
s.errcalled = false
return s.rows.Next()
}
func (s *sqlRows) NextResultSet() bool {
return s.rows.NextResultSet()
}
func (s *sqlRows) Scan(dest ...interface{}) error {
err := s.rows.Scan(dest...)
if err != nil {
s.errcalled = true
}
return err
}
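Since Close complains when Err was never observed, callers are expected to follow a loop shape like the one below (a sketch; the table and column names are assumptions):
package example

import (
	"context"

	"github.com/zeebo/errs"

	"storj.io/private/tagsql"
)

func sum(ctx context.Context, db tagsql.DB) (total int64, err error) {
	rows, err := db.QueryContext(ctx, `SELECT num FROM example`)
	if err != nil {
		return 0, err
	}
	// Err is combined before Close, so the wrapper sees that it was called.
	defer func() { err = errs.Combine(err, rows.Err(), rows.Close()) }()

	for rows.Next() {
		var n int64
		if err := rows.Scan(&n); err != nil {
			return 0, err
		}
		total += n
	}
	return total, nil
}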

View File

@ -1,82 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package tagsql
import (
"context"
"database/sql"
"github.com/zeebo/errs"
"storj.io/private/traces"
)
// Stmt is an interface for *sql.Stmt.
type Stmt interface {
// Exec and other methods take a context for tracing
// purposes, but do not pass the context to the underlying database query.
Exec(ctx context.Context, args ...interface{}) (sql.Result, error)
Query(ctx context.Context, args ...interface{}) (Rows, error)
QueryRow(ctx context.Context, args ...interface{}) *sql.Row
// ExecContext and other Context methods take a context for tracing and also
// pass the context to the underlying database, if this tagsql instance is
// configured to do so. (By default, lib/pq never passes the context, and
// mattn/go-sqlite3 does not pass it inside transactions.)
ExecContext(ctx context.Context, args ...interface{}) (sql.Result, error)
QueryContext(ctx context.Context, args ...interface{}) (Rows, error)
QueryRowContext(ctx context.Context, args ...interface{}) *sql.Row
Close() error
}
// sqlStmt implements Stmt, which optionally disables contexts.
type sqlStmt struct {
stmt *sql.Stmt
useContext bool
tracker *tracker
}
func (s *sqlStmt) Close() error {
return errs.Combine(s.tracker.close(), s.stmt.Close())
}
func (s *sqlStmt) Exec(ctx context.Context, args ...interface{}) (sql.Result, error) {
traces.Tag(ctx, traces.TagDB)
return s.stmt.Exec(args...)
}
func (s *sqlStmt) ExecContext(ctx context.Context, args ...interface{}) (sql.Result, error) {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
return s.stmt.Exec(args...)
}
return s.stmt.ExecContext(ctx, args...)
}
func (s *sqlStmt) Query(ctx context.Context, args ...interface{}) (Rows, error) {
traces.Tag(ctx, traces.TagDB)
return s.tracker.wrapRows(s.stmt.Query(args...))
}
func (s *sqlStmt) QueryContext(ctx context.Context, args ...interface{}) (Rows, error) {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
return s.tracker.wrapRows(s.stmt.Query(args...))
}
return s.tracker.wrapRows(s.stmt.QueryContext(ctx, args...))
}
func (s *sqlStmt) QueryRow(ctx context.Context, args ...interface{}) *sql.Row {
traces.Tag(ctx, traces.TagDB)
return s.stmt.QueryRow(args...)
}
func (s *sqlStmt) QueryRowContext(ctx context.Context, args ...interface{}) *sql.Row {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
return s.stmt.QueryRow(args...)
}
return s.stmt.QueryRowContext(ctx, args...)
}

View File

@ -1,14 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
// +build !race
package tagsql
type tracker struct{}
func rootTracker(skip int) *tracker { return nil }
func (t *tracker) child(skip int) *tracker { return nil }
func (t *tracker) close() error { return nil }
func (t *tracker) formatStack() string { return "<no start stack for !race>" }

View File

@ -1,104 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
// +build race
package tagsql
import (
"errors"
"fmt"
"runtime"
"strings"
"sync"
)
type tracker struct {
parent *tracker
callers frames
mu sync.Mutex
open map[*tracker]struct{}
}
type frames [5]uintptr
func callers(skipCallers int) frames {
var fs frames
runtime.Callers(skipCallers+1, fs[:])
return fs
}
func rootTracker(skipCallers int) *tracker {
return &tracker{
callers: callers(skipCallers + 1),
open: map[*tracker]struct{}{},
}
}
func (t *tracker) child(skipCallers int) *tracker {
c := rootTracker(skipCallers + 1)
c.parent = t
t.add(c)
return c
}
func (t *tracker) add(r *tracker) {
t.mu.Lock()
defer t.mu.Unlock()
t.open[r] = struct{}{}
}
func (t *tracker) del(r *tracker) {
t.mu.Lock()
defer t.mu.Unlock()
delete(t.open, r)
}
func (t *tracker) close() error {
var err error
if len(t.open) != 0 {
var s strings.Builder
fmt.Fprintf(&s, "--- Database created at ---\n")
fmt.Fprintf(&s, "%s", t.callers.String())
unique := map[frames]int{}
for r := range t.open {
unique[r.callers]++
}
for r, count := range unique {
fmt.Fprintf(&s, "--- Unclosed resource opened from (count=%d) ---\n", count)
fmt.Fprintf(&s, "%s", r.String())
}
fmt.Fprintf(&s, "--- Closing the parent of unclosed resources ---\n")
closingFrames := callers(2)
fmt.Fprintf(&s, "%s", closingFrames.String())
err = errors.New(s.String())
}
if t.parent != nil {
t.parent.del(t)
}
return err
}
func (t *tracker) formatStack() string {
return t.callers.String()
}
func (fs *frames) String() string {
var s strings.Builder
frames := runtime.CallersFrames((*fs)[:])
for {
frame, more := frames.Next()
if strings.Contains(frame.File, "runtime/") {
break
}
fmt.Fprintf(&s, "%s\n", frame.Function)
fmt.Fprintf(&s, "\t%s:%d\n", frame.File, frame.Line)
if !more {
break
}
}
return s.String()
}

View File

@ -1,124 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package tagsql
import (
"context"
"database/sql"
"github.com/zeebo/errs"
"storj.io/private/traces"
)
// Tx is an interface for *sql.Tx-like transactions.
type Tx interface {
// Exec and other methods take a context for tracing
// purposes, but do not pass the context to the underlying database query.
Exec(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
Prepare(ctx context.Context, query string) (Stmt, error)
Query(ctx context.Context, query string, args ...interface{}) (Rows, error)
QueryRow(ctx context.Context, query string, args ...interface{}) *sql.Row
// ExecContext and other Context methods take a context for tracing and also
// pass the context to the underlying database, if this tagsql instance is
// configured to do so. (By default, lib/pq never passes the context, and
// mattn/go-sqlite3 does not pass it inside transactions.)
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
PrepareContext(ctx context.Context, query string) (Stmt, error)
QueryContext(ctx context.Context, query string, args ...interface{}) (Rows, error)
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
Commit() error
Rollback() error
}
// sqlTx implements Tx, which optionally disables contexts.
type sqlTx struct {
tx *sql.Tx
useContext bool
tracker *tracker
}
func (s *sqlTx) Exec(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
traces.Tag(ctx, traces.TagDB)
return s.tx.Exec(query, args...)
}
func (s *sqlTx) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
return s.tx.Exec(query, args...)
}
return s.tx.ExecContext(ctx, query, args...)
}
func (s *sqlTx) Prepare(ctx context.Context, query string) (Stmt, error) {
traces.Tag(ctx, traces.TagDB)
stmt, err := s.tx.Prepare(query)
if err != nil {
return nil, err
}
return &sqlStmt{
stmt: stmt,
useContext: s.useContext,
tracker: s.tracker.child(1),
}, nil
}
func (s *sqlTx) PrepareContext(ctx context.Context, query string) (Stmt, error) {
traces.Tag(ctx, traces.TagDB)
var stmt *sql.Stmt
var err error
if !s.useContext {
stmt, err = s.tx.Prepare(query)
if err != nil {
return nil, err
}
} else {
stmt, err = s.tx.PrepareContext(ctx, query)
if err != nil {
return nil, err
}
}
return &sqlStmt{
stmt: stmt,
useContext: s.useContext,
tracker: s.tracker.child(1),
}, err
}
func (s *sqlTx) Query(ctx context.Context, query string, args ...interface{}) (Rows, error) {
traces.Tag(ctx, traces.TagDB)
return s.tracker.wrapRows(s.tx.Query(query, args...))
}
func (s *sqlTx) QueryContext(ctx context.Context, query string, args ...interface{}) (Rows, error) {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
return s.tracker.wrapRows(s.tx.Query(query, args...))
}
return s.tracker.wrapRows(s.tx.QueryContext(ctx, query, args...))
}
func (s *sqlTx) QueryRow(ctx context.Context, query string, args ...interface{}) *sql.Row {
traces.Tag(ctx, traces.TagDB)
return s.tx.QueryRow(query, args...)
}
func (s *sqlTx) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
traces.Tag(ctx, traces.TagDB)
if !s.useContext {
return s.tx.QueryRow(query, args...)
}
return s.tx.QueryRowContext(ctx, query, args...)
}
func (s *sqlTx) Commit() error {
return errs.Combine(s.tracker.close(), s.tx.Commit())
}
func (s *sqlTx) Rollback() error {
return errs.Combine(s.tracker.close(), s.tx.Rollback())
}

View File

@ -24,7 +24,7 @@ import (
"storj.io/common/identity/testidentity"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/private/dbutil/pgutil"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
"storj.io/storj/versioncontrol"

View File

@ -10,7 +10,7 @@ import (
"testing"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/private/dbutil/pgtest"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
"storj.io/uplink"
)

View File

@ -9,8 +9,8 @@ import (
"github.com/zeebo/errs"
"storj.io/common/storj"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil/pgutil"
"storj.io/private/tagsql"
)
// NodeAlias is a metabase local alias for NodeID-s to reduce segment table size.

View File

@ -13,9 +13,9 @@ import (
"github.com/zeebo/errs"
"storj.io/common/storj"
"storj.io/storj/private/dbutil/pgutil/pgerrcode"
"storj.io/storj/private/dbutil/txutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil/pgutil/pgerrcode"
"storj.io/private/dbutil/txutil"
"storj.io/private/tagsql"
)
// we need to disable PlainSize validation for old uplinks.

View File

@ -13,9 +13,9 @@ import (
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/dbutil/txutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil/pgutil"
"storj.io/private/dbutil/txutil"
"storj.io/private/tagsql"
)
// CommitObjectWithSegments contains arguments necessary for committing an object.

View File

@ -18,10 +18,10 @@ import (
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/private/dbutil"
"storj.io/private/dbutil/pgutil"
"storj.io/private/tagsql"
"storj.io/storj/private/migrate"
"storj.io/storj/private/tagsql"
)
var (

View File

@ -13,7 +13,7 @@ import (
"go.uber.org/zap/zaptest"
"storj.io/common/testcontext"
_ "storj.io/storj/private/dbutil/cockroachutil" // register cockroach driver
_ "storj.io/private/dbutil/cockroachutil" // register cockroach driver
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
)

View File

@ -12,9 +12,9 @@ import (
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil"
"storj.io/private/dbutil/pgutil"
"storj.io/private/tagsql"
)
// DeleteObjectExactVersion contains arguments necessary for deleting an exact version of object.

View File

@ -9,8 +9,8 @@ import (
"errors"
"storj.io/common/uuid"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil"
"storj.io/private/tagsql"
)
const deleteBatchSizeLimit = 100

View File

@ -14,8 +14,8 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil"
"storj.io/private/tagsql"
)
const (

View File

@ -11,7 +11,7 @@ import (
"github.com/zeebo/errs"
"storj.io/common/uuid"
"storj.io/storj/private/tagsql"
"storj.io/private/tagsql"
)
// objectIterator enables iteration on objects in a bucket.

View File

@ -10,7 +10,7 @@ import (
"time"
"storj.io/common/uuid"
"storj.io/storj/private/tagsql"
"storj.io/private/tagsql"
)
// ListSegments contains arguments necessary for listing stream segments.

View File

@ -14,9 +14,9 @@ import (
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil"
"storj.io/private/dbutil/pgutil"
"storj.io/private/tagsql"
)
const loopIteratorBatchSizeLimit = 2500

View File

@ -10,7 +10,7 @@ import (
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/private/tagsql"
"storj.io/private/tagsql"
)
// GetStreamPieceCountByNodeID contains arguments for GetStreamPieceCountByNodeID.

View File

@ -6,7 +6,7 @@ package metabase
import (
"github.com/zeebo/errs"
"storj.io/storj/private/tagsql"
"storj.io/private/tagsql"
)
func withRows(rows tagsql.Rows, err error) func(func(tagsql.Rows) error) error {

View File

@ -16,7 +16,7 @@ import (
"storj.io/common/memory"
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/private/dbutil"
"storj.io/private/dbutil"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/metabase/metaloop"
"storj.io/storj/satellite/metainfo/piecedeletion"

View File

@ -14,8 +14,8 @@ import (
"go.uber.org/zap/zaptest"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/private/dbutil/tempdb"
"storj.io/private/dbutil/pgtest"
"storj.io/private/dbutil/tempdb"
"storj.io/storj/satellite"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/satellitedb"

View File

@ -15,7 +15,7 @@ import (
"storj.io/common/memory"
"storj.io/common/uuid"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/private/dbutil/pgutil"
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/payments"
"storj.io/storj/satellite/payments/coinpayments"

View File

@ -11,11 +11,11 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/private/dbutil"
"storj.io/private/dbutil/pgutil"
"storj.io/private/tagsql"
"storj.io/storj/pkg/cache"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/migrate"
"storj.io/storj/private/tagsql"
"storj.io/storj/satellite"
"storj.io/storj/satellite/accounting"
"storj.io/storj/satellite/attribution"

View File

@ -5,5 +5,5 @@ package dbx
import (
// make sure we load our cockroach driver so dbx.Open can find it.
_ "storj.io/storj/private/dbutil/cockroachutil"
_ "storj.io/private/dbutil/cockroachutil"
)

View File

@ -10,9 +10,9 @@ import (
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil/cockroachutil"
"storj.io/storj/private/dbutil/txutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil/cockroachutil"
"storj.io/private/dbutil/txutil"
"storj.io/private/tagsql"
)
//go:generate sh gen.sh

View File

@ -6,7 +6,7 @@ dbx golang -d pgx -d pgxcockroach -p dbx -t templates satellitedb.dbx .
gofmt -r "*sql.Tx -> tagsql.Tx" -w satellitedb.dbx.go
gofmt -r "*sql.Rows -> tagsql.Rows" -w satellitedb.dbx.go
perl -0777 -pi \
-e 's,\t_ "github.com/jackc/pgx/v4/stdlib"\n\),\t_ "github.com/jackc/pgx/v4/stdlib"\n\n\t"storj.io/storj/private/tagsql"\n\),' \
-e 's,\t_ "github.com/jackc/pgx/v4/stdlib"\n\),\t_ "github.com/jackc/pgx/v4/stdlib"\n\n\t"storj.io/private/tagsql"\n\),' \
satellitedb.dbx.go
perl -0777 -pi \
-e 's/type DB struct \{\n\t\*sql\.DB/type DB struct \{\n\ttagsql.DB/' \

View File

@ -20,7 +20,7 @@ import (
"github.com/jackc/pgconn"
_ "github.com/jackc/pgx/v4/stdlib"
"storj.io/storj/private/tagsql"
"storj.io/private/tagsql"
)
// Prevent conditional imports from causing build failures.

View File

@ -14,8 +14,8 @@ import (
"github.com/zeebo/errs"
"storj.io/common/storj"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil/pgutil"
"storj.io/private/tagsql"
"storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/satellitedb/dbx"

View File

@ -10,11 +10,11 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/cockroachutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/private/dbutil"
"storj.io/private/dbutil/cockroachutil"
"storj.io/private/dbutil/pgutil"
"storj.io/private/tagsql"
"storj.io/storj/private/migrate"
"storj.io/storj/private/tagsql"
)
//go:generate go run migrate_gen.go

View File

@ -16,7 +16,7 @@ import (
"strconv"
"strings"
"storj.io/storj/private/dbutil/dbschema"
"storj.io/private/dbutil/dbschema"
)
func main() {

View File

@ -23,10 +23,10 @@ import (
"golang.org/x/sync/errgroup"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/dbschema"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/dbutil/tempdb"
"storj.io/private/dbutil/dbschema"
"storj.io/private/dbutil/pgtest"
"storj.io/private/dbutil/pgutil"
"storj.io/private/dbutil/tempdb"
"storj.io/storj/private/migrate"
"storj.io/storj/satellite/satellitedb"
"storj.io/storj/satellite/satellitedb/dbx"

View File

@ -14,8 +14,8 @@ import (
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/private/dbutil/pgutil"
"storj.io/private/version"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/satellite/overlay"
)

View File

@ -16,7 +16,7 @@ import (
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/private/dbutil/pgutil"
"storj.io/storj/satellite/orders"
"storj.io/storj/satellite/satellitedb/dbx"
)

View File

@ -19,10 +19,10 @@ import (
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/private/dbutil/cockroachutil"
"storj.io/private/dbutil/pgutil"
"storj.io/private/tagsql"
"storj.io/private/version"
"storj.io/storj/private/dbutil/cockroachutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/tagsql"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/satellitedb/dbx"
)

View File

@ -15,8 +15,8 @@ import (
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/uuid"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/private/dbutil"
"storj.io/private/dbutil/pgutil"
"storj.io/storj/satellite/accounting"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/orders"

View File

@ -11,7 +11,7 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil"
"storj.io/private/dbutil"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/satellitedb/dbx"
"storj.io/storj/storage"

View File

@ -17,10 +17,10 @@ import (
"go.uber.org/zap/zaptest"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/dbutil/tempdb"
"storj.io/private/dbutil"
"storj.io/private/dbutil/pgtest"
"storj.io/private/dbutil/pgutil"
"storj.io/private/dbutil/tempdb"
"storj.io/storj/satellite"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/satellitedb"

View File

@ -11,9 +11,9 @@ import (
"github.com/zeebo/errs"
"storj.io/common/storj"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/cockroachutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/private/dbutil"
"storj.io/private/dbutil/cockroachutil"
"storj.io/private/dbutil/pgutil"
"storj.io/storj/satellite/accounting"
"storj.io/storj/satellite/compensation"
"storj.io/storj/satellite/satellitedb/dbx"

View File

@ -13,11 +13,11 @@ import (
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/cockroachutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/dbutil/txutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil"
"storj.io/private/dbutil/cockroachutil"
"storj.io/private/dbutil/pgutil"
"storj.io/private/dbutil/txutil"
"storj.io/private/tagsql"
"storj.io/storj/storage"
"storj.io/storj/storage/cockroachkv/schema"
)

View File

@ -9,8 +9,8 @@ import (
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/cockroachutil"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/private/dbutil/cockroachutil"
"storj.io/private/dbutil/pgtest"
"storj.io/storj/storage/testsuite"
)

View File

@ -12,8 +12,8 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil/cockroachutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil/cockroachutil"
"storj.io/private/tagsql"
"storj.io/storj/storage"
)

View File

@ -9,8 +9,8 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil/pgutil"
"storj.io/private/tagsql"
)
// PrepareDB creates the pathdata tables if they don't already exist.

View File

@ -12,9 +12,9 @@ import (
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil"
"storj.io/private/dbutil/pgutil"
"storj.io/private/tagsql"
"storj.io/storj/storage"
"storj.io/storj/storage/postgreskv/schema"
)

View File

@ -14,8 +14,8 @@ import (
"github.com/zeebo/errs"
"storj.io/common/testcontext"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/private/tagsql"
"storj.io/private/dbutil/pgtest"
"storj.io/private/tagsql"
"storj.io/storj/storage"
"storj.io/storj/storage/testsuite"
)

Some files were not shown because too many files have changed in this diff.