satellite/metabase: add piece size calculation to segment

This code is essentially a replacement for eestream.CalcPieceSize. To call
eestream.CalcPieceSize we need an eestream.RedundancyStrategy, which is not
trivial to get as it requires infectious.FEC. For example, infectious.FEC
creation is visible in the GE loop observer CPU profile because we were doing
this for each segment in the DB.

A new PieceSize method was added to storj.RedundancyScheme, and here we are
just wiring it up with the metabase Segment.

BenchmarkSegmentPieceSize
BenchmarkSegmentPieceSize/eestream.CalcPieceSize
BenchmarkSegmentPieceSize/eestream.CalcPieceSize-8         	    5822	    189189 ns/op	    9776 B/op	       8 allocs/op
BenchmarkSegmentPieceSize/segment.PieceSize
BenchmarkSegmentPieceSize/segment.PieceSize-8              	94721329	        11.49 ns/op	       0 B/op	       0 allocs/op
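
For reference, a minimal sketch of the arithmetic the new method performs,
assuming it mirrors eestream.CalcPieceSize (the exact implementation lives in
storj.io/common and is not part of this diff): the encrypted size plus a small
fixed overhead is rounded up to a whole number of stripes, and the encoded
size is then split across the required shares.

package main

import "fmt"

// pieceSize is a sketch only; the authoritative code is
// storj.RedundancyScheme.PieceSize in storj.io/common.
func pieceSize(encryptedSize int64, requiredShares int16, shareSize int32) int64 {
	const overhead = 4 // eestream.CalcPieceSize accounts for 4 bytes of padding overhead
	stripeSize := int64(requiredShares) * int64(shareSize)
	stripes := (encryptedSize + overhead + stripeSize - 1) / stripeSize // round up to whole stripes
	return stripes * stripeSize / int64(requiredShares)                 // bytes stored per piece
}

func main() {
	// Values from the benchmark above: 64 MiB segment, 29 required shares,
	// 256-byte erasure shares.
	fmt.Println(pieceSize(64*1024*1024, 29, 256))
}

Since this is pure integer arithmetic, it avoids constructing an infectious.FEC
per segment, which accounts for the roughly four orders of magnitude difference
in the benchmark above.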

Change-Id: I5a8b4237aedd1424c54ed0af448061a236b00295
Michal Niewrzal 2023-02-22 11:32:26 +01:00
parent 3abe7ac0da
commit 16b7901fde
18 changed files with 79 additions and 90 deletions

@ -382,7 +382,7 @@ func downloadSegment(ctx context.Context, log *zap.Logger, peer *satellite.Repai
}
esScheme := eestream.NewUnsafeRSScheme(fec, redundancy.ErasureShareSize())
pieceSize := eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancy)
pieceSize := segment.PieceSize()
expectedSize := pieceSize * int64(redundancy.RequiredCount())
ctx, cancel := context.WithCancel(ctx)

go.mod

@ -54,7 +54,7 @@ require (
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
gopkg.in/segmentio/analytics-go.v3 v3.1.0
gopkg.in/yaml.v3 v3.0.1
storj.io/common v0.0.0-20230214163549-c8518523a6f0
storj.io/common v0.0.0-20230221110830-7591b205266e
storj.io/drpc v0.0.33-0.20230204035225-c9649dee8f2a
storj.io/monkit-jaeger v0.0.0-20220915074555-d100d7589f41
storj.io/private v0.0.0-20230123202745-d3e63b336444

go.sum

@ -959,8 +959,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
storj.io/common v0.0.0-20220719163320-cd2ef8e1b9b0/go.mod h1:mCYV6Ud5+cdbuaxdPD5Zht/HYaIn0sffnnws9ErkrMQ=
storj.io/common v0.0.0-20230214163549-c8518523a6f0 h1:N7St0/39hhmSPtYybP6rrPwvjHrJUHggbFmbRNd0XUc=
storj.io/common v0.0.0-20230214163549-c8518523a6f0/go.mod h1:tDgoLthBVcrTPEokBgPdjrn39p/gyNx06j6ehhTSiUg=
storj.io/common v0.0.0-20230221110830-7591b205266e h1:DG9G8OpdSXDOY6ACaevayS2r7nf5flqyxrZtD00sBlQ=
storj.io/common v0.0.0-20230221110830-7591b205266e/go.mod h1:tDgoLthBVcrTPEokBgPdjrn39p/gyNx06j6ehhTSiUg=
storj.io/drpc v0.0.32/go.mod h1:6rcOyR/QQkSTX/9L5ZGtlZaE2PtXTTZl8d+ulSeeYEg=
storj.io/drpc v0.0.33-0.20230204035225-c9649dee8f2a h1:FBaOc8c5efmW3tmPsiGy07USMkOSu/tyYCZpu2ro0y8=
storj.io/drpc v0.0.33-0.20230204035225-c9649dee8f2a/go.mod h1:6rcOyR/QQkSTX/9L5ZGtlZaE2PtXTTZl8d+ulSeeYEg=

@ -23,7 +23,6 @@ import (
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/sync2"
"storj.io/common/testcontext"
"storj.io/common/testrand"
@ -36,7 +35,6 @@ import (
"storj.io/storj/satellite/satellitedb/satellitedbtest"
snorders "storj.io/storj/storagenode/orders"
"storj.io/uplink"
"storj.io/uplink/private/eestream"
)
func TestProjectUsageStorage(t *testing.T) {
@ -1080,27 +1078,17 @@ func TestProjectUsage_BandwidthDeadAllocation(t *testing.T) {
now := time.Now()
project := planet.Uplinks[0].Projects[0]
sat := planet.Satellites[0]
rs, err := eestream.NewRedundancyStrategyFromStorj(storj.RedundancyScheme{
RequiredShares: int16(sat.Config.Metainfo.RS.Min),
RepairShares: int16(sat.Config.Metainfo.RS.Repair),
OptimalShares: int16(sat.Config.Metainfo.RS.Success),
TotalShares: int16(sat.Config.Metainfo.RS.Total),
ShareSize: sat.Config.Metainfo.RS.ErasureShareSize.Int32(),
})
require.NoError(t, err)
dataSize := 4 * memory.MiB
data := testrand.Bytes(dataSize)
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path1", data)
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path1", data)
require.NoError(t, err)
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
require.NoError(t, err)
require.Len(t, segments, 1)
pieceSize := eestream.CalcPieceSize(int64(segments[0].EncryptedSize), rs)
pieceSize := segments[0].PieceSize()
reader, cleanFn, err := planet.Uplinks[0].DownloadStream(ctx, planet.Satellites[0], "testbucket", "test/path1")
require.NoError(t, err)

@ -22,7 +22,6 @@ import (
"storj.io/common/uuid"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/overlay"
"storj.io/uplink/private/eestream"
"storj.io/uplink/private/piecestore"
)
@ -176,12 +175,7 @@ func (reverifier *Reverifier) DoReverifyPiece(ctx context.Context, logger *zap.L
return OutcomeNotNecessary, reputation, nil
}
redundancy, err := eestream.NewRedundancyStrategyFromStorj(segment.Redundancy)
if err != nil {
return OutcomeNotPerformed, reputation, Error.Wrap(err)
}
pieceSize := eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancy)
pieceSize := segment.PieceSize()
limit, piecePrivateKey, cachedNodeInfo, err := reverifier.orders.CreateAuditPieceOrderLimit(ctx, locator.NodeID, uint16(locator.PieceNum), segment.RootPieceID, int32(pieceSize))
if err != nil {

@ -27,7 +27,6 @@ import (
"storj.io/storj/satellite/orders"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/reputation"
"storj.io/uplink/private/eestream"
)
// millis for the transfer queue building ticker.
@ -868,19 +867,13 @@ func (endpoint *Endpoint) generateExitStatusRequest(ctx context.Context, nodeID
func (endpoint *Endpoint) calculatePieceSize(ctx context.Context, segment metabase.Segment, incomplete *TransferQueueItem) (int64, error) {
nodeID := incomplete.NodeID
// calculate piece size
redundancy, err := eestream.NewRedundancyStrategyFromStorj(segment.Redundancy)
if err != nil {
return 0, Error.Wrap(err)
}
if len(segment.Pieces) > redundancy.OptimalThreshold() {
if len(segment.Pieces) > int(segment.Redundancy.OptimalShares) {
endpoint.log.Debug("segment has more pieces than required. removing node from segment.", zap.Stringer("node ID", nodeID), zap.Int32("piece num", incomplete.PieceNum))
return 0, ErrAboveOptimalThreshold.New("")
}
return eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancy), nil
return segment.PieceSize(), nil
}
func (endpoint *Endpoint) getValidSegment(ctx context.Context, streamID uuid.UUID, position metabase.SegmentPosition, originalRootPieceID storj.PieceID) (metabase.Segment, error) {

@ -12,7 +12,6 @@ import (
"storj.io/common/storj"
"storj.io/storj/satellite/metabase/rangedloop"
"storj.io/storj/satellite/metabase/segmentloop"
"storj.io/uplink/private/eestream"
)
var remoteSegmentFunc = mon.Task()
@ -71,22 +70,13 @@ func (collector *PathCollector) RemoteSegment(ctx context.Context, segment *segm
}
func (collector *PathCollector) handleRemoteSegment(ctx context.Context, segment *segmentloop.Segment) (err error) {
pieceSize := int64(-1)
numPieces := len(segment.Pieces)
for _, piece := range segment.Pieces {
if _, ok := collector.nodeIDStorage[piece.StorageNode]; !ok {
continue
}
// avoid creating new redundancy strategy for every segment piece
if pieceSize == -1 {
redundancy, err := eestream.NewRedundancyStrategyFromStorj(segment.Redundancy)
if err != nil {
return err
}
pieceSize = eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancy)
}
pieceSize := segment.PieceSize()
collector.nodeIDStorage[piece.StorageNode] += pieceSize

@ -9,9 +9,12 @@ import (
"github.com/stretchr/testify/require"
"storj.io/common/memory"
"storj.io/common/storj"
"storj.io/common/testrand"
"storj.io/common/uuid"
"storj.io/storj/satellite/metabase"
"storj.io/uplink/private/eestream"
)
func TestParseBucketPrefixInvalid(t *testing.T) {
@ -716,3 +719,30 @@ func TestPiecesUpdate(t *testing.T) {
})
}
}
func BenchmarkSegmentPieceSize(b *testing.B) {
segment := metabase.Segment{
EncryptedSize: 64 * memory.MiB.Int32(),
Redundancy: storj.RedundancyScheme{
Algorithm: storj.ReedSolomon,
RequiredShares: 29,
RepairShares: 35,
OptimalShares: 80,
TotalShares: 110,
ShareSize: 256,
},
}
b.Run("eestream.CalcPieceSize", func(b *testing.B) {
for k := 0; k < b.N; k++ {
redundancyScheme, _ := eestream.NewRedundancyStrategyFromStorj(segment.Redundancy)
_ = eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancyScheme)
}
})
b.Run("segment.PieceSize", func(b *testing.B) {
for k := 0; k < b.N; k++ {
_ = segment.PieceSize()
}
})
}

@ -52,6 +52,11 @@ func (s Segment) Expired(now time.Time) bool {
return s.ExpiresAt != nil && s.ExpiresAt.Before(now)
}
// PieceSize returns calculated piece size for segment.
func (s Segment) PieceSize() int64 {
return s.Redundancy.PieceSize(int64(s.EncryptedSize))
}
// GetObjectExactVersion contains arguments necessary for fetching an information
// about exact object version.
type GetObjectExactVersion struct {

@ -43,6 +43,11 @@ func (s *Segment) Expired(now time.Time) bool {
return s.ExpiresAt != nil && s.ExpiresAt.Before(now)
}
// PieceSize returns calculated piece size for segment.
func (s Segment) PieceSize() int64 {
return s.Redundancy.PieceSize(int64(s.EncryptedSize))
}
// Observer is an interface defining an observer that can subscribe to the segments loop.
//
// architecture: Observer

@ -64,7 +64,16 @@ func (endpoint *Endpoint) BeginSegment(ctx context.Context, req *pb.SegmentBegin
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
maxPieceSize := eestream.CalcPieceSize(req.MaxOrderLimit, redundancy)
config := endpoint.config
defaultRedundancy := storj.RedundancyScheme{
Algorithm: storj.ReedSolomon,
RequiredShares: int16(config.RS.Min),
RepairShares: int16(config.RS.Repair),
OptimalShares: int16(config.RS.Success),
TotalShares: int16(config.RS.Total),
ShareSize: config.RS.ErasureShareSize.Int32(),
}
maxPieceSize := defaultRedundancy.PieceSize(req.MaxOrderLimit)
request := overlay.FindStorageNodesRequest{
RequestedCount: redundancy.TotalCount(),

@ -15,7 +15,6 @@ import (
"storj.io/common/signing"
"storj.io/common/storj"
"storj.io/storj/satellite/overlay"
"storj.io/uplink/private/eestream"
)
var (
@ -62,12 +61,7 @@ func (service *Service) VerifySizes(ctx context.Context, redundancy storj.Redund
return Error.New("no remote pieces")
}
redundancyScheme, err := eestream.NewRedundancyStrategyFromStorj(redundancy)
if err != nil {
return Error.New("invalid redundancy strategy: %v", err)
}
expectedSize := eestream.CalcPieceSize(encryptedSize, redundancyScheme)
expectedSize := redundancy.PieceSize(encryptedSize)
if expectedSize != commonSize {
return Error.New("expected size is different from provided (%d != %d)", expectedSize, commonSize)
}

@ -19,7 +19,6 @@ import (
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/overlay"
"storj.io/uplink/private/eestream"
)
var (
@ -129,11 +128,7 @@ func (service *Service) updateBandwidth(ctx context.Context, bucket metabase.Buc
func (service *Service) CreateGetOrderLimits(ctx context.Context, bucket metabase.BucketLocation, segment metabase.Segment, desiredNodes int32, overrideLimit int64) (_ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
redundancy, err := eestream.NewRedundancyStrategyFromStorj(segment.Redundancy)
if err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
orderLimit := eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancy)
orderLimit := segment.PieceSize()
if overrideLimit > 0 && overrideLimit < orderLimit {
orderLimit = overrideLimit
}
@ -176,9 +171,9 @@ func (service *Service) CreateGetOrderLimits(ctx context.Context, bucket metabas
break
}
}
if len(signer.AddressedLimits) < redundancy.RequiredCount() {
if len(signer.AddressedLimits) < int(segment.Redundancy.RequiredShares) {
mon.Meter("download_failed_not_enough_pieces_uplink").Mark(1) //mon:locked
return nil, storj.PiecePrivateKey{}, ErrDownloadFailedNotEnoughPieces.New("not enough orderlimits: got %d, required %d", len(signer.AddressedLimits), redundancy.RequiredCount())
return nil, storj.PiecePrivateKey{}, ErrDownloadFailedNotEnoughPieces.New("not enough orderlimits: got %d, required %d", len(signer.AddressedLimits), segment.Redundancy.RequiredShares)
}
if err := service.updateBandwidth(ctx, bucket, signer.AddressedLimits...); err != nil {
@ -404,13 +399,8 @@ func (service *Service) createAuditOrderLimitWithSigner(ctx context.Context, nod
func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, bucket metabase.BucketLocation, segment metabase.Segment, healthy metabase.Pieces) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, cachedNodesInfo map[storj.NodeID]overlay.NodeReputation, err error) {
defer mon.Task()(&ctx)(&err)
redundancy, err := eestream.NewRedundancyStrategyFromStorj(segment.Redundancy)
if err != nil {
return nil, storj.PiecePrivateKey{}, nil, Error.Wrap(err)
}
pieceSize := eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancy)
totalPieces := redundancy.TotalCount()
pieceSize := segment.PieceSize()
totalPieces := segment.Redundancy.TotalShares
nodeIDs := make([]storj.NodeID, len(segment.Pieces))
for i, piece := range segment.Pieces {
@ -450,8 +440,8 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, bucket m
limitsCount++
}
if limitsCount < redundancy.RequiredCount() {
err = ErrDownloadFailedNotEnoughPieces.New("not enough nodes available: got %d, required %d", limitsCount, redundancy.RequiredCount())
if limitsCount < int(segment.Redundancy.RequiredShares) {
err = ErrDownloadFailedNotEnoughPieces.New("not enough nodes available: got %d, required %d", limitsCount, segment.Redundancy.RequiredShares)
return nil, storj.PiecePrivateKey{}, nil, errs.Combine(err, nodeErrors.Err())
}
@ -463,14 +453,10 @@ func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, bucket m
defer mon.Task()(&ctx)(&err)
// Create the order limits for being used to upload the repaired pieces
redundancy, err := eestream.NewRedundancyStrategyFromStorj(segment.Redundancy)
if err != nil {
return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
pieceSize := eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancy)
pieceSize := segment.PieceSize()
totalPieces := redundancy.TotalCount()
totalPiecesAfterRepair := int(math.Ceil(float64(redundancy.OptimalThreshold())*optimalThresholdMultiplier)) + numPiecesInExcludedCountries
totalPieces := int(segment.Redundancy.TotalShares)
totalPiecesAfterRepair := int(math.Ceil(float64(segment.Redundancy.OptimalShares)*optimalThresholdMultiplier)) + numPiecesInExcludedCountries
if totalPiecesAfterRepair > totalPieces {
totalPiecesAfterRepair = totalPieces

@ -635,11 +635,6 @@ func (repairer *SegmentRepairer) AdminFetchPieces(ctx context.Context, seg *meta
return nil, errs.New("cannot download an inline segment")
}
redundancy, err := eestream.NewRedundancyStrategyFromStorj(seg.Redundancy)
if err != nil {
return nil, errs.New("invalid redundancy strategy: %w", err)
}
if len(seg.Pieces) < int(seg.Redundancy.RequiredShares) {
return nil, errs.New("segment only has %d pieces; needs %d for reconstruction", seg.Pieces, seg.Redundancy.RequiredShares)
}
@ -651,10 +646,10 @@ func (repairer *SegmentRepairer) AdminFetchPieces(ctx context.Context, seg *meta
return nil, errs.New("could not create order limits: %w", err)
}
pieceSize := eestream.CalcPieceSize(int64(seg.EncryptedSize), redundancy)
pieceSize := seg.PieceSize()
pieceInfos = make([]AdminFetchInfo, len(getOrderLimits))
limiter := sync2.NewLimiter(redundancy.RequiredCount())
limiter := sync2.NewLimiter(int(seg.Redundancy.RequiredShares))
for currentLimitIndex, limit := range getOrderLimits {
if limit == nil {

@ -9,7 +9,7 @@ require (
github.com/zeebo/errs v1.3.0
go.uber.org/zap v1.21.0
golang.org/x/sync v0.1.0
storj.io/common v0.0.0-20230214163549-c8518523a6f0
storj.io/common v0.0.0-20230221110830-7591b205266e
storj.io/private v0.0.0-20230123202745-d3e63b336444
storj.io/storj v1.63.1
storj.io/storjscan v0.0.0-20220926140643-1623c3b391b0

@ -1217,8 +1217,8 @@ rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
storj.io/common v0.0.0-20220719163320-cd2ef8e1b9b0/go.mod h1:mCYV6Ud5+cdbuaxdPD5Zht/HYaIn0sffnnws9ErkrMQ=
storj.io/common v0.0.0-20230214163549-c8518523a6f0 h1:N7St0/39hhmSPtYybP6rrPwvjHrJUHggbFmbRNd0XUc=
storj.io/common v0.0.0-20230214163549-c8518523a6f0/go.mod h1:tDgoLthBVcrTPEokBgPdjrn39p/gyNx06j6ehhTSiUg=
storj.io/common v0.0.0-20230221110830-7591b205266e h1:DG9G8OpdSXDOY6ACaevayS2r7nf5flqyxrZtD00sBlQ=
storj.io/common v0.0.0-20230221110830-7591b205266e/go.mod h1:tDgoLthBVcrTPEokBgPdjrn39p/gyNx06j6ehhTSiUg=
storj.io/drpc v0.0.32/go.mod h1:6rcOyR/QQkSTX/9L5ZGtlZaE2PtXTTZl8d+ulSeeYEg=
storj.io/drpc v0.0.33-0.20230204035225-c9649dee8f2a h1:FBaOc8c5efmW3tmPsiGy07USMkOSu/tyYCZpu2ro0y8=
storj.io/drpc v0.0.33-0.20230204035225-c9649dee8f2a/go.mod h1:6rcOyR/QQkSTX/9L5ZGtlZaE2PtXTTZl8d+ulSeeYEg=

@ -10,7 +10,7 @@ require (
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.0
go.uber.org/zap v1.23.0
storj.io/common v0.0.0-20230214163549-c8518523a6f0
storj.io/common v0.0.0-20230221110830-7591b205266e
storj.io/gateway-mt v1.46.0
storj.io/private v0.0.0-20230123202745-d3e63b336444
storj.io/storj v0.12.1-0.20221125175451-ef4b564b82f7

@ -1921,8 +1921,8 @@ sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
storj.io/common v0.0.0-20220719163320-cd2ef8e1b9b0/go.mod h1:mCYV6Ud5+cdbuaxdPD5Zht/HYaIn0sffnnws9ErkrMQ=
storj.io/common v0.0.0-20230214163549-c8518523a6f0 h1:N7St0/39hhmSPtYybP6rrPwvjHrJUHggbFmbRNd0XUc=
storj.io/common v0.0.0-20230214163549-c8518523a6f0/go.mod h1:tDgoLthBVcrTPEokBgPdjrn39p/gyNx06j6ehhTSiUg=
storj.io/common v0.0.0-20230221110830-7591b205266e h1:DG9G8OpdSXDOY6ACaevayS2r7nf5flqyxrZtD00sBlQ=
storj.io/common v0.0.0-20230221110830-7591b205266e/go.mod h1:tDgoLthBVcrTPEokBgPdjrn39p/gyNx06j6ehhTSiUg=
storj.io/dotworld v0.0.0-20210324183515-0d11aeccd840 h1:oqMwoF6vaOrCe92SKRyr8cc2WSjLYAd8fjpAHA7rNqY=
storj.io/drpc v0.0.32/go.mod h1:6rcOyR/QQkSTX/9L5ZGtlZaE2PtXTTZl8d+ulSeeYEg=
storj.io/drpc v0.0.33-0.20230204035225-c9649dee8f2a h1:FBaOc8c5efmW3tmPsiGy07USMkOSu/tyYCZpu2ro0y8=