fix: re-use bytes.Buffer using sync.Pool (#11156)

Harshavardhana 2020-12-22 23:22:37 -08:00 committed by GitHub
parent bfb92a27b7
commit 5982965839
3 changed files with 22 additions and 9 deletions
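
The commit replaces per-call "var buf bytes.Buffer" declarations with buffers borrowed from a package-level sync.Pool, so the metacache listing paths stop allocating a fresh buffer on every call. A minimal sketch of the pattern, assuming the pool shape introduced in this commit (bufferPool matches the diff; the surrounding function and its names are purely illustrative):

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufferPool mirrors the pool added in this commit; New runs only when
// the pool has no free buffer to hand out.
var bufferPool = sync.Pool{
	New: func() interface{} {
		return new(bytes.Buffer)
	},
}

// buildLine is a hypothetical caller showing the Get / use / Reset / Put lifecycle.
func buildLine(name string) string {
	buf := bufferPool.Get().(*bytes.Buffer)
	defer func() {
		buf.Reset()         // drop contents so the next borrower starts clean
		bufferPool.Put(buf) // hand the buffer back for re-use
	}()
	fmt.Fprintf(buf, "hello %s", name)
	return buf.String() // String returns a copy, so it survives the deferred Reset/Put
}

func main() {
	fmt.Println(buildLine("world"))
}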


@@ -113,6 +113,7 @@ func loadBucketMetaCache(ctx context.Context, bucket string) (*bucketMetacache,
 	// Use global context for this.
 	err := objAPI.GetObject(GlobalContext, minioMetaBucket, pathJoin("buckets", bucket, ".metacache", "index.s2"), 0, -1, w, "", ObjectOptions{})
 	logger.LogIf(ctx, w.CloseWithError(err))
+	wg.Wait()
 	if err != nil {
 		switch err.(type) {
 		case ObjectNotFound:
@@ -125,7 +126,6 @@ func loadBucketMetaCache(ctx context.Context, bucket string) (*bucketMetacache,
 		}
 		return newBucketMetacache(bucket, false), err
 	}
-	wg.Wait()
 	if decErr != nil {
 		if errors.Is(err, context.Canceled) {
 			return newBucketMetacache(bucket, false), err
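
In loadBucketMetaCache the wg.Wait() moves ahead of the error handling, so the goroutine decoding from the pipe has finished, and decErr is fully written, before any early return on ObjectNotFound or the other error cases. A rough, self-contained sketch of that ordering, with encoding/json standing in for the metacache decoder and a plain Write standing in for GetObject (all names below are illustrative, not MinIO's):

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"sync"
)

type payload struct {
	Bucket string `json:"bucket"`
}

func load() (payload, error) {
	r, w := io.Pipe()

	var (
		wg     sync.WaitGroup
		dec    payload
		decErr error
	)
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Stand-in for the metacache decode running on the read side of the pipe.
		decErr = json.NewDecoder(r).Decode(&dec)
	}()

	// Stand-in for GetObject streaming the index into the pipe.
	_, err := w.Write([]byte(`{"bucket":"test"}`))
	w.CloseWithError(err) // signal EOF (or the write error) to the decoder

	// Wait before inspecting err: the goroutine must have finished writing decErr,
	// and it must not be left running if we return early below.
	wg.Wait()

	if err != nil {
		return dec, err
	}
	if decErr != nil {
		return dec, decErr
	}
	return dec, nil
}

func main() {
	fmt.Println(load())
}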


@@ -432,7 +432,11 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
 	// We got a stream to start at.
 	loadedPart := 0
-	var buf bytes.Buffer
+	buf := bufferPool.Get().(*bytes.Buffer)
+	defer func() {
+		buf.Reset()
+		bufferPool.Put(buf)
+	}()
 	for {
 		select {
 		case <-ctx.Done():
@@ -482,7 +486,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
 			}
 		}
 		buf.Reset()
-		err := er.getObjectWithFileInfo(ctx, minioMetaBucket, o.objectPath(partN), 0, fi.Size, &buf, fi, metaArr, onlineDisks)
+		err := er.getObjectWithFileInfo(ctx, minioMetaBucket, o.objectPath(partN), 0, fi.Size, buf, fi, metaArr, onlineDisks)
 		if err != nil {
 			switch toObjectErr(err, minioMetaBucket, o.objectPath(partN)).(type) {
 			case ObjectNotFound:
@@ -498,7 +502,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
 				return entries, err
 			}
 		}
-		tmp, err := newMetacacheReader(&buf)
+		tmp, err := newMetacacheReader(buf)
 		if err != nil {
 			return entries, err
 		}
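
In streamMetadataParts, buf is now the *bytes.Buffer handed out by the pool rather than a local bytes.Buffer value, so the call sites (getObjectWithFileInfo, newMetacacheReader) pass buf directly; taking its address again would produce a **bytes.Buffer, which no longer satisfies io.Writer. A tiny standard-library-only sketch of that value-versus-pointer difference (names illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	var v bytes.Buffer     // value: pass &v wherever an io.Writer is expected
	p := new(bytes.Buffer) // pointer, as a sync.Pool would return: pass it as-is

	io.Copy(&v, strings.NewReader("value buffer"))
	io.Copy(p, strings.NewReader("pooled buffer"))
	// io.Copy(&p, ...) would not compile: **bytes.Buffer is not an io.Writer.

	fmt.Println(v.String(), p.String())
}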


@@ -745,6 +745,12 @@ type metacacheBlockWriter struct {
 	blockEntries int
 }
 
+var bufferPool = sync.Pool{
+	New: func() interface{} {
+		return new(bytes.Buffer)
+	},
+}
+
 // newMetacacheBlockWriter provides a streaming block writer.
 // Each block is the size of the capacity of the input channel.
 // The caller should close to indicate the stream has ended.
@@ -755,12 +761,15 @@ func newMetacacheBlockWriter(in <-chan metaCacheEntry, nextBlock func(b *metacac
 		defer w.wg.Done()
 		var current metacacheBlock
 		var n int
-		var buf bytes.Buffer
-		block := newMetacacheWriter(&buf, 1<<20)
+		buf := bufferPool.Get().(*bytes.Buffer)
+		defer func() {
+			buf.Reset()
+			bufferPool.Put(buf)
+		}()
+		block := newMetacacheWriter(buf, 1<<20)
 		defer block.Close()
 		finishBlock := func() {
-			err := block.Close()
-			if err != nil {
+			if err := block.Close(); err != nil {
 				w.streamErr = err
 				return
 			}
@@ -769,7 +778,7 @@ func newMetacacheBlockWriter(in <-chan metaCacheEntry, nextBlock func(b *metacac
 			// Prepare for next
 			current.n++
 			buf.Reset()
-			block.Reset(&buf)
+			block.Reset(buf)
 			current.First = ""
 		}
 		for o := range in {
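
In newMetacacheBlockWriter the pooled buffer lives for the whole writer goroutine and is rewound between blocks with buf.Reset() followed by block.Reset(buf). The same reset-and-reuse shape, sketched with compress/gzip standing in for the repository's metacache writer (the pool and the reuse loop mirror the diff; everything else is an assumption for illustration):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

var bufferPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// writeBlocks encodes each input as its own block into one shared, pooled buffer.
func writeBlocks(blocks [][]byte, flush func([]byte)) error {
	buf := bufferPool.Get().(*bytes.Buffer)
	defer func() {
		buf.Reset()
		bufferPool.Put(buf)
	}()

	zw := gzip.NewWriter(buf) // stand-in for newMetacacheWriter(buf, 1<<20)
	for _, b := range blocks {
		if _, err := zw.Write(b); err != nil {
			return err
		}
		if err := zw.Close(); err != nil { // finish the current block
			return err
		}
		flush(append([]byte(nil), buf.Bytes()...)) // copy out before reusing buf

		buf.Reset()   // rewind the shared buffer for the next block...
		zw.Reset(buf) // ...and point the writer at it again
	}
	return nil
}

func main() {
	err := writeBlocks([][]byte{[]byte("first"), []byte("second")}, func(b []byte) {
		fmt.Println("flushed block of", len(b), "bytes")
	})
	fmt.Println("err:", err)
}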