erasure: avoid io.Copy in hotpaths to reduce allocation (#11213)

This commit is contained in:
Harshavardhana 2021-01-03 16:27:34 -08:00 committed by GitHub
parent c4131c2798
commit c4b1d394d6
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 26 additions and 9 deletions

View file

@ -186,7 +186,6 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
readerIndex++
}
wg.Wait()
if p.canDecode(newBuf) {
p.offset += p.shardSize
if healRequired != 0 {

View file

@ -17,7 +17,6 @@
package cmd
import (
"bytes"
"context"
"io"
@ -77,20 +76,25 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
// from subsequent blocks.
offset = 0
}
// We have written all the blocks, write the last remaining block.
if write < int64(len(block)) {
n, err := io.Copy(dst, bytes.NewReader(block[:write]))
n, err := dst.Write(block[:write])
if err != nil {
if err != io.ErrClosedPipe {
logger.LogIf(ctx, err)
}
return 0, err
}
totalWritten += n
if int64(n) != write {
return 0, io.ErrShortWrite
}
totalWritten += int64(n)
break
}
// Copy the block.
n, err := io.Copy(dst, bytes.NewReader(block))
n, err := dst.Write(block)
if err != nil {
// The writer will be closed in case of range queries, which will emit ErrClosedPipe.
if err != io.ErrClosedPipe {
@ -99,11 +103,15 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
return 0, err
}
if n != len(block) {
return 0, io.ErrShortWrite
}
// Decrement output size.
write -= n
write -= int64(n)
// Increment written.
totalWritten += n
totalWritten += int64(n)
}
// Success.

View file

@ -432,6 +432,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
return
}
defer gr.Close()
objInfo := gr.ObjInfo
// filter object lock metadata if permission does not permit

View file

@ -319,7 +319,13 @@ func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) {
}
if globalIsErasure {
logger.Info(fmt.Sprintf("Verifying %d buckets are consistent across drives...", len(buckets)))
if len(buckets) > 0 {
if len(buckets) == 1 {
logger.Info(fmt.Sprintf("Verifying if %d bucket is consistent across drives...", len(buckets)))
} else {
logger.Info(fmt.Sprintf("Verifying if %d buckets are consistent across drives...", len(buckets)))
}
}
for _, bucket := range buckets {
if _, err = newObject.HealBucket(ctx, bucket.Name, madmin.HealOpts{}); err != nil {
return fmt.Errorf("Unable to list buckets to heal: %w", err)

View file

@ -64,6 +64,9 @@ const (
// Size of each buffer.
readAheadBufSize = 1 << 20
// Small file threshold below which the metadata accompanies the data.
smallFileThreshold = 32 * humanize.KiByte
// XL metadata file carries per object metadata.
xlStorageFormatFile = "xl.meta"
)
@ -1174,7 +1177,7 @@ func (s *xlStorage) ReadVersion(ctx context.Context, volume, path, versionID str
// - object has not yet transitioned
// - object size less than 32KiB
// - object has a maximum of 1 part
if fi.TransitionStatus == "" && fi.DataDir != "" && fi.Size < 32*humanize.KiByte && len(fi.Parts) == 1 {
if fi.TransitionStatus == "" && fi.DataDir != "" && fi.Size < smallFileThreshold && len(fi.Parts) == 1 {
fi.Data, err = s.readAllData(volumeDir, pathJoin(volumeDir, path, fi.DataDir, fmt.Sprintf("part.%d", fi.Parts[0].Number)))
if err != nil {
return fi, err