XL/fs: initObjectLayer should cleanup tmpMetaPrefix in parallel. (#1752)

Fixes #1747
Authored by Harshavardhana on 2016-05-25 01:33:39 -07:00; committed by Harshavardhana
parent ee6645f421
commit a9e778f460
4 changed files with 116 additions and 69 deletions

View file

@@ -16,25 +16,58 @@
 package main
 
-import "strings"
+import (
+	"strings"
+	"sync"
+)
 
 // Common initialization needed for both object layers.
 func initObjectLayer(storageDisks ...StorageAPI) error {
 	// This happens for the first time, but keep this here since this
 	// is the only place where it can be made expensive optimizing all
 	// other calls. Create minio meta volume, if it doesn't exist yet.
-	for _, storage := range storageDisks {
-		if err := storage.MakeVol(minioMetaBucket); err != nil {
-			if err != errVolumeExists && err != errDiskNotFound {
-				return toObjectErr(err, minioMetaBucket)
-			}
-		}
-		// Cleanup all temp entries upon start.
-		err := cleanupDir(storage, minioMetaBucket, tmpMetaPrefix)
-		if err != nil {
-			return toObjectErr(err, minioMetaBucket, tmpMetaPrefix)
-		}
-	}
+	var wg = &sync.WaitGroup{}
+
+	// Initialize errs to collect errors inside go-routine.
+	var errs = make([]error, len(storageDisks))
+
+	// Initialize all disks in parallel.
+	for index, disk := range storageDisks {
+		wg.Add(1)
+		go func(index int, disk StorageAPI) {
+			// Indicate this wait group is done.
+			defer wg.Done()
+
+			// Attempt to create `.minio`.
+			err := disk.MakeVol(minioMetaBucket)
+			if err != nil {
+				if err != errVolumeExists && err != errDiskNotFound {
+					errs[index] = err
+					return
+				}
+			}
+			// Cleanup all temp entries upon start.
+			err = cleanupDir(disk, minioMetaBucket, tmpMetaPrefix)
+			if err != nil {
+				errs[index] = err
+				return
+			}
+			errs[index] = nil
+		}(index, disk)
+	}
+
+	// Wait for all cleanup to finish.
+	wg.Wait()
+
+	// Return upon first error.
+	for _, err := range errs {
+		if err == nil {
+			continue
+		}
+		return toObjectErr(err, minioMetaBucket, tmpMetaPrefix)
+	}
+
 	// Return success here.
 	return nil
 }
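The heart of this change is the fan-out above: one goroutine per disk, a pre-sized errs slice so each goroutine writes only its own index (no mutex needed), and wg.Wait() before any error is read. A minimal, self-contained sketch of the same pattern, with a stand-in storage interface and disk types invented here for illustration (they are not minio's StorageAPI):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// storage is a stand-in for minio's StorageAPI, reduced to the two
// calls initObjectLayer needs.
type storage interface {
	MakeVol(volume string) error
	CleanupTmp() error
}

// initAll fans out one goroutine per disk and collects per-disk errors.
func initAll(disks []storage) error {
	var wg sync.WaitGroup
	errs := make([]error, len(disks)) // one slot per goroutine, no locking needed
	for i, d := range disks {
		wg.Add(1)
		go func(i int, d storage) {
			defer wg.Done()
			if err := d.MakeVol(".minio"); err != nil {
				errs[i] = err
				return
			}
			errs[i] = d.CleanupTmp()
		}(i, d)
	}
	wg.Wait() // all goroutines done; errs is now safe to read
	for _, err := range errs {
		if err != nil {
			return err // surface the first failure
		}
	}
	return nil
}

type okDisk struct{}

func (okDisk) MakeVol(string) error { return nil }
func (okDisk) CleanupTmp() error    { return nil }

type badDisk struct{}

func (badDisk) MakeVol(string) error { return errors.New("disk unreachable") }
func (badDisk) CleanupTmp() error    { return nil }

func main() {
	fmt.Println(initAll([]storage{okDisk{}, badDisk{}, okDisk{}})) // prints: disk unreachable
}

Writing errs[index] instead of sharing one err variable is what makes the fan-out race-free; the slice is only read after wg.Wait() establishes a happens-before edge.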

View file

@@ -208,28 +208,45 @@ func (xl xlObjects) isObject(bucket, prefix string) bool {
 	return true
 }
 
+// statPart - stat a part file.
+func (xl xlObjects) statPart(bucket, objectPart string) (fileInfo FileInfo, err error) {
+	// Count for errors encountered.
+	var xlJSONErrCount = 0
+
+	// Return the first success entry based on the selected random disk.
+	for xlJSONErrCount < len(xl.storageDisks) {
+		// Choose a random disk on each attempt, do not hit the same disk all the time.
+		disk := xl.getRandomDisk() // Pick a random disk.
+		fileInfo, err = disk.StatFile(bucket, objectPart)
+		if err == nil {
+			return fileInfo, nil
+		}
+		xlJSONErrCount++ // Update error count.
+	}
+	return FileInfo{}, err
+}
+
 // readXLMetadata - read xl metadata.
-func readXLMetadata(disk StorageAPI, bucket, object string) (xlMeta xlMetaV1, err error) {
-	r, err := disk.ReadFile(bucket, path.Join(object, xlMetaJSONFile), int64(0))
-	if err != nil {
-		return xlMetaV1{}, err
-	}
-	defer r.Close()
-	_, err = xlMeta.ReadFrom(r)
-	if err != nil {
-		return xlMetaV1{}, err
-	}
-	return xlMeta, nil
-}
-
-// deleteXLJson - delete `xl.json` on all disks.
-func (xl xlObjects) deleteXLMetadata(bucket, object string) error {
-	return xl.deleteObject(bucket, path.Join(object, xlMetaJSONFile))
-}
-
-// renameXLJson - rename `xl.json` on all disks.
-func (xl xlObjects) renameXLMetadata(srcBucket, srcPrefix, dstBucket, dstPrefix string) error {
-	return xl.renameObject(srcBucket, path.Join(srcPrefix, xlMetaJSONFile), dstBucket, path.Join(dstPrefix, xlMetaJSONFile))
-}
+func (xl xlObjects) readXLMetadata(bucket, object string) (xlMeta xlMetaV1, err error) {
+	// Count for errors encountered.
+	var xlJSONErrCount = 0
+
+	// Return the first success entry based on the selected random disk.
+	for xlJSONErrCount < len(xl.storageDisks) {
+		var r io.ReadCloser
+		// Choose a random disk on each attempt, do not hit the same disk all the time.
+		disk := xl.getRandomDisk() // Pick a random disk.
+		r, err = disk.ReadFile(bucket, path.Join(object, xlMetaJSONFile), int64(0))
+		if err == nil {
+			defer r.Close()
+			_, err = xlMeta.ReadFrom(r)
+			if err == nil {
+				return xlMeta, nil
+			}
+		}
+		xlJSONErrCount++ // Update error count.
+	}
+	return xlMetaV1{}, err
+}
 
 // getDiskDistribution - get disk distribution.
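statPart and readXLMetadata above share one retry shape: pick a random disk, try it, and give up after len(xl.storageDisks) attempts. A hedged sketch of that shape in isolation; the disk type and errFaulty below are illustrative stand-ins, not minio types:

package main

import (
	"errors"
	"fmt"
	"math/rand"
)

var errFaulty = errors.New("faulty disk")

// disk is a stand-in with a single read operation.
type disk struct{ healthy bool }

func (d disk) read() (string, error) {
	if !d.healthy {
		return "", errFaulty
	}
	return "xl.json contents", nil
}

// readAny tries up to len(disks) randomly chosen disks and returns the
// first successful read.
func readAny(disks []disk) (s string, err error) {
	for attempts := 0; attempts < len(disks); attempts++ {
		d := disks[rand.Intn(len(disks))] // do not hammer one fixed disk
		if s, err = d.read(); err == nil {
			return s, nil
		}
	}
	return "", err // last error seen after exhausting attempts
}

func main() {
	fmt.Println(readAny([]disk{{healthy: false}, {healthy: true}}))
}

Because the pick is random with replacement, the loop bounds the number of attempts rather than guaranteeing every disk is tried once. Note also that the new readXLMetadata defers r.Close() inside its retry loop; deferred calls only run at function return, so a reader from a failed ReadFrom attempt stays open until the method exits.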

View file

@@ -86,14 +86,15 @@ func (xl xlObjects) newMultipartUploadCommon(bucket string, object string, meta
 	if err = xl.writeXLMetadata(minioMetaBucket, tempUploadIDPath, xlMeta); err != nil {
 		return "", toObjectErr(err, minioMetaBucket, tempUploadIDPath)
 	}
-	if err = xl.renameXLMetadata(minioMetaBucket, tempUploadIDPath, minioMetaBucket, uploadIDPath); err != nil {
-		if dErr := xl.deleteXLMetadata(minioMetaBucket, tempUploadIDPath); dErr != nil {
+	rErr := xl.renameObject(minioMetaBucket, tempUploadIDPath, minioMetaBucket, uploadIDPath)
+	if rErr == nil {
+		if dErr := xl.deleteObject(minioMetaBucket, tempUploadIDPath); dErr != nil {
 			return "", toObjectErr(dErr, minioMetaBucket, tempUploadIDPath)
 		}
-		return "", toObjectErr(err, minioMetaBucket, uploadIDPath)
+		// Return success.
+		return uploadID, nil
 	}
-	// Return success.
-	return uploadID, nil
+	return "", toObjectErr(rErr, minioMetaBucket, uploadIDPath)
 }
 
 // NewMultipartUpload - initialize a new multipart upload, returns a unique id.
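The hunk above keeps the write-temp-then-rename idiom but inverts the branch: it handles the success path (rErr == nil) first, cleans up the temporary upload-ID entry, and falls through to the error return. For readers unfamiliar with the idiom, a minimal single-disk sketch using plain files; os.CreateTemp and os.Rename here stand in for the per-disk writeXLMetadata/renameObject calls and are chosen only for illustration:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// commitFile writes data to a temp file first, then renames it into
// place, so readers never observe a half-written destination.
func commitFile(dir, name string, data []byte) error {
	tmp, err := os.CreateTemp(dir, name+".tmp-*")
	if err != nil {
		return err
	}
	tmpPath := tmp.Name()
	if _, err = tmp.Write(data); err != nil {
		tmp.Close()
		os.Remove(tmpPath) // best-effort cleanup of the temp entry
		return err
	}
	if err = tmp.Close(); err != nil {
		os.Remove(tmpPath)
		return err
	}
	// Rename is atomic on POSIX filesystems within one volume.
	return os.Rename(tmpPath, filepath.Join(dir, name))
}

func main() {
	dir, _ := os.MkdirTemp("", "demo")
	defer os.RemoveAll(dir)
	fmt.Println(commitFile(dir, "uploads.json", []byte(`{"uploadId":"abc"}`)))
}

In the XL layer the rename spans many disks, so even a successful renameObject is followed by an explicit deleteObject of the temporary path, presumably to clear entries left behind on individual disks; anything still missed is swept at startup by the parallel cleanupDir of tmpMetaPrefix in the first hunk.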
@@ -129,7 +130,7 @@ func (xl xlObjects) putObjectPartCommon(bucket string, object string, uploadID s
 	tmpPartPath := path.Join(tmpMetaPrefix, bucket, object, uploadID, partSuffix)
 	fileWriter, err := xl.erasureDisk.CreateFile(minioMetaBucket, tmpPartPath)
 	if err != nil {
-		return "", toObjectErr(err, bucket, object)
+		return "", toObjectErr(err, minioMetaBucket, tmpPartPath)
 	}
 
 	// Initialize md5 writer.
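The one-line fix above matters because toObjectErr folds its trailing string parameters into the error it returns, so reporting bucket/object when the failure actually happened under minioMetaBucket/tmpPartPath pointed at the wrong path. A hedged illustration of that idiom; the error types and mapping below are invented stand-ins, not minio's actual toObjectErr:

package main

import (
	"errors"
	"fmt"
)

var errDiskFull = errors.New("disk full")

// StorageFull is a typed object-layer error carrying location context.
type StorageFull struct{ Bucket, Object string }

func (e StorageFull) Error() string {
	return fmt.Sprintf("storage full: %s/%s", e.Bucket, e.Object)
}

// toObjErr converts a low-level storage error into an object-layer
// error annotated with whatever path context the caller supplies.
func toObjErr(err error, params ...string) error {
	if errors.Is(err, errDiskFull) {
		e := StorageFull{}
		if len(params) > 0 {
			e.Bucket = params[0]
		}
		if len(params) > 1 {
			e.Object = params[1]
		}
		return e
	}
	return err
}

func main() {
	// Report the path that actually failed (the temp part path),
	// not the user-visible bucket/object, as the hunk above fixes.
	fmt.Println(toObjErr(errDiskFull, ".minio", "tmp/uploads/part.1"))
}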
@@ -184,7 +185,7 @@ func (xl xlObjects) putObjectPartCommon(bucket string, object string, uploadID s
 	}
 	uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
 
-	xlMeta, err := readXLMetadata(xl.getRandomDisk(), minioMetaBucket, uploadIDPath)
+	xlMeta, err := xl.readXLMetadata(minioMetaBucket, uploadIDPath)
 	if err != nil {
 		return "", toObjectErr(err, minioMetaBucket, uploadIDPath)
 	}
@@ -230,9 +231,8 @@ func (xl xlObjects) listObjectPartsCommon(bucket, object, uploadID string, partN
 	defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID))
 	result := ListPartsInfo{}
 
-	disk := xl.getRandomDisk() // Pick a random disk and read `xl.json` from there.
 	uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
-	xlMeta, err := readXLMetadata(disk, minioMetaBucket, uploadIDPath)
+	xlMeta, err := xl.readXLMetadata(minioMetaBucket, uploadIDPath)
 	if err != nil {
 		return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, uploadIDPath)
 	}
@@ -261,9 +261,9 @@ func (xl xlObjects) listObjectPartsCommon(bucket, object, uploadID string, partN
 	}
 	count := maxParts
 	for _, part := range parts {
-		var fi FileInfo
 		partNamePath := path.Join(mpartMetaPrefix, bucket, object, uploadID, part.Name)
-		fi, err = disk.StatFile(minioMetaBucket, partNamePath)
+		var fi FileInfo
+		fi, err = xl.statPart(minioMetaBucket, partNamePath)
 		if err != nil {
 			return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, partNamePath)
 		}
@@ -327,7 +327,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
 	uploadIDPath := pathJoin(mpartMetaPrefix, bucket, object, uploadID)
 
 	// Read the current `xl.json`.
-	xlMeta, err := readXLMetadata(xl.getRandomDisk(), minioMetaBucket, uploadIDPath)
+	xlMeta, err := xl.readXLMetadata(minioMetaBucket, uploadIDPath)
 	if err != nil {
 		return "", toObjectErr(err, minioMetaBucket, uploadIDPath)
 	}

View file

@@ -25,13 +25,19 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64) (io.Read
 	if !IsValidObjectName(object) {
 		return nil, ObjectNameInvalid{Bucket: bucket, Object: object}
 	}
+	// Lock the object before reading.
 	nsMutex.RLock(bucket, object)
 	defer nsMutex.RUnlock(bucket, object)
 	fileReader, fileWriter := io.Pipe()
-	xlMeta, err := readXLMetadata(xl.getRandomDisk(), bucket, object)
+
+	// Read metadata associated with the object.
+	xlMeta, err := xl.readXLMetadata(bucket, object)
 	if err != nil {
 		return nil, toObjectErr(err, bucket, object)
 	}
+
+	// Get part index offset.
 	partIndex, offset, err := xlMeta.getPartIndexOffset(startOffset)
 	if err != nil {
 		return nil, toObjectErr(err, bucket, object)
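getPartIndexOffset is not part of this diff; from its call site it evidently maps an absolute read offset to the index of the part containing it, plus the offset within that part. A sketch under that assumption (the part sizes and the error value below are illustrative):

package main

import (
	"errors"
	"fmt"
)

var errOutOfRange = errors.New("offset beyond object size")

// partIndexOffset walks the part sizes and returns which part holds
// the absolute byte offset, plus the offset relative to that part.
func partIndexOffset(partSizes []int64, startOffset int64) (int, int64, error) {
	remaining := startOffset
	for i, size := range partSizes {
		if remaining < size {
			return i, remaining, nil
		}
		remaining -= size
	}
	return 0, 0, errOutOfRange
}

func main() {
	sizes := []int64{5 << 20, 5 << 20, 1 << 20} // three parts: 5MiB, 5MiB, 1MiB
	fmt.Println(partIndexOffset(sizes, 6<<20))  // part 1, offset 1MiB within it
}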
@@ -90,33 +96,24 @@ func (xl xlObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
 	return info, nil
 }
 
 // getObjectInfo - get object info.
 func (xl xlObjects) getObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
-	// Count for errors encountered.
-	var xlJSONErrCount = 0
-
-	// Return the first success entry based on the selected random disk.
-	for xlJSONErrCount < len(xl.storageDisks) {
-		// Choose a random disk on each attempt, do not hit the same disk all the time.
-		disk := xl.getRandomDisk() // Pick a random disk.
-		var xlMeta xlMetaV1
-		xlMeta, err = readXLMetadata(disk, bucket, object)
-		if err == nil {
-			objInfo = ObjectInfo{}
-			objInfo.IsDir = false
-			objInfo.Bucket = bucket
-			objInfo.Name = object
-			objInfo.Size = xlMeta.Stat.Size
-			objInfo.ModTime = xlMeta.Stat.ModTime
-			objInfo.MD5Sum = xlMeta.Meta["md5Sum"]
-			objInfo.ContentType = xlMeta.Meta["content-type"]
-			objInfo.ContentEncoding = xlMeta.Meta["content-encoding"]
-			return objInfo, nil
-		}
-		xlJSONErrCount++ // Update error count.
-	}
-	// Return error at the end.
-	return ObjectInfo{}, err
+	var xlMeta xlMetaV1
+	xlMeta, err = xl.readXLMetadata(bucket, object)
+	if err != nil {
+		// Return error.
+		return ObjectInfo{}, err
+	}
+	objInfo = ObjectInfo{}
+	objInfo.IsDir = false
+	objInfo.Bucket = bucket
+	objInfo.Name = object
+	objInfo.Size = xlMeta.Stat.Size
+	objInfo.ModTime = xlMeta.Stat.ModTime
+	objInfo.MD5Sum = xlMeta.Meta["md5Sum"]
+	objInfo.ContentType = xlMeta.Meta["content-type"]
+	objInfo.ContentEncoding = xlMeta.Meta["content-encoding"]
+	return objInfo, nil
 }
 
 // renameObject - renaming all source objects to destination object across all disks.
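For reference, the field mapping the rewritten getObjectInfo performs, isolated into runnable form; the xlMetaV1 and ObjectInfo types below are pared-down stand-ins carrying only the fields the mapping touches:

package main

import (
	"fmt"
	"time"
)

// Pared-down stand-ins for the fields getObjectInfo reads.
type xlStat struct {
	Size    int64
	ModTime time.Time
}

type xlMetaV1 struct {
	Stat xlStat
	Meta map[string]string
}

type ObjectInfo struct {
	Bucket, Name                          string
	IsDir                                 bool
	Size                                  int64
	ModTime                               time.Time
	MD5Sum, ContentType, ContentEncoding string
}

// toObjectInfo mirrors the mapping in the new getObjectInfo body.
func toObjectInfo(bucket, object string, m xlMetaV1) ObjectInfo {
	return ObjectInfo{
		Bucket:          bucket,
		Name:            object,
		IsDir:           false,
		Size:            m.Stat.Size,
		ModTime:         m.Stat.ModTime,
		MD5Sum:          m.Meta["md5Sum"],
		ContentType:     m.Meta["content-type"],
		ContentEncoding: m.Meta["content-encoding"],
	}
}

func main() {
	m := xlMetaV1{
		Stat: xlStat{Size: 1024, ModTime: time.Now()},
		Meta: map[string]string{"md5Sum": "d41d8cd9", "content-type": "text/plain"},
	}
	fmt.Printf("%+v\n", toObjectInfo("photos", "cat.png", m))
}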