diff --git a/Dockerfile.arm.release b/Dockerfile.arm.release index 3e1f64192..f5e8368b8 100644 --- a/Dockerfile.arm.release +++ b/Dockerfile.arm.release @@ -9,7 +9,7 @@ ENV GO111MODULE on RUN \ apk add --no-cache git 'curl>7.61.0' && \ git clone https://github.com/minio/minio && \ - curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static . + curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static . FROM arm32v7/alpine:3.10 diff --git a/Dockerfile.arm64.release b/Dockerfile.arm64.release index 711886821..c456ac9ac 100644 --- a/Dockerfile.arm64.release +++ b/Dockerfile.arm64.release @@ -9,7 +9,7 @@ ENV GO111MODULE on RUN \ apk add --no-cache git 'curl>7.61.0' && \ git clone https://github.com/minio/minio && \ - curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static . + curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static . FROM arm64v8/alpine:3.10 diff --git a/Dockerfile.dev.browser b/Dockerfile.dev.browser index da38e5986..fbdbfed31 100644 --- a/Dockerfile.dev.browser +++ b/Dockerfile.dev.browser @@ -10,4 +10,3 @@ ENV PATH=$PATH:/root/go/bin RUN go get github.com/go-bindata/go-bindata/go-bindata && \ go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs - diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index 1aeaa5391..03964b27b 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -631,7 +631,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) { } // Check if this setup has an erasure coded backend. - if !globalIsXL { + if !globalIsErasure { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL) return } @@ -779,7 +779,7 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r * } // Check if this setup has an erasure coded backend. - if !globalIsXL { + if !globalIsErasure { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL) return } @@ -789,7 +789,7 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r * // Get local heal status first bgHealStates = append(bgHealStates, getLocalBackgroundHealStatus()) - if globalIsDistXL { + if globalIsDistErasure { // Get heal status from other peers peersHealStates := globalNotificationSys.BackgroundHealStatus() bgHealStates = append(bgHealStates, peersHealStates...) @@ -862,11 +862,11 @@ const ( AdminUpdateApplyFailure = "XMinioAdminUpdateApplyFailure" ) -// toAdminAPIErrCode - converts errXLWriteQuorum error to admin API +// toAdminAPIErrCode - converts errErasureWriteQuorum error to admin API // specific error. 
func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode { switch err { - case errXLWriteQuorum: + case errErasureWriteQuorum: return ErrAdminConfigNoQuorum default: return toAPIErrorCode(ctx, err) @@ -1277,7 +1277,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request) partialWrite(obdInfo) } - if net, ok := vars["perfnet"]; ok && net == "true" && globalIsDistXL { + if net, ok := vars["perfnet"]; ok && net == "true" && globalIsDistErasure { obdInfo.Perf.Net = append(obdInfo.Perf.Net, globalNotificationSys.NetOBDInfo(deadlinedCtx)) partialWrite(obdInfo) @@ -1384,7 +1384,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque OffDisks += v } - backend = madmin.XLBackend{ + backend = madmin.ErasureBackend{ Type: madmin.ErasureType, OnlineDisks: OnDisks, OfflineDisks: OffDisks, @@ -1413,10 +1413,10 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque for _, sp := range servers { for i, di := range sp.Disks { path := "" - if globalIsXL { + if globalIsErasure { path = di.DrivePath } - if globalIsDistXL { + if globalIsDistErasure { path = sp.Endpoint + di.DrivePath } // For distributed @@ -1424,13 +1424,13 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque for b := range storageInfo.Backend.Sets[a] { ep := storageInfo.Backend.Sets[a][b].Endpoint - if globalIsDistXL { + if globalIsDistErasure { if strings.Replace(ep, "http://", "", -1) == path || strings.Replace(ep, "https://", "", -1) == path { sp.Disks[i].State = storageInfo.Backend.Sets[a][b].State sp.Disks[i].UUID = storageInfo.Backend.Sets[a][b].UUID } } - if globalIsXL { + if globalIsErasure { if ep == path { sp.Disks[i].State = storageInfo.Backend.Sets[a][b].State sp.Disks[i].UUID = storageInfo.Backend.Sets[a][b].UUID diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 5955a50e1..c00a0bc9a 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -33,27 +33,27 @@ import ( "github.com/minio/minio/pkg/madmin" ) -// adminXLTestBed - encapsulates subsystems that need to be setup for +// adminErasureTestBed - encapsulates subsystems that need to be set up for // admin-handler unit tests. -type adminXLTestBed struct { - xlDirs []string - objLayer ObjectLayer - router *mux.Router +type adminErasureTestBed struct { + erasureDirs []string + objLayer ObjectLayer + router *mux.Router } -// prepareAdminXLTestBed - helper function that setups a single-node -// XL backend for admin-handler tests. -func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) { +// prepareAdminErasureTestBed - helper function that sets up a single-node +// Erasure backend for admin-handler tests. +func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, error) { // reset global variables to start afresh. resetTestGlobals() - // Set globalIsXL to indicate that the setup uses an erasure + // Set globalIsErasure to indicate that the setup uses an erasure // code backend. - globalIsXL = true + globalIsErasure = true // Initializing objectLayer for HealFormatHandler. - objLayer, xlDirs, xlErr := initTestXLObjLayer(ctx) + objLayer, erasureDirs, xlErr := initTestErasureObjLayer(ctx) if xlErr != nil { return nil, xlErr } @@ -66,7 +66,7 @@ func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) { // Initialize boot time globalBootTime = UTCNow() - globalEndpoints = mustGetZoneEndpoints(xlDirs...) + globalEndpoints = mustGetZoneEndpoints(erasureDirs...)
newAllSubsystems() @@ -76,36 +76,37 @@ func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) { adminRouter := mux.NewRouter() registerAdminRouter(adminRouter, true, true) - return &adminXLTestBed{ - xlDirs: xlDirs, - objLayer: objLayer, - router: adminRouter, + return &adminErasureTestBed{ + erasureDirs: erasureDirs, + objLayer: objLayer, + router: adminRouter, }, nil } // TearDown - method that resets the test bed for subsequent unit // tests to start afresh. -func (atb *adminXLTestBed) TearDown() { - removeRoots(atb.xlDirs) +func (atb *adminErasureTestBed) TearDown() { + removeRoots(atb.erasureDirs) resetTestGlobals() } -// initTestObjLayer - Helper function to initialize an XL-based object +// initTestErasureObjLayer - Helper function to initialize an Erasure-based object // layer and set globalObjectAPI. -func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) { - xlDirs, err := getRandomDisks(16) +func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error) { + erasureDirs, err := getRandomDisks(16) if err != nil { return nil, nil, err } - endpoints := mustGetNewEndpoints(xlDirs...) - storageDisks, format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "") + endpoints := mustGetNewEndpoints(erasureDirs...) + storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "") if err != nil { - removeRoots(xlDirs) + removeRoots(erasureDirs) return nil, nil, err } globalPolicySys = NewPolicySys() - objLayer, err := newXLSets(ctx, endpoints, storageDisks, format) + objLayer := &erasureZones{zones: make([]*erasureSets, 1)} + objLayer.zones[0], err = newErasureSets(ctx, endpoints, storageDisks, format) if err != nil { return nil, nil, err } @@ -114,7 +115,7 @@ func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) { globalObjLayerMutex.Lock() globalObjectAPI = objLayer globalObjLayerMutex.Unlock() - return objLayer, xlDirs, nil + return objLayer, erasureDirs, nil } // cmdType - Represents different service subcommands like status, stop @@ -183,9 +184,9 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - adminTestBed, err := prepareAdminXLTestBed(ctx) + adminTestBed, err := prepareAdminErasureTestBed(ctx) if err != nil { - t.Fatal("Failed to initialize a single node XL backend for admin handler tests.") + t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.") } defer adminTestBed.TearDown() @@ -254,9 +255,9 @@ func TestAdminServerInfo(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - adminTestBed, err := prepareAdminXLTestBed(ctx) + adminTestBed, err := prepareAdminErasureTestBed(ctx) if err != nil { - t.Fatal("Failed to initialize a single node XL backend for admin handler tests.") + t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.") } defer adminTestBed.TearDown() @@ -298,7 +299,7 @@ func TestToAdminAPIErrCode(t *testing.T) { }{ // 1. Server not in quorum. { - err: errXLWriteQuorum, + err: errErasureWriteQuorum, expectedAPIErr: ErrAdminConfigNoQuorum, }, // 2. No error.
diff --git a/cmd/admin-heal-ops.go b/cmd/admin-heal-ops.go index c3fd34bef..71082199e 100644 --- a/cmd/admin-heal-ops.go +++ b/cmd/admin-heal-ops.go @@ -21,7 +21,6 @@ import ( "encoding/json" "fmt" "net/http" - "strings" "sync" "time" @@ -193,7 +192,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) ( respBytes []byte, apiErr APIError, errMsg string) { existsAndLive := false - he, exists := ahs.getHealSequence(h.path) + he, exists := ahs.getHealSequence(pathJoin(h.bucket, h.object)) if exists { existsAndLive = !he.hasEnded() } @@ -220,8 +219,9 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) ( // Check if new heal sequence to be started overlaps with any // existing, running sequence + hpath := pathJoin(h.bucket, h.object) for k, hSeq := range ahs.healSeqMap { - if !hSeq.hasEnded() && (HasPrefix(k, h.path) || HasPrefix(h.path, k)) { + if !hSeq.hasEnded() && (HasPrefix(k, hpath) || HasPrefix(hpath, k)) { errMsg = "The provided heal sequence path overlaps with an existing " + fmt.Sprintf("heal path: %s", k) @@ -230,7 +230,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) ( } // Add heal state and start sequence - ahs.healSeqMap[h.path] = h + ahs.healSeqMap[hpath] = h // Launch top-level background heal go-routine go h.healSequenceStart() @@ -251,11 +251,11 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) ( // status results from global state and returns its JSON // representation. The clientToken helps ensure there aren't // conflicting clients fetching status. -func (ahs *allHealState) PopHealStatusJSON(path string, +func (ahs *allHealState) PopHealStatusJSON(hpath string, clientToken string) ([]byte, APIErrorCode) { // fetch heal state for given path - h, exists := ahs.getHealSequence(path) + h, exists := ahs.getHealSequence(hpath) if !exists { // If there is no such heal sequence, return error. return nil, ErrHealNoSuchProcess @@ -296,18 +296,17 @@ func (ahs *allHealState) PopHealStatusJSON(path string, // healSource denotes single entity and heal option. type healSource struct { - path string // entity path (format, buckets, objects) to heal - opts *madmin.HealOpts // optional heal option overrides default setting + bucket string + object string + versionID string + opts *madmin.HealOpts // optional heal option overrides default setting } // healSequence - state for each heal sequence initiated on the // server. type healSequence struct { - // bucket, and prefix on which heal seq. was initiated - bucket, objPrefix string - - // path is just pathJoin(bucket, objPrefix) - path string + // bucket, and object on which heal seq. 
was initiated + bucket, object string // A channel of entities (format, buckets, objects) to heal sourceCh chan healSource @@ -377,8 +376,7 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string, return &healSequence{ respCh: make(chan healResult), bucket: bucket, - objPrefix: objPrefix, - path: pathJoin(bucket, objPrefix), + object: objPrefix, reportProgress: true, startTime: UTCNow(), clientToken: mustGetUUID(), @@ -618,7 +616,9 @@ func (h *healSequence) healSequenceStart() { func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error { // Send heal request task := healTask{ - path: source.path, + bucket: source.bucket, + object: source.object, + versionID: source.versionID, opts: h.settings, responseCh: h.respCh, } @@ -690,11 +690,11 @@ func (h *healSequence) healItemsFromSourceCh() error { } var itemType madmin.HealItemType switch { - case source.path == nopHeal: + case source.bucket == nopHeal: continue - case source.path == SlashSeparator: + case source.bucket == SlashSeparator: itemType = madmin.HealItemMetadata - case !strings.Contains(source.path, SlashSeparator): + case source.bucket != "" && source.object == "": itemType = madmin.HealItemBucket default: itemType = madmin.HealItemObject @@ -762,12 +762,16 @@ func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error { // NOTE: Healing on meta is run regardless // of any bucket being selected, this is to ensure that // meta are always up to date and correct. - return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket string, object string) error { + return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error { if h.isQuitting() { return errHealStopSignalled } - herr := h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemBucketMetadata) + herr := h.queueHealTask(healSource{ + bucket: bucket, + object: object, + versionID: versionID, + }, madmin.HealItemBucketMetadata) // Object might have been deleted, by the time heal // was attempted we ignore this object and move on. if isErrObjectNotFound(herr) { @@ -791,7 +795,7 @@ func (h *healSequence) healDiskFormat() error { return errServerNotInitialized } - return h.queueHealTask(healSource{path: SlashSeparator}, madmin.HealItemMetadata) + return h.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata) } // healBuckets - check for all buckets heal or just particular bucket. @@ -833,7 +837,7 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error { return errServerNotInitialized } - if err := h.queueHealTask(healSource{path: bucket}, madmin.HealItemBucket); err != nil { + if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil { return err } @@ -842,12 +846,12 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error { } if !h.settings.Recursive { - if h.objPrefix != "" { + if h.object != "" { // Check if an object named as the objPrefix exists, // and if so heal it.
- _, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{}) + _, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{}) if err == nil { - if err = h.healObject(bucket, h.objPrefix); err != nil { + if err = h.healObject(bucket, h.object, ""); err != nil { return err } } @@ -856,14 +860,14 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error { return nil } - if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.settings, h.healObject); err != nil { + if err := objectAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil { return errFnHealFromAPIErr(h.ctx, err) } return nil } // healObject - heal the given object and record result -func (h *healSequence) healObject(bucket, object string) error { +func (h *healSequence) healObject(bucket, object, versionID string) error { // Get current object layer instance. objectAPI := newObjectLayerWithoutSafeModeFn() if objectAPI == nil { @@ -874,5 +878,9 @@ func (h *healSequence) healObject(bucket, object string) error { return errHealStopSignalled } - return h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemObject) + return h.queueHealTask(healSource{ + bucket: bucket, + object: object, + versionID: versionID, + }, madmin.HealItemObject) } diff --git a/cmd/admin-router.go b/cmd/admin-router.go index f3c877339..0fe823f73 100644 --- a/cmd/admin-router.go +++ b/cmd/admin-router.go @@ -64,7 +64,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) // DataUsageInfo operations adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler)) - if globalIsDistXL || globalIsXL { + if globalIsDistErasure || globalIsErasure { /// Heal operations // Heal processing endpoint. @@ -172,7 +172,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) } // Quota operations - if globalIsXL || globalIsDistXL { + if globalIsDistErasure || globalIsErasure { if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn { // GetBucketQuotaConfig adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc( @@ -185,7 +185,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) // -- Top APIs -- // Top locks - if globalIsDistXL { + if globalIsDistErasure { adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler)) } diff --git a/cmd/admin-server-info.go b/cmd/admin-server-info.go index d6f562658..0faaadc6c 100644 --- a/cmd/admin-server-info.go +++ b/cmd/admin-server-info.go @@ -29,7 +29,7 @@ import ( func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties { var disks []madmin.Disk addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(endpointZones) } network := make(map[string]string) diff --git a/cmd/api-datatypes.go b/cmd/api-datatypes.go index 59f2fd41e..eb74e20f7 100644 --- a/cmd/api-datatypes.go +++ b/cmd/api-datatypes.go @@ -20,9 +20,18 @@ import ( "encoding/xml" ) -// ObjectIdentifier carries key name for the object to delete. 
-type ObjectIdentifier struct { +// DeletedObject objects deleted +type DeletedObject struct { + DeleteMarker bool `xml:"DeleteMarker"` + DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"` + ObjectName string `xml:"Key,omitempty"` + VersionID string `xml:"VersionId,omitempty"` +} + +// ObjectToDelete carries key name for the object to delete. +type ObjectToDelete struct { ObjectName string `xml:"Key"` + VersionID string `xml:"VersionId"` } // createBucketConfiguration container for bucket configuration request from client. @@ -37,5 +46,5 @@ type DeleteObjectsRequest struct { // Element to enable quiet mode for the request Quiet bool // List of objects to be deleted - Objects []ObjectIdentifier `xml:"Object"` + Objects []ObjectToDelete `xml:"Object"` } diff --git a/cmd/api-errors.go b/cmd/api-errors.go index 3d334d9c5..961735be4 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -36,6 +36,7 @@ import ( "github.com/minio/minio/pkg/bucket/lifecycle" objectlock "github.com/minio/minio/pkg/bucket/object/lock" "github.com/minio/minio/pkg/bucket/policy" + "github.com/minio/minio/pkg/bucket/versioning" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/hash" ) @@ -538,9 +539,9 @@ var errorCodes = errorCodeMap{ HTTPStatusCode: http.StatusNotFound, }, ErrNoSuchVersion: { - Code: "NoSuchVersion", - Description: "Indicates that the version ID specified in the request does not match an existing version.", - HTTPStatusCode: http.StatusNotFound, + Code: "InvalidArgument", + Description: "Invalid version id specified", + HTTPStatusCode: http.StatusBadRequest, }, ErrNotImplemented: { Code: "NotImplemented", @@ -1782,6 +1783,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) { apiErr = ErrBucketAlreadyOwnedByYou case ObjectNotFound: apiErr = ErrNoSuchKey + case MethodNotAllowed: + apiErr = ErrMethodNotAllowed + case VersionNotFound: + apiErr = ErrNoSuchVersion case ObjectAlreadyExists: apiErr = ErrMethodNotAllowed case ObjectNameInvalid: @@ -1918,6 +1923,12 @@ func toAPIError(ctx context.Context, err error) APIError { e.Error()), HTTPStatusCode: http.StatusBadRequest, } + case versioning.Error: + apiErr = APIError{ + Code: "IllegalVersioningConfigurationException", + Description: fmt.Sprintf("Versioning configuration specified in the request is invalid. (%s)", e.Error()), + HTTPStatusCode: http.StatusBadRequest, + } case lifecycle.Error: apiErr = APIError{ Code: "InvalidRequest", diff --git a/cmd/api-headers.go b/cmd/api-headers.go index a9b8031d0..39274d344 100644 --- a/cmd/api-headers.go +++ b/cmd/api-headers.go @@ -29,6 +29,7 @@ import ( "github.com/minio/minio/cmd/crypto" xhttp "github.com/minio/minio/cmd/http" + "github.com/minio/minio/pkg/bucket/lifecycle" ) // Returns a hexadecimal representation of time at the @@ -152,5 +153,26 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp w.Header().Set(xhttp.ContentRange, contentRange) } + // Set the relevant version ID as part of the response header. 
+ if objInfo.VersionID != "" { + w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID} + } + + if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil { + ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{ + Name: objInfo.Name, + UserTags: objInfo.UserTags, + VersionID: objInfo.VersionID, + ModTime: objInfo.ModTime, + IsLatest: objInfo.IsLatest, + DeleteMarker: objInfo.DeleteMarker, + }) + if !expiryTime.IsZero() { + w.Header()[xhttp.AmzExpiration] = []string{ + fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID), + } + } + } + return nil } diff --git a/cmd/api-response.go b/cmd/api-response.go index b5bbc1170..846d5be7d 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -81,6 +81,7 @@ type ListVersionsResponse struct { CommonPrefixes []CommonPrefix Versions []ObjectVersion + DeleteMarkers []DeletedVersion // Encoding type used to encode object keys in the response. EncodingType string `xml:"EncodingType,omitempty"` @@ -237,8 +238,22 @@ type Bucket struct { type ObjectVersion struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version" json:"-"` Object - VersionID string `xml:"VersionId"` IsLatest bool + VersionID string `xml:"VersionId"` +} + +// DeletedVersion container for the delete object version metadata. +type DeletedVersion struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteMarker" json:"-"` + + IsLatest bool + Key string + LastModified string // time string of format "2006-01-02T15:04:05.000Z" + + // Owner of the object. + Owner Owner + + VersionID string `xml:"VersionId"` } // StringMap is a map[string]string. @@ -333,9 +348,10 @@ type CompleteMultipartUploadResponse struct { // DeleteError structure. type DeleteError struct { - Code string - Message string - Key string + Code string + Message string + Key string + VersionID string `xml:"VersionId"` } // DeleteObjectsResponse container for multiple object deletes. @@ -343,7 +359,7 @@ type DeleteObjectsResponse struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"` // Collection of all deleted objects - DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"` + DeletedObjects []DeletedObject `xml:"Deleted,omitempty"` // Collection of errors deleting certain objects. Errors []DeleteError `xml:"Error,omitempty"` @@ -413,8 +429,9 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse { } // generates an ListBucketVersions response for the said bucket with other enumerated options. 
-func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListVersionsResponse { +func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse { var versions []ObjectVersion + var deletedVersions []DeletedVersion var prefixes []CommonPrefix var owner = Owner{} var data = ListVersionsResponse{} @@ -436,15 +453,29 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp } else { content.StorageClass = globalMinioDefaultStorageClass } - content.Owner = owner - content.VersionID = "null" - content.IsLatest = true + content.VersionID = object.VersionID + if content.VersionID == "" { + content.VersionID = nullVersionID + } + content.IsLatest = object.IsLatest versions = append(versions, content) } + for _, deleted := range resp.DeleteObjects { + var dv = DeletedVersion{ + Key: s3EncodeName(deleted.Name, encodingType), + Owner: owner, + LastModified: deleted.ModTime.UTC().Format(iso8601TimeFormat), + VersionID: deleted.VersionID, + IsLatest: deleted.IsLatest, + } + deletedVersions = append(deletedVersions, dv) + } + data.Name = bucket data.Versions = versions + data.DeleteMarkers = deletedVersions data.EncodingType = encodingType data.Prefix = s3EncodeName(prefix, encodingType) data.KeyMarker = s3EncodeName(marker, encodingType) @@ -452,6 +483,8 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp data.MaxKeys = maxKeys data.NextKeyMarker = s3EncodeName(resp.NextMarker, encodingType) + data.NextVersionIDMarker = resp.NextVersionIDMarker + data.VersionIDMarker = versionIDMarker data.IsTruncated = resp.IsTruncated for _, prefix := range resp.Prefixes { @@ -666,11 +699,14 @@ func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMult } // generate multi objects delete response. 
-func generateMultiDeleteResponse(quiet bool, deletedObjects []ObjectIdentifier, errs []DeleteError) DeleteObjectsResponse { +func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, errs []DeleteError) DeleteObjectsResponse { deleteResp := DeleteObjectsResponse{} if !quiet { deleteResp.DeletedObjects = deletedObjects } + if len(errs) == len(deletedObjects) { + deleteResp.DeletedObjects = nil + } deleteResp.Errors = errs return deleteResp } diff --git a/cmd/api-router.go b/cmd/api-router.go index 4011c9aa5..cfd26e7f7 100644 --- a/cmd/api-router.go +++ b/cmd/api-router.go @@ -224,9 +224,9 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) // ListObjectsV2 bucket.Methods(http.MethodGet).HandlerFunc( maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2") - // ListBucketVersions + // ListObjectVersions bucket.Methods(http.MethodGet).HandlerFunc( - maxClients(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler)))).Queries("versions", "") + maxClients(collectAPIStats("listobjectversions", httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "") // ListObjectsV1 (Legacy) bucket.Methods(http.MethodGet).HandlerFunc( maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler)))) diff --git a/cmd/background-heal-ops.go b/cmd/background-heal-ops.go index 20271977b..dd0582a3d 100644 --- a/cmd/background-heal-ops.go +++ b/cmd/background-heal-ops.go @@ -18,6 +18,7 @@ package cmd import ( "context" + "path" "time" "github.com/minio/minio/cmd/logger" @@ -29,8 +30,10 @@ import ( // path: 'bucket/' or '/bucket/' => Heal bucket // path: 'bucket/object' => Heal object type healTask struct { - path string - opts madmin.HealOpts + bucket string + object string + versionID string + opts madmin.HealOpts // Healing response will be sent here responseCh chan healResult } @@ -79,17 +82,18 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) { var res madmin.HealResultItem var err error - bucket, object := path2BucketObject(task.path) switch { - case bucket == "" && object == "": + case task.bucket == nopHeal: + continue + case task.bucket == SlashSeparator: res, err = healDiskFormat(ctx, objAPI, task.opts) - case bucket != "" && object == "": - res, err = objAPI.HealBucket(ctx, bucket, task.opts.DryRun, task.opts.Remove) - case bucket != "" && object != "": - res, err = objAPI.HealObject(ctx, bucket, object, task.opts) + case task.bucket != "" && task.object == "": + res, err = objAPI.HealBucket(ctx, task.bucket, task.opts.DryRun, task.opts.Remove) + case task.bucket != "" && task.object != "": + res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts) } - if task.path != slashSeparator && task.path != nopHeal { - ObjectPathUpdated(task.path) + if task.bucket != "" && task.object != "" { + ObjectPathUpdated(path.Join(task.bucket, task.object)) } task.responseCh <- healResult{result: res, err: err} case <-h.doneCh: diff --git a/cmd/background-newdisks-heal-ops.go b/cmd/background-newdisks-heal-ops.go index 5bfc86fde..0d1637000 100644 --- a/cmd/background-newdisks-heal-ops.go +++ b/cmd/background-newdisks-heal-ops.go @@ -33,7 +33,7 @@ func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) { // 1. Only the concerned erasure set will be listed and healed // 2. 
Only the node hosting the disk is responsible to perform the heal func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) { - z, ok := objAPI.(*xlZones) + z, ok := objAPI.(*erasureZones) if !ok { return } @@ -84,10 +84,10 @@ func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) { } // Reformat disks - bgSeq.sourceCh <- healSource{path: SlashSeparator} + bgSeq.sourceCh <- healSource{bucket: SlashSeparator} // Ensure that reformatting disks is finished - bgSeq.sourceCh <- healSource{path: nopHeal} + bgSeq.sourceCh <- healSource{bucket: nopHeal} var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal)) // Compute the list of erasure set to heal diff --git a/cmd/benchmark-utils_test.go b/cmd/benchmark-utils_test.go index c13772872..92cc4a578 100644 --- a/cmd/benchmark-utils_test.go +++ b/cmd/benchmark-utils_test.go @@ -35,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) { // obtains random bucket name. bucket := getRandomBucketName() // create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { b.Fatal(err) } @@ -76,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) { object := getRandomObjectName() // create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { b.Fatal(err) } @@ -127,9 +127,9 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) { b.StopTimer() } -// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function. +// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function. func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) { - // create a temp XL/FS backend. + // create a temp Erasure/FS backend. ctx, cancel := context.WithCancel(context.Background()) defer cancel() objLayer, disks, err := prepareTestBackend(ctx, instanceType) @@ -143,9 +143,9 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) { runPutObjectPartBenchmark(b, objLayer, objSize) } -// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function. +// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function. func benchmarkPutObject(b *testing.B, instanceType string, objSize int) { - // create a temp XL/FS backend. + // create a temp Erasure/FS backend. ctx, cancel := context.WithCancel(context.Background()) defer cancel() objLayer, disks, err := prepareTestBackend(ctx, instanceType) @@ -159,9 +159,9 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) { runPutObjectBenchmark(b, objLayer, objSize) } -// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object. +// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for put object. func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) { - // create a temp XL/FS backend. + // create a temp Erasure/FS backend. 
ctx, cancel := context.WithCancel(context.Background()) defer cancel() objLayer, disks, err := prepareTestBackend(ctx, instanceType) @@ -181,7 +181,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) { // obtains random bucket name. bucket := getRandomBucketName() // create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { b.Fatal(err) } @@ -190,7 +190,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) { // generate etag for the generated data. // etag of the data to be written is required as input for PutObject. - // PutObject is the functions which writes the data onto the FS/XL backend. + // PutObject is the function which writes the data onto the FS/Erasure backend. // get text data generated for number of bytes equal to object size. md5hex := getMD5Hash(textData) @@ -240,9 +240,9 @@ func generateBytesData(size int) []byte { return bytes.Repeat(getRandomByte(), size) } -// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function. +// creates Erasure/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function. func benchmarkGetObject(b *testing.B, instanceType string, objSize int) { - // create a temp XL/FS backend. + // create a temp Erasure/FS backend. ctx, cancel := context.WithCancel(context.Background()) defer cancel() objLayer, disks, err := prepareTestBackend(ctx, instanceType) @@ -256,9 +256,9 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) { runGetObjectBenchmark(b, objLayer, objSize) } -// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() . +// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() . func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) { - // create a temp XL/FS backend. + // create a temp Erasure/FS backend. ctx, cancel := context.WithCancel(context.Background()) defer cancel() objLayer, disks, err := prepareTestBackend(ctx, instanceType) @@ -278,7 +278,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) { // obtains random bucket name. bucket := getRandomBucketName() // create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { b.Fatal(err) } @@ -322,7 +322,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) { // obtains random bucket name. bucket := getRandomBucketName() // create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { b.Fatal(err) } @@ -331,7 +331,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) { textData := generateBytesData(objSize) // generate md5sum for the generated data. // md5sum of the data to be written is required as input for PutObject. - // PutObject is the functions which writes the data onto the FS/XL backend. + // PutObject is the function which writes the data onto the FS/Erasure backend.
md5hex := getMD5Hash([]byte(textData)) sha256hex := "" diff --git a/cmd/bitrot.go b/cmd/bitrot.go index bfdecfbd3..cddc17a96 100644 --- a/cmd/bitrot.go +++ b/cmd/bitrot.go @@ -30,25 +30,6 @@ import ( // magic HH-256 key as HH-256 hash of the first 100 decimals of π as utf-8 string with a zero key. var magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0") -// BitrotAlgorithm specifies a algorithm used for bitrot protection. -type BitrotAlgorithm uint - -const ( - // SHA256 represents the SHA-256 hash function - SHA256 BitrotAlgorithm = 1 + iota - // HighwayHash256 represents the HighwayHash-256 hash function - HighwayHash256 - // HighwayHash256S represents the Streaming HighwayHash-256 hash function - HighwayHash256S - // BLAKE2b512 represents the BLAKE2b-512 hash function - BLAKE2b512 -) - -// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection. -const ( - DefaultBitrotAlgorithm = HighwayHash256S -) - var bitrotAlgorithms = map[BitrotAlgorithm]string{ SHA256: "sha256", BLAKE2b512: "blake2b", diff --git a/cmd/bitrot_test.go b/cmd/bitrot_test.go index 78cf9dc5c..a31b909f0 100644 --- a/cmd/bitrot_test.go +++ b/cmd/bitrot_test.go @@ -34,7 +34,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) { volume := "testvol" filePath := "testfile" - disk, err := newPosix(tmpDir, "") + disk, err := newXLStorage(tmpDir, "") if err != nil { t.Fatal(err) } diff --git a/cmd/bucket-encryption.go b/cmd/bucket-encryption.go index b5504c909..2c2ab1606 100644 --- a/cmd/bucket-encryption.go +++ b/cmd/bucket-encryption.go @@ -55,5 +55,6 @@ func validateBucketSSEConfig(r io.Reader) (*bucketsse.BucketSSEConfig, error) { if len(encConfig.Rules) == 1 && encConfig.Rules[0].DefaultEncryptionAction.Algorithm == bucketsse.AES256 { return encConfig, nil } + return nil, errors.New("Unsupported bucket encryption configuration") } diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index d6ae21bea..df6102607 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -45,9 +45,8 @@ import ( ) const ( - getBucketVersioningResponse = `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>` - objectLockConfig = "object-lock.xml" - bucketTaggingConfigFile = "tagging.xml" + objectLockConfig = "object-lock.xml" + bucketTaggingConfigFile = "tagging.xml" ) // Check if there are buckets on server without corresponding entry in etcd backend and @@ -382,75 +381,86 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, deleteObjectsFn = api.CacheAPI().DeleteObjects } - var objectsToDelete = map[string]int{} + var objectsToDelete = map[ObjectToDelete]int{} getObjectInfoFn := objectAPI.GetObjectInfo if api.CacheAPI() != nil { getObjectInfoFn = api.CacheAPI().GetObjectInfo } - var dErrs = make([]APIErrorCode, len(deleteObjects.Objects)) - + dErrs := make([]DeleteError, len(deleteObjects.Objects)) for index, object := range deleteObjects.Objects { - if dErrs[index] = checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); dErrs[index] != ErrNone { - if dErrs[index] == ErrSignatureDoesNotMatch || dErrs[index] == ErrInvalidAccessKeyID { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(dErrs[index]), r.URL, guessIsBrowserReq(r)) + if apiErrCode := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); apiErrCode != ErrNone { + if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID { + writeErrorResponse(ctx, w, 
errorCodes.ToAPIErr(apiErrCode), r.URL, guessIsBrowserReq(r)) return } + apiErr := errorCodes.ToAPIErr(apiErrCode) + dErrs[index] = DeleteError{ + Code: apiErr.Code, + Message: apiErr.Description, + Key: object.ObjectName, + VersionID: object.VersionID, + } continue } - if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled { - if apiErr := enforceRetentionBypassForDelete(ctx, r, bucket, object.ObjectName, getObjectInfoFn); apiErr != ErrNone { - dErrs[index] = apiErr - continue + if object.VersionID != "" { + if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled { + if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, getObjectInfoFn); apiErrCode != ErrNone { + apiErr := errorCodes.ToAPIErr(apiErrCode) + dErrs[index] = DeleteError{ + Code: apiErr.Code, + Message: apiErr.Description, + Key: object.ObjectName, + VersionID: object.VersionID, + } + continue + } } } // Avoid duplicate objects, we use map to filter them out. - if _, ok := objectsToDelete[object.ObjectName]; !ok { - objectsToDelete[object.ObjectName] = index + if _, ok := objectsToDelete[object]; !ok { + objectsToDelete[object] = index } } - toNames := func(input map[string]int) (output []string) { - output = make([]string, len(input)) + toNames := func(input map[ObjectToDelete]int) (output []ObjectToDelete) { + output = make([]ObjectToDelete, len(input)) idx := 0 - for name := range input { - output[idx] = name + for obj := range input { + output[idx] = obj idx++ } return } deleteList := toNames(objectsToDelete) - errs, err := deleteObjectsFn(ctx, bucket, deleteList) - if err != nil { - writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) - return - } + dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{ + Versioned: globalBucketVersioningSys.Enabled(bucket), + }) - for i, objName := range deleteList { - dIdx := objectsToDelete[objName] - dErrs[dIdx] = toAPIErrorCode(ctx, errs[i]) - } - - // Collect deleted objects and errors if any. - var deletedObjects []ObjectIdentifier - var deleteErrors []DeleteError - for index, errCode := range dErrs { - object := deleteObjects.Objects[index] - // Success deleted objects are collected separately. - if errCode == ErrNone || errCode == ErrNoSuchKey { - deletedObjects = append(deletedObjects, object) + deletedObjects := make([]DeletedObject, len(deleteObjects.Objects)) + for i := range errs { + dindex := objectsToDelete[deleteList[i]] + apiErr := toAPIError(ctx, errs[i]) + if apiErr.Code == "" || apiErr.Code == "NoSuchKey" { + deletedObjects[dindex] = dObjects[i] continue } - apiErr := getAPIError(errCode) - // Error during delete should be collected separately. - deleteErrors = append(deleteErrors, DeleteError{ - Code: apiErr.Code, - Message: apiErr.Description, - Key: object.ObjectName, - }) + dErrs[dindex] = DeleteError{ + Code: apiErr.Code, + Message: apiErr.Description, + Key: deleteList[i].ObjectName, + VersionID: deleteList[i].VersionID, + } + } + + var deleteErrors []DeleteError + for _, dErr := range dErrs { + if dErr.Code != "" { + deleteErrors = append(deleteErrors, dErr) + } } // Generate response @@ -462,12 +472,21 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, // Notify deleted event for objects. 
for _, dobj := range deletedObjects { + objInfo := ObjectInfo{ + Name: dobj.ObjectName, + VersionID: dobj.VersionID, + } + if dobj.DeleteMarker { + objInfo = ObjectInfo{ + Name: dobj.ObjectName, + DeleteMarker: dobj.DeleteMarker, + VersionID: dobj.DeleteMarkerVersionID, + } + } sendEvent(eventArgs{ - EventName: event.ObjectRemovedDelete, - BucketName: bucket, - Object: ObjectInfo{ - Name: dobj.ObjectName, - }, + EventName: event.ObjectRemovedDelete, + BucketName: bucket, + Object: objInfo, ReqParams: extractReqParams(r), RespElements: extractRespElements(w), UserAgent: r.UserAgent(), @@ -522,12 +541,17 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req return } + opts := BucketOptions{ + Location: location, + LockEnabled: objectLockEnabled, + } + if globalDNSConfig != nil { sr, err := globalDNSConfig.Get(bucket) if err != nil { if err == dns.ErrNoEntriesFound { // Proceed to creating a bucket. - if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location, objectLockEnabled); err != nil { + if err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } @@ -565,7 +589,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req } // Proceed to creating a bucket. - err := objectAPI.MakeBucketWithLocation(ctx, bucket, location, objectLockEnabled) + err := objectAPI.MakeBucketWithLocation(ctx, bucket, opts) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return @@ -797,9 +821,17 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h return } - location := getObjectLocation(r, globalDomainNames, bucket, object) + // We must not use the http.Header().Set method here because some (broken) + // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). + // Therefore, we have to set the ETag directly as map entry. w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`} - w.Header().Set(xhttp.Location, location) + + // Set the relevant version ID as part of the response header. + if objInfo.VersionID != "" { + w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID} + } + + w.Header().Set(xhttp.Location, getObjectLocation(r, globalDomainNames, bucket, object)) // Notify object created event. defer sendEvent(eventArgs{ @@ -826,9 +858,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h Bucket: objInfo.Bucket, Key: objInfo.Name, ETag: `"` + objInfo.ETag + `"`, - Location: location, + Location: w.Header().Get(xhttp.Location), }) - writeResponse(w, http.StatusCreated, resp, "application/xml") + writeResponse(w, http.StatusCreated, resp, mimeXML) case "200": writeSuccessResponseHeadersOnly(w) default: @@ -921,79 +953,30 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http. // Attempt to delete bucket. if err := deleteBucket(ctx, bucket, forceDelete); err != nil { - writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + if _, ok := err.(BucketNotEmpty); ok && (globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket)) { + apiErr := toAPIError(ctx, err) + apiErr.Description = "The bucket you tried to delete is not empty. You must delete all versions in the bucket." 
+ writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r)) + } else { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + } return } + globalNotificationSys.DeleteBucketMetadata(ctx, bucket) + if globalDNSConfig != nil { if err := globalDNSConfig.Delete(bucket); err != nil { - // Deleting DNS entry failed, attempt to create the bucket again. - objectAPI.MakeBucketWithLocation(ctx, bucket, "", false) + logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually using etcdctl", err)) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } } - globalNotificationSys.DeleteBucketMetadata(ctx, bucket) - // Write success response. writeSuccessNoContent(w) } -// PutBucketVersioningHandler - PUT Bucket Versioning. -// ---------- -// No-op. Available for API compatibility. -func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "PutBucketVersioning") - - defer logger.AuditLog(w, r, "PutBucketVersioning", mustGetClaimsFromToken(r)) - - vars := mux.Vars(r) - bucket := vars["bucket"] - - objectAPI := api.ObjectAPI() - if objectAPI == nil { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) - return - } - - getBucketInfo := objectAPI.GetBucketInfo - if _, err := getBucketInfo(ctx, bucket); err != nil { - writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) - return - } - - // Write success response. - writeSuccessResponseHeadersOnly(w) -} - -// GetBucketVersioningHandler - GET Bucket Versioning. -// ---------- -// No-op. Available for API compatibility. -func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "GetBucketVersioning") - - defer logger.AuditLog(w, r, "GetBucketVersioning", mustGetClaimsFromToken(r)) - - vars := mux.Vars(r) - bucket := vars["bucket"] - - objectAPI := api.ObjectAPI() - if objectAPI == nil { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) - return - } - - getBucketInfo := objectAPI.GetBucketInfo - if _, err := getBucketInfo(ctx, bucket); err != nil { - writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) - return - } - - // Write success response. - writeSuccessResponseXML(w, []byte(getBucketVersioningResponse)) -} - // PutBucketObjectLockConfigHandler - PUT Bucket object lock configuration. // ---------- // Places an Object Lock configuration on the specified bucket. The rule diff --git a/cmd/bucket-handlers_test.go b/cmd/bucket-handlers_test.go index 3f4f80706..87e351ac8 100644 --- a/cmd/bucket-handlers_test.go +++ b/cmd/bucket-handlers_test.go @@ -19,6 +19,7 @@ package cmd import ( "bytes" "encoding/xml" + "fmt" "io/ioutil" "net/http" "net/http/httptest" @@ -28,7 +29,7 @@ import ( "github.com/minio/minio/pkg/auth" ) -// Wrapper for calling RemoveBucket HTTP handler tests for both XL multiple disks and single node setup. +// Wrapper for calling RemoveBucket HTTP handler tests for both Erasure multiple disks and single node setup. func TestRemoveBucketHandler(t *testing.T) { ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"}) } @@ -73,7 +74,7 @@ func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, a } } -// Wrapper for calling GetBucketPolicy HTTP handler tests for both XL multiple disks and single node setup. 
+// Wrapper for calling GetBucketPolicy HTTP handler tests for both Erasure multiple disks and single node setup. func TestGetBucketLocationHandler(t *testing.T) { ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"}) } @@ -217,7 +218,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq) } -// Wrapper for calling HeadBucket HTTP handler tests for both XL multiple disks and single node setup. +// Wrapper for calling HeadBucket HTTP handler tests for both Erasure multiple disks and single node setup. func TestHeadBucketHandler(t *testing.T) { ExecObjectLayerAPITest(t, testHeadBucketHandler, []string{"HeadBucket"}) } @@ -322,7 +323,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq) } -// Wrapper for calling TestListMultipartUploadsHandler tests for both XL multiple disks and single node setup. +// Wrapper for calling TestListMultipartUploadsHandler tests for both Erasure multiple disks and single node setup. func TestListMultipartUploadsHandler(t *testing.T) { ExecObjectLayerAPITest(t, testListMultipartUploadsHandler, []string{"ListMultipartUploads"}) } @@ -559,7 +560,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq) } -// Wrapper for calling TestListBucketsHandler tests for both XL multiple disks and single node setup. +// Wrapper for calling TestListBucketsHandler tests for both Erasure multiple disks and single node setup. func TestListBucketsHandler(t *testing.T) { ExecObjectLayerAPITest(t, testListBucketsHandler, []string{"ListBuckets"}) } @@ -653,7 +654,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap ExecObjectLayerAPINilTest(t, "", "", instanceType, apiRouter, nilReq) } -// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both XL multiple disks and single node setup. +// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup. 
func TestAPIDeleteMultipleObjectsHandler(t *testing.T) { ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects"}) } @@ -679,14 +680,17 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa objectNames = append(objectNames, objectName) } - getObjectIdentifierList := func(objectNames []string) (objectIdentifierList []ObjectIdentifier) { + getObjectToDeleteList := func(objectNames []string) (objectList []ObjectToDelete) { for _, objectName := range objectNames { - objectIdentifierList = append(objectIdentifierList, ObjectIdentifier{objectName}) + objectList = append(objectList, ObjectToDelete{ + ObjectName: objectName, + }) } - return objectIdentifierList + return objectList } - getDeleteErrorList := func(objects []ObjectIdentifier) (deleteErrorList []DeleteError) { + + getDeleteErrorList := func(objects []ObjectToDelete) (deleteErrorList []DeleteError) { for _, obj := range objects { deleteErrorList = append(deleteErrorList, DeleteError{ Code: errorCodes[ErrAccessDenied].Code, @@ -699,22 +703,38 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa requestList := []DeleteObjectsRequest{ - {Quiet: false, Objects: getObjectIdentifierList(objectNames[:5])}, - {Quiet: true, Objects: getObjectIdentifierList(objectNames[5:])}, + {Quiet: false, Objects: getObjectToDeleteList(objectNames[:5])}, + {Quiet: true, Objects: getObjectToDeleteList(objectNames[5:])}, } // generate multi objects delete response. successRequest0 := encodeResponse(requestList[0]) - successResponse0 := generateMultiDeleteResponse(requestList[0].Quiet, requestList[0].Objects, nil) + + deletedObjects := make([]DeletedObject, len(requestList[0].Objects)) + for i := range requestList[0].Objects { + deletedObjects[i] = DeletedObject{ + ObjectName: requestList[0].Objects[i].ObjectName, + } + } + + successResponse0 := generateMultiDeleteResponse(requestList[0].Quiet, deletedObjects, nil) encodedSuccessResponse0 := encodeResponse(successResponse0) successRequest1 := encodeResponse(requestList[1]) - successResponse1 := generateMultiDeleteResponse(requestList[1].Quiet, requestList[1].Objects, nil) + + deletedObjects = make([]DeletedObject, len(requestList[1].Objects)) + for i := range requestList[1].Objects { + deletedObjects[i] = DeletedObject{ + ObjectName: requestList[1].Objects[i].ObjectName, + } + } + + successResponse1 := generateMultiDeleteResponse(requestList[1].Quiet, deletedObjects, nil) encodedSuccessResponse1 := encodeResponse(successResponse1) // generate multi objects delete response for errors. // errorRequest := encodeResponse(requestList[1]) - errorResponse := generateMultiDeleteResponse(requestList[1].Quiet, requestList[1].Objects, nil) + errorResponse := generateMultiDeleteResponse(requestList[1].Quiet, deletedObjects, nil) encodedErrorResponse := encodeResponse(errorResponse) anonRequest := encodeResponse(requestList[0]) @@ -817,6 +837,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa // Verify whether the bucket obtained object is same as the one created. 
if testCase.expectedContent != nil && !bytes.Equal(testCase.expectedContent, actualContent) { + fmt.Println(string(testCase.expectedContent), string(actualContent)) t.Errorf("Test %d : MinIO %s: Object content differs from expected value.", i+1, instanceType) } } diff --git a/cmd/bucket-lifecycle.go b/cmd/bucket-lifecycle.go index fd7c9f6df..01d69978e 100644 --- a/cmd/bucket-lifecycle.go +++ b/cmd/bucket-lifecycle.go @@ -21,7 +21,6 @@ import ( ) const ( - // Disabled means the lifecycle rule is inactive Disabled = "Disabled" ) diff --git a/cmd/bucket-listobjects-handlers.go b/cmd/bucket-listobjects-handlers.go index ad03b1fd5..567864d30 100644 --- a/cmd/bucket-listobjects-handlers.go +++ b/cmd/bucket-listobjects-handlers.go @@ -49,13 +49,13 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int return ErrNone } -// ListBucketObjectVersions - GET Bucket Object versions +// ListObjectVersions - GET Bucket Object versions // You can use the versions subresource to list metadata about all // of the versions of objects in a bucket. -func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "ListBucketObjectVersions") +func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "ListObjectVersions") - defer logger.AuditLog(w, r, "ListBucketObjectVersions", mustGetClaimsFromToken(r)) + defer logger.AuditLog(w, r, "ListObjectVersions", mustGetClaimsFromToken(r)) vars := mux.Vars(r) bucket := vars["bucket"] @@ -74,8 +74,7 @@ func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWrit urlValues := r.URL.Query() // Extract all the listBucketVersions query params to their native values. - // versionIDMarker is ignored here. - prefix, marker, delimiter, maxkeys, encodingType, _, errCode := getListBucketObjectVersionsArgs(urlValues) + prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode := getListBucketObjectVersionsArgs(urlValues) if errCode != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r)) return } @@ -87,29 +86,29 @@ func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWrit return } - listObjects := objectAPI.ListObjects + listObjectVersions := objectAPI.ListObjectVersions - // Inititate a list objects operation based on the input params. + // Initiate a list object versions operation based on the input params. // On success would return back ListObjectsInfo object to be // marshaled into S3 compatible XML header. 
- listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxkeys) + listObjectVersionsInfo, err := listObjectVersions(ctx, bucket, prefix, marker, versionIDMarker, delimiter, maxkeys) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } - for i := range listObjectsInfo.Objects { - if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) { - listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false) + for i := range listObjectVersionsInfo.Objects { + if crypto.IsEncrypted(listObjectVersionsInfo.Objects[i].UserDefined) { + listObjectVersionsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectVersionsInfo.Objects[i], false) } - listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].GetActualSize() + listObjectVersionsInfo.Objects[i].Size, err = listObjectVersionsInfo.Objects[i].GetActualSize() if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } } - response := generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType, maxkeys, listObjectsInfo) + response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo) // Write success response. writeSuccessResponseXML(w, encodeResponse(response)) diff --git a/cmd/bucket-metadata-sys.go b/cmd/bucket-metadata-sys.go index 55ab43d9f..72d001713 100644 --- a/cmd/bucket-metadata-sys.go +++ b/cmd/bucket-metadata-sys.go @@ -28,6 +28,7 @@ import ( "github.com/minio/minio/pkg/bucket/lifecycle" objectlock "github.com/minio/minio/pkg/bucket/object/lock" "github.com/minio/minio/pkg/bucket/policy" + "github.com/minio/minio/pkg/bucket/versioning" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/madmin" "github.com/minio/minio/pkg/sync/errgroup" @@ -111,6 +112,8 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat meta.TaggingConfigXML = configData case objectLockConfig: meta.ObjectLockConfigXML = configData + case bucketVersioningConfig: + meta.VersioningConfigXML = configData case bucketQuotaConfigFile: meta.QuotaConfigJSON = configData default: @@ -147,6 +150,16 @@ func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) { return meta, nil } +// GetVersioningConfig returns configured versioning config +// The returned object may not be modified. +func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Versioning, error) { + meta, err := sys.GetConfig(bucket) + if err != nil { + return nil, err + } + return meta.versioningConfig, nil +} + // GetTaggingConfig returns configured tagging config // The returned object may not be modified. 
func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error) { diff --git a/cmd/bucket-metadata.go b/cmd/bucket-metadata.go index 681ebd2d7..0ab18f066 100644 --- a/cmd/bucket-metadata.go +++ b/cmd/bucket-metadata.go @@ -32,6 +32,7 @@ import ( "github.com/minio/minio/pkg/bucket/lifecycle" objectlock "github.com/minio/minio/pkg/bucket/object/lock" "github.com/minio/minio/pkg/bucket/policy" + "github.com/minio/minio/pkg/bucket/versioning" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/madmin" ) @@ -47,6 +48,7 @@ const ( var ( enabledBucketObjectLockConfig = []byte(`<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><ObjectLockEnabled>Enabled</ObjectLockEnabled></ObjectLockConfiguration>`) + enabledBucketVersioningConfig = []byte(`<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>Enabled</Status></VersioningConfiguration>`) ) //go:generate msgp -file $GOFILE @@ -64,6 +66,7 @@ type BucketMetadata struct { NotificationConfigXML []byte LifecycleConfigXML []byte ObjectLockConfigXML []byte + VersioningConfigXML []byte EncryptionConfigXML []byte TaggingConfigXML []byte QuotaConfigJSON []byte @@ -73,6 +76,7 @@ type BucketMetadata struct { notificationConfig *event.Config lifecycleConfig *lifecycle.Lifecycle objectLockConfig *objectlock.Config + versioningConfig *versioning.Versioning sseConfig *bucketsse.BucketSSEConfig taggingConfig *tags.Tags quotaConfig *madmin.BucketQuota @@ -87,6 +91,9 @@ func newBucketMetadata(name string) BucketMetadata { XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", }, quotaConfig: &madmin.BucketQuota{}, + versioningConfig: &versioning.Versioning{ + XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", + }, } } @@ -188,6 +195,13 @@ func (b *BucketMetadata) parseAllConfigs(ctx context.Context, objectAPI ObjectLa b.objectLockConfig = nil } + if len(b.VersioningConfigXML) != 0 { + b.versioningConfig, err = versioning.ParseConfig(bytes.NewReader(b.VersioningConfigXML)) + if err != nil { + return err + } + } + if len(b.QuotaConfigJSON) != 0 { b.quotaConfig, err = parseBucketQuota(b.Name, b.QuotaConfigJSON) if err != nil { @@ -244,6 +258,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj case legacyBucketObjectLockEnabledConfigFile: if string(configData) == legacyBucketObjectLockEnabledConfig { b.ObjectLockConfigXML = enabledBucketObjectLockConfig + b.VersioningConfigXML = enabledBucketVersioningConfig b.LockEnabled = false // legacy value unset it // we are only interested in b.ObjectLockConfigXML } @@ -259,6 +274,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj b.TaggingConfigXML = configData case objectLockConfig: b.ObjectLockConfigXML = configData + b.VersioningConfigXML = enabledBucketVersioningConfig case bucketQuotaConfigFile: b.QuotaConfigJSON = configData } diff --git a/cmd/bucket-metadata_gen.go b/cmd/bucket-metadata_gen.go index 5ec12ebdc..a331be67c 100644 --- a/cmd/bucket-metadata_gen.go +++ b/cmd/bucket-metadata_gen.go @@ -66,6 +66,12 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "ObjectLockConfigXML") return } + case "VersioningConfigXML": + z.VersioningConfigXML, err = dc.ReadBytes(z.VersioningConfigXML) + if err != nil { + err = msgp.WrapError(err, "VersioningConfigXML") + return + } case "EncryptionConfigXML": z.EncryptionConfigXML, err = dc.ReadBytes(z.EncryptionConfigXML) if err != nil { @@ -97,9 +103,9 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 10 + // map header, size 11 // write "Name" - err = en.Append(0x8a, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + err =
en.Append(0x8b, 0xa4, 0x4e, 0x61, 0x6d, 0x65) if err != nil { return } @@ -168,6 +174,16 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "ObjectLockConfigXML") return } + // write "VersioningConfigXML" + err = en.Append(0xb3, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c) + if err != nil { + return + } + err = en.WriteBytes(z.VersioningConfigXML) + if err != nil { + err = msgp.WrapError(err, "VersioningConfigXML") + return + } // write "EncryptionConfigXML" err = en.Append(0xb3, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c) if err != nil { @@ -204,9 +220,9 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) { // MarshalMsg implements msgp.Marshaler func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 10 + // map header, size 11 // string "Name" - o = append(o, 0x8a, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + o = append(o, 0x8b, 0xa4, 0x4e, 0x61, 0x6d, 0x65) o = msgp.AppendString(o, z.Name) // string "Created" o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64) @@ -226,6 +242,9 @@ func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) { // string "ObjectLockConfigXML" o = append(o, 0xb3, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c) o = msgp.AppendBytes(o, z.ObjectLockConfigXML) + // string "VersioningConfigXML" + o = append(o, 0xb3, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c) + o = msgp.AppendBytes(o, z.VersioningConfigXML) // string "EncryptionConfigXML" o = append(o, 0xb3, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c) o = msgp.AppendBytes(o, z.EncryptionConfigXML) @@ -298,6 +317,12 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "ObjectLockConfigXML") return } + case "VersioningConfigXML": + z.VersioningConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.VersioningConfigXML) + if err != nil { + err = msgp.WrapError(err, "VersioningConfigXML") + return + } case "EncryptionConfigXML": z.EncryptionConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.EncryptionConfigXML) if err != nil { @@ -330,6 +355,6 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BucketMetadata) Msgsize() (s int) { - s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + 
msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) return } diff --git a/cmd/bucket-object-lock.go b/cmd/bucket-object-lock.go index 73b103579..f8c7931be 100644 --- a/cmd/bucket-object-lock.go +++ b/cmd/bucket-object-lock.go @@ -52,79 +52,6 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention, return config.ToRetention(), nil } -// Similar to enforceRetentionBypassForDelete but for WebUI -func enforceRetentionBypassForDeleteWeb(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, govBypassPerms bool) APIErrorCode { - opts, err := getOpts(ctx, r, bucket, object) - if err != nil { - return toAPIErrorCode(ctx, err) - } - - oi, err := getObjectInfoFn(ctx, bucket, object, opts) - if err != nil { - return toAPIErrorCode(ctx, err) - } - - lhold := objectlock.GetObjectLegalHoldMeta(oi.UserDefined) - if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn { - return ErrObjectLocked - } - - ret := objectlock.GetObjectRetentionMeta(oi.UserDefined) - if ret.Mode.Valid() { - switch ret.Mode { - case objectlock.RetCompliance: - // In compliance mode, a protected object version can't be overwritten - // or deleted by any user, including the root user in your AWS account. - // When an object is locked in compliance mode, its retention mode can't - // be changed, and its retention period can't be shortened. Compliance mode - // ensures that an object version can't be overwritten or deleted for the - // duration of the retention period. - t, err := objectlock.UTCNowNTP() - if err != nil { - logger.LogIf(ctx, err) - return ErrObjectLocked - } - - if !ret.RetainUntilDate.Before(t) { - return ErrObjectLocked - } - return ErrNone - case objectlock.RetGovernance: - // In governance mode, users can't overwrite or delete an object - // version or alter its lock settings unless they have special - // permissions. With governance mode, you protect objects against - // being deleted by most users, but you can still grant some users - // permission to alter the retention settings or delete the object - // if necessary. You can also use governance mode to test retention-period - // settings before creating a compliance-mode retention period. - // To override or remove governance-mode retention settings, a - // user must have the s3:BypassGovernanceRetention permission - // and must explicitly include x-amz-bypass-governance-retention:true - // as a request header with any request that requires overriding - // governance mode. - byPassSet := govBypassPerms && objectlock.IsObjectLockGovernanceBypassSet(r.Header) - if !byPassSet { - t, err := objectlock.UTCNowNTP() - if err != nil { - logger.LogIf(ctx, err) - return ErrObjectLocked - } - - if !ret.RetainUntilDate.Before(t) { - return ErrObjectLocked - } - - if !govBypassPerms { - return ErrObjectLocked - } - - return ErrNone - } - } - } - return ErrNone -} - // enforceRetentionForDeletion checks if it is appropriate to remove an // object according to locking configuration when this is lifecycle/ bucket quota asking. func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locked bool) { @@ -153,14 +80,23 @@ func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locke // For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR // governance bypass headers are set and user has governance bypass permissions. 
// Objects in "Compliance" mode can be overwritten only if retention date is past. -func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn) APIErrorCode { - opts, err := getOpts(ctx, r, bucket, object) +func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket string, object ObjectToDelete, getObjectInfoFn GetObjectInfoFn) APIErrorCode { + opts, err := getOpts(ctx, r, bucket, object.ObjectName) if err != nil { return toAPIErrorCode(ctx, err) } - oi, err := getObjectInfoFn(ctx, bucket, object, opts) + opts.VersionID = object.VersionID + + oi, err := getObjectInfoFn(ctx, bucket, object.ObjectName, opts) if err != nil { + switch err.(type) { + case MethodNotAllowed: // This happens usually for a delete marker + if oi.DeleteMarker { + // Delete marker should be present and valid. + return ErrNone + } + } return toAPIErrorCode(ctx, err) } @@ -219,8 +155,8 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke // https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes // If you try to delete objects protected by governance mode and have s3:BypassGovernanceRetention // or s3:GetBucketObjectLockConfiguration permissions, the operation will succeed. - govBypassPerms1 := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object) - govBypassPerms2 := checkRequestAuthType(ctx, r, policy.GetBucketObjectLockConfigurationAction, bucket, object) + govBypassPerms1 := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object.ObjectName) + govBypassPerms2 := checkRequestAuthType(ctx, r, policy.GetBucketObjectLockConfigurationAction, bucket, object.ObjectName) if govBypassPerms1 != ErrNone && govBypassPerms2 != ErrNone { return ErrAccessDenied } @@ -331,30 +267,32 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj return mode, retainDate, legalHold, ErrNone } - var objExists bool opts, err := getOpts(ctx, r, bucket, object) if err != nil { return mode, retainDate, legalHold, toAPIErrorCode(ctx, err) } - t, err := objectlock.UTCNowNTP() - if err != nil { - logger.LogIf(ctx, err) - return mode, retainDate, legalHold, ErrObjectLocked - } + if opts.VersionID != "" { + if objInfo, err := getObjectInfoFn(ctx, bucket, object, opts); err == nil { + r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined) - if objInfo, err := getObjectInfoFn(ctx, bucket, object, opts); err == nil { - objExists = true - r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined) - if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) { - return mode, retainDate, legalHold, ErrObjectLocked - } - mode = r.Mode - retainDate = r.RetainUntilDate - legalHold = objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined) - // Disallow overwriting an object on legal hold - if legalHold.Status == objectlock.LegalHoldOn { - return mode, retainDate, legalHold, ErrObjectLocked + t, err := objectlock.UTCNowNTP() + if err != nil { + logger.LogIf(ctx, err) + return mode, retainDate, legalHold, ErrObjectLocked + } + + if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) { + return mode, retainDate, legalHold, ErrObjectLocked + } + + mode = r.Mode + retainDate = r.RetainUntilDate + legalHold = objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined) + // Disallow overwriting an object on legal hold + if legalHold.Status == objectlock.LegalHoldOn { + return mode, retainDate, 
legalHold, ErrObjectLocked + } } } @@ -374,9 +312,6 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj if err != nil { return mode, retainDate, legalHold, toAPIErrorCode(ctx, err) } - if objExists && retainDate.After(t) { - return mode, retainDate, legalHold, ErrObjectLocked - } if retentionPermErr != ErrNone { return mode, retainDate, legalHold, retentionPermErr } @@ -387,16 +322,14 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj if retentionPermErr != ErrNone { return mode, retainDate, legalHold, retentionPermErr } + t, err := objectlock.UTCNowNTP() if err != nil { logger.LogIf(ctx, err) return mode, retainDate, legalHold, ErrObjectLocked } - // AWS S3 just creates a new version of object when an object is being overwritten. - if objExists && retainDate.After(t) { - return mode, retainDate, legalHold, ErrObjectLocked - } - if !legalHoldRequested { + + if !legalHoldRequested && retentionCfg.LockEnabled { // inherit retention from bucket configuration return retentionCfg.Mode, objectlock.RetentionDate{Time: t.Add(retentionCfg.Validity)}, legalHold, ErrNone } diff --git a/cmd/bucket-policy-handlers.go b/cmd/bucket-policy-handlers.go index f659d2466..9c3f79290 100644 --- a/cmd/bucket-policy-handlers.go +++ b/cmd/bucket-policy-handlers.go @@ -164,7 +164,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht } // Read bucket access policy. - config, err := globalBucketMetadataSys.GetPolicyConfig(bucket) + config, err := globalPolicySys.Get(bucket) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return diff --git a/cmd/bucket-policy-handlers_test.go b/cmd/bucket-policy-handlers_test.go index 91c1b5cdf..c5a3579cc 100644 --- a/cmd/bucket-policy-handlers_test.go +++ b/cmd/bucket-policy-handlers_test.go @@ -92,7 +92,7 @@ func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.Policy { } } -// Wrapper for calling Put Bucket Policy HTTP handler tests for both XL multiple disks and single node setup. +// Wrapper for calling Put Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup. func TestPutBucketPolicyHandler(t *testing.T) { ExecObjectLayerAPITest(t, testPutBucketPolicyHandler, []string{"PutBucketPolicy"}) } @@ -102,7 +102,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string credentials auth.Credentials, t *testing.T) { bucketName1 := fmt.Sprintf("%s-1", bucketName) - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil { t.Fatal(err) } @@ -314,7 +314,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string } -// Wrapper for calling Get Bucket Policy HTTP handler tests for both XL multiple disks and single node setup. +// Wrapper for calling Get Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup. func TestGetBucketPolicyHandler(t *testing.T) { ExecObjectLayerAPITest(t, testGetBucketPolicyHandler, []string{"PutBucketPolicy", "GetBucketPolicy"}) } @@ -520,7 +520,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq) } -// Wrapper for calling Delete Bucket Policy HTTP handler tests for both XL multiple disks and single node setup. 
+// Wrapper for calling Delete Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup. func TestDeleteBucketPolicyHandler(t *testing.T) { ExecObjectLayerAPITest(t, testDeleteBucketPolicyHandler, []string{"PutBucketPolicy", "DeleteBucketPolicy"}) } diff --git a/cmd/bucket-policy.go b/cmd/bucket-policy.go index baec0f42f..6fff203a3 100644 --- a/cmd/bucket-policy.go +++ b/cmd/bucket-policy.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. + * MinIO Cloud Storage, (C) 2018,2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -70,6 +70,16 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[ principalType = "User" } + vid := r.URL.Query().Get("versionId") + if vid == "" { + if u, err := url.Parse(r.Header.Get(xhttp.AmzCopySource)); err == nil { + vid = u.Query().Get("versionId") + } + if vid == "" { + vid = r.Header.Get(xhttp.AmzCopySourceVersionID) + } + } + args := map[string][]string{ "CurrentTime": {currTime.Format(time.RFC3339)}, "EpochTime": {strconv.FormatInt(currTime.Unix(), 10)}, @@ -80,6 +90,7 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[ "principaltype": {principalType}, "userid": {username}, "username": {username}, + "versionid": {vid}, } if lc != "" { @@ -142,7 +153,7 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[ return args } -// PolicyToBucketAccessPolicy - converts policy.Policy to minio-go/policy.BucketAccessPolicy. +// PolicyToBucketAccessPolicy converts a MinIO policy into a minio-go policy data structure. func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.BucketAccessPolicy, error) { // Return empty BucketAccessPolicy for empty bucket policy. if bucketPolicy == nil { diff --git a/cmd/bucket-quota.go b/cmd/bucket-quota.go index 4fdcf6d3d..84288d166 100644 --- a/cmd/bucket-quota.go +++ b/cmd/bucket-quota.go @@ -138,7 +138,7 @@ func startBucketQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) { case <-ctx.Done(): return case <-time.NewTimer(bgQuotaInterval).C: - logger.LogIf(ctx, enforceFIFOQuota(ctx, objAPI)) + enforceFIFOQuota(ctx, objAPI) } } @@ -146,20 +146,22 @@ func startBucketQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) { // enforceFIFOQuota deletes objects in FIFO order until sufficient objects // have been deleted so as to bring bucket usage within quota -func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error { +func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) { // Turn off quota enforcement if data usage info is unavailable. if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff { - return nil + return } buckets, err := objectAPI.ListBuckets(ctx) if err != nil { - return err + logger.LogIf(ctx, err) + return } dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI) if err != nil { - return err + logger.LogIf(ctx, err) + return } for _, binfo := range buckets { @@ -196,7 +198,8 @@ func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error { // Walk through all objects if err := objectAPI.Walk(ctx, bucket, "", objInfoCh); err != nil { - return err + logger.LogIf(ctx, err) + continue } // reuse the fileScorer used by disk cache to score entries by @@ -205,53 +208,61 @@ func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error { // irrelevant. 
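For reference, the `versionid` condition key added to `getConditionValues` in the bucket-policy.go hunk above resolves the version the same way for plain and copy requests. A self-contained sketch of that lookup order (the header constants are assumed to match the `xhttp` package referenced in the hunk):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// Assumed to match the xhttp constants used in the hunk above.
const (
	amzCopySource          = "X-Amz-Copy-Source"
	amzCopySourceVersionID = "X-Amz-Copy-Source-Version-Id"
)

// policyVersionID mirrors the lookup added to getConditionValues: the
// request's own versionId wins, then the copy-source URL's versionId,
// then the explicit copy-source version header.
func policyVersionID(r *http.Request) string {
	vid := r.URL.Query().Get("versionId")
	if vid == "" {
		if u, err := url.Parse(r.Header.Get(amzCopySource)); err == nil {
			vid = u.Query().Get("versionId")
		}
		if vid == "" {
			vid = r.Header.Get(amzCopySourceVersionID)
		}
	}
	return vid
}

func main() {
	r, _ := http.NewRequest(http.MethodPut, "http://localhost:9000/dst/obj", nil)
	r.Header.Set(amzCopySource, "/src/obj?versionId=abc-123")
	fmt.Println(policyVersionID(r)) // abc-123
}
```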
scorer, err := newFileScorer(toFree, time.Now().Unix(), 1) if err != nil { - return err + logger.LogIf(ctx, err) + continue } rcfg, _ := globalBucketObjectLockSys.Get(bucket) - for obj := range objInfoCh { + if obj.DeleteMarker { + // Delete markers are automatically added for FIFO purge. + scorer.addFileWithObjInfo(obj, 1) + continue + } // skip objects currently under retention if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) { continue } - scorer.addFile(obj.Name, obj.ModTime, obj.Size, 1) + scorer.addFileWithObjInfo(obj, 1) } - var objects []string - numKeys := len(scorer.fileNames()) - for i, key := range scorer.fileNames() { - objects = append(objects, key) + + versioned := globalBucketVersioningSys.Enabled(bucket) + + var objects []ObjectToDelete + numKeys := len(scorer.fileObjInfos()) + for i, obj := range scorer.fileObjInfos() { + objects = append(objects, ObjectToDelete{ + ObjectName: obj.Name, + VersionID: obj.VersionID, + }) if len(objects) < maxDeleteList && (i < numKeys-1) { - // skip deletion until maxObjectList or end of slice + // skip deletion until maxDeleteList or end of slice continue } if len(objects) == 0 { break } + // Deletes a list of objects. - deleteErrs, err := objectAPI.DeleteObjects(ctx, bucket, objects) - if err != nil { - logger.LogIf(ctx, err) - } else { - for i := range deleteErrs { - if deleteErrs[i] != nil { - logger.LogIf(ctx, deleteErrs[i]) - continue - } - // Notify object deleted event. - sendEvent(eventArgs{ - EventName: event.ObjectRemovedDelete, - BucketName: bucket, - Object: ObjectInfo{ - Name: objects[i], - }, - Host: "Internal: [FIFO-QUOTA-EXPIRY]", - }) + _, deleteErrs := objectAPI.DeleteObjects(ctx, bucket, objects, ObjectOptions{ + Versioned: versioned, + }) + for i := range deleteErrs { + if deleteErrs[i] != nil { + logger.LogIf(ctx, deleteErrs[i]) + continue } - objects = nil + + // Notify object deleted event. + sendEvent(eventArgs{ + EventName: event.ObjectRemovedDelete, + BucketName: bucket, + Object: ObjectInfo{Name: objects[i].ObjectName, VersionID: objects[i].VersionID}, + Host: "Internal: [FIFO-QUOTA-EXPIRY]", + }) } + objects = nil } } - return nil } diff --git a/cmd/bucket-versioning-handler.go b/cmd/bucket-versioning-handler.go new file mode 100644 index 000000000..5ce740391 --- /dev/null +++ b/cmd/bucket-versioning-handler.go @@ -0,0 +1,128 @@ +/* + * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "encoding/xml" + "io" + "net/http" + + humanize "github.com/dustin/go-humanize" + "github.com/gorilla/mux" + xhttp "github.com/minio/minio/cmd/http" + "github.com/minio/minio/cmd/logger" + "github.com/minio/minio/pkg/bucket/policy" + "github.com/minio/minio/pkg/bucket/versioning" +) + +const ( + bucketVersioningConfig = "versioning.xml" + + // Maximum size of bucket versioning configuration payload sent to the PutBucketVersioningHandler. + maxBucketVersioningConfigSize = 1 * humanize.MiByte +) + +// PutBucketVersioningHandler - PUT Bucket Versioning.
+// ---------- +func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "PutBucketVersioning") + + defer logger.AuditLog(w, r, "PutBucketVersioning", mustGetClaimsFromToken(r)) + + vars := mux.Vars(r) + bucket := vars["bucket"] + + objectAPI := api.ObjectAPI() + if objectAPI == nil { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) + return + } + + // PutBucketVersioning API requires Content-Md5 + if _, ok := r.Header[xhttp.ContentMD5]; !ok { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r)) + return + } + + if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketVersioningAction, bucket, ""); s3Error != ErrNone { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r)) + return + } + + v, err := versioning.ParseConfig(io.LimitReader(r.Body, maxBucketVersioningConfigSize)) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + + configData, err := xml.Marshal(v) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + + if err = globalBucketMetadataSys.Update(bucket, bucketVersioningConfig, configData); err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + + writeSuccessResponseHeadersOnly(w) +} + +// GetBucketVersioningHandler - GET Bucket Versioning. +// ---------- +func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "GetBucketVersioning") + + defer logger.AuditLog(w, r, "GetBucketVersioning", mustGetClaimsFromToken(r)) + + vars := mux.Vars(r) + bucket := vars["bucket"] + + objectAPI := api.ObjectAPI() + if objectAPI == nil { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) + return + } + + if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketVersioningAction, bucket, ""); s3Error != ErrNone { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r)) + return + } + + // Check if bucket exists. + if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + + config, err := globalBucketVersioningSys.Get(bucket) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + + configData, err := xml.Marshal(config) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + + // Write bucket versioning configuration to client + writeSuccessResponseXML(w, configData) + +} diff --git a/cmd/bucket-versioning.go b/cmd/bucket-versioning.go new file mode 100644 index 000000000..55c2e50b7 --- /dev/null +++ b/cmd/bucket-versioning.go @@ -0,0 +1,57 @@ +/* + * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import "github.com/minio/minio/pkg/bucket/versioning" + +// BucketVersioningSys - versioning subsystem. +type BucketVersioningSys struct{} + +// Enabled returns true if versioning is enabled on the given bucket. +func (sys *BucketVersioningSys) Enabled(bucket string) bool { + vc, err := globalBucketMetadataSys.GetVersioningConfig(bucket) + if err != nil { + return false + } + return vc.Enabled() +} + +// Suspended returns true if versioning is suspended on the given bucket. +func (sys *BucketVersioningSys) Suspended(bucket string) bool { + vc, err := globalBucketMetadataSys.GetVersioningConfig(bucket) + if err != nil { + return false + } + return vc.Suspended() +} + +// Get returns the stored bucket versioning configuration. +func (sys *BucketVersioningSys) Get(bucket string) (*versioning.Versioning, error) { + if globalIsGateway { + objAPI := newObjectLayerFn() + if objAPI == nil { + return nil, errServerNotInitialized + } + return nil, NotImplemented{} + } + return globalBucketMetadataSys.GetVersioningConfig(bucket) +} + +// NewBucketVersioningSys - creates new versioning system. +func NewBucketVersioningSys() *BucketVersioningSys { + return &BucketVersioningSys{} +} diff --git a/cmd/config-common.go b/cmd/config-common.go index 1cc9543d1..1bb1a202a 100644 --- a/cmd/config-common.go +++ b/cmd/config-common.go @@ -50,7 +50,7 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b } func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error { - err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile) + _, err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile, ObjectOptions{}) if err != nil && isErrObjectNotFound(err) { return errConfigNotFound } diff --git a/cmd/config-current.go b/cmd/config-current.go index 8ce9ceaef..4c7352ecf 100644 --- a/cmd/config-current.go +++ b/cmd/config-current.go @@ -59,7 +59,7 @@ func initHelp() { for k, v := range notify.DefaultNotificationKVS { kvs[k] = v } - if globalIsXL { + if globalIsErasure { kvs[config.StorageClassSubSys] = storageclass.DefaultKVS } config.RegisterDefaultKVS(kvs) @@ -168,7 +168,7 @@ func initHelp() { }, } - if globalIsXL { + if globalIsErasure { helpSubSys = append(helpSubSys, config.HelpKV{}) copy(helpSubSys[2:], helpSubSys[1:]) helpSubSys[1] = config.HelpKV{ @@ -232,9 +232,9 @@ func validateConfig(s config.Config) error { return err } - if globalIsXL { + if globalIsErasure { if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], - globalXLSetDriveCount); err != nil { + globalErasureSetDriveCount); err != nil { return err } } @@ -367,9 +367,9 @@ func lookupConfigs(s config.Config) { globalAPIConfig.init(apiConfig) - if globalIsXL { + if globalIsErasure { globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], - globalXLSetDriveCount) + globalErasureSetDriveCount) if err != nil { logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err)) } diff --git a/cmd/config.go b/cmd/config.go index 199ece926..a06addb58 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -92,7 +92,8 @@ func listServerConfigHistory(ctx context.Context, objAPI ObjectLayer, withData b func delServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) error { historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV+kvPrefix) - return objAPI.DeleteObject(ctx, minioMetaBucket, historyFile) + _, err := objAPI.DeleteObject(ctx, minioMetaBucket, historyFile, ObjectOptions{}) +
return err } func readServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) ([]byte, error) { diff --git a/cmd/consolelogger.go b/cmd/consolelogger.go index 103a892d1..81f8438d6 100644 --- a/cmd/consolelogger.go +++ b/cmd/consolelogger.go @@ -45,7 +45,7 @@ func mustGetNodeName(endpointZones EndpointZones) (nodeName string) { if err != nil { logger.FatalIf(err, "Unable to start console logging subsystem") } - if globalIsDistXL { + if globalIsDistErasure { nodeName = host.Name } return nodeName diff --git a/cmd/copy-part-range.go b/cmd/copy-part-range.go index ed9eca390..8c1f87d87 100644 --- a/cmd/copy-part-range.go +++ b/cmd/copy-part-range.go @@ -32,7 +32,9 @@ func writeCopyPartErr(ctx context.Context, w http.ResponseWriter, err error, url writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyPartRangeSource), url, browser) return default: - writeErrorResponse(ctx, w, toAPIError(ctx, err), url, browser) + apiErr := errorCodes.ToAPIErr(ErrInvalidCopyPartRangeSource) + apiErr.Description = err.Error() + writeErrorResponse(ctx, w, apiErr, url, browser) return } } diff --git a/cmd/data-crawler.go b/cmd/data-crawler.go index 46b1e405d..08a84c4de 100644 --- a/cmd/data-crawler.go +++ b/cmd/data-crawler.go @@ -28,7 +28,6 @@ import ( "time" "github.com/minio/minio/cmd/config" - xhttp "github.com/minio/minio/cmd/http" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/bucket/lifecycle" "github.com/minio/minio/pkg/color" @@ -512,7 +511,6 @@ func (i *crawlItem) transformMetaDir() { type actionMeta struct { oi ObjectInfo trustOI bool // Set true if oi can be trusted and has been read with quorum. - meta map[string]string } // applyActions will apply lifecycle checks on to a scanned item. @@ -528,7 +526,16 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action return size } - action := i.lifeCycle.ComputeAction(i.objectPath(), meta.meta[xhttp.AmzObjectTagging], meta.oi.ModTime) + versionID := meta.oi.VersionID + action := i.lifeCycle.ComputeAction( + lifecycle.ObjectOpts{ + Name: i.objectPath(), + UserTags: meta.oi.UserTags, + ModTime: meta.oi.ModTime, + VersionID: meta.oi.VersionID, + DeleteMarker: meta.oi.DeleteMarker, + IsLatest: meta.oi.IsLatest, + }) if i.debug { logger.Info(color.Green("applyActions:")+" lifecycle: %q, Initial scan: %v", i.objectPath(), action) } @@ -542,19 +549,42 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action // These (expensive) operations should only run on items we are likely to delete. // Load to ensure that we have the correct version and not an unsynced version. if !meta.trustOI { - obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{}) + obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{ + VersionID: versionID, + }) if err != nil { - // Do nothing - heal in the future. - logger.LogIf(ctx, err) - return size + switch err.(type) { + case MethodNotAllowed: // This happens usually for a delete marker + if !obj.DeleteMarker { // if this is not a delete marker log and return + // Do nothing - heal in the future. + logger.LogIf(ctx, err) + return size + } + case ObjectNotFound: + // object not found return 0 + return 0 + default: + // All other errors proceed. + logger.LogIf(ctx, err) + return size + } } size = obj.Size // Recalculate action. 
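A note on the new lifecycle call shape used in this data-crawler hunk (both the initial scan above and the secondary scan that follows): the crawler now feeds the lifecycle engine a full `lifecycle.ObjectOpts` instead of the old `(name, tags, modTime)` triple, so rules can take version ID, delete-marker status, and latest-ness into account. A toy illustration of why those fields matter; the `expired` helper below is a stand-in under stated assumptions, not the real `ComputeAction`:

```go
package main

import (
	"fmt"
	"time"
)

// ObjectOpts mirrors the field set passed to ComputeAction above.
type ObjectOpts struct {
	Name         string
	UserTags     string
	ModTime      time.Time
	VersionID    string
	DeleteMarker bool
	IsLatest     bool
}

// expired is a toy stand-in for the lifecycle engine (the real
// ComputeAction matches rules by prefix/tags and returns a typed Action).
// It shows why DeleteMarker/IsLatest must travel with the object: only
// the latest, non-delete-marker version is expired by age here.
func expired(o ObjectOpts, days int) bool {
	return o.IsLatest && !o.DeleteMarker &&
		time.Since(o.ModTime) > time.Duration(days)*24*time.Hour
}

func main() {
	o := ObjectOpts{
		Name:     "logs/2020/app.log",
		ModTime:  time.Now().AddDate(0, 0, -45), // 45 days old
		IsLatest: true,
	}
	fmt.Println(expired(o, 30)) // true: past a 30-day expiry rule
}
```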
- action = i.lifeCycle.ComputeAction(i.objectPath(), obj.UserTags, obj.ModTime) + action = i.lifeCycle.ComputeAction( + lifecycle.ObjectOpts{ + Name: i.objectPath(), + UserTags: obj.UserTags, + ModTime: obj.ModTime, + VersionID: obj.VersionID, + DeleteMarker: obj.DeleteMarker, + IsLatest: obj.IsLatest, + }) if i.debug { logger.Info(color.Green("applyActions:")+" lifecycle: Secondary scan: %v", action) } + versionID = obj.VersionID switch action { case lifecycle.DeleteAction: default: @@ -563,7 +593,7 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action } } - err = o.DeleteObject(ctx, i.bucket, i.objectPath()) + obj, err := o.DeleteObject(ctx, i.bucket, i.objectPath(), ObjectOptions{VersionID: versionID}) if err != nil { // Assume it is still there. logger.LogIf(ctx, err) @@ -574,10 +604,8 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action sendEvent(eventArgs{ EventName: event.ObjectRemovedDelete, BucketName: i.bucket, - Object: ObjectInfo{ - Name: i.objectPath(), - }, - Host: "Internal: [ILM-EXPIRY]", + Object: obj, + Host: "Internal: [ILM-EXPIRY]", }) return 0 } diff --git a/cmd/disk-cache-backend.go b/cmd/disk-cache-backend.go index 6eb8cd4a3..d73fa5542 100644 --- a/cmd/disk-cache-backend.go +++ b/cmd/disk-cache-backend.go @@ -60,7 +60,7 @@ type CacheChecksumInfoV1 struct { // Represents the cache metadata struct type cacheMeta struct { Version string `json:"version"` - Stat statInfo `json:"stat"` // Stat of the current object `cache.json`. + Stat StatInfo `json:"stat"` // Stat of the current object `cache.json`. // checksums of blocks on disk. Checksum CacheChecksumInfoV1 `json:"checksum,omitempty"` @@ -553,7 +553,7 @@ func (c *diskCache) bitrotWriteToCache(cachePath, fileName string, reader io.Rea } f, err := os.Create(filePath) if err != nil { - return 0, osErrToFSFileErr(err) + return 0, osErrToFileErr(err) } defer f.Close() diff --git a/cmd/disk-cache-utils.go b/cmd/disk-cache-utils.go index 1987e21e9..335af10fc 100644 --- a/cmd/disk-cache-utils.go +++ b/cmd/disk-cache-utils.go @@ -187,12 +187,12 @@ func readCacheFileStream(filePath string, offset, length int64) (io.ReadCloser, fr, err := os.Open(filePath) if err != nil { - return nil, osErrToFSFileErr(err) + return nil, osErrToFileErr(err) } // Stat to get the size of the file at path. st, err := fr.Stat() if err != nil { - err = osErrToFSFileErr(err) + err = osErrToFileErr(err) return nil, err } @@ -298,9 +298,10 @@ type fileScorer struct { } type queuedFile struct { - name string - size uint64 - score float64 + name string + versionID string + size uint64 + score float64 } // newFileScorer allows to collect files to save a specific number of bytes. @@ -321,15 +322,33 @@ func newFileScorer(saveBytes uint64, now int64, maxHits int) (*fileScorer, error return &f, nil } -func (f *fileScorer) addFile(name string, lastAccess time.Time, size int64, hits int) { +func (f *fileScorer) addFile(name string, accTime time.Time, size int64, hits int) { + f.addFileWithObjInfo(ObjectInfo{ + Name: name, + AccTime: accTime, + Size: size, + }, hits) +} + +func (f *fileScorer) addFileWithObjInfo(objInfo ObjectInfo, hits int) { // Calculate how much we want to delete this object. file := queuedFile{ - name: name, - size: uint64(size), + name: objInfo.Name, + versionID: objInfo.VersionID, + size: uint64(objInfo.Size), } - score := float64(f.now - lastAccess.Unix()) + + var score float64 + if objInfo.ModTime.IsZero() { + // Mod time is not available with disk cache use atime. 
+ score = float64(f.now - objInfo.AccTime.Unix()) + } else { + // Otherwise use mod time, which is available. + score = float64(f.now - objInfo.ModTime.Unix()) + } + // Size as fraction of how much we want to save, 0->1. - szWeight := math.Max(0, (math.Min(1, float64(size)*f.sizeMult))) + szWeight := math.Max(0, (math.Min(1, float64(file.size)*f.sizeMult))) // 0 at f.maxHits, 1 at 0. hitsWeight := (1.0 - math.Max(0, math.Min(1.0, float64(hits)/float64(f.maxHits)))) file.score = score * (1 + 0.25*szWeight + 0.25*hitsWeight) @@ -404,6 +423,22 @@ func (f *fileScorer) trimQueue() { } } +// fileObjInfos returns all queued file object infos +func (f *fileScorer) fileObjInfos() []ObjectInfo { + res := make([]ObjectInfo, 0, f.queue.Len()) + e := f.queue.Front() + for e != nil { + qfile := e.Value.(queuedFile) + res = append(res, ObjectInfo{ + Name: qfile.name, + Size: int64(qfile.size), + VersionID: qfile.versionID, + }) + e = e.Next() + } + return res +} + // fileNames returns all queued file names. func (f *fileScorer) fileNames() []string { res := make([]string, 0, f.queue.Len()) diff --git a/cmd/disk-cache.go b/cmd/disk-cache.go index 742d357e9..b9bbd11ef 100644 --- a/cmd/disk-cache.go +++ b/cmd/disk-cache.go @@ -51,8 +51,8 @@ type CacheObjectLayer interface { // Object operations. GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) - DeleteObject(ctx context.Context, bucket, object string) error - DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) + DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) + DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) // Storage operations.
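On the scoring change in disk-cache-utils.go above: `addFileWithObjInfo` ranks purge candidates by the file's age (mod time when known, access time for the disk cache), inflated by up to 25% for size and another 25% for cold files. A worked example of the formula, assuming `sizeMult` is `1/saveBytes` as set up in `newFileScorer`:

```go
package main

import (
	"fmt"
	"math"
)

// score reproduces the weighting in addFileWithObjInfo: the file's age in
// seconds, inflated by up to 25% for size and up to 25% for low hit counts.
// sizeMult is assumed to be 1/saveBytes (see newFileScorer), so a file as
// large as the whole purge target maxes out the size weight.
func score(now, lastUsed int64, size uint64, sizeMult float64, hits, maxHits int) float64 {
	age := float64(now - lastUsed)
	szWeight := math.Max(0, math.Min(1, float64(size)*sizeMult))
	hitsWeight := 1.0 - math.Max(0, math.Min(1.0, float64(hits)/float64(maxHits)))
	return age * (1 + 0.25*szWeight + 0.25*hitsWeight)
}

func main() {
	sizeMult := 1.0 / float64(100<<20) // purging to free 100 MiB
	// Equally old, equally cold files: the 90 MiB file outranks the 10 MiB
	// one, so the purge frees the target space with fewer deletions.
	fmt.Println(score(1000, 0, 10<<20, sizeMult, 0, 10)) // 1275
	fmt.Println(score(1000, 0, 90<<20, sizeMult, 0, 10)) // 1475
}
```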
@@ -78,8 +78,7 @@ type cacheObjects struct { GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) - DeleteObjectFn func(ctx context.Context, bucket, object string) error - DeleteObjectsFn func(ctx context.Context, bucket string, objects []string) ([]error, error) + DeleteObjectFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) PutObjectFn func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) CopyObjectFn func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) } @@ -120,8 +119,8 @@ func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *disk } // DeleteObject clears cache entry if backend delete operation succeeds -func (c *cacheObjects) DeleteObject(ctx context.Context, bucket, object string) (err error) { - if err = c.DeleteObjectFn(ctx, bucket, object); err != nil { +func (c *cacheObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { + if objInfo, err = c.DeleteObjectFn(ctx, bucket, object, opts); err != nil { return } if c.isCacheExclude(bucket, object) || c.skipCache() { @@ -130,19 +129,38 @@ func (c *cacheObjects) DeleteObject(ctx context.Context, bucket, object string) dcache, cerr := c.getCacheLoc(bucket, object) if cerr != nil { - return + return objInfo, cerr } dcache.Delete(ctx, bucket, object) return } // DeleteObjects batch deletes objects in slice, and clears any cached entries -func (c *cacheObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (c *cacheObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { errs := make([]error, len(objects)) + objInfos := make([]ObjectInfo, len(objects)) for idx, object := range objects { - errs[idx] = c.DeleteObject(ctx, bucket, object) + opts.VersionID = object.VersionID + objInfos[idx], errs[idx] = c.DeleteObject(ctx, bucket, object.ObjectName, opts) } - return errs, nil + deletedObjects := make([]DeletedObject, len(objInfos)) + for idx := range errs { + if errs[idx] != nil { + continue + } + if objInfos[idx].DeleteMarker { + deletedObjects[idx] = DeletedObject{ + DeleteMarker: objInfos[idx].DeleteMarker, + DeleteMarkerVersionID: objInfos[idx].VersionID, + } + continue + } + deletedObjects[idx] = DeletedObject{ + ObjectName: objInfos[idx].Name, + VersionID: objInfos[idx].VersionID, + } + } + return deletedObjects, errs } // construct a metadata k-v map @@ -649,15 +667,8 @@ func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjec GetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts) }, - DeleteObjectFn: func(ctx context.Context, bucket, object string) error { - return newObjectLayerFn().DeleteObject(ctx, bucket, object) - }, - DeleteObjectsFn: func(ctx context.Context, bucket string, objects []string) ([]error, error) { - errs := make([]error, 
len(objects)) - for idx, object := range objects { - errs[idx] = newObjectLayerFn().DeleteObject(ctx, bucket, object) - } - return errs, nil + DeleteObjectFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) { + return newObjectLayerFn().DeleteObject(ctx, bucket, object, opts) }, PutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { return newObjectLayerFn().PutObject(ctx, bucket, object, data, opts) diff --git a/cmd/encryption-v1.go b/cmd/encryption-v1.go index 789f40e5e..5900736d7 100644 --- a/cmd/encryption-v1.go +++ b/cmd/encryption-v1.go @@ -31,6 +31,7 @@ import ( "strconv" "strings" + "github.com/google/uuid" "github.com/minio/minio-go/v6/pkg/encrypt" "github.com/minio/minio/cmd/crypto" "github.com/minio/minio/cmd/logger" @@ -82,7 +83,7 @@ func isEncryptedMultipart(objInfo ObjectInfo) bool { } } // Further check if this object is uploaded using multipart mechanism - // by the user and it is not about XL internally splitting the + // by the user and it is not about Erasure internally splitting the // object into parts in PutObject() return !(objInfo.backendType == BackendErasure && len(objInfo.ETag) == 32) } @@ -859,6 +860,7 @@ func getDefaultOpts(header http.Header, copySource bool, metadata map[string]str var clientKey [32]byte var sse encrypt.ServerSide + opts = ObjectOptions{UserDefined: metadata} if copySource { if crypto.SSECopy.IsRequested(header) { clientKey, err = crypto.SSECopy.ParseHTTP(header) @@ -868,7 +870,8 @@ func getDefaultOpts(header http.Header, copySource bool, metadata map[string]str if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil { return } - return ObjectOptions{ServerSideEncryption: encrypt.SSECopy(sse), UserDefined: metadata}, nil + opts.ServerSideEncryption = encrypt.SSECopy(sse) + return } return } @@ -881,12 +884,13 @@ func getDefaultOpts(header http.Header, copySource bool, metadata map[string]str if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil { return } - return ObjectOptions{ServerSideEncryption: sse, UserDefined: metadata}, nil + opts.ServerSideEncryption = sse + return } if crypto.S3.IsRequested(header) || (metadata != nil && crypto.S3.IsEncrypted(metadata)) { - return ObjectOptions{ServerSideEncryption: encrypt.NewSSE(), UserDefined: metadata}, nil + opts.ServerSideEncryption = encrypt.NewSSE() } - return ObjectOptions{UserDefined: metadata}, nil + return } // get ObjectOptions for GET calls from encryption headers @@ -908,6 +912,19 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec } } + vid := strings.TrimSpace(r.URL.Query().Get("versionId")) + if vid != "" && vid != nullVersionID { + _, err := uuid.Parse(vid) + if err != nil { + logger.LogIf(ctx, err) + return opts, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: vid, + } + } + } + if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) { key, err := crypto.SSEC.ParseHTTP(r.Header) if err != nil { @@ -916,7 +933,11 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec derivedKey := deriveClientKey(key, bucket, object) encryption, err = encrypt.NewSSEC(derivedKey[:]) logger.CriticalIf(ctx, err) - return ObjectOptions{ServerSideEncryption: encryption, PartNumber: partNumber}, nil + return ObjectOptions{ + ServerSideEncryption: encryption, + VersionID: vid, + PartNumber: partNumber, + }, nil } // default case of passing encryption headers to backend @@ -925,18 +946,21 @@ func 
getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec return opts, err } opts.PartNumber = partNumber + opts.VersionID = vid return opts, nil } // get ObjectOptions for PUT calls from encryption headers and metadata func putOpts(ctx context.Context, r *http.Request, bucket, object string, metadata map[string]string) (opts ObjectOptions, err error) { + versioned := globalBucketVersioningSys.Enabled(bucket) // In the case of multipart custom format, the metadata needs to be checked in addition to header to see if it // is SSE-S3 encrypted, primarily because S3 protocol does not require SSE-S3 headers in PutObjectPart calls if GlobalGatewaySSE.SSES3() && (crypto.S3.IsRequested(r.Header) || crypto.S3.IsEncrypted(metadata)) { - return ObjectOptions{ServerSideEncryption: encrypt.NewSSE(), UserDefined: metadata}, nil + return ObjectOptions{ServerSideEncryption: encrypt.NewSSE(), UserDefined: metadata, Versioned: versioned}, nil } if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) { opts, err = getOpts(ctx, r, bucket, object) + opts.Versioned = versioned opts.UserDefined = metadata return } @@ -949,10 +973,15 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada if err != nil { return ObjectOptions{}, err } - return ObjectOptions{ServerSideEncryption: sseKms, UserDefined: metadata}, nil + return ObjectOptions{ServerSideEncryption: sseKms, UserDefined: metadata, Versioned: versioned}, nil } // default case of passing encryption headers and UserDefined metadata to backend - return getDefaultOpts(r.Header, false, metadata) + opts, err = getDefaultOpts(r.Header, false, metadata) + if err != nil { + return opts, err + } + opts.Versioned = versioned + return opts, nil } // get ObjectOptions for Copy calls with encryption headers provided on the target side and source side metadata @@ -981,5 +1010,9 @@ func copySrcOpts(ctx context.Context, r *http.Request, bucket, object string) (O } // default case of passing encryption headers to backend - return getDefaultOpts(r.Header, true, nil) + opts, err := getDefaultOpts(r.Header, false, nil) + if err != nil { + return opts, err + } + return opts, nil } diff --git a/cmd/endpoint.go b/cmd/endpoint.go index bbb39905f..4ac6c357d 100644 --- a/cmd/endpoint.go +++ b/cmd/endpoint.go @@ -547,9 +547,9 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg("invalid number of endpoints") } - // Return XL setup when all endpoints are path style. + // Return Erasure setup when all endpoints are path style. if endpoints[0].Type() == PathEndpointType { - setupType = XLSetupType + setupType = ErasureSetupType return endpoints, setupType, nil } @@ -614,18 +614,18 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp // All endpoints are pointing to local host if len(endpoints) == localEndpointCount { - // If all endpoints have same port number, Just treat it as distXL setup + // If all endpoints have same port number, Just treat it as distErasure setup // using URL style endpoints. if len(localPortSet) == 1 { if len(localServerHostSet) > 1 { return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg("all local endpoints should not have different hostnames/ips") } - return endpoints, DistXLSetupType, nil + return endpoints, DistErasureSetupType, nil } // Even though all endpoints are local, but those endpoints use different ports. - // This means it is DistXL setup. 
+ // This means it is DistErasure setup. } // Add missing port in all endpoints. @@ -645,7 +645,7 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp } // Error out if we have less than 2 unique servers. - if len(uniqueArgs.ToSlice()) < 2 && setupType == DistXLSetupType { + if len(uniqueArgs.ToSlice()) < 2 && setupType == DistErasureSetupType { err := fmt.Errorf("Unsupported number of endpoints (%s), minimum number of servers cannot be less than 2 in distributed setup", endpoints) return endpoints, setupType, err } @@ -655,7 +655,7 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp updateDomainIPs(uniqueArgs) } - setupType = DistXLSetupType + setupType = DistErasureSetupType return endpoints, setupType, nil } diff --git a/cmd/endpoint_test.go b/cmd/endpoint_test.go index e562211b4..0aab439bc 100644 --- a/cmd/endpoint_test.go +++ b/cmd/endpoint_test.go @@ -232,71 +232,71 @@ func TestCreateEndpoints(t *testing.T) { {"localhost:10000", [][]string{{"/d1"}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, FSSetupType, nil}, {"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", Endpoints{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")}, - // XL Setup with PathEndpointType + // Erasure Setup with PathEndpointType {":1234", [][]string{{"/d1", "/d2", "/d3", "/d4"}}, ":1234", Endpoints{ Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}, Endpoint{URL: &url.URL{Path: mustAbs("/d2")}, IsLocal: true}, Endpoint{URL: &url.URL{Path: mustAbs("/d3")}, IsLocal: true}, Endpoint{URL: &url.URL{Path: mustAbs("/d4")}, IsLocal: true}, - }, XLSetupType, nil}, - // DistXL Setup with URLEndpointType + }, ErasureSetupType, nil}, + // DistErasure Setup with URLEndpointType {":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}}, ":9000", Endpoints{ Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d1"}, IsLocal: true}, Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d2"}, IsLocal: true}, Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d3"}, IsLocal: true}, Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d4"}, IsLocal: true}, - }, DistXLSetupType, nil}, - // DistXL Setup with URLEndpointType having mixed naming to local host. + }, DistErasureSetupType, nil}, + // DistErasure Setup with URLEndpointType having mixed naming to local host. 
{"127.0.0.1:10000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://127.0.0.1/d3", "http://127.0.0.1/d4"}}, "", Endpoints{}, -1, fmt.Errorf("all local endpoints should not have different hostnames/ips")}, {":9001", [][]string{{"http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export", "http://" + nonLoopBackIP + ":9001/export", "http://10.0.0.2:9001/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' can not be served by different port on same address")}, {":9000", [][]string{{"http://127.0.0.1:9000/export", "http://" + nonLoopBackIP + ":9000/export", "http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' cannot be served by different address on same server")}, - // DistXL type + // DistErasure type {"127.0.0.1:10000", [][]string{{case1Endpoint1, case1Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{ Endpoint{URL: case1URLs[0], IsLocal: case1LocalFlags[0]}, Endpoint{URL: case1URLs[1], IsLocal: case1LocalFlags[1]}, Endpoint{URL: case1URLs[2], IsLocal: case1LocalFlags[2]}, Endpoint{URL: case1URLs[3], IsLocal: case1LocalFlags[3]}, - }, DistXLSetupType, nil}, + }, DistErasureSetupType, nil}, {"127.0.0.1:10000", [][]string{{case2Endpoint1, case2Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{ Endpoint{URL: case2URLs[0], IsLocal: case2LocalFlags[0]}, Endpoint{URL: case2URLs[1], IsLocal: case2LocalFlags[1]}, Endpoint{URL: case2URLs[2], IsLocal: case2LocalFlags[2]}, Endpoint{URL: case2URLs[3], IsLocal: case2LocalFlags[3]}, - }, DistXLSetupType, nil}, + }, DistErasureSetupType, nil}, {":80", [][]string{{case3Endpoint1, "http://example.org:9000/d2", "http://example.com/d3", "http://example.net/d4"}}, ":80", Endpoints{ Endpoint{URL: case3URLs[0], IsLocal: case3LocalFlags[0]}, Endpoint{URL: case3URLs[1], IsLocal: case3LocalFlags[1]}, Endpoint{URL: case3URLs[2], IsLocal: case3LocalFlags[2]}, Endpoint{URL: case3URLs[3], IsLocal: case3LocalFlags[3]}, - }, DistXLSetupType, nil}, + }, DistErasureSetupType, nil}, {":9000", [][]string{{case4Endpoint1, "http://example.org/d2", "http://example.com/d3", "http://example.net/d4"}}, ":9000", Endpoints{ Endpoint{URL: case4URLs[0], IsLocal: case4LocalFlags[0]}, Endpoint{URL: case4URLs[1], IsLocal: case4LocalFlags[1]}, Endpoint{URL: case4URLs[2], IsLocal: case4LocalFlags[2]}, Endpoint{URL: case4URLs[3], IsLocal: case4LocalFlags[3]}, - }, DistXLSetupType, nil}, + }, DistErasureSetupType, nil}, {":9000", [][]string{{case5Endpoint1, case5Endpoint2, case5Endpoint3, case5Endpoint4}}, ":9000", Endpoints{ Endpoint{URL: case5URLs[0], IsLocal: case5LocalFlags[0]}, Endpoint{URL: case5URLs[1], IsLocal: case5LocalFlags[1]}, Endpoint{URL: case5URLs[2], IsLocal: case5LocalFlags[2]}, Endpoint{URL: case5URLs[3], IsLocal: case5LocalFlags[3]}, - }, DistXLSetupType, nil}, + }, DistErasureSetupType, nil}, - // DistXL Setup using only local host. + // DistErasure Setup using only local host. 
{":9003", [][]string{{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://127.0.0.1:9002/d3", case6Endpoint}}, ":9003", Endpoints{ Endpoint{URL: case6URLs[0], IsLocal: case6LocalFlags[0]}, Endpoint{URL: case6URLs[1], IsLocal: case6LocalFlags[1]}, Endpoint{URL: case6URLs[2], IsLocal: case6LocalFlags[2]}, Endpoint{URL: case6URLs[3], IsLocal: case6LocalFlags[3]}, - }, DistXLSetupType, nil}, + }, DistErasureSetupType, nil}, } for _, testCase := range testCases { diff --git a/cmd/xl-v1-bucket.go b/cmd/erasure-bucket.go similarity index 84% rename from cmd/xl-v1-bucket.go rename to cmd/erasure-bucket.go index 5c1cf6a38..a9622ad27 100644 --- a/cmd/xl-v1-bucket.go +++ b/cmd/erasure-bucket.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,7 +22,6 @@ import ( "github.com/minio/minio-go/v6/pkg/s3utils" "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/sync/errgroup" ) @@ -35,13 +34,13 @@ var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound) /// Bucket operations // MakeBucket - make a bucket. -func (xl xlObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { +func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error { // Verify if bucket is valid. if err := s3utils.CheckValidBucketNameStrict(bucket); err != nil { return BucketNameInvalid{Bucket: bucket} } - storageDisks := xl.getDisks() + storageDisks := er.getDisks() g := errgroup.WithNErrs(len(storageDisks)) @@ -86,9 +85,9 @@ func undoDeleteBucket(storageDisks []StorageAPI, bucket string) { } // getBucketInfo - returns the BucketInfo from one of the load balanced disks. -func (xl xlObjects) getBucketInfo(ctx context.Context, bucketName string) (bucketInfo BucketInfo, err error) { +func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string) (bucketInfo BucketInfo, err error) { var bucketErrs []error - for _, disk := range xl.getLoadBalancedDisks() { + for _, disk := range er.getLoadBalancedDisks() { if disk == nil { bucketErrs = append(bucketErrs, errDiskNotFound) continue @@ -110,13 +109,13 @@ func (xl xlObjects) getBucketInfo(ctx context.Context, bucketName string) (bucke // reduce to one error based on read quorum. // `nil` is deliberately passed for ignoredErrs // because these errors were already ignored. - readQuorum := getReadQuorum(len(xl.getDisks())) + readQuorum := getReadQuorum(len(er.getDisks())) return BucketInfo{}, reduceReadQuorumErrs(ctx, bucketErrs, nil, readQuorum) } // GetBucketInfo - returns BucketInfo for a bucket. -func (xl xlObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) { - bucketInfo, err := xl.getBucketInfo(ctx, bucket) +func (er erasureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) { + bucketInfo, err := er.getBucketInfo(ctx, bucket) if err != nil { return bi, toObjectErr(err, bucket) } @@ -124,8 +123,8 @@ func (xl xlObjects) GetBucketInfo(ctx context.Context, bucket string) (bi Bucket } // listBuckets - returns list of all buckets from a disk picked at random. 
-func (xl xlObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketInfo, err error) { - for _, disk := range xl.getLoadBalancedDisks() { +func (er erasureObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketInfo, err error) { + for _, disk := range er.getLoadBalancedDisks() { if disk == nil { continue } @@ -161,8 +160,8 @@ func (xl xlObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketInfo, } // ListBuckets - lists all the buckets, sorted by its name. -func (xl xlObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) { - bucketInfos, err := xl.listBuckets(ctx) +func (er erasureObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) { + bucketInfos, err := er.listBuckets(ctx) if err != nil { return nil, toObjectErr(err) } @@ -196,9 +195,9 @@ func deleteDanglingBucket(ctx context.Context, storageDisks []StorageAPI, dErrs } // DeleteBucket - deletes a bucket. -func (xl xlObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { +func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { // Collect if all disks report volume not found. - storageDisks := xl.getDisks() + storageDisks := er.getDisks() g := errgroup.WithNErrs(len(storageDisks)) @@ -235,7 +234,7 @@ func (xl xlObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete writeQuorum := getWriteQuorum(len(storageDisks)) err := reduceWriteQuorumErrs(ctx, dErrs, bucketOpIgnoredErrs, writeQuorum) - if err == errXLWriteQuorum { + if err == errErasureWriteQuorum { undoDeleteBucket(storageDisks, bucket) } if err != nil { @@ -251,25 +250,26 @@ func (xl xlObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete } // IsNotificationSupported returns whether bucket notification is applicable for this layer. -func (xl xlObjects) IsNotificationSupported() bool { +func (er erasureObjects) IsNotificationSupported() bool { return true } // IsListenBucketSupported returns whether listen bucket notification is applicable for this layer. -func (xl xlObjects) IsListenBucketSupported() bool { +func (er erasureObjects) IsListenBucketSupported() bool { return true } // IsEncryptionSupported returns whether server side encryption is implemented for this layer. -func (xl xlObjects) IsEncryptionSupported() bool { +func (er erasureObjects) IsEncryptionSupported() bool { return true } // IsCompressionSupported returns whether compression is applicable for this layer. -func (xl xlObjects) IsCompressionSupported() bool { +func (er erasureObjects) IsCompressionSupported() bool { return true } -func (xl xlObjects) IsTaggingSupported() bool { +// IsTaggingSupported indicates whether erasureObjects implements tagging support. +func (er erasureObjects) IsTaggingSupported() bool { return true } diff --git a/cmd/erasure-coding.go b/cmd/erasure-coding.go new file mode 100644 index 000000000..b0a4cb40a --- /dev/null +++ b/cmd/erasure-coding.go @@ -0,0 +1,143 @@ +/* + * MinIO Cloud Storage, (C) 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + "sync" + + "github.com/klauspost/reedsolomon" + "github.com/minio/minio/cmd/logger" +) + +// Erasure - erasure encoding details. +type Erasure struct { + encoder func() reedsolomon.Encoder + dataBlocks, parityBlocks int + blockSize int64 +} + +// NewErasure creates a new ErasureStorage. +func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (e Erasure, err error) { + // Check the parameters for sanity now. + if dataBlocks <= 0 || parityBlocks <= 0 { + return e, reedsolomon.ErrInvShardNum + } + + if dataBlocks+parityBlocks > 256 { + return e, reedsolomon.ErrMaxShardNum + } + + e = Erasure{ + dataBlocks: dataBlocks, + parityBlocks: parityBlocks, + blockSize: blockSize, + } + + // Encoder when needed. + var enc reedsolomon.Encoder + var once sync.Once + e.encoder = func() reedsolomon.Encoder { + once.Do(func() { + e, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(int(e.ShardSize()))) + if err != nil { + // Error conditions should be checked above. + panic(err) + } + enc = e + }) + return enc + } + return +} + +// EncodeData encodes the given data and returns the erasure-coded data. +// It returns an error if the erasure coding failed. +func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error) { + if len(data) == 0 { + return make([][]byte, e.dataBlocks+e.parityBlocks), nil + } + encoded, err := e.encoder().Split(data) + if err != nil { + logger.LogIf(ctx, err) + return nil, err + } + if err = e.encoder().Encode(encoded); err != nil { + logger.LogIf(ctx, err) + return nil, err + } + return encoded, nil +} + +// DecodeDataBlocks decodes the given erasure-coded data. +// It only decodes the data blocks but does not verify them. +// It returns an error if the decoding failed. +func (e *Erasure) DecodeDataBlocks(data [][]byte) error { + var isZero = 0 + for _, b := range data[:] { + if len(b) == 0 { + isZero++ + break + } + } + if isZero == 0 || isZero == len(data) { + // If all are zero, payload is 0 bytes. + return nil + } + return e.encoder().ReconstructData(data) +} + +// DecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it. +// It returns an error if the decoding failed. +func (e *Erasure) DecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error { + if err := e.encoder().Reconstruct(data); err != nil { + logger.LogIf(ctx, err) + return err + } + return nil +} + +// ShardSize - returns actual shared size from erasure blockSize. +func (e *Erasure) ShardSize() int64 { + return ceilFrac(e.blockSize, int64(e.dataBlocks)) +} + +// ShardFileSize - returns final erasure size from original size. +func (e *Erasure) ShardFileSize(totalLength int64) int64 { + if totalLength == 0 { + return 0 + } + if totalLength == -1 { + return -1 + } + numShards := totalLength / e.blockSize + lastBlockSize := totalLength % int64(e.blockSize) + lastShardSize := ceilFrac(lastBlockSize, int64(e.dataBlocks)) + return numShards*e.ShardSize() + lastShardSize +} + +// ShardFileOffset - returns the effective offset where erasure reading begins. 
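The `Erasure` type introduced above is a thin wrapper over `github.com/klauspost/reedsolomon`, with the encoder built lazily behind a `sync.Once`. A standalone round trip through the underlying library shows the Split/Encode/ReconstructData/Join sequence that `EncodeData` and `DecodeDataBlocks` build on:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	const dataBlocks, parityBlocks = 4, 2
	enc, err := reedsolomon.New(dataBlocks, parityBlocks)
	if err != nil {
		panic(err)
	}

	payload := bytes.Repeat([]byte("minio"), 100) // 500 bytes of content

	// Split pads and slices the payload into 4 data shards and allocates
	// 2 empty parity shards; Encode then fills in the parity.
	shards, err := enc.Split(payload)
	if err != nil {
		panic(err)
	}
	if err = enc.Encode(shards); err != nil {
		panic(err)
	}

	// Lose up to parityBlocks shards -- one data, one parity -- and
	// rebuild only the data shards, as DecodeDataBlocks does above.
	shards[0], shards[5] = nil, nil
	if err = enc.ReconstructData(shards); err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	if err = enc.Join(&buf, shards, len(payload)); err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(buf.Bytes(), payload)) // true
}
```

The shard arithmetic follows from `ceilFrac`: with the default 10 MiB `blockSize` and 4 data blocks, `ShardSize()` is ceil(10 MiB / 4) = 2.5 MiB, and `ShardFileSize` sums one such shard per full block plus a ceil-divided remainder for the trailing partial block.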
+func (e *Erasure) ShardFileOffset(startOffset, length, totalLength int64) int64 { + shardSize := e.ShardSize() + shardFileSize := e.ShardFileSize(totalLength) + endShard := (startOffset + int64(length)) / e.blockSize + tillOffset := endShard*shardSize + shardSize + if tillOffset > shardFileSize { + tillOffset = shardFileSize + } + return tillOffset +} diff --git a/cmd/xl-v1-common.go b/cmd/erasure-common.go similarity index 72% rename from cmd/xl-v1-common.go rename to cmd/erasure-common.go index be826dee0..48218f1d4 100644 --- a/cmd/xl-v1-common.go +++ b/cmd/erasure-common.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,8 +24,8 @@ import ( ) // getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice. -func (xl xlObjects) getLoadBalancedDisks() (newDisks []StorageAPI) { - disks := xl.getDisks() +func (er erasureObjects) getLoadBalancedDisks() (newDisks []StorageAPI) { + disks := er.getDisks() // Based on the random shuffling return back randomized disks. for _, i := range hashOrder(UTCNow().String(), len(disks)) { newDisks = append(newDisks, disks[i-1]) @@ -36,13 +36,13 @@ func (xl xlObjects) getLoadBalancedDisks() (newDisks []StorageAPI) { // This function does the following check, suppose // object is "a/b/c/d", stat makes sure that objects ""a/b/c"" // "a/b" and "a" do not exist. -func (xl xlObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool { +func (er erasureObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool { var isParentDirObject func(string) bool isParentDirObject = func(p string) bool { if p == "." || p == SlashSeparator { return false } - if xl.isObject(bucket, p) { + if er.isObject(ctx, bucket, p) { // If there is already a file at prefix "p", return true. return true } @@ -53,9 +53,9 @@ func (xl xlObjects) parentDirIsObject(ctx context.Context, bucket, parent string } // isObject - returns `true` if the prefix is an object i.e if -// `xl.json` exists at the leaf, false otherwise. -func (xl xlObjects) isObject(bucket, prefix string) (ok bool) { - storageDisks := xl.getDisks() +// `xl.meta` exists at the leaf, false otherwise. +func (er erasureObjects) isObject(ctx context.Context, bucket, prefix string) (ok bool) { + storageDisks := er.getDisks() g := errgroup.WithNErrs(len(storageDisks)) @@ -66,22 +66,15 @@ func (xl xlObjects) isObject(bucket, prefix string) (ok bool) { return errDiskNotFound } // Check if 'prefix' is an object on this 'disk', else continue the check the next disk - fi, err := storageDisks[index].StatFile(bucket, pathJoin(prefix, xlMetaJSONFile)) - if err != nil { - return err - } - if fi.Size == 0 { - return errCorruptedFormat - } - return nil + return storageDisks[index].CheckFile(bucket, prefix) }, index) } - // NOTE: Observe we are not trying to read `xl.json` and figure out the actual + // NOTE: Observe we are not trying to read `xl.meta` and figure out the actual // quorum intentionally, but rely on the default case scenario. Actual quorum // verification will happen by top layer by using getObjectInfo() and will be // ignored if necessary. 
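The NOTE above leans on quorum reduction rather than reading `xl.meta` directly. Roughly, `reduceReadQuorumErrs` drops a list of benign errors, tallies the remaining outcomes (nil included), and succeeds only when the most common outcome reaches read quorum. A simplified sketch of that shape, with stand-in sentinel errors:

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errFileNotFound      = errors.New("file not found")
	errDiskNotFound      = errors.New("disk not found")
	errErasureReadQuorum = errors.New("Read failed. Insufficient number of disks online")
)

// reduceQuorumErrs follows the shape of reduceReadQuorumErrs: drop ignored
// errors, count the rest (nil included), and return the most common outcome
// only if it occurs at least quorum times.
func reduceQuorumErrs(errs, ignored []error, quorum int) error {
	counts := make(map[error]int)
	for _, err := range errs {
		skip := false
		for _, ig := range ignored {
			if err == ig {
				skip = true
				break
			}
		}
		if !skip {
			counts[err]++
		}
	}
	var maxErr error
	maxCount := 0
	for err, c := range counts {
		if c > maxCount {
			maxErr, maxCount = err, c
		}
	}
	if maxCount >= quorum {
		return maxErr // nil here means a quorum of successes
	}
	return errErasureReadQuorum
}

func main() {
	errs := []error{nil, nil, nil, errFileNotFound, errDiskNotFound, nil}
	// 6 disks with the default N/2 read quorum of 3; offline disks ignored.
	fmt.Println(reduceQuorumErrs(errs, []error{errDiskNotFound}, 3)) // <nil>
}
```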
readQuorum := getReadQuorum(len(storageDisks)) - return reduceReadQuorumErrs(GlobalContext, g.Wait(), objectOpIgnoredErrs, readQuorum) == nil + return reduceReadQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, readQuorum) == nil } diff --git a/cmd/xl-v1-common_test.go b/cmd/erasure-common_test.go similarity index 91% rename from cmd/xl-v1-common_test.go rename to cmd/erasure-common_test.go index 876ecb72e..ce2d2e58f 100644 --- a/cmd/xl-v1-common_test.go +++ b/cmd/erasure-common_test.go @@ -24,13 +24,13 @@ import ( ) // Tests for if parent directory is object -func TestXLParentDirIsObject(t *testing.T) { +func TestErasureParentDirIsObject(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - obj, fsDisks, err := prepareXL16(ctx) + obj, fsDisks, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Unable to initialize 'XL' object layer.") + t.Fatalf("Unable to initialize 'Erasure' object layer.") } // Remove all disks. @@ -41,7 +41,7 @@ func TestXLParentDirIsObject(t *testing.T) { bucketName := "testbucket" objectName := "object" - if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal(err) } objectContent := "12345" @@ -54,7 +54,7 @@ func TestXLParentDirIsObject(t *testing.T) { t.Fatalf("Unexpected object name returned got %s, expected %s", objInfo.Name, objectName) } - z := obj.(*xlZones) + z := obj.(*erasureZones) xl := z.zones[0].sets[0] testCases := []struct { parentIsObject bool diff --git a/cmd/erasure-decode.go b/cmd/erasure-decode.go index 2468ab661..ed8abbb16 100644 --- a/cmd/erasure-decode.go +++ b/cmd/erasure-decode.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -191,7 +191,7 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) { return newBuf, nil } - return nil, errXLReadQuorum + return nil, errErasureReadQuorum } type errDecodeHealRequired struct { diff --git a/cmd/erasure-decode_test.go b/cmd/erasure-decode_test.go index f168b3cb1..17beb2ea1 100644 --- a/cmd/erasure-decode_test.go +++ b/cmd/erasure-decode_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -132,7 +132,7 @@ func TestErasureDecode(t *testing.T) { if disk == OfflineDisk { continue } - tillOffset := erasure.ShardFileTillOffset(test.offset, test.length, test.data) + tillOffset := erasure.ShardFileOffset(test.offset, test.length, test.data) bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize()) } @@ -163,7 +163,7 @@ func TestErasureDecode(t *testing.T) { if disk == OfflineDisk { continue } - tillOffset := erasure.ShardFileTillOffset(test.offset, test.length, test.data) + tillOffset := erasure.ShardFileOffset(test.offset, test.length, test.data) bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize()) } for j := range disks[:test.offDisks] { @@ -268,7 +268,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) { if disk == OfflineDisk { continue } - tillOffset := erasure.ShardFileTillOffset(offset, readLen, length) + tillOffset := erasure.ShardFileOffset(offset, readLen, length) bitrotReaders[index] = newStreamingBitrotReader(disk, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize()) } err = erasure.Decode(context.Background(), buf, bitrotReaders, offset, readLen, length, nil) @@ -330,7 +330,7 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64, if writers[index] == nil { continue } - tillOffset := erasure.ShardFileTillOffset(0, size, size) + tillOffset := erasure.ShardFileOffset(0, size, size) bitrotReaders[index] = newStreamingBitrotReader(disk, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize()) } if err = erasure.Decode(context.Background(), bytes.NewBuffer(content[:0]), bitrotReaders, 0, size, size, nil); err != nil { diff --git a/cmd/erasure-encode.go b/cmd/erasure-encode.go index f263b579d..d8f9cc65c 100644 --- a/cmd/erasure-encode.go +++ b/cmd/erasure-encode.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/erasure-encode_test.go b/cmd/erasure-encode_test.go index 8fbe2da70..ab923ec1b 100644 --- a/cmd/erasure-encode_test.go +++ b/cmd/erasure-encode_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/xl-v1-errors.go b/cmd/erasure-errors.go similarity index 73% rename from cmd/xl-v1-errors.go rename to cmd/erasure-errors.go index 3dc0260b7..e7b2d366e 100644 --- a/cmd/xl-v1-errors.go +++ b/cmd/erasure-errors.go @@ -18,11 +18,11 @@ package cmd import "errors" -// errXLReadQuorum - did not meet read quorum. -var errXLReadQuorum = errors.New("Read failed. Insufficient number of disks online") +// errErasureReadQuorum - did not meet read quorum. +var errErasureReadQuorum = errors.New("Read failed. Insufficient number of disks online") -// errXLWriteQuorum - did not meet write quorum. -var errXLWriteQuorum = errors.New("Write failed. Insufficient number of disks online") +// errErasureWriteQuorum - did not meet write quorum. +var errErasureWriteQuorum = errors.New("Write failed. 
Insufficient number of disks online") // errNoHealRequired - returned when healing is attempted on a previously healed disks. var errNoHealRequired = errors.New("No healing is required") diff --git a/cmd/erasure-heal_test.go b/cmd/erasure-heal_test.go index 5ab0e23db..4d1fff044 100644 --- a/cmd/erasure-heal_test.go +++ b/cmd/erasure-heal_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -71,7 +71,7 @@ func TestErasureHeal(t *testing.T) { // create some test data setup, err := newErasureTestSetup(test.dataBlocks, test.disks-test.dataBlocks, test.blocksize) if err != nil { - t.Fatalf("Test %d: failed to setup XL environment: %v", i, err) + t.Fatalf("Test %d: failed to setup Erasure environment: %v", i, err) } disks := setup.disks erasure, err := NewErasure(context.Background(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize) diff --git a/cmd/xl-v1-healing-common.go b/cmd/erasure-healing-common.go similarity index 66% rename from cmd/xl-v1-healing-common.go rename to cmd/erasure-healing-common.go index f7daef0bd..61cacc19d 100644 --- a/cmd/xl-v1-healing-common.go +++ b/cmd/erasure-healing-common.go @@ -18,10 +18,8 @@ package cmd import ( "context" - "fmt" "time" - "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/madmin" ) @@ -31,7 +29,7 @@ func commonTime(modTimes []time.Time) (modTime time.Time, count int) { timeOccurenceMap := make(map[time.Time]int) // Ignore the uuid sentinel and count the rest. for _, time := range modTimes { - if time == timeSentinel { + if time.Equal(timeSentinel) { continue } timeOccurenceMap[time]++ @@ -61,45 +59,45 @@ func bootModtimes(diskCount int) []time.Time { return modTimes } -// Extracts list of times from xlMetaV1 slice and returns, skips +// Extracts list of times from FileInfo slice and returns, skips // slice elements which have errors. -func listObjectModtimes(partsMetadata []xlMetaV1, errs []error) (modTimes []time.Time) { +func listObjectModtimes(partsMetadata []FileInfo, errs []error) (modTimes []time.Time) { modTimes = bootModtimes(len(partsMetadata)) for index, metadata := range partsMetadata { if errs[index] != nil { continue } // Once the file is found, save the uuid saved on disk. - modTimes[index] = metadata.Stat.ModTime + modTimes[index] = metadata.ModTime } return modTimes } // Notes: // There are 5 possible states a disk could be in, -// 1. __online__ - has the latest copy of xl.json - returned by listOnlineDisks +// 1. __online__ - has the latest copy of xl.meta - returned by listOnlineDisks // // 2. __offline__ - err == errDiskNotFound // -// 3. __availableWithParts__ - has the latest copy of xl.json and has all +// 3. __availableWithParts__ - has the latest copy of xl.meta and has all // parts with checksums matching; returned by disksWithAllParts // // 4. __outdated__ - returned by outDatedDisk, provided []StorageAPI // returned by diskWithAllParts is passed for latestDisks. -// - has an old copy of xl.json -// - doesn't have xl.json (errFileNotFound) -// - has the latest xl.json but one or more parts are corrupt +// - has an old copy of xl.meta +// - doesn't have xl.meta (errFileNotFound) +// - has the latest xl.meta but one or more parts are corrupt // -// 5. __missingParts__ - has the latest copy of xl.json but has some parts +// 5. 
__missingParts__ - has the latest copy of xl.meta but has some parts // missing. This is identified separately since this may need manual // inspection to understand the root cause. E.g, this could be due to // backend filesystem corruption. // listOnlineDisks - returns -// - a slice of disks where disk having 'older' xl.json (or nothing) +// - a slice of disks where disk having 'older' xl.meta (or nothing) // are set to nil. // - latest (in time) of the maximally occurring modTime(s). -func listOnlineDisks(disks []StorageAPI, partsMetadata []xlMetaV1, errs []error) (onlineDisks []StorageAPI, modTime time.Time) { +func listOnlineDisks(disks []StorageAPI, partsMetadata []FileInfo, errs []error) (onlineDisks []StorageAPI, modTime time.Time) { onlineDisks = make([]StorageAPI, len(disks)) // List all the file commit ids from parts metadata. @@ -110,7 +108,7 @@ func listOnlineDisks(disks []StorageAPI, partsMetadata []xlMetaV1, errs []error) // Create a new online disks slice, which have common uuid. for index, t := range modTimes { - if t == modTime { + if t.Equal(modTime) { onlineDisks[index] = disks[index] } else { onlineDisks[index] = nil @@ -119,89 +117,67 @@ func listOnlineDisks(disks []StorageAPI, partsMetadata []xlMetaV1, errs []error) return onlineDisks, modTime } -// Returns the latest updated xlMeta files and error in case of failure. -func getLatestXLMeta(ctx context.Context, partsMetadata []xlMetaV1, errs []error) (xlMetaV1, error) { - +// Returns the latest updated FileInfo files and error in case of failure. +func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, errs []error) (FileInfo, error) { // There should be atleast half correct entries, if not return failure if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, len(partsMetadata)/2); reducedErr != nil { - return xlMetaV1{}, reducedErr + return FileInfo{}, reducedErr } // List all the file commit ids from parts metadata. modTimes := listObjectModtimes(partsMetadata, errs) - // Count all latest updated xlMeta values + // Count all latest updated FileInfo values var count int - var latestXLMeta xlMetaV1 + var latestFileInfo FileInfo // Reduce list of UUIDs to a single common value - i.e. the last updated Time modTime, _ := commonTime(modTimes) - // Interate through all the modTimes and count the xlMeta(s) with latest time. + // Interate through all the modTimes and count the FileInfo(s) with latest time. for index, t := range modTimes { - if t == modTime && partsMetadata[index].IsValid() { - latestXLMeta = partsMetadata[index] + if t.Equal(modTime) && partsMetadata[index].IsValid() { + latestFileInfo = partsMetadata[index] count++ } } if count < len(partsMetadata)/2 { - return xlMetaV1{}, errXLReadQuorum + return FileInfo{}, errErasureReadQuorum } - return latestXLMeta, nil + return latestFileInfo, nil } // disksWithAllParts - This function needs to be called with // []StorageAPI returned by listOnlineDisks. Returns, // -// - disks which have all parts specified in the latest xl.json. +// - disks which have all parts specified in the latest xl.meta. // // - slice of errors about the state of data files on disk - can have // a not-found error or a hash-mismatch error. 
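`listOnlineDisks` and `getLatestFileInfo` above both reduce per-disk modTimes to the maximally occurring one. The diff's switch from `==` to `time.Equal` matters because monotonic-clock readings can make otherwise-identical times compare unequal; the sketch below sidesteps that by constructing plain wall-clock times, which is also why keying a map by `time.Time` stays safe here:

```go
package main

import (
	"fmt"
	"time"
)

// commonModTime mirrors commonTime above: tally every modTime except the
// sentinel that marks a missing/errored disk, and return the most common
// value together with its count.
func commonModTime(modTimes []time.Time, sentinel time.Time) (time.Time, int) {
	counts := make(map[time.Time]int)
	var best time.Time
	bestCount := 0
	for _, t := range modTimes {
		if t.Equal(sentinel) {
			continue
		}
		counts[t]++
		if counts[t] > bestCount {
			best, bestCount = t, counts[t]
		}
	}
	return best, bestCount
}

func main() {
	sentinel := time.Unix(0, 0)
	latest := time.Unix(2000, 0) // modTime of the last successful write
	stale := time.Unix(1000, 0)  // a disk that missed the overwrite

	modTimes := []time.Time{latest, latest, latest, stale, sentinel, latest}
	modTime, count := commonModTime(modTimes, sentinel)
	fmt.Println(modTime.Unix(), count) // 2000 4: four disks agree on the latest write
}
```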
-func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []xlMetaV1, errs []error, bucket, +func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []FileInfo, errs []error, bucket, object string, scanMode madmin.HealScanMode) ([]StorageAPI, []error) { availableDisks := make([]StorageAPI, len(onlineDisks)) dataErrs := make([]error, len(onlineDisks)) for i, onlineDisk := range onlineDisks { - if onlineDisk == nil { + if errs[i] != nil { dataErrs[i] = errs[i] continue } + if onlineDisk == nil { + dataErrs[i] = errDiskNotFound + continue + } switch scanMode { case madmin.HealDeepScan: - erasure := partsMetadata[i].Erasure - - // disk has a valid xl.json but may not have all the + // disk has a valid xl.meta but may not have all the // parts. This is considered an outdated disk, since // it needs healing too. - for _, part := range partsMetadata[i].Parts { - checksumInfo := erasure.GetChecksumInfo(part.Number) - partPath := pathJoin(object, fmt.Sprintf("part.%d", part.Number)) - err := onlineDisk.VerifyFile(bucket, partPath, erasure.ShardFileSize(part.Size), checksumInfo.Algorithm, checksumInfo.Hash, erasure.ShardSize()) - if err != nil { - if !IsErr(err, []error{ - errFileNotFound, - errVolumeNotFound, - errFileCorrupt, - }...) { - logger.GetReqInfo(ctx).AppendTags("disk", onlineDisk.String()) - logger.LogIf(ctx, err) - } - dataErrs[i] = err - break - } - } + dataErrs[i] = onlineDisk.VerifyFile(bucket, object, partsMetadata[i]) case madmin.HealNormalScan: - for _, part := range partsMetadata[i].Parts { - partPath := pathJoin(object, fmt.Sprintf("part.%d", part.Number)) - _, err := onlineDisk.StatFile(bucket, partPath) - if err != nil { - dataErrs[i] = err - break - } - } + dataErrs[i] = onlineDisk.CheckParts(bucket, object, partsMetadata[i]) } if dataErrs[i] == nil { diff --git a/cmd/xl-v1-healing-common_test.go b/cmd/erasure-healing-common_test.go similarity index 77% rename from cmd/xl-v1-healing-common_test.go rename to cmd/erasure-healing-common_test.go index 7d2df85e1..300f99775 100644 --- a/cmd/xl-v1-healing-common_test.go +++ b/cmd/erasure-healing-common_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -95,9 +95,9 @@ func TestListOnlineDisks(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - obj, disks, err := prepareXL16(ctx) + obj, disks, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Prepare XL backend failed - %v", err) + t.Fatalf("Prepare Erasure backend failed - %v", err) } defer removeRoots(disks) @@ -141,9 +141,9 @@ func TestListOnlineDisks(t *testing.T) { modTimes: modTimesThreeNone, expectedTime: threeNanoSecs, errs: []error{ - // Disks that have a valid xl.json. + // Disks that have a valid xl.meta. nil, nil, nil, nil, nil, nil, nil, - // Majority of disks don't have xl.json. + // Majority of disks don't have xl.meta. errFileNotFound, errFileNotFound, errFileNotFound, errFileNotFound, errFileNotFound, errDiskAccessDenied, @@ -156,9 +156,9 @@ func TestListOnlineDisks(t *testing.T) { modTimes: modTimesThreeNone, expectedTime: threeNanoSecs, errs: []error{ - // Disks that have a valid xl.json. + // Disks that have a valid xl.meta. nil, nil, nil, nil, nil, nil, nil, - // Majority of disks don't have xl.json. + // Majority of disks don't have xl.meta. 
errFileNotFound, errFileNotFound, errFileNotFound, errFileNotFound, errFileNotFound, errDiskAccessDenied, @@ -170,27 +170,34 @@ func TestListOnlineDisks(t *testing.T) { } bucket := "bucket" + err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) + if err != nil { + t.Fatalf("Failed to make a bucket %v", err) + } + object := "object" data := bytes.Repeat([]byte("a"), 1024) - z := obj.(*xlZones) - xlDisks := z.zones[0].sets[0].getDisks() + z := obj.(*erasureZones) + erasureDisks := z.zones[0].sets[0].getDisks() for i, test := range testCases { - // Prepare bucket/object backend for the tests below. - - // Cleanup from previous test. - obj.DeleteObject(GlobalContext, bucket, object) - obj.DeleteBucket(GlobalContext, bucket, false) - - err = obj.MakeBucketWithLocation(GlobalContext, "bucket", "", false) - if err != nil { - t.Fatalf("Failed to make a bucket %v", err) - } - - _, err = obj.PutObject(GlobalContext, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{}) + _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{}) if err != nil { t.Fatalf("Failed to putObject %v", err) } + partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "") + fi, err := getLatestFileInfo(ctx, partsMetadata, errs) + if err != nil { + t.Fatalf("Failed to getLatestFileInfo %v", err) + } + + for j := range partsMetadata { + if errs[j] != nil { + t.Fatalf("Test %d: expected error to be nil: %s", i+1, errs[j]) + } + partsMetadata[j].ModTime = test.modTimes[j] + } + tamperedIndex := -1 switch test._tamperBackend { case deletePart: @@ -199,11 +206,11 @@ func TestListOnlineDisks(t *testing.T) { continue } // Remove a part from a disk - // which has a valid xl.json, + // which has a valid xl.meta, // and check if that disk // appears in outDatedDisks. tamperedIndex = index - dErr := xlDisks[index].DeleteFile(bucket, filepath.Join(object, "part.1")) + dErr := erasureDisks[index].DeleteFile(bucket, pathJoin(object, fi.DataDir, "part.1")) if dErr != nil { t.Fatalf("Test %d: Failed to delete %s - %v", i+1, filepath.Join(object, "part.1"), dErr) @@ -216,11 +223,11 @@ func TestListOnlineDisks(t *testing.T) { continue } // Corrupt a part from a disk - // which has a valid xl.json, + // which has a valid xl.meta, // and check if that disk // appears in outDatedDisks. 
tamperedIndex = index - filePath := pathJoin(xlDisks[index].String(), bucket, object, "part.1") + filePath := pathJoin(erasureDisks[index].String(), bucket, object, fi.DataDir, "part.1") f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0) if err != nil { t.Fatalf("Failed to open %s: %s\n", filePath, err) @@ -232,27 +239,19 @@ func TestListOnlineDisks(t *testing.T) { } - partsMetadata, errs := readAllXLMetadata(GlobalContext, xlDisks, bucket, object) - for i := range partsMetadata { - if errs[i] != nil { - t.Fatalf("Test %d: expected error to be nil: %s", i+1, errs[i].Error()) - } - partsMetadata[i].Stat.ModTime = test.modTimes[i] - } - - onlineDisks, modTime := listOnlineDisks(xlDisks, partsMetadata, test.errs) + onlineDisks, modTime := listOnlineDisks(erasureDisks, partsMetadata, test.errs) if !modTime.Equal(test.expectedTime) { t.Fatalf("Test %d: Expected modTime to be equal to %v but was found to be %v", i+1, test.expectedTime, modTime) } - availableDisks, newErrs := disksWithAllParts(GlobalContext, onlineDisks, partsMetadata, test.errs, bucket, object, madmin.HealDeepScan) + availableDisks, newErrs := disksWithAllParts(ctx, onlineDisks, partsMetadata, test.errs, bucket, object, madmin.HealDeepScan) test.errs = newErrs if test._tamperBackend != noTamper { if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil { t.Fatalf("Test %d: disk (%v) with part.1 missing is not a disk with available data", - i+1, xlDisks[tamperedIndex]) + i+1, erasureDisks[tamperedIndex]) } } @@ -262,9 +261,9 @@ func TestListOnlineDisks(t *testing.T) { func TestDisksWithAllParts(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - obj, disks, err := prepareXL16(ctx) + obj, disks, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Prepare XL backend failed - %v", err) + t.Fatalf("Prepare Erasure backend failed - %v", err) } defer removeRoots(disks) @@ -273,10 +272,10 @@ func TestDisksWithAllParts(t *testing.T) { // make data with more than one part partCount := 3 data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount) - z := obj.(*xlZones) - xl := z.zones[0].sets[0] - xlDisks := xl.getDisks() - err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) + z := obj.(*erasureZones) + s := z.zones[0].sets[0] + erasureDisks := s.getDisks() + err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) if err != nil { t.Fatalf("Failed to make a bucket %v", err) } @@ -286,22 +285,22 @@ func TestDisksWithAllParts(t *testing.T) { t.Fatalf("Failed to putObject %v", err) } - _, errs := readAllXLMetadata(ctx, xlDisks, bucket, object) - readQuorum := len(xlDisks) / 2 + _, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "") + readQuorum := len(erasureDisks) / 2 if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil { t.Fatalf("Failed to read xl meta data %v", reducedErr) } // Test that all disks are returned without any failures with // unmodified meta data - partsMetadata, errs := readAllXLMetadata(ctx, xlDisks, bucket, object) + partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "") if err != nil { t.Fatalf("Failed to read xl meta data %v", err) } - filteredDisks, errs := disksWithAllParts(ctx, xlDisks, partsMetadata, errs, bucket, object, madmin.HealDeepScan) + filteredDisks, errs := disksWithAllParts(ctx, erasureDisks, partsMetadata, errs, bucket, object, madmin.HealDeepScan) - if len(filteredDisks) != len(xlDisks) { + if len(filteredDisks) != len(erasureDisks) { t.Errorf("Unexpected 
number of disks: %d", len(filteredDisks)) } @@ -324,7 +323,7 @@ func TestDisksWithAllParts(t *testing.T) { for diskIndex, partName := range diskFailures { for i := range partsMetadata[diskIndex].Erasure.Checksums { if fmt.Sprintf("part.%d", i+1) == partName { - filePath := pathJoin(xlDisks[diskIndex].String(), bucket, object, partName) + filePath := pathJoin(erasureDisks[diskIndex].String(), bucket, object, partsMetadata[diskIndex].DataDir, partName) f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0) if err != nil { t.Fatalf("Failed to open %s: %s\n", filePath, err) @@ -335,10 +334,10 @@ func TestDisksWithAllParts(t *testing.T) { } } - errs = make([]error, len(xlDisks)) - filteredDisks, errs = disksWithAllParts(ctx, xlDisks, partsMetadata, errs, bucket, object, madmin.HealDeepScan) + errs = make([]error, len(erasureDisks)) + filteredDisks, errs = disksWithAllParts(ctx, erasureDisks, partsMetadata, errs, bucket, object, madmin.HealDeepScan) - if len(filteredDisks) != len(xlDisks) { + if len(filteredDisks) != len(erasureDisks) { t.Errorf("Unexpected number of disks: %d", len(filteredDisks)) } diff --git a/cmd/xl-v1-healing.go b/cmd/erasure-healing.go similarity index 78% rename from cmd/xl-v1-healing.go rename to cmd/erasure-healing.go index 20d9c57a3..9a2c58938 100644 --- a/cmd/xl-v1-healing.go +++ b/cmd/erasure-healing.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ import ( "context" "fmt" "io" + "sync" "time" "github.com/minio/minio/cmd/logger" @@ -27,12 +28,12 @@ import ( "github.com/minio/minio/pkg/sync/errgroup" ) -func (xl xlObjects) ReloadFormat(ctx context.Context, dryRun bool) error { +func (er erasureObjects) ReloadFormat(ctx context.Context, dryRun bool) error { logger.LogIf(ctx, NotImplemented{}) return NotImplemented{} } -func (xl xlObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { +func (er erasureObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { logger.LogIf(ctx, NotImplemented{}) return madmin.HealResultItem{}, NotImplemented{} } @@ -40,14 +41,14 @@ func (xl xlObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealRes // Heals a bucket if it doesn't exist on one of the disks, additionally // also heals the missing entries for bucket metadata files // `policy.json, notification.xml, listeners.json`. -func (xl xlObjects) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) ( +func (er erasureObjects) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) ( result madmin.HealResultItem, err error) { if !dryRun { defer ObjectPathUpdated(bucket) } - storageDisks := xl.getDisks() - storageEndpoints := xl.getEndpoints() + storageDisks := er.getDisks() + storageEndpoints := er.getEndpoints() // get write quorum for an object writeQuorum := getWriteQuorum(len(storageDisks)) @@ -158,7 +159,6 @@ func healBucket(ctx context.Context, storageDisks []StorageAPI, storageEndpoints State: afterState[i], }) } - return res, nil } @@ -196,22 +196,22 @@ func listAllBuckets(storageDisks []StorageAPI, healBuckets map[string]VolInfo) ( // Only heal on disks where we are sure that healing is needed. We can expand // this list as and when we figure out more errors can be added to this list safely. 
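The comment above is made concrete by `shouldHealObjectOnDisk`, which follows. Its decision table, as a standalone sketch (the error values are stand-ins for the server's sentinels): heal when metadata is missing or corrupt, when a part failed its scan, or when the disk's modTime disagrees with the quorum modTime.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var (
	errFileNotFound    = errors.New("file not found")
	errCorruptedFormat = errors.New("corrupted format")
	errFileCorrupt     = errors.New("file corrupt")
)

// shouldHeal mirrors shouldHealObjectOnDisk: heal when the disk's metadata
// is missing or corrupt; otherwise, when metadata read fine, heal if a part
// failed its scan or the disk's modTime disagrees with the quorum modTime.
func shouldHeal(metaErr, dataErr error, modTime, quorumModTime time.Time) bool {
	switch metaErr {
	case errFileNotFound, errCorruptedFormat:
		return true
	}
	if metaErr == nil {
		if dataErr == errFileNotFound || dataErr == errFileCorrupt {
			return true
		}
		if !quorumModTime.Equal(modTime) {
			return true
		}
	}
	return false
}

func main() {
	now := time.Now()
	fmt.Println(shouldHeal(nil, nil, now, now))                     // false: healthy disk
	fmt.Println(shouldHeal(errFileNotFound, nil, time.Time{}, now)) // true: metadata missing
	fmt.Println(shouldHeal(nil, errFileCorrupt, now, now))          // true: bitrot in a part
	fmt.Println(shouldHeal(nil, nil, now.Add(-time.Minute), now))   // true: stale modTime
}
```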
-func shouldHealObjectOnDisk(xlErr, dataErr error, meta xlMetaV1, quorumModTime time.Time) bool { - switch xlErr { +func shouldHealObjectOnDisk(erErr, dataErr error, meta FileInfo, quorumModTime time.Time) bool { + switch erErr { case errFileNotFound: return true case errCorruptedFormat: return true } - if xlErr == nil { - // If xl.json was read fine but there may be problem with the part.N files. + if erErr == nil { + // If er.meta was read fine but there may be problem with the part.N files. if IsErr(dataErr, []error{ errFileNotFound, errFileCorrupt, }...) { return true } - if !quorumModTime.Equal(meta.Stat.ModTime) { + if !quorumModTime.Equal(meta.ModTime) { return true } } @@ -219,20 +219,20 @@ func shouldHealObjectOnDisk(xlErr, dataErr error, meta xlMetaV1, quorumModTime t } // Heals an object by re-writing corrupt/missing erasure blocks. -func (xl xlObjects) healObject(ctx context.Context, bucket string, object string, - partsMetadata []xlMetaV1, errs []error, latestXLMeta xlMetaV1, +func (er erasureObjects) healObject(ctx context.Context, bucket string, object string, + partsMetadata []FileInfo, errs []error, latestFileInfo FileInfo, dryRun bool, remove bool, scanMode madmin.HealScanMode) (result madmin.HealResultItem, err error) { - dataBlocks := latestXLMeta.Erasure.DataBlocks + dataBlocks := latestFileInfo.Erasure.DataBlocks - storageDisks := xl.getDisks() - storageEndpoints := xl.getEndpoints() + storageDisks := er.getDisks() + storageEndpoints := er.getEndpoints() - // List of disks having latest version of the object xl.json + // List of disks having latest version of the object er.meta // (by modtime). latestDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs) - // List of disks having all parts as per latest xl.json. + // List of disks having all parts as per latest er.meta. availableDisks, dataErrs := disksWithAllParts(ctx, latestDisks, partsMetadata, errs, bucket, object, scanMode) // Initialize heal result object @@ -241,8 +241,8 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string Bucket: bucket, Object: object, DiskCount: len(storageDisks), - ParityBlocks: latestXLMeta.Erasure.ParityBlocks, - DataBlocks: latestXLMeta.Erasure.DataBlocks, + ParityBlocks: latestFileInfo.Erasure.ParityBlocks, + DataBlocks: latestFileInfo.Erasure.DataBlocks, // Initialize object size to -1, so we can detect if we are // unable to reliably find the object size. @@ -263,7 +263,7 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string numAvailableDisks++ // If data is sane on any one disk, we can // extract the correct object size. - result.ObjectSize = partsMetadata[i].Stat.Size + result.ObjectSize = partsMetadata[i].Size result.ParityBlocks = partsMetadata[i].Erasure.ParityBlocks result.DataBlocks = partsMetadata[i].Erasure.DataBlocks case errs[i] == errDiskNotFound, dataErrs[i] == errDiskNotFound: @@ -307,18 +307,18 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string // If less than read quorum number of disks have all the parts // of the data, we can't reconstruct the erasure-coded data. if numAvailableDisks < dataBlocks { - // Check if xl.json, and corresponding parts are also missing. + // Check if er.meta, and corresponding parts are also missing. 
if m, ok := isObjectDangling(partsMetadata, errs, dataErrs); ok { writeQuorum := m.Erasure.DataBlocks + 1 if m.Erasure.DataBlocks == 0 { writeQuorum = getWriteQuorum(len(storageDisks)) } if !dryRun && remove { - err = xl.deleteObject(ctx, bucket, object, writeQuorum, false) + err = er.deleteObject(ctx, bucket, object, writeQuorum) } - return defaultHealResult(latestXLMeta, storageDisks, storageEndpoints, errs, bucket, object), err + return defaultHealResult(latestFileInfo, storageDisks, storageEndpoints, errs, bucket, object), err } - return result, toObjectErr(errXLReadQuorum, bucket, object) + return result, toObjectErr(errErasureReadQuorum, bucket, object) } if disksToHealCount == 0 { @@ -332,32 +332,19 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string return result, nil } - // Latest xlMetaV1 for reference. If a valid metadata is not + // Latest FileInfo for reference. If a valid metadata is not // present, it is as good as object not found. - latestMeta, pErr := pickValidXLMeta(ctx, partsMetadata, modTime, dataBlocks) + latestMeta, pErr := pickValidFileInfo(ctx, partsMetadata, modTime, dataBlocks) if pErr != nil { return result, toObjectErr(pErr, bucket, object) } - // Clear data files of the object on outdated disks - for _, disk := range outDatedDisks { - // Before healing outdated disks, we need to remove - // xl.json and part files from "bucket/object/" so - // that rename(minioMetaBucket, "tmp/tmpuuid/", - // "bucket", "object/") succeeds. - if disk == nil { - // Not an outdated disk. - continue - } - - // List and delete the object directory, - files, derr := disk.ListDir(bucket, object, -1, "") - if derr == nil { - for _, entry := range files { - _ = disk.DeleteFile(bucket, - pathJoin(object, entry)) - } - } + cleanFileInfo := func(fi FileInfo) FileInfo { + // Returns a copy of the 'fi' with checksums and parts nil'ed. + nfi := fi + nfi.Erasure.Checksums = nil + nfi.Parts = nil + return nfi } // Reorder so that we have data disks first and parity disks next. @@ -368,7 +355,7 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string if outDatedDisks[i] == nil { continue } - partsMetadata[i] = newXLMetaFromXLMeta(latestMeta) + partsMetadata[i] = cleanFileInfo(latestMeta) } // We write at temporary location and then rename to final location. 
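The healing loop below stages reconstructed parts under `minioMetaTmpBucket` and only renames them into place once every part and the metadata write have succeeded. A simplified sketch of that stage-then-promote discipline; the sidecar checksum file is an illustration only, since the real bitrot writers interleave checksums within the part stream itself:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"os"
	"path/filepath"
)

// healPart stages a reconstructed shard under a tmp directory and promotes
// it into the object's data directory only after every write succeeded.
func healPart(tmpDir, dataDir string, part int, shard []byte) error {
	if err := os.MkdirAll(tmpDir, 0o755); err != nil {
		return err
	}
	name := fmt.Sprintf("part.%d", part)
	sum := sha256.Sum256(shard)

	if err := os.WriteFile(filepath.Join(tmpDir, name), shard, 0o644); err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(tmpDir, name+".sha256"), sum[:], 0o644); err != nil {
		return err
	}

	// Promote: rename out of tmp only once the staged writes are complete,
	// so a crash mid-heal never leaves a half-written part in place.
	if err := os.MkdirAll(dataDir, 0o755); err != nil {
		return err
	}
	if err := os.Rename(filepath.Join(tmpDir, name), filepath.Join(dataDir, name)); err != nil {
		return err
	}
	return os.Rename(filepath.Join(tmpDir, name+".sha256"), filepath.Join(dataDir, name+".sha256"))
}

func main() {
	root, _ := os.MkdirTemp("", "heal")
	defer os.RemoveAll(root)

	err := healPart(
		filepath.Join(root, ".minio.sys", "tmp", "heal-uuid"),
		filepath.Join(root, "bucket", "object", "data-uuid"),
		1, []byte("reconstructed shard"))
	fmt.Println(err) // <nil>
}
```

The deferred `er.deleteObject(ctx, minioMetaTmpBucket, tmpID, writeQuorum)` in the hunk above plays the cleanup role for this scheme: anything still sitting under the tmp ID after the renames is garbage from a failed heal.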
@@ -388,7 +375,7 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string partSize := latestMeta.Parts[partIndex].Size partActualSize := latestMeta.Parts[partIndex].ActualSize partNumber := latestMeta.Parts[partIndex].Number - tillOffset := erasure.ShardFileTillOffset(0, partSize, partSize) + tillOffset := erasure.ShardFileOffset(0, partSize, partSize) readers := make([]io.ReaderAt, len(latestDisks)) checksumAlgo := erasureInfo.GetChecksumInfo(partNumber).Algorithm for i, disk := range latestDisks { @@ -396,7 +383,7 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string continue } checksumInfo := partsMetadata[i].Erasure.GetChecksumInfo(partNumber) - partPath := pathJoin(object, fmt.Sprintf("part.%d", partNumber)) + partPath := pathJoin(object, latestMeta.DataDir, fmt.Sprintf("part.%d", partNumber)) readers[i] = newBitrotReader(disk, bucket, partPath, tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize()) } writers := make([]io.Writer, len(outDatedDisks)) @@ -404,21 +391,22 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string if disk == OfflineDisk { continue } - partPath := pathJoin(tmpID, fmt.Sprintf("part.%d", partNumber)) - writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath, tillOffset, checksumAlgo, erasure.ShardSize()) + partPath := pathJoin(tmpID, latestMeta.DataDir, fmt.Sprintf("part.%d", partNumber)) + writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath, tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize()) } - hErr := erasure.Heal(ctx, readers, writers, partSize) + err = erasure.Heal(ctx, readers, writers, partSize) closeBitrotReaders(readers) closeBitrotWriters(writers) - if hErr != nil { - return result, toObjectErr(hErr, bucket, object) + if err != nil { + return result, toObjectErr(err, bucket, object) } // outDatedDisks that had write errors should not be // written to for remaining parts, so we nil it out. for i, disk := range outDatedDisks { - if disk == nil { + if disk == OfflineDisk { continue } + // A non-nil stale disk which did not receive // a healed part checksum had a write error. if writers[i] == nil { @@ -426,6 +414,7 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string disksToHealCount-- continue } + partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize) partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{ PartNumber: partNumber, @@ -436,33 +425,31 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string // If all disks are having errors, we give up. if disksToHealCount == 0 { - return result, fmt.Errorf("all disks without up-to-date data had write errors") + return result, fmt.Errorf("all disks had write errors, unable to heal") } } - // Cleanup in case of xl.json writing failure + // Cleanup in case of er.meta writing failure writeQuorum := latestMeta.Erasure.DataBlocks + 1 - defer xl.deleteObject(ctx, minioMetaTmpBucket, tmpID, writeQuorum, false) + defer er.deleteObject(ctx, minioMetaTmpBucket, tmpID, writeQuorum) - // Generate and write `xl.json` generated from other disks. - outDatedDisks, aErr := writeUniqueXLMetadata(ctx, outDatedDisks, minioMetaTmpBucket, tmpID, + // Generate and write `xl.meta` generated from other disks. 
+ outDatedDisks, err = writeUniqueFileInfo(ctx, outDatedDisks, minioMetaTmpBucket, tmpID, partsMetadata, diskCount(outDatedDisks)) - if aErr != nil { - return result, toObjectErr(aErr, bucket, object) + if err != nil { + return result, toObjectErr(err, bucket, object) } // Rename from tmp location to the actual location. for _, disk := range outDatedDisks { - if disk == nil { + if disk == OfflineDisk { continue } // Attempt a rename now from healed data to final location. - aErr = disk.RenameFile(minioMetaTmpBucket, retainSlash(tmpID), bucket, - retainSlash(object)) - if aErr != nil { - logger.LogIf(ctx, aErr) - return result, toObjectErr(aErr, bucket, object) + if err = disk.RenameData(minioMetaTmpBucket, tmpID, latestMeta.DataDir, bucket, object); err != nil { + logger.LogIf(ctx, err) + return result, toObjectErr(err, bucket, object) } for i, v := range result.Before.Drives { @@ -473,16 +460,16 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string } // Set the size of the object in the heal result - result.ObjectSize = latestMeta.Stat.Size + result.ObjectSize = latestMeta.Size return result, nil } // healObjectDir - heals object directory specifically, this special call // is needed since we do not have a special backend format for directories. -func (xl xlObjects) healObjectDir(ctx context.Context, bucket, object string, dryRun bool, remove bool) (hr madmin.HealResultItem, err error) { - storageDisks := xl.getDisks() - storageEndpoints := xl.getEndpoints() +func (er erasureObjects) healObjectDir(ctx context.Context, bucket, object string, dryRun bool, remove bool) (hr madmin.HealResultItem, err error) { + storageDisks := er.getDisks() + storageEndpoints := er.getEndpoints() // Initialize heal result object hr = madmin.HealResultItem{ @@ -502,7 +489,19 @@ func (xl xlObjects) healObjectDir(ctx context.Context, bucket, object string, dr danglingObject := isObjectDirDangling(errs) if danglingObject { if !dryRun && remove { - xl.deleteObject(ctx, bucket, object, hr.DataBlocks+1, true) + var wg sync.WaitGroup + // Remove versions in bulk for each disk + for index, disk := range storageDisks { + if disk == nil { + continue + } + wg.Add(1) + go func(index int, disk StorageAPI) { + defer wg.Done() + _ = disk.DeleteFile(bucket, object) + }(index, disk) + } + wg.Wait() } } @@ -548,7 +547,7 @@ func (xl xlObjects) healObjectDir(ctx context.Context, bucket, object string, dr // Populates default heal result item entries with possible values when we are returning prematurely. // This is to ensure that in any circumstance we are not returning empty arrays with wrong values. -func defaultHealResult(latestXLMeta xlMetaV1, storageDisks []StorageAPI, storageEndpoints []string, errs []error, bucket, object string) madmin.HealResultItem { +func defaultHealResult(latestFileInfo FileInfo, storageDisks []StorageAPI, storageEndpoints []string, errs []error, bucket, object string) madmin.HealResultItem { // Initialize heal result object result := madmin.HealResultItem{ Type: madmin.HealItemObject, @@ -560,8 +559,8 @@ func defaultHealResult(latestXLMeta xlMetaV1, storageDisks []StorageAPI, storage // unable to reliably find the object size. 
ObjectSize: -1, } - if latestXLMeta.IsValid() { - result.ObjectSize = latestXLMeta.Stat.Size + if latestFileInfo.IsValid() { + result.ObjectSize = latestFileInfo.Size } for index, disk := range storageDisks { @@ -595,13 +594,13 @@ func defaultHealResult(latestXLMeta xlMetaV1, storageDisks []StorageAPI, storage }) } - if !latestXLMeta.IsValid() { + if !latestFileInfo.IsValid() { // Default to most common configuration for erasure blocks. result.ParityBlocks = getDefaultParityBlocks(len(storageDisks)) result.DataBlocks = getDefaultDataBlocks(len(storageDisks)) } else { - result.ParityBlocks = latestXLMeta.Erasure.ParityBlocks - result.DataBlocks = latestXLMeta.Erasure.DataBlocks + result.ParityBlocks = latestFileInfo.Erasure.ParityBlocks + result.DataBlocks = latestFileInfo.Erasure.DataBlocks } return result @@ -616,7 +615,7 @@ func statAllDirs(ctx context.Context, storageDisks []StorageAPI, bucket, prefix } index := index g.Go(func() error { - entries, err := storageDisks[index].ListDir(bucket, prefix, 1, "") + entries, err := storageDisks[index].ListDir(bucket, prefix, 1) if err != nil { return err } @@ -655,23 +654,23 @@ func isObjectDirDangling(errs []error) (ok bool) { // Object is considered dangling/corrupted if any only // if total disks - a combination of corrupted and missing // files is lesser than number of data blocks. -func isObjectDangling(metaArr []xlMetaV1, errs []error, dataErrs []error) (validMeta xlMetaV1, ok bool) { +func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (validMeta FileInfo, ok bool) { // We can consider an object data not reliable - // when xl.json is not found in read quorum disks. - // or when xl.json is not readable in read quorum disks. - var notFoundXLJSON, corruptedXLJSON int + // when er.meta is not found in read quorum disks. + // or when er.meta is not readable in read quorum disks. + var notFoundErasureJSON, corruptedErasureJSON int for _, readErr := range errs { if readErr == errFileNotFound { - notFoundXLJSON++ + notFoundErasureJSON++ } else if readErr == errCorruptedFormat { - corruptedXLJSON++ + corruptedErasureJSON++ } } var notFoundParts int for i := range dataErrs { // Only count part errors, if the error is not - // same as xl.json error. This is to avoid - // double counting when both parts and xl.json + // same as er.meta error. This is to avoid + // double counting when both parts and er.meta // are not available. if errs[i] != dataErrs[i] { if dataErrs[i] == errFileNotFound { @@ -694,11 +693,11 @@ func isObjectDangling(metaArr []xlMetaV1, errs []error, dataErrs []error) (valid } // We have valid meta, now verify if we have enough files with parity blocks. - return validMeta, corruptedXLJSON+notFoundXLJSON+notFoundParts > validMeta.Erasure.ParityBlocks + return validMeta, corruptedErasureJSON+notFoundErasureJSON+notFoundParts > validMeta.Erasure.ParityBlocks } // HealObject - heal the given object, automatically deletes the object if stale/corrupted if `remove` is true. -func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (hr madmin.HealResultItem, err error) { +func (er erasureObjects) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (hr madmin.HealResultItem, err error) { // Create context that also contains information about the object and bucket. // The top level handler might not have this information. 
reqInfo := logger.GetReqInfo(ctx) @@ -712,14 +711,14 @@ func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, opts // Healing directories handle it separately. if HasSuffix(object, SlashSeparator) { - return xl.healObjectDir(healCtx, bucket, object, opts.DryRun, opts.Remove) + return er.healObjectDir(healCtx, bucket, object, opts.DryRun, opts.Remove) } - storageDisks := xl.getDisks() - storageEndpoints := xl.getEndpoints() + storageDisks := er.getDisks() + storageEndpoints := er.getEndpoints() // Read metadata files from all the disks - partsMetadata, errs := readAllXLMetadata(healCtx, storageDisks, bucket, object) + partsMetadata, errs := readAllFileInfo(healCtx, storageDisks, bucket, object, versionID) // Check if the object is dangling, if yes and user requested // remove we simply delete it from namespace. @@ -729,15 +728,15 @@ func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, opts writeQuorum = getWriteQuorum(len(storageDisks)) } if !opts.DryRun && opts.Remove { - xl.deleteObject(healCtx, bucket, object, writeQuorum, false) + er.deleteObject(healCtx, bucket, object, writeQuorum) } err = reduceReadQuorumErrs(ctx, errs, nil, writeQuorum-1) - return defaultHealResult(xlMetaV1{}, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object) + return defaultHealResult(FileInfo{}, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object) } - latestXLMeta, err := getLatestXLMeta(healCtx, partsMetadata, errs) + latestFileInfo, err := getLatestFileInfo(healCtx, partsMetadata, errs) if err != nil { - return defaultHealResult(xlMetaV1{}, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object) + return defaultHealResult(FileInfo{}, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object) } errCount := 0 @@ -751,20 +750,20 @@ func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, opts // Only if we get errors from all the disks we return error. Else we need to // continue to return filled madmin.HealResultItem struct which includes info // on what disks the file is available etc. - if err = reduceReadQuorumErrs(ctx, errs, nil, latestXLMeta.Erasure.DataBlocks); err != nil { + if err = reduceReadQuorumErrs(ctx, errs, nil, latestFileInfo.Erasure.DataBlocks); err != nil { if m, ok := isObjectDangling(partsMetadata, errs, []error{}); ok { writeQuorum := m.Erasure.DataBlocks + 1 if m.Erasure.DataBlocks == 0 { writeQuorum = getWriteQuorum(len(storageDisks)) } if !opts.DryRun && opts.Remove { - xl.deleteObject(ctx, bucket, object, writeQuorum, false) + er.deleteObject(ctx, bucket, object, writeQuorum) } } - return defaultHealResult(latestXLMeta, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object) + return defaultHealResult(latestFileInfo, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object) } } // Heal the object. 
- return xl.healObject(healCtx, bucket, object, partsMetadata, errs, latestXLMeta, opts.DryRun, opts.Remove, opts.ScanMode) + return er.healObject(healCtx, bucket, object, partsMetadata, errs, latestFileInfo, opts.DryRun, opts.Remove, opts.ScanMode) } diff --git a/cmd/xl-v1-healing_test.go b/cmd/erasure-healing_test.go similarity index 53% rename from cmd/xl-v1-healing_test.go rename to cmd/erasure-healing_test.go index 3afd9ed71..a4f1addea 100644 --- a/cmd/xl-v1-healing_test.go +++ b/cmd/erasure-healing_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,12 +19,127 @@ package cmd import ( "bytes" "context" - "path/filepath" + "crypto/rand" + "os" + "path" + "reflect" "testing" + "time" + "github.com/dustin/go-humanize" "github.com/minio/minio/pkg/madmin" ) +// Tests both object and bucket healing. +func TestHealing(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + obj, fsDirs, err := prepareErasure16(ctx) + if err != nil { + t.Fatal(err) + } + defer removeRoots(fsDirs) + + z := obj.(*erasureZones) + er := z.zones[0].sets[0] + + // Create "bucket" + err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) + if err != nil { + t.Fatal(err) + } + + bucket := "bucket" + object := "object" + + data := make([]byte, 1*humanize.MiByte) + length := int64(len(data)) + _, err = rand.Read(data) + if err != nil { + t.Fatal(err) + } + + _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), length, "", ""), ObjectOptions{}) + if err != nil { + t.Fatal(err) + } + + disk := er.getDisks()[0] + fileInfoPreHeal, err := disk.ReadVersion(bucket, object, "") + if err != nil { + t.Fatal(err) + } + + // Remove the object - to simulate the case where the disk was down when the object + // was created. + err = removeAll(pathJoin(disk.String(), bucket, object)) + if err != nil { + t.Fatal(err) + } + + _, err = er.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan}) + if err != nil { + t.Fatal(err) + } + + fileInfoPostHeal, err := disk.ReadVersion(bucket, object, "") + if err != nil { + t.Fatal(err) + } + + // After heal the meta file should be as expected. + if !reflect.DeepEqual(fileInfoPreHeal, fileInfoPostHeal) { + t.Fatal("HealObject failed") + } + + err = os.RemoveAll(path.Join(fsDirs[0], bucket, object, "xl.meta")) + if err != nil { + t.Fatal(err) + } + + // Write xl.meta with different modtime to simulate the case where a disk had + // gone down when an object was replaced by a new object. + fileInfoOutDated := fileInfoPreHeal + fileInfoOutDated.ModTime = time.Now() + err = disk.WriteMetadata(bucket, object, fileInfoOutDated) + if err != nil { + t.Fatal(err) + } + + _, err = er.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealDeepScan}) + if err != nil { + t.Fatal(err) + } + + fileInfoPostHeal, err = disk.ReadVersion(bucket, object, "") + if err != nil { + t.Fatal(err) + } + + // After heal the meta file should be as expected. + if !reflect.DeepEqual(fileInfoPreHeal, fileInfoPostHeal) { + t.Fatal("HealObject failed") + } + + // Remove the bucket - to simulate the case where bucket was + // created when the disk was down. + err = os.RemoveAll(path.Join(fsDirs[0], bucket)) + if err != nil { + t.Fatal(err) + } + // This would create the bucket.
+ _, err = er.HealBucket(ctx, bucket, false, false) + if err != nil { + t.Fatal(err) + } + // Stat the bucket to make sure that it was created. + _, err = er.getDisks()[0].StatVol(bucket) + if err != nil { + t.Fatal(err) + } +} + func TestHealObjectCorrupted(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -51,7 +166,7 @@ func TestHealObjectCorrupted(t *testing.T) { data := bytes.Repeat([]byte("a"), 5*1024*1024) var opts ObjectOptions - err = objLayer.MakeBucketWithLocation(ctx, bucket, "", false) + err = objLayer.MakeBucketWithLocation(ctx, bucket, BucketOptions{}) if err != nil { t.Fatalf("Failed to make a bucket - %v", err) } @@ -81,91 +196,96 @@ func TestHealObjectCorrupted(t *testing.T) { } // Test 1: Remove the object backend files from the first disk. - z := objLayer.(*xlZones) - xl := z.zones[0].sets[0] - firstDisk := xl.getDisks()[0] - err = firstDisk.DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile)) + z := objLayer.(*erasureZones) + er := z.zones[0].sets[0] + erasureDisks := er.getDisks() + firstDisk := erasureDisks[0] + err = firstDisk.DeleteFile(bucket, pathJoin(object, xlStorageFormatFile)) if err != nil { t.Fatalf("Failed to delete a file - %v", err) } - _, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) + _, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan}) if err != nil { t.Fatalf("Failed to heal object - %v", err) } - _, err = firstDisk.StatFile(bucket, filepath.Join(object, xlMetaJSONFile)) + fileInfos, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "") + fi, err := getLatestFileInfo(ctx, fileInfos, errs) if err != nil { - t.Errorf("Expected xl.json file to be present but stat failed - %v", err) + t.Fatalf("Failed to getLatestFileInfo - %v", err) } - // Test 2: Heal when part.1 is empty - partSt1, err := firstDisk.StatFile(bucket, filepath.Join(object, "part.1")) - if err != nil { - t.Errorf("Expected part.1 file to be present but stat failed - %v", err) + if err = firstDisk.CheckFile(bucket, object); err != nil { + t.Errorf("Expected xl.meta file to be present but stat failed - %v", err) } - err = firstDisk.DeleteFile(bucket, filepath.Join(object, "part.1")) + + err = firstDisk.DeleteFile(bucket, pathJoin(object, fi.DataDir, "part.1")) if err != nil { t.Errorf("Failure during deleting part.1 - %v", err) } - err = firstDisk.WriteAll(bucket, filepath.Join(object, "part.1"), bytes.NewReader([]byte{})) + + err = firstDisk.WriteAll(bucket, pathJoin(object, fi.DataDir, "part.1"), bytes.NewReader([]byte{})) if err != nil { t.Errorf("Failure during creating part.1 - %v", err) } - _, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan}) + + _, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan}) + if err != nil { t.Errorf("Expected nil but received %v", err) } - partSt2, err := firstDisk.StatFile(bucket, filepath.Join(object, "part.1")) + + fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "") + nfi, err := getLatestFileInfo(ctx, fileInfos, errs) if err != nil { - t.Errorf("Expected from part.1 file to be present but stat failed - %v", err) - } - if partSt1.Size != partSt2.Size { - t.Errorf("part.1 file size is not the same before and after heal") + t.Fatalf("Failed to getLatestFileInfo - %v", err) } - // Test 3: Heal when part.1 is correct in size but corrupted
- partSt1, err = firstDisk.StatFile(bucket, filepath.Join(object, "part.1")) - if err != nil { - t.Errorf("Expected part.1 file to be present but stat failed - %v", err) + if !reflect.DeepEqual(fi, nfi) { + t.Fatalf("FileInfo not equal after healing") } - err = firstDisk.DeleteFile(bucket, filepath.Join(object, "part.1")) + + err = firstDisk.DeleteFile(bucket, pathJoin(object, fi.DataDir, "part.1")) if err != nil { t.Errorf("Failure during deleting part.1 - %v", err) } - bdata := bytes.Repeat([]byte("b"), int(partSt1.Size)) - err = firstDisk.WriteAll(bucket, filepath.Join(object, "part.1"), bytes.NewReader(bdata)) + + bdata := bytes.Repeat([]byte("b"), int(nfi.Size)) + err = firstDisk.WriteAll(bucket, pathJoin(object, fi.DataDir, "part.1"), bytes.NewReader(bdata)) if err != nil { t.Errorf("Failure during creating part.1 - %v", err) } - _, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan}) + + _, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan}) if err != nil { t.Errorf("Expected nil but received %v", err) } - partSt2, err = firstDisk.StatFile(bucket, filepath.Join(object, "part.1")) + + fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "") + nfi, err = getLatestFileInfo(ctx, fileInfos, errs) if err != nil { - t.Errorf("Expected from part.1 file to be present but stat failed - %v", err) - } - if partSt1.Size != partSt2.Size { - t.Errorf("part.1 file size is not the same before and after heal") + t.Fatalf("Failed to getLatestFileInfo - %v", err) } - // Test 4: checks if HealObject returns an error when xl.json is not found + if !reflect.DeepEqual(fi, nfi) { + t.Fatalf("FileInfo not equal after healing") + } + + // Test 4: checks if HealObject returns an error when xl.meta is not found // in more than read quorum number of disks, to create a corrupted situation. - - for i := 0; i <= len(xl.getDisks())/2; i++ { - xl.getDisks()[i].DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile)) + for i := 0; i <= len(er.getDisks())/2; i++ { + er.getDisks()[i].DeleteFile(bucket, pathJoin(object, xlStorageFormatFile)) } // Try healing now, expect to receive errFileNotFound. - _, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan}) + _, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan}) if err != nil { if _, ok := err.(ObjectNotFound); !ok { t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err) } } - // since majority of xl.jsons are not available, object should be successfully deleted. + // since majority of xl.meta's are not available, object should be successfully deleted. _, err = objLayer.GetObjectInfo(ctx, bucket, object, ObjectOptions{}) if _, ok := err.(ObjectNotFound); !ok { t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err) @@ -173,7 +293,7 @@ func TestHealObjectCorrupted(t *testing.T) { } // Tests healing of object. 
-func TestHealObjectXL(t *testing.T) { +func TestHealObjectErasure(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -196,7 +316,7 @@ func TestHealObjectXL(t *testing.T) { data := bytes.Repeat([]byte("a"), 5*1024*1024) var opts ObjectOptions - err = obj.MakeBucketWithLocation(ctx, bucket, "", false) + err = obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{}) if err != nil { t.Fatalf("Failed to make a bucket - %v", err) } @@ -220,51 +340,51 @@ func TestHealObjectXL(t *testing.T) { }) } + // Remove the object backend files from the first disk. + z := obj.(*erasureZones) + er := z.zones[0].sets[0] + firstDisk := er.getDisks()[0] + _, err = obj.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{}) if err != nil { t.Fatalf("Failed to complete multipart upload - %v", err) } - // Remove the object backend files from the first disk. - z := obj.(*xlZones) - xl := z.zones[0].sets[0] - firstDisk := xl.getDisks()[0] - err = firstDisk.DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile)) + err = firstDisk.DeleteFile(bucket, pathJoin(object, xlStorageFormatFile)) if err != nil { t.Fatalf("Failed to delete a file - %v", err) } - _, err = obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) + _, err = obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan}) if err != nil { t.Fatalf("Failed to heal object - %v", err) } - _, err = firstDisk.StatFile(bucket, filepath.Join(object, xlMetaJSONFile)) - if err != nil { - t.Errorf("Expected xl.json file to be present but stat failed - %v", err) + if err = firstDisk.CheckFile(bucket, object); err != nil { + t.Errorf("Expected xl.meta file to be present but stat failed - %v", err) } - xlDisks := xl.getDisks() - z.zones[0].xlDisksMu.Lock() - xl.getDisks = func() []StorageAPI { + erasureDisks := er.getDisks() + z.zones[0].erasureDisksMu.Lock() + er.getDisks = func() []StorageAPI { // Nil more than half the disks, to remove write quorum. - for i := 0; i <= len(xlDisks)/2; i++ { - xlDisks[i] = nil + for i := 0; i <= len(erasureDisks)/2; i++ { + erasureDisks[i] = nil } - return xlDisks + return erasureDisks } - z.zones[0].xlDisksMu.Unlock() + z.zones[0].erasureDisksMu.Unlock() // Try healing now, expect to receive errDiskNotFound. - _, err = obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealDeepScan}) - // since majority of xl.jsons are not available, object quorum can't be read properly and error will be errXLReadQuorum + _, err = obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealDeepScan}) + // since majority of xl.meta's are not available, object quorum can't be read properly and error will be errErasureReadQuorum if _, ok := err.(InsufficientReadQuorum); !ok { t.Errorf("Expected %v but received %v", InsufficientReadQuorum{}, err) } } // Tests healing of empty directories -func TestHealEmptyDirectoryXL(t *testing.T) { +func TestHealEmptyDirectoryErasure(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -285,7 +405,7 @@ func TestHealEmptyDirectoryXL(t *testing.T) { object := "empty-dir/" var opts ObjectOptions - err = obj.MakeBucketWithLocation(ctx, bucket, "", false) + err = obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{}) if err != nil { t.Fatalf("Failed to make a bucket - %v", err) } @@ -298,16 +418,16 @@ func TestHealEmptyDirectoryXL(t *testing.T) { } // Remove the object backend files from the first disk.
- z := obj.(*xlZones) - xl := z.zones[0].sets[0] - firstDisk := xl.getDisks()[0] + z := obj.(*erasureZones) + er := z.zones[0].sets[0] + firstDisk := er.getDisks()[0] err = firstDisk.DeleteFile(bucket, object) if err != nil { t.Fatalf("Failed to delete a file - %v", err) } // Heal the object - hr, err := obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) + hr, err := obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan}) if err != nil { t.Fatalf("Failed to heal object - %v", err) } @@ -331,7 +451,7 @@ func TestHealEmptyDirectoryXL(t *testing.T) { } // Heal the same object again - hr, err = obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) + hr, err = obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan}) if err != nil { t.Fatalf("Failed to heal object - %v", err) } diff --git a/cmd/erasure-list-objects.go b/cmd/erasure-list-objects.go new file mode 100644 index 000000000..d5c2c6b0d --- /dev/null +++ b/cmd/erasure-list-objects.go @@ -0,0 +1,58 @@ +/* + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + + "github.com/minio/minio/pkg/madmin" +) + +// ListObjectVersions - This is not implemented, look for erasure-zones.ListObjectVersions() +func (er erasureObjects) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (loi ListObjectVersionsInfo, e error) { + return loi, NotImplemented{} +} + +// ListObjectsV2 - This is not implemented/needed anymore, look for erasure-zones.ListObjectsV2() +func (er erasureObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, e error) { + return loi, NotImplemented{} +} + +// ListObjects - This is not implemented/needed anymore, look for erasure-zones.ListObjects() +func (er erasureObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { + return loi, NotImplemented{} +} + +// ListBucketsHeal - This is not implemented/needed anymore, look for erasure-zones.ListBucketHeal() +func (er erasureObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { + return nil, NotImplemented{} +} + +// ListObjectsHeal - This is not implemented, look for erasure-zones.ListObjectsHeal() +func (er erasureObjects) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { + return ListObjectsInfo{}, NotImplemented{} +} + +// HealObjects - This is not implemented/needed anymore, look for erasure-zones.HealObjects() +func (er erasureObjects) HealObjects(ctx context.Context, bucket, prefix string, _ madmin.HealOpts, _ HealObjectFn) (e error) { + return NotImplemented{} +} + +// Walk - This is not implemented/needed anymore, look 
for erasure-zones.Walk() +func (er erasureObjects) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error { + return NotImplemented{} +} diff --git a/cmd/erasure-heal.go b/cmd/erasure-lowlevel-heal.go similarity index 96% rename from cmd/erasure-heal.go rename to cmd/erasure-lowlevel-heal.go index 947416abf..66b031d92 100644 --- a/cmd/erasure-heal.go +++ b/cmd/erasure-lowlevel-heal.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/xl-v1-utils.go b/cmd/erasure-metadata-utils.go similarity index 81% rename from cmd/xl-v1-utils.go rename to cmd/erasure-metadata-utils.go index c77b2ed4e..ca78b9678 100644 --- a/cmd/xl-v1-utils.go +++ b/cmd/erasure-metadata-utils.go @@ -20,9 +20,7 @@ import ( "context" "errors" "hash/crc32" - "path" - jsoniter "github.com/json-iterator/go" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/sync/errgroup" ) @@ -72,13 +70,13 @@ func reduceQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, qu // reduceReadQuorumErrs behaves like reduceErrs but only for returning // values of maximally occurring errors validated against readQuorum. func reduceReadQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, readQuorum int) (maxErr error) { - return reduceQuorumErrs(ctx, errs, ignoredErrs, readQuorum, errXLReadQuorum) + return reduceQuorumErrs(ctx, errs, ignoredErrs, readQuorum, errErasureReadQuorum) } // reduceWriteQuorumErrs behaves like reduceErrs but only for returning // values of maximally occurring errors validated against writeQuorum. func reduceWriteQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, writeQuorum int) (maxErr error) { - return reduceQuorumErrs(ctx, errs, ignoredErrs, writeQuorum, errXLWriteQuorum) + return reduceQuorumErrs(ctx, errs, ignoredErrs, writeQuorum, errErasureWriteQuorum) } // Similar to 'len(slice)' but returns the actual elements count @@ -115,44 +113,26 @@ func hashOrder(key string, cardinality int) []int { return nums } -// Constructs xlMetaV1 using `jsoniter` lib. -func xlMetaV1UnmarshalJSON(ctx context.Context, xlMetaBuf []byte) (xlMeta xlMetaV1, err error) { - var json = jsoniter.ConfigCompatibleWithStandardLibrary - err = json.Unmarshal(xlMetaBuf, &xlMeta) - return xlMeta, err -} - -// readXLMeta reads `xl.json` and returns back XL metadata structure. -func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1, err error) { - // Reads entire `xl.json`. - xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile)) - if err != nil { - if err != errFileNotFound && err != errVolumeNotFound { - logger.GetReqInfo(ctx).AppendTags("disk", disk.String()) - logger.LogIf(ctx, err) - } - return xlMetaV1{}, err - } - if len(xlMetaBuf) == 0 { - return xlMetaV1{}, errFileNotFound - } - return xlMetaV1UnmarshalJSON(ctx, xlMetaBuf) -} - -// Reads all `xl.json` metadata as a xlMetaV1 slice. +// Reads all `xl.meta` metadata as a FileInfo slice. // Returns error slice indicating the failed metadata reads. 
-func readAllXLMetadata(ctx context.Context, disks []StorageAPI, bucket, object string) ([]xlMetaV1, []error) { - metadataArray := make([]xlMetaV1, len(disks)) +func readAllFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, versionID string) ([]FileInfo, []error) { + metadataArray := make([]FileInfo, len(disks)) g := errgroup.WithNErrs(len(disks)) - // Read `xl.json` parallelly across disks. + // Read `xl.meta` in parallel across disks. for index := range disks { index := index g.Go(func() (err error) { if disks[index] == nil { return errDiskNotFound } - metadataArray[index], err = readXLMeta(ctx, disks[index], bucket, object) + metadataArray[index], err = disks[index].ReadVersion(bucket, object, versionID) + if err != nil { + if err != errFileNotFound && err != errVolumeNotFound && err != errFileVersionNotFound { + logger.GetReqInfo(ctx).AppendTags("disk", disks[index].String()) + logger.LogIf(ctx, err) + } + } return err }, index) } @@ -162,11 +142,11 @@ func readAllXLMetadata(ctx context.Context, disks []StorageAPI, bucket, object s } // Return shuffled partsMetadata depending on distribution. -func shufflePartsMetadata(partsMetadata []xlMetaV1, distribution []int) (shuffledPartsMetadata []xlMetaV1) { +func shufflePartsMetadata(partsMetadata []FileInfo, distribution []int) (shuffledPartsMetadata []FileInfo) { if distribution == nil { return partsMetadata } - shuffledPartsMetadata = make([]xlMetaV1, len(partsMetadata)) + shuffledPartsMetadata = make([]FileInfo, len(partsMetadata)) // Shuffle slice xl metadata for expected distribution. for index := range partsMetadata { blockIndex := distribution[index] diff --git a/cmd/erasure-metadata-utils_test.go b/cmd/erasure-metadata-utils_test.go new file mode 100644 index 000000000..43341eb1d --- /dev/null +++ b/cmd/erasure-metadata-utils_test.go @@ -0,0 +1,201 @@ +/* + * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + "reflect" + "testing" +) + +// Tests calculating disk count. +func TestDiskCount(t *testing.T) { + testCases := []struct { + disks []StorageAPI + diskCount int + }{ + // Test case - 1 + { + disks: []StorageAPI{&xlStorage{}, &xlStorage{}, &xlStorage{}, &xlStorage{}}, + diskCount: 4, + }, + // Test case - 2 + { + disks: []StorageAPI{nil, &xlStorage{}, &xlStorage{}, &xlStorage{}}, + diskCount: 3, + }, + } + for i, testCase := range testCases { + cdiskCount := diskCount(testCase.disks) + if cdiskCount != testCase.diskCount { + t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.diskCount, cdiskCount) + } + } +} + +// Test for reduceErrs, which reduces a collection +// of errors into a single maximal error within the list. +func TestReduceErrs(t *testing.T) { + // List of all test cases to validate various cases of reduce errors. + testCases := []struct { + errs []error + ignoredErrs []error + err error + }{ + // Validate that errors are reduced properly.
+ {[]error{ + errDiskNotFound, + errDiskNotFound, + errDiskFull, + }, []error{}, errErasureReadQuorum}, + // Validate when there is no consensus. + {[]error{ + errDiskFull, + errDiskNotFound, + nil, nil, + }, []error{}, errErasureReadQuorum}, + // Validate when there is consensus and some errors are ignored. + {[]error{ + errVolumeNotFound, + errVolumeNotFound, + errVolumeNotFound, + errVolumeNotFound, + errVolumeNotFound, + errDiskNotFound, + errDiskNotFound, + }, []error{errDiskNotFound}, errVolumeNotFound}, + {[]error{}, []error{}, errErasureReadQuorum}, + {[]error{errFileNotFound, errFileNotFound, errFileNotFound, + errFileNotFound, errFileNotFound, nil, nil, nil, nil, nil}, + nil, nil}, + } + // Validates the list of all test cases for returning valid errors. + for i, testCase := range testCases { + gotErr := reduceReadQuorumErrs(context.Background(), testCase.errs, testCase.ignoredErrs, 5) + if gotErr != testCase.err { + t.Errorf("Test %d : expected %s, got %s", i+1, testCase.err, gotErr) + } + gotNewErr := reduceWriteQuorumErrs(context.Background(), testCase.errs, testCase.ignoredErrs, 6) + if gotNewErr != errErasureWriteQuorum { + t.Errorf("Test %d : expected %s, got %s", i+1, errErasureWriteQuorum, gotNewErr) + } + } +} + +// TestHashOrder - test order of ints in array +func TestHashOrder(t *testing.T) { + testCases := []struct { + objectName string + hashedOrder []int + }{ + // cases which should pass the test. + // passing in valid object name. + {"object", []int{14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}}, + {"The Shining Script .pdf", []int{16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}, + {"Cost Benefit Analysis (2009-2010).pptx", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}}, + {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}}, + {"SHØRT", []int{11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, + {"There are far too many object names, and far too few bucket names!", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}}, + {"a/b/c/", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}}, + {"/a/b/c", []int{6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5}}, + {string([]byte{0xff, 0xfe, 0xfd}), []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}}, + } + + // Tests hashing order to be consistent. + for i, testCase := range testCases { + hashedOrder := hashOrder(testCase.objectName, 16) + if !reflect.DeepEqual(testCase.hashedOrder, hashedOrder) { + t.Errorf("Test case %d: Expected \"%v\" but got \"%v\"", i+1, testCase.hashedOrder, hashedOrder) + } + } + + // Tests hashing order to fail when the order is '-1'. + if hashedOrder := hashOrder("This will fail", -1); hashedOrder != nil { + t.Errorf("Test: Expected \"nil\" but got \"%#v\"", hashedOrder) + } + + if hashedOrder := hashOrder("This will fail", 0); hashedOrder != nil { + t.Errorf("Test: Expected \"nil\" but got \"%#v\"", hashedOrder) + } +} + +func TestShuffleDisks(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nDisks := 16 + disks, err := getRandomDisks(nDisks) + if err != nil { + t.Fatal(err) + } + objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...)) + if err != nil { + removeRoots(disks) + t.Fatal(err) + } + defer removeRoots(disks) + z := objLayer.(*erasureZones) + testShuffleDisks(t, z) +} + +// Test shuffleDisks, which returns a shuffled slice of disks for their actual distribution.
+func testShuffleDisks(t *testing.T, z *erasureZones) { + disks := z.zones[0].GetDisks(0)() + distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15} + shuffledDisks := shuffleDisks(disks, distribution) + // From the "distribution" above you can notice that: + // 1st data block is in the 9th disk (i.e distribution index 8) + // 2nd data block is in the 8th disk (i.e distribution index 7) and so on. + if shuffledDisks[0] != disks[8] || + shuffledDisks[1] != disks[7] || + shuffledDisks[2] != disks[9] || + shuffledDisks[3] != disks[6] || + shuffledDisks[4] != disks[10] || + shuffledDisks[5] != disks[5] || + shuffledDisks[6] != disks[11] || + shuffledDisks[7] != disks[4] || + shuffledDisks[8] != disks[12] || + shuffledDisks[9] != disks[3] || + shuffledDisks[10] != disks[13] || + shuffledDisks[11] != disks[2] || + shuffledDisks[12] != disks[14] || + shuffledDisks[13] != disks[1] || + shuffledDisks[14] != disks[15] || + shuffledDisks[15] != disks[0] { + t.Errorf("shuffleDisks returned incorrect order.") + } +} + +// TestEvalDisks tests the behavior of evalDisks +func TestEvalDisks(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nDisks := 16 + disks, err := getRandomDisks(nDisks) + if err != nil { + t.Fatal(err) + } + objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...)) + if err != nil { + removeRoots(disks) + t.Fatal(err) + } + defer removeRoots(disks) + z := objLayer.(*erasureZones) + testShuffleDisks(t, z) +} diff --git a/cmd/erasure-metadata.go b/cmd/erasure-metadata.go new file mode 100644 index 000000000..661032586 --- /dev/null +++ b/cmd/erasure-metadata.go @@ -0,0 +1,326 @@ +/* + * MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + "encoding/hex" + "fmt" + "net/http" + "sort" + "time" + + xhttp "github.com/minio/minio/cmd/http" + "github.com/minio/minio/cmd/logger" + "github.com/minio/minio/pkg/sync/errgroup" + "github.com/minio/sha256-simd" +) + +const erasureAlgorithm = "rs-vandermonde" + +// byObjectPartNumber is a collection satisfying sort.Interface. +type byObjectPartNumber []ObjectPartInfo + +func (t byObjectPartNumber) Len() int { return len(t) } +func (t byObjectPartNumber) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t byObjectPartNumber) Less(i, j int) bool { return t[i].Number < t[j].Number } + +// AddChecksumInfo adds a checksum of a part. +func (e *ErasureInfo) AddChecksumInfo(ckSumInfo ChecksumInfo) { + for i, sum := range e.Checksums { + if sum.PartNumber == ckSumInfo.PartNumber { + e.Checksums[i] = ckSumInfo + return + } + } + e.Checksums = append(e.Checksums, ckSumInfo) +} + +// GetChecksumInfo - get checksum of a part. 
+func (e ErasureInfo) GetChecksumInfo(partNumber int) (ckSum ChecksumInfo) { + for _, sum := range e.Checksums { + if sum.PartNumber == partNumber { + // Return the checksum + return sum + } + } + return ChecksumInfo{} +} + +// ShardFileSize - returns final erasure size from original size. +func (e ErasureInfo) ShardFileSize(totalLength int64) int64 { + if totalLength == 0 { + return 0 + } + if totalLength == -1 { + return -1 + } + numShards := totalLength / e.BlockSize + lastBlockSize := totalLength % e.BlockSize + lastShardSize := ceilFrac(lastBlockSize, int64(e.DataBlocks)) + return numShards*e.ShardSize() + lastShardSize +} + +// ShardSize - returns actual shard size from erasure blockSize. +func (e ErasureInfo) ShardSize() int64 { + return ceilFrac(e.BlockSize, int64(e.DataBlocks)) +} + +// IsValid - tells if erasure info fields are valid. +func (fi FileInfo) IsValid() bool { + if fi.Deleted { + // Delete marker has no data, no need to check + // for erasure coding information + return true + } + data := fi.Erasure.DataBlocks + parity := fi.Erasure.ParityBlocks + return ((data >= parity) && (data != 0) && (parity != 0)) +} + +// ToObjectInfo - Converts metadata to object info. +func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo { + if HasSuffix(object, SlashSeparator) { + return ObjectInfo{ + Bucket: bucket, + Name: object, + IsDir: true, + } + } + objInfo := ObjectInfo{ + IsDir: false, + Bucket: bucket, + Name: object, + VersionID: fi.VersionID, + IsLatest: fi.IsLatest, + DeleteMarker: fi.Deleted, + Size: fi.Size, + ModTime: fi.ModTime, + ContentType: fi.Metadata["content-type"], + ContentEncoding: fi.Metadata["content-encoding"], + } + // Update expires + var ( + t time.Time + e error + ) + if exp, ok := fi.Metadata["expires"]; ok { + if t, e = time.Parse(http.TimeFormat, exp); e == nil { + objInfo.Expires = t.UTC() + } + } + objInfo.backendType = BackendErasure + + // Extract etag from metadata. + objInfo.ETag = extractETag(fi.Metadata) + + // Add user tags to the object info + objInfo.UserTags = fi.Metadata[xhttp.AmzObjectTagging] + + // etag/md5Sum has already been extracted. We need to + // remove it to avoid it appearing as part of + // response headers. e.g., X-Minio-* or X-Amz-*. + // Tags have also been extracted, so we remove them as well. + objInfo.UserDefined = cleanMetadata(fi.Metadata) + + // All the parts per object. + objInfo.Parts = fi.Parts + + // Update storage class + if sc, ok := fi.Metadata[xhttp.AmzStorageClass]; ok { + objInfo.StorageClass = sc + } else { + objInfo.StorageClass = globalMinioDefaultStorageClass + } + + // Success. + return objInfo +} + +// objectPartIndex - returns the index of matching object part number. +func objectPartIndex(parts []ObjectPartInfo, partNumber int) int { + for i, part := range parts { + if partNumber == part.Number { + return i + } + } + return -1 +} + +// AddObjectPart - add a new object part in order. +func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize int64, actualSize int64) { + partInfo := ObjectPartInfo{ + Number: partNumber, + ETag: partETag, + Size: partSize, + ActualSize: actualSize, + } + + // Update part info if it already exists. + for i, part := range fi.Parts { + if partNumber == part.Number { + fi.Parts[i] = partInfo + return + } + } + + // Proceed to include new part info. + fi.Parts = append(fi.Parts, partInfo) + + // Parts in FileInfo should be in sorted order by part number.
+ sort.Sort(byObjectPartNumber(fi.Parts)) +} + +// ObjectToPartOffset - translate offset of an object to offset of its individual part. +func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) { + if offset == 0 { + // Special case - if offset is 0, then partIndex and partOffset are always 0. + return 0, 0, nil + } + partOffset = offset + // Seek until object offset maps to a particular part offset. + for i, part := range fi.Parts { + partIndex = i + // Offset is smaller than the part size, we have reached the proper part offset. + if partOffset < part.Size { + return partIndex, partOffset, nil + } + // Continue towards the next part. + partOffset -= part.Size + } + logger.LogIf(ctx, InvalidRange{}) + // Offset beyond the size of the object, return InvalidRange. + return 0, 0, InvalidRange{} +} + +func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, quorum int) (xmv FileInfo, e error) { + metaHashes := make([]string, len(metaArr)) + for i, meta := range metaArr { + if meta.IsValid() && meta.ModTime.Equal(modTime) { + h := sha256.New() + for _, part := range meta.Parts { + h.Write([]byte(fmt.Sprintf("part.%d", part.Number))) + } + metaHashes[i] = hex.EncodeToString(h.Sum(nil)) + } + } + + metaHashCountMap := make(map[string]int) + for _, hash := range metaHashes { + if hash == "" { + continue + } + metaHashCountMap[hash]++ + } + + maxHash := "" + maxCount := 0 + for hash, count := range metaHashCountMap { + if count > maxCount { + maxCount = count + maxHash = hash + } + } + + if maxCount < quorum { + return FileInfo{}, errErasureReadQuorum + } + + for i, hash := range metaHashes { + if hash == maxHash { + return metaArr[i], nil + } + } + + return FileInfo{}, errErasureReadQuorum +} + +// pickValidFileInfo - picks and returns one valid FileInfo from a +// slice of FileInfo. +func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, quorum int) (xmv FileInfo, e error) { + return findFileInfoInQuorum(ctx, metaArr, modTime, quorum) +} + +// Rename metadata content to destination location for each disk concurrently. +func renameFileInfo(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, quorum int) ([]StorageAPI, error) { + ignoredErr := []error{errFileNotFound} + + g := errgroup.WithNErrs(len(disks)) + + // Rename file on all underlying storage disks. + for index := range disks { + index := index + g.Go(func() error { + if disks[index] == nil { + return errDiskNotFound + } + if err := disks[index].RenameData(srcBucket, srcEntry, "", dstBucket, dstEntry); err != nil { + if !IsErrIgnored(err, ignoredErr...) { + return err + } + } + return nil + }, index) + } + + // Wait for all renames to finish. + errs := g.Wait() + + // We can safely allow RenameData errors up to len(er.getDisks()) - writeQuorum, + // otherwise return failure. Clean up successful renames. + err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, quorum) + return evalDisks(disks, errs), err +} + +// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently. +func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) { + g := errgroup.WithNErrs(len(disks)) + + // Start writing `xl.meta` to all disks in parallel.
+ for index := range disks { + index := index + g.Go(func() error { + if disks[index] == nil { + return errDiskNotFound + } + // Pick one FileInfo for a disk at index. + files[index].Erasure.Index = index + 1 + return disks[index].WriteMetadata(bucket, prefix, files[index]) + }, index) + } + + // Wait for all the routines. + mErrs := g.Wait() + + err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum) + return evalDisks(disks, mErrs), err +} + +// Returns per object readQuorum and writeQuorum +// readQuorum is the min required disks to read data. +// writeQuorum is the min required disks to write data. +func objectQuorumFromMeta(ctx context.Context, er erasureObjects, partsMetaData []FileInfo, errs []error) (objectReadQuorum, objectWriteQuorum int, err error) { + // get the latest updated Metadata and a count of all the latest updated FileInfo(s) + latestFileInfo, err := getLatestFileInfo(ctx, partsMetaData, errs) + if err != nil { + return 0, 0, err + } + + // Since all the valid erasure code meta updated at the same time are equivalent, pass dataBlocks + // from latestFileInfo to get the quorum + return latestFileInfo.Erasure.DataBlocks, latestFileInfo.Erasure.DataBlocks + 1, nil +} diff --git a/cmd/erasure-metadata_test.go b/cmd/erasure-metadata_test.go new file mode 100644 index 000000000..438955e7e --- /dev/null +++ b/cmd/erasure-metadata_test.go @@ -0,0 +1,153 @@ +/* + * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + "strconv" + "testing" + + humanize "github.com/dustin/go-humanize" +) + +const ActualSize = 1000 + +// Test FileInfo.AddObjectPart() +func TestAddObjectPart(t *testing.T) { + testCases := []struct { + partNum int + expectedIndex int + }{ + {1, 0}, + {2, 1}, + {4, 2}, + {5, 3}, + {7, 4}, + // Insert part. + {3, 2}, + // Replace existing part. + {4, 3}, + // Missing part. + {6, -1}, + } + + // Setup. + fi := newFileInfo("test-object", 8, 8) + if !fi.IsValid() { + t.Fatalf("unable to get xl meta") + } + + // Test them. + for _, testCase := range testCases { + if testCase.expectedIndex > -1 { + partNumString := strconv.Itoa(testCase.partNum) + fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize) + } + + if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex { + t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index) + } + } +} + +// Test objectPartIndex(). Generates sample FileInfo data and asserts +// the output of objectPartIndex() against the expected value. +func TestObjectPartIndex(t *testing.T) { + testCases := []struct { + partNum int + expectedIndex int + }{ + {2, 1}, + {1, 0}, + {5, 3}, + {4, 2}, + {7, 4}, + } + + // Setup. + fi := newFileInfo("test-object", 8, 8) + if !fi.IsValid() { + t.Fatalf("unable to get xl meta") + } + + // Add some parts for testing.
+ for _, testCase := range testCases { + partNumString := strconv.Itoa(testCase.partNum) + fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize) + } + + // Add failure test case. + testCases = append(testCases, struct { + partNum int + expectedIndex int + }{6, -1}) + + // Test them. + for _, testCase := range testCases { + if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex { + t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index) + } + } +} + +// Test FileInfo.ObjectToPartOffset(). +func TestObjectToPartOffset(t *testing.T) { + // Setup. + fi := newFileInfo("test-object", 8, 8) + if !fi.IsValid() { + t.Fatalf("unable to get xl meta") + } + + // Add some parts for testing. + // Total size of all parts is 5,242,899 bytes. + for _, partNum := range []int{1, 2, 4, 5, 7} { + partNumString := strconv.Itoa(partNum) + fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize) + } + + testCases := []struct { + offset int64 + expectedIndex int + expectedOffset int64 + expectedErr error + }{ + {0, 0, 0, nil}, + {1 * humanize.MiByte, 0, 1 * humanize.MiByte, nil}, + {1 + humanize.MiByte, 1, 0, nil}, + {2 + humanize.MiByte, 1, 1, nil}, + // It's valid for a zero sized object. + {-1, 0, -1, nil}, + // Max offset is always (size - 1). + {(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte) - 1, 4, 1048582, nil}, + // Error if offset is size. + {(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte), 0, 0, InvalidRange{}}, + } + + // Test them. + for _, testCase := range testCases { + index, offset, err := fi.ObjectToPartOffset(context.Background(), testCase.offset) + if err != testCase.expectedErr { + t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err) + } + if index != testCase.expectedIndex { + t.Fatalf("%+v: index: expected = %d, got: %d", testCase, testCase.expectedIndex, index) + } + if offset != testCase.expectedOffset { + t.Fatalf("%+v: offset: expected = %d, got: %d", testCase, testCase.expectedOffset, offset) + } + } +} diff --git a/cmd/xl-v1-multipart.go b/cmd/erasure-multipart.go similarity index 53% rename from cmd/xl-v1-multipart.go rename to cmd/erasure-multipart.go index 81e953852..c96d4a3d2 100644 --- a/cmd/xl-v1-multipart.go +++ b/cmd/erasure-multipart.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,7 +24,6 @@ import ( "sort" "strconv" "strings" - "time" xhttp "github.com/minio/minio/cmd/http" "github.com/minio/minio/cmd/logger" @@ -32,24 +31,25 @@ import ( "github.com/minio/minio/pkg/sync/errgroup" ) -func (xl xlObjects) getUploadIDDir(bucket, object, uploadID string) string { - return pathJoin(xl.getMultipartSHADir(bucket, object), uploadID) +func (er erasureObjects) getUploadIDDir(bucket, object, uploadID string) string { + return pathJoin(er.getMultipartSHADir(bucket, object), uploadID) } -func (xl xlObjects) getMultipartSHADir(bucket, object string) string { +func (er erasureObjects) getMultipartSHADir(bucket, object string) string { return getSHA256Hash([]byte(pathJoin(bucket, object))) } // checkUploadIDExists - verify if a given uploadID exists and is valid.
-func (xl xlObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) error { - _, err := xl.getObjectInfo(ctx, minioMetaMultipartBucket, xl.getUploadIDDir(bucket, object, uploadID), ObjectOptions{}) +func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) error { + _, err := er.getObjectInfo(ctx, minioMetaMultipartBucket, er.getUploadIDDir(bucket, object, uploadID), ObjectOptions{}) return err } // Removes part given by partName belonging to a multipart upload from minioMetaBucket -func (xl xlObjects) removeObjectPart(bucket, object, uploadID string, partNumber int) { - curpartPath := pathJoin(xl.getUploadIDDir(bucket, object, uploadID), fmt.Sprintf("part.%d", partNumber)) - storageDisks := xl.getDisks() +func (er erasureObjects) removeObjectPart(bucket, object, uploadID, dataDir string, partNumber int) { + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) + curpartPath := pathJoin(uploadIDPath, dataDir, fmt.Sprintf("part.%d", partNumber)) + storageDisks := er.getDisks() g := errgroup.WithNErrs(len(storageDisks)) for index, disk := range storageDisks { @@ -59,7 +59,7 @@ func (xl xlObjects) removeObjectPart(bucket, object, uploadID string, partNumber index := index g.Go(func() error { // Ignoring failure to remove parts that weren't present in CompleteMultipartUpload - // requests. xl.json is the authoritative source of truth on which parts constitute + // requests. xl.meta is the authoritative source of truth on which parts constitute // the object. The presence of parts that don't belong in the object doesn't affect correctness. _ = storageDisks[index].DeleteFile(minioMetaMultipartBucket, curpartPath) return nil @@ -68,36 +68,6 @@ func (xl xlObjects) removeObjectPart(bucket, object, uploadID string, partNumber g.Wait() } -// commitXLMetadata - commit `xl.json` from source prefix to destination prefix in the given slice of disks. -func commitXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPrefix string, quorum int) ([]StorageAPI, error) { - srcJSONFile := path.Join(srcPrefix, xlMetaJSONFile) - dstJSONFile := path.Join(dstPrefix, xlMetaJSONFile) - - g := errgroup.WithNErrs(len(disks)) - - // Rename `xl.json` to all disks in parallel. - for index := range disks { - index := index - g.Go(func() error { - if disks[index] == nil { - return errDiskNotFound - } - - // Delete any dangling directories. - defer disks[index].DeleteFile(srcBucket, srcPrefix) - - // Renames `xl.json` from source prefix to destination prefix. - return disks[index].RenameFile(srcBucket, srcJSONFile, dstBucket, dstJSONFile) - }, index) - } - - // Wait for all the routines. - mErrs := g.Wait() - - err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum) - return evalDisks(disks, mErrs), err -} - // ListMultipartUploads - lists all the pending multipart // uploads for a particular object in a bucket. // @@ -105,17 +75,17 @@ func commitXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcPre // not support prefix based listing, this is a deliberate attempt // towards simplification of multipart APIs. // The resulting ListMultipartsInfo structure is unmarshalled directly as XML.
-func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) { +func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) { result.MaxUploads = maxUploads result.KeyMarker = keyMarker result.Prefix = object result.Delimiter = delimiter - for _, disk := range xl.getLoadBalancedDisks() { + for _, disk := range er.getLoadBalancedDisks() { if disk == nil { continue } - uploadIDs, err := disk.ListDir(minioMetaMultipartBucket, xl.getMultipartSHADir(bucket, object), -1, "") + uploadIDs, err := disk.ListDir(minioMetaMultipartBucket, er.getMultipartSHADir(bucket, object), -1) if err != nil { if err == errFileNotFound { return result, nil @@ -147,16 +117,16 @@ func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, ke // '.minio.sys/multipart/bucket/object/uploads.json' on all the // disks. `uploads.json` carries metadata regarding on-going multipart // operation(s) on the object. -func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, object string, meta map[string]string) (string, error) { +func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (string, error) { - onlineDisks := xl.getDisks() - parityBlocks := globalStorageClass.GetParityForSC(meta[xhttp.AmzStorageClass]) + onlineDisks := er.getDisks() + parityBlocks := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass]) if parityBlocks == 0 { parityBlocks = len(onlineDisks) / 2 } dataBlocks := len(onlineDisks) - parityBlocks - xlMeta := newXLMetaV1(object, dataBlocks, parityBlocks) + fi := newFileInfo(object, dataBlocks, parityBlocks) // we now know the number of blocks this object needs for data and parity. // establish the writeQuorum using this data @@ -165,30 +135,37 @@ func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, objec writeQuorum = dataBlocks + 1 } - if meta["content-type"] == "" { + if opts.UserDefined["content-type"] == "" { contentType := mimedb.TypeByExtension(path.Ext(object)) - meta["content-type"] = contentType + opts.UserDefined["content-type"] = contentType } - xlMeta.Stat.ModTime = UTCNow() - xlMeta.Meta = meta + + // Calculate the version to be saved. + if opts.Versioned { + fi.VersionID = mustGetUUID() + } + + fi.DataDir = mustGetUUID() + fi.ModTime = UTCNow() + fi.Metadata = opts.UserDefined uploadID := mustGetUUID() - uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) tempUploadIDPath := uploadID // Delete the tmp path later in case we fail to commit (ignore // returned errors) - this will be a no-op in case of a commit // success. - defer xl.deleteObject(ctx, minioMetaTmpBucket, tempUploadIDPath, writeQuorum, false) + defer er.deleteObject(ctx, minioMetaTmpBucket, tempUploadIDPath, writeQuorum) - var partsMetadata = make([]xlMetaV1, len(onlineDisks)) + var partsMetadata = make([]FileInfo, len(onlineDisks)) for i := range onlineDisks { - partsMetadata[i] = xlMeta + partsMetadata[i] = fi } var err error - // Write updated `xl.json` to all disks. - onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, writeQuorum) + // Write updated `xl.meta` to all disks. 
+ onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, writeQuorum) if err != nil { return "", toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath) } @@ -208,12 +185,12 @@ func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, objec // subsequent request each UUID is unique. // // Implements S3 compatible initiate multipart API. -func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) { +func (er erasureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) { // No metadata is set, allocate a new one. if opts.UserDefined == nil { opts.UserDefined = make(map[string]string) } - return xl.newMultipartUpload(ctx, bucket, object, opts.UserDefined) + return er.newMultipartUpload(ctx, bucket, object, opts) } // CopyObjectPart - reads incoming stream and internally erasure codes @@ -221,8 +198,8 @@ func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object strin // data is read from an existing object. // // Implements S3 compatible Upload Part Copy API. -func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) { - partInfo, err := xl.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts) +func (er erasureObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) { + partInfo, err := er.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts) if err != nil { return pi, toObjectErr(err, dstBucket, dstObject) } @@ -236,64 +213,60 @@ func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, ds // of the multipart transaction. // // Implements S3 compatible Upload Part API. -func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) { +func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) { data := r.Reader - if err := checkPutObjectPartArgs(ctx, bucket, object, xl); err != nil { - return pi, err - } - // Validate input data size and it can never be less than zero. if data.Size() < -1 { logger.LogIf(ctx, errInvalidArgument, logger.Application) return pi, toObjectErr(errInvalidArgument) } - var partsMetadata []xlMetaV1 + var partsMetadata []FileInfo var errs []error - uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) // Validates if upload ID exists. - if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { + if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { return pi, toObjectErr(err, bucket, object, uploadID) } // Read metadata associated with the object from all disks. 
- partsMetadata, errs = readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, - uploadIDPath) + partsMetadata, errs = readAllFileInfo(ctx, er.getDisks(), minioMetaMultipartBucket, + uploadIDPath, "") // get Quorum for this object - _, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) + _, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs) if err != nil { return pi, toObjectErr(err, bucket, object) } reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) - if reducedErr == errXLWriteQuorum { + if reducedErr == errErasureWriteQuorum { return pi, toObjectErr(reducedErr, bucket, object) } // List all online disks. - onlineDisks, modTime := listOnlineDisks(xl.getDisks(), partsMetadata, errs) + onlineDisks, modTime := listOnlineDisks(er.getDisks(), partsMetadata, errs) // Pick one from the first valid metadata. - xlMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) + fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum) if err != nil { return pi, err } - onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution) + onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution) // Need a unique name for the part being written in minioMetaBucket to // accommodate concurrent PutObjectPart requests partSuffix := fmt.Sprintf("part.%d", partID) tmpPart := mustGetUUID() - tmpPartPath := path.Join(tmpPart, partSuffix) + tmpPartPath := pathJoin(tmpPart, partSuffix) // Delete the temporary object part. If PutObjectPart succeeds there would be nothing to delete. - defer xl.deleteObject(ctx, minioMetaTmpBucket, tmpPart, writeQuorum, false) + defer er.deleteObject(ctx, minioMetaTmpBucket, tmpPart, writeQuorum) - erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize) + erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize) if err != nil { return pi, toObjectErr(err, bucket, object) } @@ -303,16 +276,16 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID switch size := data.Size(); { case size == 0: buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF - case size == -1 || size >= blockSizeV1: - buffer = xl.bp.Get() - defer xl.bp.Put(buffer) - case size < blockSizeV1: - // No need to allocate fully blockSizeV1 buffer if the incoming data is smaller. - buffer = make([]byte, size, 2*size+int64(erasure.parityBlocks+erasure.dataBlocks-1)) + case size == -1 || size >= fi.Erasure.BlockSize: + buffer = er.bp.Get() + defer er.bp.Put(buffer) + case size < fi.Erasure.BlockSize: + // No need to allocate fully fi.Erasure.BlockSize buffer if the incoming data is smaller. 
+ buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1)) } - if len(buffer) > int(xlMeta.Erasure.BlockSize) { - buffer = buffer[:xlMeta.Erasure.BlockSize] + if len(buffer) > int(fi.Erasure.BlockSize) { + buffer = buffer[:fi.Erasure.BlockSize] } writers := make([]io.Writer, len(onlineDisks)) for i, disk := range onlineDisks { @@ -322,7 +295,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize()) } - n, err := erasure.Encode(ctx, data, writers, buffer, erasure.dataBlocks+1) + n, err := erasure.Encode(ctx, data, writers, buffer, fi.Erasure.DataBlocks+1) closeBitrotWriters(writers) if err != nil { return pi, toObjectErr(err, bucket, object) @@ -341,21 +314,21 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID } // Validates if upload ID exists. - if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { + if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { return pi, toObjectErr(err, bucket, object, uploadID) } // Rename temporary part file to its final location. - partPath := path.Join(uploadIDPath, partSuffix) + partPath := pathJoin(uploadIDPath, fi.DataDir, partSuffix) onlineDisks, err = rename(ctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, false, writeQuorum, nil) if err != nil { return pi, toObjectErr(err, minioMetaMultipartBucket, partPath) } // Read metadata again because it might be updated with parallel upload of another part. - partsMetadata, errs = readAllXLMetadata(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath) + partsMetadata, errs = readAllFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, "") reducedErr = reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) - if reducedErr == errXLWriteQuorum { + if reducedErr == errErasureWriteQuorum { return pi, toObjectErr(reducedErr, bucket, object) } @@ -363,25 +336,26 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID onlineDisks, modTime = listOnlineDisks(onlineDisks, partsMetadata, errs) // Pick one from the first valid metadata. - xlMeta, err = pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) + fi, err = pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum) if err != nil { return pi, err } - // Once part is successfully committed, proceed with updating XL metadata. - xlMeta.Stat.ModTime = UTCNow() + // Once part is successfully committed, proceed with updating erasure metadata. + fi.ModTime = UTCNow() md5hex := r.MD5CurrentHexString() // Add the current part. - xlMeta.AddObjectPart(partID, md5hex, n, data.ActualSize()) + fi.AddObjectPart(partID, md5hex, n, data.ActualSize()) for i, disk := range onlineDisks { if disk == OfflineDisk { continue } - partsMetadata[i].Stat = xlMeta.Stat - partsMetadata[i].Parts = xlMeta.Parts + partsMetadata[i].Size = fi.Size + partsMetadata[i].ModTime = fi.ModTime + partsMetadata[i].Parts = fi.Parts partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{ PartNumber: partID, Algorithm: DefaultBitrotAlgorithm, @@ -389,19 +363,8 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID }) } - // Write all the checksum metadata. 
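A note on the buffer-sizing switch in PutObjectPart above, which is hard to read in diff form: zero-size uploads get a one-byte buffer, unknown or block-sized uploads get a pooled buffer, and small uploads get an exact-length buffer with padded capacity. A standalone sketch with hypothetical sizes (the pooled er.bp buffer is replaced here by a plain allocation):

package main

import "fmt"

// pickBuffer mirrors the three-way switch above; illustrative only.
func pickBuffer(size, blockSize int64, dataBlocks, parityBlocks int) []byte {
	switch {
	case size == 0:
		return make([]byte, 1) // at least one byte so reads can reach EOF
	case size == -1 || size >= blockSize:
		return make([]byte, blockSize) // stand-in for er.bp.Get()
	default:
		// Small object: exact length, capacity padded for shard alignment.
		return make([]byte, size, 2*size+int64(dataBlocks+parityBlocks-1))
	}
}

func main() {
	b := pickBuffer(4096, 10*1024*1024, 6, 2) // 4 KiB part, 10 MiB block
	fmt.Println(len(b), cap(b))               // 4096 8199
}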
- tempXLMetaPath := mustGetUUID() - - // Cleanup in case of xl.json writing failure - defer xl.deleteObject(ctx, minioMetaTmpBucket, tempXLMetaPath, writeQuorum, false) - - // Writes a unique `xl.json` each disk carrying new checksum related information. - onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, writeQuorum) - if err != nil { - return pi, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath) - } - - if _, err = commitXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum); err != nil { + // Writes updated `xl.meta` format for each disk. + if _, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil { return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) } @@ -409,8 +372,8 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID return PartInfo{ PartNumber: partID, ETag: md5hex, - LastModified: xlMeta.Stat.ModTime, - Size: xlMeta.Stat.Size, + LastModified: fi.ModTime, + Size: fi.Size, ActualSize: data.ActualSize(), }, nil } @@ -419,44 +382,44 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID // by callers to verify object states // - encrypted // - compressed -func (xl xlObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { +func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { result := MultipartInfo{ Bucket: bucket, Object: object, UploadID: uploadID, } - if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { + if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { return result, toObjectErr(err, bucket, object, uploadID) } - uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) - storageDisks := xl.getDisks() + storageDisks := er.getDisks() // Read metadata associated with the object from all disks. - partsMetadata, errs := readAllXLMetadata(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath) + partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, opts.VersionID) // get Quorum for this object - _, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) + readQuorum, _, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs) if err != nil { return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) } - reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) - if reducedErr == errXLWriteQuorum { + reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum) + if reducedErr == errErasureReadQuorum { return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath) } _, modTime := listOnlineDisks(storageDisks, partsMetadata, errs) // Pick one from the first valid metadata. - xlMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) + fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, readQuorum) if err != nil { return result, err } - result.UserDefined = xlMeta.Meta + result.UserDefined = fi.Metadata return result, nil } @@ -467,51 +430,47 @@ func (xl xlObjects) GetMultipartInfo(ctx context.Context, bucket, object, upload // Implements S3 compatible ListObjectParts API.
The resulting // ListPartsInfo structure is marshaled directly into XML and // replied back to the client. -func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) { - - if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { +func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) { + if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { return result, toObjectErr(err, bucket, object, uploadID) } - uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) - storageDisks := xl.getDisks() + storageDisks := er.getDisks() // Read metadata associated with the object from all disks. - partsMetadata, errs := readAllXLMetadata(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath) + partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "") // get Quorum for this object - _, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) + _, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs) if err != nil { return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) } reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) - if reducedErr == errXLWriteQuorum { + if reducedErr == errErasureWriteQuorum { return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath) } _, modTime := listOnlineDisks(storageDisks, partsMetadata, errs) // Pick one from the first valid metadata. - xlValidMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) + fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum) if err != nil { return result, err } - var xlMeta = xlValidMeta.Meta - var xlParts = xlValidMeta.Parts - // Populate the result stub. result.Bucket = bucket result.Object = object result.UploadID = uploadID result.MaxParts = maxParts result.PartNumberMarker = partNumberMarker - result.UserDefined = xlMeta + result.UserDefined = fi.Metadata // For empty number of parts or maxParts as zero, return right here. - if len(xlParts) == 0 || maxParts == 0 { + if len(fi.Parts) == 0 || maxParts == 0 { return result, nil } @@ -521,17 +480,17 @@ func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadI } // Only parts with higher part numbers will be listed. - partIdx := objectPartIndex(xlParts, partNumberMarker) - parts := xlParts + partIdx := objectPartIndex(fi.Parts, partNumberMarker) + parts := fi.Parts if partIdx != -1 { - parts = xlParts[partIdx+1:] + parts = fi.Parts[partIdx+1:] } count := maxParts for _, part := range parts { result.Parts = append(result.Parts, PartInfo{ PartNumber: part.Number, ETag: part.ETag, - LastModified: xlValidMeta.Stat.ModTime, + LastModified: fi.ModTime, Size: part.Size, }) count-- @@ -556,14 +515,14 @@ func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadI // md5sums of all the parts. // // Implements S3 compatible Complete multipart API. 
-func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) { - if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { +func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) { + if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { return oi, toObjectErr(err, bucket, object, uploadID) } // Check if an object is present as one of the parent dir. // -- FIXME. (needs a new kind of lock). - if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) { + if er.parentDirIsObject(ctx, bucket, path.Dir(object)) { return oi, toObjectErr(errFileParentIsFile, bucket, object) } @@ -572,21 +531,21 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, // Calculate s3 compatible md5sum for complete multipart. s3MD5 := getCompleteMultipartMD5(parts) - uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) - storageDisks := xl.getDisks() + storageDisks := er.getDisks() // Read metadata associated with the object from all disks. - partsMetadata, errs := readAllXLMetadata(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath) + partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "") // get Quorum for this object - _, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) + _, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs) if err != nil { return oi, toObjectErr(err, bucket, object) } reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) - if reducedErr == errXLWriteQuorum { + if reducedErr == errErasureWriteQuorum { return oi, toObjectErr(reducedErr, bucket, object) } @@ -599,28 +558,26 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, var objectActualSize int64 // Pick one from the first valid metadata. - xlMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) + fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum) if err != nil { return oi, err } // Order online disks in accordance with distribution order. - onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution) + onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution) // Order parts metadata in accordance with distribution order. - partsMetadata = shufflePartsMetadata(partsMetadata, xlMeta.Erasure.Distribution) + partsMetadata = shufflePartsMetadata(partsMetadata, fi.Erasure.Distribution) - // Save current xl meta for validation. - var currentXLMeta = xlMeta + // Save current erasure metadata for validation. + var currentFI = fi // Allocate parts similar to incoming slice. - xlMeta.Parts = make([]ObjectPartInfo, len(parts)) + fi.Parts = make([]ObjectPartInfo, len(parts)) // Validate each part and then commit to disk. for i, part := range parts { - // ensure that part ETag is canonicalized to strip off extraneous quotes - part.ETag = canonicalizeETag(part.ETag) - partIdx := objectPartIndex(currentXLMeta.Parts, part.PartNumber) + partIdx := objectPartIndex(currentFI.Parts, part.PartNumber) // All parts should have same part number. 
if partIdx == -1 { invp := InvalidPart{ @@ -630,116 +587,103 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, return oi, invp } - if currentXLMeta.Parts[partIdx].ETag != part.ETag { + // ensure that part ETag is canonicalized to strip off extraneous quotes + part.ETag = canonicalizeETag(part.ETag) + if currentFI.Parts[partIdx].ETag != part.ETag { invp := InvalidPart{ PartNumber: part.PartNumber, - ExpETag: currentXLMeta.Parts[partIdx].ETag, + ExpETag: currentFI.Parts[partIdx].ETag, GotETag: part.ETag, } return oi, invp } // All parts except the last part has to be atleast 5MB. - if (i < len(parts)-1) && !isMinAllowedPartSize(currentXLMeta.Parts[partIdx].ActualSize) { + if (i < len(parts)-1) && !isMinAllowedPartSize(currentFI.Parts[partIdx].ActualSize) { return oi, PartTooSmall{ PartNumber: part.PartNumber, - PartSize: currentXLMeta.Parts[partIdx].ActualSize, + PartSize: currentFI.Parts[partIdx].ActualSize, PartETag: part.ETag, } } // Save for total object size. - objectSize += currentXLMeta.Parts[partIdx].Size + objectSize += currentFI.Parts[partIdx].Size // Save the consolidated actual size. - objectActualSize += currentXLMeta.Parts[partIdx].ActualSize + objectActualSize += currentFI.Parts[partIdx].ActualSize // Add incoming parts. - xlMeta.Parts[i] = ObjectPartInfo{ + fi.Parts[i] = ObjectPartInfo{ Number: part.PartNumber, - Size: currentXLMeta.Parts[partIdx].Size, - ActualSize: currentXLMeta.Parts[partIdx].ActualSize, + Size: currentFI.Parts[partIdx].Size, + ActualSize: currentFI.Parts[partIdx].ActualSize, } } // Save the final object size and modtime. - xlMeta.Stat.Size = objectSize - xlMeta.Stat.ModTime = UTCNow() + fi.Size = objectSize + fi.ModTime = UTCNow() // Save successfully calculated md5sum. - xlMeta.Meta["etag"] = s3MD5 + fi.Metadata["etag"] = s3MD5 // Save the consolidated actual size. - xlMeta.Meta[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10) + fi.Metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10) - // Update all xl metadata, make sure to not modify fields like + // Update all erasure metadata, make sure to not modify fields like // checksum which are different on each disks. for index := range partsMetadata { - partsMetadata[index].Stat = xlMeta.Stat - partsMetadata[index].Meta = xlMeta.Meta - partsMetadata[index].Parts = xlMeta.Parts + partsMetadata[index].Size = fi.Size + partsMetadata[index].ModTime = fi.ModTime + partsMetadata[index].Metadata = fi.Metadata + partsMetadata[index].Parts = fi.Parts } - tempXLMetaPath := mustGetUUID() - - // Cleanup in case of failure - defer xl.deleteObject(ctx, minioMetaTmpBucket, tempXLMetaPath, writeQuorum, false) - - // Write unique `xl.json` for each disk. - if onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, writeQuorum); err != nil { - return oi, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath) - } - - var rErr error - onlineDisks, rErr = commitXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum) - if rErr != nil { - return oi, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath) - } - - if xl.isObject(bucket, object) { - // Rename if an object already exists to temporary location. - newUniqueID := mustGetUUID() - - // Delete success renamed object. 
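An aside on the canonicalizeETag call, which the hunk above moves so the part lookup happens before the ETag comparison: clients commonly send part ETags wrapped in double quotes, while the stored ETag is unquoted. A rough standalone equivalent (the real helper may normalize more than surrounding quotes):

package main

import (
	"fmt"
	"strings"
)

// trimETagQuotes is a stand-in for canonicalizeETag above: compare part
// ETags with any surrounding double quotes stripped.
func trimETagQuotes(etag string) string {
	return strings.Trim(etag, `"`)
}

func main() {
	got := trimETagQuotes(`"9bb58f26192e4ba00f01e2e7b136bbd8"`)
	fmt.Println(got == "9bb58f26192e4ba00f01e2e7b136bbd8") // true
}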
- defer xl.deleteObject(ctx, minioMetaTmpBucket, newUniqueID, writeQuorum, false) - - // NOTE: Do not use online disks slice here: the reason is that existing object should be purged - // regardless of `xl.json` status and rolled back in case of errors. Also allow renaming of the - // existing object if it is not present in quorum disks so users can overwrite stale objects. - _, err = rename(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, newUniqueID, true, writeQuorum, []error{errFileNotFound}) - if err != nil { - return oi, toObjectErr(err, bucket, object) - } + // Write final `xl.meta` at uploadID location + if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil { + return oi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) } // Remove parts that weren't present in CompleteMultipartUpload request. - for _, curpart := range currentXLMeta.Parts { - if objectPartIndex(xlMeta.Parts, curpart.Number) == -1 { + for _, curpart := range currentFI.Parts { + if objectPartIndex(fi.Parts, curpart.Number) == -1 { // Delete the missing part files. e.g, // Request 1: NewMultipart // Request 2: PutObjectPart 1 // Request 3: PutObjectPart 2 // Request 4: CompleteMultipartUpload --part 2 // N.B. 1st part is not present. This part should be removed from the storage. - xl.removeObjectPart(bucket, object, uploadID, curpart.Number) + er.removeObjectPart(bucket, object, uploadID, fi.DataDir, curpart.Number) } } // Rename the multipart object to final location. - if onlineDisks, err = rename(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, bucket, object, true, writeQuorum, nil); err != nil { + if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, + fi.DataDir, bucket, object, writeQuorum, nil); err != nil { return oi, toObjectErr(err, bucket, object) } // Check if there is any offline disk and add it to the MRF list for i := 0; i < len(onlineDisks); i++ { if onlineDisks[i] == nil || storageDisks[i] == nil { - xl.addPartialUpload(bucket, object) + er.addPartialUpload(bucket, object) break } } + for i := 0; i < len(onlineDisks); i++ { + if onlineDisks[i] == nil { + continue + } + // Object info is the same in all disks, so we can pick + // the first meta from online disk + fi = partsMetadata[i] + break + } + // Success, return object info. - return xlMeta.ToObjectInfo(bucket, object), nil + return fi.ToObjectInfo(bucket, object), nil } // AbortMultipartUpload - aborts an ongoing multipart operation @@ -753,79 +697,28 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, // Implements S3 compatible Abort multipart API, slight difference is // that this is an atomic idempotent operation. Subsequent calls have // no affect and further requests to the same uploadID would not be honored. -func (xl xlObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { +func (er erasureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { // Validates if upload ID exists. - if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { + if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { return toObjectErr(err, bucket, object, uploadID) } - uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) // Read metadata associated with the object from all disks. 
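The fi.DataDir argument threaded through renameData and removeObjectPart above implies a layout change for multipart state on disk; roughly (paths illustrative, UUIDs shortened):

// Before (xl.json era):
//   .minio.sys/multipart/<shaDir>/<uploadID>/xl.json
//   .minio.sys/multipart/<shaDir>/<uploadID>/part.1
//
// After (xl.meta, versioning-aware):
//   .minio.sys/multipart/<shaDir>/<uploadID>/xl.meta
//   .minio.sys/multipart/<shaDir>/<uploadID>/<dataDir UUID>/part.1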
- partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, uploadIDPath) + partsMetadata, errs := readAllFileInfo(ctx, er.getDisks(), minioMetaMultipartBucket, uploadIDPath, "") // get Quorum for this object - _, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) + _, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs) if err != nil { return toObjectErr(err, bucket, object, uploadID) } // Cleanup all uploaded parts. - if err = xl.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, writeQuorum, false); err != nil { + if err = er.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, writeQuorum); err != nil { return toObjectErr(err, bucket, object, uploadID) } // Successfully purged. return nil } - -// Clean-up the old multipart uploads. Should be run in a Go routine. -func (xl xlObjects) cleanupStaleMultipartUploads(ctx context.Context, cleanupInterval, expiry time.Duration, doneCh <-chan struct{}) { - ticker := time.NewTicker(cleanupInterval) - defer ticker.Stop() - - for { - select { - case <-doneCh: - return - case <-ticker.C: - var disk StorageAPI - for _, d := range xl.getLoadBalancedDisks() { - if d != nil { - disk = d - break - } - } - if disk == nil { - continue - } - xl.cleanupStaleMultipartUploadsOnDisk(ctx, disk, expiry) - } - } -} - -// Remove the old multipart uploads on the given disk. -func (xl xlObjects) cleanupStaleMultipartUploadsOnDisk(ctx context.Context, disk StorageAPI, expiry time.Duration) { - now := time.Now() - shaDirs, err := disk.ListDir(minioMetaMultipartBucket, "", -1, "") - if err != nil { - return - } - for _, shaDir := range shaDirs { - uploadIDDirs, err := disk.ListDir(minioMetaMultipartBucket, shaDir, -1, "") - if err != nil { - continue - } - for _, uploadIDDir := range uploadIDDirs { - uploadIDPath := pathJoin(shaDir, uploadIDDir) - fi, err := disk.StatFile(minioMetaMultipartBucket, pathJoin(uploadIDPath, xlMetaJSONFile)) - if err != nil { - continue - } - if now.Sub(fi.ModTime) > expiry { - writeQuorum := getWriteQuorum(len(xl.getDisks())) - xl.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, writeQuorum, false) - } - } - } -} diff --git a/cmd/xl-v1-object.go b/cmd/erasure-object.go similarity index 51% rename from cmd/xl-v1-object.go rename to cmd/erasure-object.go index e89413a5e..2ccdccc2d 100644 --- a/cmd/xl-v1-object.go +++ b/cmd/erasure-object.go @@ -36,8 +36,8 @@ import ( var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied) // putObjectDir hints the bottom layer to create a new directory. -func (xl xlObjects) putObjectDir(ctx context.Context, bucket, object string, writeQuorum int) error { - storageDisks := xl.getDisks() +func (er erasureObjects) putObjectDir(ctx context.Context, bucket, object string, writeQuorum int) error { + storageDisks := er.getDisks() g := errgroup.WithNErrs(len(storageDisks)) @@ -64,7 +64,7 @@ func (xl xlObjects) putObjectDir(ctx context.Context, bucket, object string, wri // CopyObject - copy object source object to destination object. // if source object and destination object are same we only // update metadata. 
-func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) { +func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) { // This call shouldn't be used for anything other than metadata updates. if !srcInfo.metadataOnly { return oi, NotImplemented{} @@ -73,58 +73,59 @@ func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBuc defer ObjectPathUpdated(path.Join(dstBucket, dstObject)) // Read metadata associated with the object from all disks. - storageDisks := xl.getDisks() + storageDisks := er.getDisks() - metaArr, errs := readAllXLMetadata(ctx, storageDisks, srcBucket, srcObject) + metaArr, errs := readAllFileInfo(ctx, storageDisks, srcBucket, srcObject, srcOpts.VersionID) // get Quorum for this object - readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, xl, metaArr, errs) + readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, er, metaArr, errs) if err != nil { return oi, toObjectErr(err, srcBucket, srcObject) } - if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil { - return oi, toObjectErr(reducedErr, srcBucket, srcObject) - } - // List all online disks. - _, modTime := listOnlineDisks(storageDisks, metaArr, errs) + onlineDisks, modTime := listOnlineDisks(storageDisks, metaArr, errs) // Pick latest valid metadata. - xlMeta, err := pickValidXLMeta(ctx, metaArr, modTime, readQuorum) + fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum) if err != nil { return oi, toObjectErr(err, srcBucket, srcObject) } - // Update `xl.json` content on each disks. - for index := range metaArr { - metaArr[index].Meta = srcInfo.UserDefined - metaArr[index].Meta["etag"] = srcInfo.ETag + if fi.Deleted { + if srcOpts.VersionID == "" { + return oi, toObjectErr(errFileNotFound, srcBucket, srcObject) + } + return fi.ToObjectInfo(srcBucket, srcObject), toObjectErr(errMethodNotAllowed, srcBucket, srcObject) } - var onlineDisks []StorageAPI + // Update `xl.meta` content on each disk. + for index := range metaArr { + metaArr[index].Metadata = srcInfo.UserDefined + metaArr[index].Metadata["etag"] = srcInfo.ETag + } tempObj := mustGetUUID() - // Cleanup in case of xl.json writing failure - defer xl.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum, false) + // Cleanup in case of xl.meta writing failure + defer er.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum) - // Write unique `xl.json` for each disk. - if onlineDisks, err = writeUniqueXLMetadata(ctx, storageDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil { + // Write unique `xl.meta` for each disk. + if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil { return oi, toObjectErr(err, srcBucket, srcObject) } - // Rename atomically `xl.json` from tmp location to destination for each disk.
+ if _, err = renameFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, writeQuorum); err != nil { return oi, toObjectErr(err, srcBucket, srcObject) } - return xlMeta.ToObjectInfo(srcBucket, srcObject), nil + return fi.ToObjectInfo(srcBucket, srcObject), nil } // GetObjectNInfo - returns object info and an object // Read(Closer). When err != nil, the returned reader is always nil. -func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { +func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { if err = checkGetObjArgs(ctx, bucket, object); err != nil { return nil, err } @@ -133,25 +134,25 @@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r // returns no bytes. if HasSuffix(object, SlashSeparator) { var objInfo ObjectInfo - if objInfo, err = xl.getObjectInfoDir(ctx, bucket, object); err != nil { + if objInfo, err = er.getObjectInfoDir(ctx, bucket, object); err != nil { return nil, toObjectErr(err, bucket, object) } return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts) } - meta, metaArr, onlineDisks, err := xl.getObjectXLMeta(ctx, bucket, object, opts) + fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts) if err != nil { return nil, toObjectErr(err, bucket, object) } - fn, off, length, nErr := NewGetObjectReader(rs, meta.ToObjectInfo(bucket, object), opts) + fn, off, length, nErr := NewGetObjectReader(rs, fi.ToObjectInfo(bucket, object), opts) if nErr != nil { return nil, nErr } pr, pw := io.Pipe() go func() { - err := xl.getObjectWithXLMeta(ctx, bucket, object, off, length, pw, "", opts, meta, metaArr, onlineDisks) + err := er.getObjectWithFileInfo(ctx, bucket, object, off, length, pw, "", opts, fi, metaArr, onlineDisks) pw.CloseWithError(err) }() @@ -168,7 +169,7 @@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r // // startOffset indicates the starting read location of the object. // length indicates the total length of the object. -func (xl xlObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { +func (er erasureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { if err := checkGetObjArgs(ctx, bucket, object); err != nil { return err } @@ -192,31 +193,31 @@ func (xl xlObjects) GetObject(ctx context.Context, bucket, object string, startO return toObjectErr(err, bucket, object) } - return xl.getObject(ctx, bucket, object, startOffset, length, writer, etag, opts) + return er.getObject(ctx, bucket, object, startOffset, length, writer, etag, opts) } -func (xl xlObjects) getObjectWithXLMeta(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions, xlMeta xlMetaV1, metaArr []xlMetaV1, onlineDisks []StorageAPI) error { +func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions, fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI) error { // Reorder online disks based on erasure distribution order. 
- onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution) + onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution) // Reorder parts metadata based on erasure distribution order. - metaArr = shufflePartsMetadata(metaArr, xlMeta.Erasure.Distribution) + metaArr = shufflePartsMetadata(metaArr, fi.Erasure.Distribution) // For negative length read everything. if length < 0 { - length = xlMeta.Stat.Size - startOffset + length = fi.Size - startOffset } // Reply back invalid range if the input offset and length fall out of range. - if startOffset > xlMeta.Stat.Size || startOffset+length > xlMeta.Stat.Size { - logger.LogIf(ctx, InvalidRange{startOffset, length, xlMeta.Stat.Size}, logger.Application) - return InvalidRange{startOffset, length, xlMeta.Stat.Size} + if startOffset > fi.Size || startOffset+length > fi.Size { + logger.LogIf(ctx, InvalidRange{startOffset, length, fi.Size}, logger.Application) + return InvalidRange{startOffset, length, fi.Size} } // Get start part index and offset. - partIndex, partOffset, err := xlMeta.ObjectToPartOffset(ctx, startOffset) + partIndex, partOffset, err := fi.ObjectToPartOffset(ctx, startOffset) if err != nil { - return InvalidRange{startOffset, length, xlMeta.Stat.Size} + return InvalidRange{startOffset, length, fi.Size} } // Calculate endOffset according to length @@ -226,13 +227,13 @@ func (xl xlObjects) getObjectWithXLMeta(ctx context.Context, bucket, object stri } // Get last part index to read given length. - lastPartIndex, _, err := xlMeta.ObjectToPartOffset(ctx, endOffset) + lastPartIndex, _, err := fi.ObjectToPartOffset(ctx, endOffset) if err != nil { - return InvalidRange{startOffset, length, xlMeta.Stat.Size} + return InvalidRange{startOffset, length, fi.Size} } var totalBytesRead int64 - erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize) + erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize) if err != nil { return toObjectErr(err, bucket, object) } @@ -243,10 +244,10 @@ func (xl xlObjects) getObjectWithXLMeta(ctx context.Context, bucket, object stri break } - partNumber := xlMeta.Parts[partIndex].Number + partNumber := fi.Parts[partIndex].Number // Save the current part name and size. - partSize := xlMeta.Parts[partIndex].Size + partSize := fi.Parts[partIndex].Size partLength := partSize - partOffset // partLength should be adjusted so that we don't write more data than what was requested. @@ -254,7 +255,7 @@ func (xl xlObjects) getObjectWithXLMeta(ctx context.Context, bucket, object stri partLength = length - totalBytesRead } - tillOffset := erasure.ShardFileTillOffset(partOffset, partLength, partSize) + tillOffset := erasure.ShardFileOffset(partOffset, partLength, partSize) // Get the checksums of the current part. 
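An aside on the ObjectToPartOffset calls above, which map an absolute object offset to a (part index, offset-within-part) pair; a simplified sketch of the walk (part sizes hypothetical):

package main

import "fmt"

// toPartOffset walks part sizes until the absolute offset lands inside a
// part; offsets past the object size report failure (InvalidRange above).
func toPartOffset(partSizes []int64, offset int64) (idx int, partOff int64, ok bool) {
	for i, size := range partSizes {
		if offset < size {
			return i, offset, true
		}
		offset -= size
	}
	return 0, 0, false
}

func main() {
	parts := []int64{5 << 20, 5 << 20, 3 << 20} // 5 MiB, 5 MiB, 3 MiB
	fmt.Println(toPartOffset(parts, 11<<20))    // 2 1048576 true
}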
readers := make([]io.ReaderAt, len(onlineDisks)) prefer := make([]bool, len(onlineDisks)) @@ -263,21 +264,22 @@ func (xl xlObjects) getObjectWithXLMeta(ctx context.Context, bucket, object stri continue } checksumInfo := metaArr[index].Erasure.GetChecksumInfo(partNumber) - partPath := pathJoin(object, fmt.Sprintf("part.%d", partNumber)) + partPath := pathJoin(object, metaArr[index].DataDir, fmt.Sprintf("part.%d", partNumber)) readers[index] = newBitrotReader(disk, bucket, partPath, tillOffset, checksumInfo.Algorithm, checksumInfo.Hash, erasure.ShardSize()) // Prefer local disks prefer[index] = disk.Hostname() == "" } - err := erasure.Decode(ctx, writer, readers, partOffset, partLength, partSize, prefer) - // Note: we should not be defer'ing the following closeBitrotReaders() call as we are inside a for loop i.e if we use defer, we would accumulate a lot of open files by the time + err = erasure.Decode(ctx, writer, readers, partOffset, partLength, partSize, prefer) + // Note: we should not be defer'ing the following closeBitrotReaders() call as + // we are inside a for loop i.e if we use defer, we would accumulate a lot of open files by the time // we return from this function. closeBitrotReaders(readers) if err != nil { if decodeHealErr, ok := err.(*errDecodeHealRequired); ok { healOnce.Do(func() { - go deepHealObject(pathJoin(bucket, object)) + go deepHealObject(bucket, object, fi.VersionID) }) err = decodeHealErr.err } @@ -302,18 +304,18 @@ func (xl xlObjects) getObjectWithXLMeta(ctx context.Context, bucket, object stri return nil } -// getObject wrapper for xl GetObject -func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { - xlMeta, metaArr, onlineDisks, err := xl.getObjectXLMeta(ctx, bucket, object, opts) +// getObject wrapper for erasure GetObject +func (er erasureObjects) getObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { + fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts) if err != nil { return toObjectErr(err, bucket, object) } - return xl.getObjectWithXLMeta(ctx, bucket, object, startOffset, length, writer, etag, opts, xlMeta, metaArr, onlineDisks) + return er.getObjectWithFileInfo(ctx, bucket, object, startOffset, length, writer, etag, opts, fi, metaArr, onlineDisks) } // getObjectInfoDir - This getObjectInfo is specific to object directory lookup. -func (xl xlObjects) getObjectInfoDir(ctx context.Context, bucket, object string) (ObjectInfo, error) { - storageDisks := xl.getDisks() +func (er erasureObjects) getObjectInfoDir(ctx context.Context, bucket, object string) (ObjectInfo, error) { + storageDisks := er.getDisks() g := errgroup.WithNErrs(len(storageDisks)) @@ -325,7 +327,7 @@ func (xl xlObjects) getObjectInfoDir(ctx context.Context, bucket, object string) index := index g.Go(func() error { // Check if 'prefix' is an object on this 'disk'. - entries, err := storageDisks[index].ListDir(bucket, object, 1, "") + entries, err := storageDisks[index].ListDir(bucket, object, 1) if err != nil { return err } @@ -343,60 +345,70 @@ func (xl xlObjects) getObjectInfoDir(ctx context.Context, bucket, object string) } // GetObjectInfo - reads object metadata and replies back ObjectInfo. 
-func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) { - if err := checkGetObjArgs(ctx, bucket, object); err != nil { - return oi, err +func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (info ObjectInfo, err error) { + if err = checkGetObjArgs(ctx, bucket, object); err != nil { + return info, err } if HasSuffix(object, SlashSeparator) { - info, err := xl.getObjectInfoDir(ctx, bucket, object) + info, err = er.getObjectInfoDir(ctx, bucket, object) if err != nil { - return oi, toObjectErr(err, bucket, object) + return info, toObjectErr(err, bucket, object) } return info, nil } - info, err := xl.getObjectInfo(ctx, bucket, object, opts) + info, err = er.getObjectInfo(ctx, bucket, object, opts) if err != nil { - return oi, toObjectErr(err, bucket, object) + return info, toObjectErr(err, bucket, object) } return info, nil } -func (xl xlObjects) getObjectXLMeta(ctx context.Context, bucket, object string, opt ObjectOptions) (xlMeta xlMetaV1, metaArr []xlMetaV1, onlineDisks []StorageAPI, err error) { - disks := xl.getDisks() +func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI, err error) { + disks := er.getDisks() // Read metadata associated with the object from all disks. - metaArr, errs := readAllXLMetadata(ctx, disks, bucket, object) + metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID) - readQuorum, _, err := objectQuorumFromMeta(ctx, xl, metaArr, errs) + readQuorum, _, err := objectQuorumFromMeta(ctx, er, metaArr, errs) if err != nil { - return xlMeta, nil, nil, err + return fi, nil, nil, err } if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil { - return xlMeta, nil, nil, toObjectErr(reducedErr, bucket, object) + return fi, nil, nil, toObjectErr(reducedErr, bucket, object) } + // List all online disks. onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs) // Pick latest valid metadata. - xlMeta, err = pickValidXLMeta(ctx, metaArr, modTime, readQuorum) + fi, err = pickValidFileInfo(ctx, metaArr, modTime, readQuorum) if err != nil { - return xlMeta, nil, nil, err + return fi, nil, nil, err } - return xlMeta, metaArr, onlineDisks, nil + return fi, metaArr, onlineDisks, nil } // getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo. -func (xl xlObjects) getObjectInfo(ctx context.Context, bucket, object string, opt ObjectOptions) (objInfo ObjectInfo, err error) { - xlMeta, _, _, err := xl.getObjectXLMeta(ctx, bucket, object, opt) +func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { + fi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts) if err != nil { return objInfo, err } - return xlMeta.ToObjectInfo(bucket, object), nil + + if fi.Deleted { + if opts.VersionID == "" { + return objInfo, toObjectErr(errFileNotFound, bucket, object) + } + // Make sure to return object info to provide extra information. 
+ return fi.ToObjectInfo(bucket, object), toObjectErr(errMethodNotAllowed, bucket, object) + } + + return fi.ToObjectInfo(bucket, object), nil } func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, errs []error) { @@ -424,6 +436,53 @@ func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry str g.Wait() } +// Similar to rename but renames data from srcEntry to dstEntry at dataDir +func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dataDir, dstBucket, dstEntry string, writeQuorum int, ignoredErr []error) ([]StorageAPI, error) { + dataDir = retainSlash(dataDir) + g := errgroup.WithNErrs(len(disks)) + + // Rename data on all underlying storage disks. + for index := range disks { + index := index + g.Go(func() error { + if disks[index] == nil { + return errDiskNotFound + } + if err := disks[index].RenameData(srcBucket, srcEntry, dataDir, dstBucket, dstEntry); err != nil { + if !IsErrIgnored(err, ignoredErr...) { + return err + } + } + return nil + }, index) + } + + // Wait for all renames to finish. + errs := g.Wait() + + // We can safely allow RenameData errors up to len(er.getDisks()) - writeQuorum + // otherwise return failure. Cleanup successful renames. + err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) + if err == errErasureWriteQuorum { + ug := errgroup.WithNErrs(len(disks)) + for index, disk := range disks { + if disk == nil { + continue + } + index := index + ug.Go(func() error { + // Undo all the partial rename operations. + if errs[index] == nil { + _ = disks[index].RenameData(dstBucket, dstEntry, dataDir, srcBucket, srcEntry) + } + return nil + }, index) + } + ug.Wait() + } + return evalDisks(disks, errs), err +} + // rename - common function that renamePart and renameObject use to rename // the respective underlying storage layer representations. func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, writeQuorum int, ignoredErr []error) ([]StorageAPI, error) { @@ -454,10 +513,10 @@ func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBuc // Wait for all renames to finish. errs := g.Wait() - // We can safely allow RenameFile errors up to len(xl.getDisks()) - writeQuorum + // We can safely allow RenameFile errors up to len(er.getDisks()) - writeQuorum // otherwise return failure. Cleanup successful renames. err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) - if err == errXLWriteQuorum { + if err == errErasureWriteQuorum { // Undo all the partial rename operations. undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isDir, errs) } @@ -466,20 +525,21 @@ // PutObject - creates an object upon reading from the input stream // until EOF, erasure codes the data across all disk and additionally -// writes `xl.json` which carries the necessary metadata for future +// writes `xl.meta` which carries the necessary metadata for future // object operations. -func (xl xlObjects) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { +func (er erasureObjects) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { // Validate put object input args.
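An aside on the undo path in renameData above: the operation counts per-disk errors, and only if fewer than writeQuorum disks succeeded does it roll the successful renames back. A toy version of the reduce step (the real reduceWriteQuorumErrs also weighs specific error kinds):

package main

import (
	"errors"
	"fmt"
)

var errQuorum = errors.New("write quorum not met") // stand-in for errErasureWriteQuorum

// reduceQuorumErrs succeeds iff at least writeQuorum entries are nil.
func reduceQuorumErrs(errs []error, writeQuorum int) error {
	ok := 0
	for _, err := range errs {
		if err == nil {
			ok++
		}
	}
	if ok >= writeQuorum {
		return nil
	}
	return errQuorum
}

func main() {
	errs := []error{nil, nil, errors.New("disk down"), nil}
	fmt.Println(reduceQuorumErrs(errs, 3)) // <nil>
	fmt.Println(reduceQuorumErrs(errs, 4)) // write quorum not met: undo renames
}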
- if err = checkPutObjectArgs(ctx, bucket, object, xl, data.Size()); err != nil { + if err = checkPutObjectArgs(ctx, bucket, object, er, data.Size()); err != nil { return ObjectInfo{}, err } - return xl.putObject(ctx, bucket, object, data, opts) + return er.putObject(ctx, bucket, object, data, opts) } -// putObject wrapper for xl PutObject -func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { +// putObject wrapper for erasureObjects PutObject +func (er erasureObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { defer ObjectPathUpdated(path.Join(bucket, object)) + data := r.Reader uniqueID := mustGetUUID() @@ -489,7 +549,7 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, opts.UserDefined = make(map[string]string) } - storageDisks := xl.getDisks() + storageDisks := er.getDisks() // Get parity and data drive count based on storage class metadata parityDrives := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass]) @@ -508,7 +568,7 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, // Delete temporary object in the event of failure. // If PutObject succeeded there would be no temporary // object to delete. - defer xl.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum, false) + defer er.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum) // This is a special case with size as '0' and object ends with // a slash separator, we treat it like a valid operation and @@ -517,17 +577,11 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, // Check if an object is present as one of the parent dir. // -- FIXME. (needs a new kind of lock). // -- FIXME (this also causes performance issue when disks are down). - if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) { + if er.parentDirIsObject(ctx, bucket, path.Dir(object)) { return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object) } - if err = xl.putObjectDir(ctx, minioMetaTmpBucket, tempObj, writeQuorum); err != nil { - return ObjectInfo{}, toObjectErr(err, bucket, object) - } - - // Rename the successfully written temporary object to final location. Ignore errFileAccessDenied - // error because it means that the target object dir exists and we want to be close to S3 specification. - if _, err = rename(ctx, storageDisks, minioMetaTmpBucket, tempObj, bucket, object, true, writeQuorum, []error{errFileAccessDenied}); err != nil { + if err = er.putObjectDir(ctx, bucket, object, writeQuorum); err != nil { return ObjectInfo{}, toObjectErr(err, bucket, object) } @@ -543,24 +597,29 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, // Check if an object is present as one of the parent dir. // -- FIXME. (needs a new kind of lock). // -- FIXME (this also causes performance issue when disks are down). - if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) { + if er.parentDirIsObject(ctx, bucket, path.Dir(object)) { return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object) } // Initialize parts metadata - partsMetadata := make([]xlMetaV1, len(xl.getDisks())) + partsMetadata := make([]FileInfo, len(er.getDisks())) - xlMeta := newXLMetaV1(object, dataDrives, parityDrives) + fi := newFileInfo(object, dataDrives, parityDrives) - // Initialize xl meta.
+ if opts.Versioned { + fi.VersionID = mustGetUUID() + } + fi.DataDir = mustGetUUID() + + // Initialize erasure metadata. for index := range partsMetadata { - partsMetadata[index] = xlMeta + partsMetadata[index] = fi } // Order disks according to erasure distribution - onlineDisks := shuffleDisks(storageDisks, xlMeta.Erasure.Distribution) + onlineDisks := shuffleDisks(storageDisks, fi.Erasure.Distribution) - erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize) + erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize) if err != nil { return ObjectInfo{}, toObjectErr(err, bucket, object) } @@ -570,20 +630,20 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, switch size := data.Size(); { case size == 0: buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF - case size == -1 || size >= blockSizeV1: - buffer = xl.bp.Get() - defer xl.bp.Put(buffer) - case size < blockSizeV1: + case size == -1 || size >= fi.Erasure.BlockSize: + buffer = er.bp.Get() + defer er.bp.Put(buffer) + case size < fi.Erasure.BlockSize: - // No need to allocate fully blockSizeV1 buffer if the incoming data is smaller. + // No need to allocate fully fi.Erasure.BlockSize buffer if the incoming data is smaller. - buffer = make([]byte, size, 2*size+int64(erasure.parityBlocks+erasure.dataBlocks-1)) + buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1)) } - if len(buffer) > int(xlMeta.Erasure.BlockSize) { - buffer = buffer[:xlMeta.Erasure.BlockSize] + if len(buffer) > int(fi.Erasure.BlockSize) { + buffer = buffer[:fi.Erasure.BlockSize] } partName := "part.1" - tempErasureObj := pathJoin(uniqueID, partName) + tempErasureObj := pathJoin(uniqueID, fi.DataDir, partName) writers := make([]io.Writer, len(onlineDisks)) for i, disk := range onlineDisks { @@ -593,7 +653,7 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize()) } - n, erasureErr := erasure.Encode(ctx, data, writers, buffer, erasure.dataBlocks+1) + n, erasureErr := erasure.Encode(ctx, data, writers, buffer, fi.Erasure.DataBlocks+1) closeBitrotWriters(writers) if erasureErr != nil { return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj) @@ -629,37 +689,21 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, opts.UserDefined["content-type"] = mimedb.TypeByExtension(path.Ext(object)) } - if xl.isObject(bucket, object) { - // Rename if an object already exists to temporary location. - newUniqueID := mustGetUUID() - - // Delete successfully renamed object. - defer xl.deleteObject(ctx, minioMetaTmpBucket, newUniqueID, writeQuorum, false) - - // NOTE: Do not use online disks slice here: the reason is that existing object should be purged - // regardless of `xl.json` status and rolled back in case of errors. Also allow renaming the - // existing object if it is not present in quorum disks so users can overwrite stale objects. - _, err = rename(ctx, storageDisks, bucket, object, minioMetaTmpBucket, newUniqueID, true, writeQuorum, []error{errFileNotFound}) - if err != nil { - return ObjectInfo{}, toObjectErr(err, bucket, object) - } - } - // Fill all the necessary metadata. - // Update `xl.json` content on each disks. + // Update `xl.meta` content on each disk.
for index := range partsMetadata { - partsMetadata[index].Meta = opts.UserDefined - partsMetadata[index].Stat.Size = n - partsMetadata[index].Stat.ModTime = modTime + partsMetadata[index].Metadata = opts.UserDefined + partsMetadata[index].Size = n + partsMetadata[index].ModTime = modTime } - // Write unique `xl.json` for each disk. - if onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, writeQuorum); err != nil { + // Write unique `xl.meta` for each disk. + if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, writeQuorum); err != nil { return ObjectInfo{}, toObjectErr(err, bucket, object) } // Rename the successfully written temporary object to final location. - if onlineDisks, err = rename(ctx, onlineDisks, minioMetaTmpBucket, tempObj, bucket, object, true, writeQuorum, nil); err != nil { + if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, fi.DataDir, bucket, object, writeQuorum, nil); err != nil { return ObjectInfo{}, toObjectErr(err, bucket, object) } @@ -667,34 +711,51 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, // during this upload, send it to the MRF list. for i := 0; i < len(onlineDisks); i++ { if onlineDisks[i] == nil || storageDisks[i] == nil { - xl.addPartialUpload(bucket, object) + er.addPartialUpload(bucket, object) break } } - // Object info is the same in all disks, so we can pick the first meta - // of the first disk - xlMeta = partsMetadata[0] - - objInfo = ObjectInfo{ - IsDir: false, - Bucket: bucket, - Name: object, - Size: xlMeta.Stat.Size, - ModTime: xlMeta.Stat.ModTime, - ETag: xlMeta.Meta["etag"], - ContentType: xlMeta.Meta["content-type"], - ContentEncoding: xlMeta.Meta["content-encoding"], - UserDefined: xlMeta.Meta, + for i := 0; i < len(onlineDisks); i++ { + if onlineDisks[i] == nil { + continue + } + // Object info is the same in all disks, so we can pick + // the first meta from online disk + fi = partsMetadata[i] + break } - return objInfo, nil + return fi.ToObjectInfo(bucket, object), nil +} + +func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object string, writeQuorum int, fi FileInfo) error { + disks := er.getDisks() + + g := errgroup.WithNErrs(len(disks)) + + for index := range disks { + index := index + g.Go(func() error { + if disks[index] == nil { + return errDiskNotFound + } + err := disks[index].DeleteVersion(bucket, object, fi) + if err != nil && err != errVolumeNotFound { + return err + } + return nil + }, index) + } + + // return errors if any during deletion + return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, writeQuorum) } // deleteObject - wrapper for delete object, deletes an object from -// all the disks in parallel, including `xl.json` associated with the +// all the disks in parallel, including `xl.meta` associated with the // object. 
-func (xl xlObjects) deleteObject(ctx context.Context, bucket, object string, writeQuorum int, isDir bool) error { +func (er erasureObjects) deleteObject(ctx context.Context, bucket, object string, writeQuorum int) error { var disks []StorageAPI var err error defer ObjectPathUpdated(path.Join(bucket, object)) @@ -702,18 +763,13 @@ func (xl xlObjects) deleteObject(ctx context.Context, bucket, object string, wri tmpObj := mustGetUUID() if bucket == minioMetaTmpBucket { tmpObj = object - disks = xl.getDisks() + disks = er.getDisks() } else { // Rename the current object while requiring write quorum, but also consider // that a non found object in a given disk as a success since it already // confirms that the object doesn't have a part in that disk (already removed) - if isDir { - disks, err = rename(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum, - []error{errFileNotFound, errFileAccessDenied}) - } else { - disks, err = rename(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum, - []error{errFileNotFound}) - } + disks, err = rename(ctx, er.getDisks(), bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum, + []error{errFileNotFound}) if err != nil { return toObjectErr(err, bucket, object) } @@ -727,14 +783,7 @@ func (xl xlObjects) deleteObject(ctx context.Context, bucket, object string, wri if disks[index] == nil { return errDiskNotFound } - var err error - if isDir { - // DeleteFile() simply tries to remove a directory - // and will succeed only if that directory is empty. - err = disks[index].DeleteFile(minioMetaTmpBucket, tmpObj) - } else { - err = cleanupDir(ctx, disks[index], minioMetaTmpBucket, tmpObj) - } + err := cleanupDir(ctx, disks[index], minioMetaTmpBucket, tmpObj) if err != nil && err != errVolumeNotFound { return err } @@ -746,124 +795,19 @@ func (xl xlObjects) deleteObject(ctx context.Context, bucket, object string, wri return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, writeQuorum) } -// deleteObject - wrapper for delete object, deletes an object from -// all the disks in parallel, including `xl.json` associated with the -// object. -func (xl xlObjects) doDeleteObjects(ctx context.Context, bucket string, objects []string, errs []error, writeQuorums []int, isDirs []bool) ([]error, error) { - var tmpObjs = make([]string, len(objects)) - if bucket == minioMetaTmpBucket { - copy(tmpObjs, objects) - } else { - for idx := range objects { - if errs[idx] != nil { - continue - } - - tmpObjs[idx] = mustGetUUID() - var err error - // Rename the current object while requiring - // write quorum, but also consider that a non - // found object in a given disk as a success - // since it already confirms that the object - // doesn't have a part in that disk (already removed) - if isDirs[idx] { - _, err = rename(ctx, xl.getDisks(), bucket, objects[idx], - minioMetaTmpBucket, tmpObjs[idx], true, writeQuorums[idx], - []error{errFileNotFound, errFileAccessDenied}) - } else { - _, err = rename(ctx, xl.getDisks(), bucket, objects[idx], - minioMetaTmpBucket, tmpObjs[idx], true, writeQuorums[idx], - []error{errFileNotFound}) - } - if err != nil { - errs[idx] = err - } - ObjectPathUpdated(path.Join(bucket, objects[idx])) - } - } - - disks := xl.getDisks() - - // Initialize list of errors. 
- var opErrs = make([]error, len(disks)) - var delObjErrs = make([][]error, len(disks)) - var wg = sync.WaitGroup{} - - // Remove objects in bulk for each disk - for i, d := range disks { - if d == nil { - opErrs[i] = errDiskNotFound - continue - } - wg.Add(1) - go func(index int, disk StorageAPI) { - defer wg.Done() - delObjErrs[index], opErrs[index] = disk.DeletePrefixes(minioMetaTmpBucket, tmpObjs) - if opErrs[index] == errVolumeNotFound || opErrs[index] == errFileNotFound { - opErrs[index] = nil - } - }(i, d) - } - - wg.Wait() - - // Return errors if any during deletion - if err := reduceWriteQuorumErrs(ctx, opErrs, objectOpIgnoredErrs, len(disks)/2+1); err != nil { - return nil, err - } - - // Reduce errors for each object - for objIndex := range objects { - if errs[objIndex] != nil { - continue - } - listErrs := make([]error, len(disks)) - // Iterate over disks to fetch the error - // of deleting of the current object - for i := range delObjErrs { - // delObjErrs[i] is not nil when disks[i] is also not nil - if delObjErrs[i] != nil { - if delObjErrs[i][objIndex] != errFileNotFound { - listErrs[i] = delObjErrs[i][objIndex] - } - } - } - errs[objIndex] = reduceWriteQuorumErrs(ctx, listErrs, objectOpIgnoredErrs, writeQuorums[objIndex]) - } - - return errs, nil -} - -func (xl xlObjects) deleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +// DeleteObjects deletes objects/versions in bulk, this function will still automatically split objects list +// into smaller bulks if some object names are found to be duplicated in the delete list, splitting +// into smaller bulks will avoid holding twice the write lock of the duplicated object names. +func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { errs := make([]error, len(objects)) + dobjects := make([]DeletedObject, len(objects)) writeQuorums := make([]int, len(objects)) - isObjectDirs := make([]bool, len(objects)) for i, object := range objects { - errs[i] = checkDelObjArgs(ctx, bucket, object) + errs[i] = checkDelObjArgs(ctx, bucket, object.ObjectName) } - for i, object := range objects { - isObjectDirs[i] = HasSuffix(object, SlashSeparator) - } - - storageDisks := xl.getDisks() - - for i, object := range objects { - if isObjectDirs[i] { - _, err := xl.getObjectInfoDir(ctx, bucket, object) - if err == errXLReadQuorum { - if isObjectDirDangling(statAllDirs(ctx, storageDisks, bucket, object)) { - // If object is indeed dangling, purge it. - errs[i] = nil - } - } - if err != nil { - errs[i] = toObjectErr(err, bucket, object) - continue - } - } - } + storageDisks := er.getDisks() for i := range objects { if errs[i] != nil { @@ -878,167 +822,175 @@ func (xl xlObjects) deleteObjects(ctx context.Context, bucket string, objects [] writeQuorums[i] = getWriteQuorum(len(storageDisks)) } - return xl.doDeleteObjects(ctx, bucket, objects, errs, writeQuorums, isObjectDirs) -} - -// DeleteObjects deletes objects in bulk, this function will still automatically split objects list -// into smaller bulks if some object names are found to be duplicated in the delete list, splitting -// into smaller bulks will avoid holding twice the write lock of the duplicated object names. 
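The versioned bulk path added above makes the delete decision explicit: with versioning enabled and no `VersionID` supplied, the "delete" becomes a brand-new version, a delete marker with a fresh UUID, and no data is removed. A self-contained sketch of that decision with stand-in types (the real `ObjectToDelete`/`FileInfo` carry many more fields; `"/"` stands in for `SlashSeparator` and `uuid.New` for `mustGetUUID`):

```go
package main

import (
	"fmt"
	"strings"
	"time"

	"github.com/google/uuid"
)

// Stand-ins for the real cmd types, illustrative only.
type ObjectToDelete struct {
	ObjectName string
	VersionID  string
}

type FileInfo struct {
	Name      string
	VersionID string
	ModTime   time.Time
	Deleted   bool // true => this version is a delete marker
}

// toDeleteVersion mirrors the loop above: on a versioning-enabled bucket,
// deleting without an explicit version ID mints a new version UUID and
// marks it Deleted, leaving all existing data on disk.
func toDeleteVersion(obj ObjectToDelete, versioned bool) FileInfo {
	if obj.VersionID == "" && versioned && !strings.HasSuffix(obj.ObjectName, "/") {
		return FileInfo{
			Name:      obj.ObjectName,
			VersionID: uuid.New().String(),
			ModTime:   time.Now().UTC(),
			Deleted:   true,
		}
	}
	return FileInfo{Name: obj.ObjectName, VersionID: obj.VersionID}
}

func main() {
	fmt.Printf("%+v\n", toDeleteVersion(ObjectToDelete{ObjectName: "a.txt"}, true))                    // delete marker
	fmt.Printf("%+v\n", toDeleteVersion(ObjectToDelete{ObjectName: "a.txt", VersionID: "v1"}, true))   // hard delete of v1
}
```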
-func (xl xlObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { - - var ( - i, start, end int - // Deletion result for all objects - deleteErrs []error - // Object names store will be used to check for object name duplication - objectNamesStore = make(map[string]interface{}) - ) - - for { - if i >= len(objects) { - break - } - - object := objects[i] - - _, duplicationFound := objectNamesStore[object] - if duplicationFound { - end = i - 1 - } else { - objectNamesStore[object] = true - end = i - } - - if duplicationFound || i == len(objects)-1 { - errs, err := xl.deleteObjects(ctx, bucket, objects[start:end+1]) - if err != nil { - return nil, err + versions := make([]FileInfo, len(objects)) + for i := range objects { + if objects[i].VersionID == "" { + if opts.Versioned && !HasSuffix(objects[i].ObjectName, SlashSeparator) { + versions[i] = FileInfo{ + Name: objects[i].ObjectName, + VersionID: mustGetUUID(), + ModTime: UTCNow(), + Deleted: true, // delete marker + } + continue } - deleteErrs = append(deleteErrs, errs...) - objectNamesStore = make(map[string]interface{}) } - - if duplicationFound { - // Avoid to increase the index if object - // name is found to be duplicated. - start = i - } else { - i++ + versions[i] = FileInfo{ + Name: objects[i].ObjectName, + VersionID: objects[i].VersionID, } } - return deleteErrs, nil + // Initialize list of errors. + var opErrs = make([]error, len(storageDisks)) + var delObjErrs = make([][]error, len(storageDisks)) + + var wg sync.WaitGroup + // Remove versions in bulk for each disk + for index, disk := range storageDisks { + if disk == nil { + opErrs[index] = errDiskNotFound + continue + } + wg.Add(1) + go func(index int, disk StorageAPI) { + defer wg.Done() + delObjErrs[index] = disk.DeleteVersions(bucket, versions) + }(index, disk) + } + + wg.Wait() + + // Reduce errors for each object + for objIndex := range objects { + if errs[objIndex] != nil { + continue + } + diskErrs := make([]error, len(storageDisks)) + // Iterate over disks to fetch the error + // of deleting of the current object + for i := range delObjErrs { + // delObjErrs[i] is not nil when disks[i] is also not nil + if delObjErrs[i] != nil { + if delObjErrs[i][objIndex] != errFileNotFound { + diskErrs[i] = delObjErrs[i][objIndex] + } + } + } + errs[objIndex] = reduceWriteQuorumErrs(ctx, diskErrs, objectOpIgnoredErrs, writeQuorums[objIndex]) + if errs[objIndex] == nil { + if versions[objIndex].Deleted { + dobjects[objIndex] = DeletedObject{ + DeleteMarker: versions[objIndex].Deleted, + DeleteMarkerVersionID: versions[objIndex].VersionID, + ObjectName: versions[objIndex].Name, + } + } else { + dobjects[objIndex] = DeletedObject{ + ObjectName: versions[objIndex].Name, + VersionID: versions[objIndex].VersionID, + } + } + } + } + + return dobjects, errs } // DeleteObject - deletes an object, this call doesn't necessary reply // any error as it is not necessary for the handler to reply back a // response to the client request. 
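`DeleteVersions` above returns one error slice per disk, so the results form a disks-by-objects matrix that the reduction loop reads column-wise, judging each object against its own write quorum and treating `errFileNotFound` as success (a version that is already absent is already deleted). The transposition step in isolation, as a hypothetical helper mirroring the inline loop:

```go
package main

import (
	"errors"
	"fmt"
)

var errFileNotFound = errors.New("file not found")

// column reads one object's column out of the per-disk result matrix
// delObjErrs[disk][object], dropping errFileNotFound.
func column(delObjErrs [][]error, objIndex int) []error {
	out := make([]error, len(delObjErrs))
	for disk := range delObjErrs {
		if delObjErrs[disk] == nil {
			continue // offline disk: produced no per-object results
		}
		if err := delObjErrs[disk][objIndex]; err != errFileNotFound {
			out[disk] = err
		}
	}
	return out
}

func main() {
	errIO := errors.New("disk I/O error")
	perDisk := [][]error{
		{nil, errFileNotFound}, // disk 0
		{errIO, nil},           // disk 1
		nil,                    // disk 2 was offline
	}
	fmt.Println(column(perDisk, 0)) // [<nil> disk I/O error <nil>]
	fmt.Println(column(perDisk, 1)) // [<nil> <nil> <nil>]
}
```

The single-object `DeleteObject` that follows applies the same delete-marker rule to one name at a time.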
-func (xl xlObjects) DeleteObject(ctx context.Context, bucket, object string) (err error) { +func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { if err = checkDelObjArgs(ctx, bucket, object); err != nil { - return err + return objInfo, err } - var writeQuorum int - var isObjectDir = HasSuffix(object, SlashSeparator) + storageDisks := er.getDisks() + writeQuorum := len(storageDisks)/2 + 1 - storageDisks := xl.getDisks() - - if isObjectDir { - _, err = xl.getObjectInfoDir(ctx, bucket, object) - if err == errXLReadQuorum { - if isObjectDirDangling(statAllDirs(ctx, storageDisks, bucket, object)) { - // If object is indeed dangling, purge it. - err = nil + if opts.VersionID == "" { + if opts.Versioned && !HasSuffix(object, SlashSeparator) { + fi := FileInfo{ + Name: object, + VersionID: mustGetUUID(), + Deleted: true, + ModTime: UTCNow(), } - } - if err != nil { - return toObjectErr(err, bucket, object) + // Add delete marker, since we don't have any version specified explicitly. + if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi); err != nil { + return objInfo, toObjectErr(err, bucket, object) + } + return fi.ToObjectInfo(bucket, object), nil } } - if isObjectDir { - writeQuorum = getWriteQuorum(len(storageDisks)) - } else { - // Read metadata associated with the object from all disks. - partsMetadata, errs := readAllXLMetadata(ctx, storageDisks, bucket, object) - // get Quorum for this object - _, writeQuorum, err = objectQuorumFromMeta(ctx, xl, partsMetadata, errs) - if err != nil { - return toObjectErr(err, bucket, object) - } + // Delete the object version on all disks. + if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, FileInfo{ + Name: object, + VersionID: opts.VersionID, + }); err != nil { + return objInfo, toObjectErr(err, bucket, object) } - // Delete the object on all disks. - if err = xl.deleteObject(ctx, bucket, object, writeQuorum, isObjectDir); err != nil { - return toObjectErr(err, bucket, object) - } - - // Success. - return nil -} - -// ListObjectsV2 lists all blobs in bucket filtered by prefix -func (xl xlObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { - marker := continuationToken - if marker == "" { - marker = startAfter - } - - loi, err := xl.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) - if err != nil { - return result, err - } - - listObjectsV2Info := ListObjectsV2Info{ - IsTruncated: loi.IsTruncated, - ContinuationToken: continuationToken, - NextContinuationToken: loi.NextMarker, - Objects: loi.Objects, - Prefixes: loi.Prefixes, - } - return listObjectsV2Info, err + return ObjectInfo{Bucket: bucket, Name: object, VersionID: opts.VersionID}, nil } // Send the successful but partial upload, however ignore // if the channel is blocked by other items. 
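A note on `addPartialUpload` just below: the `select` with an empty `default` makes the send non-blocking, so reporting a partial write can never stall the I/O path; when the buffered MRF channel (capacity 10000 in the constructor) is full, the event is dropped and background healing is left to rediscover the object. The pattern in isolation:

```go
package main

import "fmt"

type partialUpload struct{ bucket, object string }

// addPartialUpload never blocks: if the MRF channel has room the event is
// queued, otherwise it is silently dropped. Same shape as the method
// below, minus the receiver.
func addPartialUpload(ch chan partialUpload, bucket, key string) {
	select {
	case ch <- partialUpload{bucket: bucket, object: key}:
	default:
	}
}

func main() {
	ch := make(chan partialUpload, 1) // the real channel is buffered to 10000
	addPartialUpload(ch, "bucket", "a.txt")
	addPartialUpload(ch, "bucket", "b.txt") // dropped: buffer full
	fmt.Println(len(ch))                    // 1
}
```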
-func (xl xlObjects) addPartialUpload(bucket, key string) {
+func (er erasureObjects) addPartialUpload(bucket, key string) {
 	select {
-	case xl.mrfUploadCh <- partialUpload{bucket: bucket, object: key}:
+	case er.mrfUploadCh <- partialUpload{bucket: bucket, object: key}:
 	default:
 	}
 }
 
 // PutObjectTags - replace or add tags to an existing object
-func (xl xlObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string) error {
-	disks := xl.getDisks()
+func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error {
+	disks := er.getDisks()
 
 	// Read metadata associated with the object from all disks.
-	metaArr, errs := readAllXLMetadata(ctx, disks, bucket, object)
+	metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID)
 
-	_, writeQuorum, err := objectQuorumFromMeta(ctx, xl, metaArr, errs)
+	readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, er, metaArr, errs)
 	if err != nil {
 		return toObjectErr(err, bucket, object)
 	}
 
-	for i, xlMeta := range metaArr {
-		// clean xlMeta.Meta of tag key, before updating the new tags
-		delete(xlMeta.Meta, xhttp.AmzObjectTagging)
+	// List all online disks.
+	_, modTime := listOnlineDisks(disks, metaArr, errs)
+
+	// Pick latest valid metadata.
+	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
+	if err != nil {
+		return toObjectErr(err, bucket, object)
+	}
+
+	if fi.Deleted {
+		if opts.VersionID == "" {
+			return toObjectErr(errFileNotFound, bucket, object)
+		}
+		return toObjectErr(errMethodNotAllowed, bucket, object)
+	}
+
+	for i, fi := range metaArr {
+		// clean fi.Metadata of the tag key before updating with the new tags
+		delete(fi.Metadata, xhttp.AmzObjectTagging)
 		// Don't update for empty tags
 		if tags != "" {
-			xlMeta.Meta[xhttp.AmzObjectTagging] = tags
+			fi.Metadata[xhttp.AmzObjectTagging] = tags
 		}
-		metaArr[i].Meta = xlMeta.Meta
+		metaArr[i].Metadata = fi.Metadata
 	}
 
 	tempObj := mustGetUUID()
 
-	// Write unique `xl.json` for each disk.
-	if disks, err = writeUniqueXLMetadata(ctx, disks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
+	// Write unique `xl.meta` for each disk.
+	if disks, err = writeUniqueFileInfo(ctx, disks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
 		return toObjectErr(err, bucket, object)
 	}
 
-	// Atomically rename `xl.json` from tmp location to destination for each disk.
-	if _, err = renameXLMetadata(ctx, disks, minioMetaTmpBucket, tempObj, bucket, object, writeQuorum); err != nil {
+	// Atomically rename metadata from tmp location to destination for each disk.
+ if _, err = renameFileInfo(ctx, disks, minioMetaTmpBucket, tempObj, bucket, object, writeQuorum); err != nil { return toObjectErr(err, bucket, object) } @@ -1046,14 +998,14 @@ func (xl xlObjects) PutObjectTags(ctx context.Context, bucket, object string, ta } // DeleteObjectTags - delete object tags from an existing object -func (xl xlObjects) DeleteObjectTags(ctx context.Context, bucket, object string) error { - return xl.PutObjectTags(ctx, bucket, object, "") +func (er erasureObjects) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error { + return er.PutObjectTags(ctx, bucket, object, "", opts) } // GetObjectTags - get object tags from an existing object -func (xl xlObjects) GetObjectTags(ctx context.Context, bucket, object string) (*tags.Tags, error) { +func (er erasureObjects) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) { // GetObjectInfo will return tag value as well - oi, err := xl.GetObjectInfo(ctx, bucket, object, ObjectOptions{}) + oi, err := er.GetObjectInfo(ctx, bucket, object, opts) if err != nil { return nil, err } diff --git a/cmd/xl-v1-object_test.go b/cmd/erasure-object_test.go similarity index 55% rename from cmd/xl-v1-object_test.go rename to cmd/erasure-object_test.go index 1e7dfac3d..bf33b6368 100644 --- a/cmd/xl-v1-object_test.go +++ b/cmd/erasure-object_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,16 +20,11 @@ import ( "bytes" "context" "io/ioutil" - "math/rand" "os" - "path" - "reflect" "testing" - "time" humanize "github.com/dustin/go-humanize" "github.com/minio/minio/cmd/config/storageclass" - "github.com/minio/minio/pkg/madmin" ) func TestRepeatPutObjectPart(t *testing.T) { @@ -41,7 +36,7 @@ func TestRepeatPutObjectPart(t *testing.T) { var err error var opts ObjectOptions - objLayer, disks, err = prepareXL16(ctx) + objLayer, disks, err = prepareErasure16(ctx) if err != nil { t.Fatal(err) } @@ -49,7 +44,7 @@ func TestRepeatPutObjectPart(t *testing.T) { // cleaning up of temporary test directories defer removeRoots(disks) - err = objLayer.MakeBucketWithLocation(ctx, "bucket1", "", false) + err = objLayer.MakeBucketWithLocation(ctx, "bucket1", BucketOptions{}) if err != nil { t.Fatal(err) } @@ -71,7 +66,7 @@ func TestRepeatPutObjectPart(t *testing.T) { } } -func TestXLDeleteObjectBasic(t *testing.T) { +func TestErasureDeleteObjectBasic(t *testing.T) { testCases := []struct { bucket string object string @@ -91,12 +86,12 @@ func TestXLDeleteObjectBasic(t *testing.T) { defer cancel() // Create an instance of xl backend - xl, fsDirs, err := prepareXL16(ctx) + xl, fsDirs, err := prepareErasure16(ctx) if err != nil { t.Fatal(err) } - err = xl.MakeBucketWithLocation(ctx, "bucket", "", false) + err = xl.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) if err != nil { t.Fatal(err) } @@ -104,40 +99,43 @@ func TestXLDeleteObjectBasic(t *testing.T) { // Create object "dir/obj" under bucket "bucket" for Test 7 to pass _, err = xl.PutObject(ctx, "bucket", "dir/obj", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}) if err != nil { - t.Fatalf("XL Object upload failed: %s", err) + t.Fatalf("Erasure Object upload failed: %s", err) } - for i, test := range testCases { - actualErr := xl.DeleteObject(ctx, test.bucket, 
test.object) - if test.expectedErr != nil && actualErr != test.expectedErr { - t.Errorf("Test %d: Expected to fail with %s, but failed with %s", i+1, test.expectedErr, actualErr) - } - if test.expectedErr == nil && actualErr != nil { - t.Errorf("Test %d: Expected to pass, but failed with %s", i+1, actualErr) - } + for _, test := range testCases { + test := test + t.Run("", func(t *testing.T) { + _, actualErr := xl.DeleteObject(ctx, test.bucket, test.object, ObjectOptions{}) + if test.expectedErr != nil && actualErr != test.expectedErr { + t.Errorf("Expected to fail with %s, but failed with %s", test.expectedErr, actualErr) + } + if test.expectedErr == nil && actualErr != nil { + t.Errorf("Expected to pass, but failed with %s", actualErr) + } + }) } // Cleanup backend directories removeRoots(fsDirs) } -func TestXLDeleteObjectsXLSet(t *testing.T) { +func TestErasureDeleteObjectsErasureSet(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var objs []*xlObjects + var objs []*erasureObjects for i := 0; i < 32; i++ { - obj, fsDirs, err := prepareXL(ctx, 16) + obj, fsDirs, err := prepareErasure(ctx, 16) if err != nil { - t.Fatal("Unable to initialize 'XL' object layer.", err) + t.Fatal("Unable to initialize 'Erasure' object layer.", err) } // Remove all dirs. for _, dir := range fsDirs { defer os.RemoveAll(dir) } - z := obj.(*xlZones) + z := obj.(*erasureZones) xl := z.zones[0].sets[0] objs = append(objs, xl) } - xlSets := &xlSets{sets: objs, distributionAlgo: "CRCMOD"} + erasureSets := &erasureSets{sets: objs, distributionAlgo: "CRCMOD"} type testCaseType struct { bucket string @@ -152,32 +150,29 @@ func TestXLDeleteObjectsXLSet(t *testing.T) { {bucketName, "obj_4"}, } - err := xlSets.MakeBucketWithLocation(GlobalContext, bucketName, "", false) + err := erasureSets.MakeBucketWithLocation(ctx, bucketName, BucketOptions{}) if err != nil { t.Fatal(err) } for _, testCase := range testCases { - _, err = xlSets.PutObject(GlobalContext, testCase.bucket, testCase.object, + _, err = erasureSets.PutObject(ctx, testCase.bucket, testCase.object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}) if err != nil { - t.Fatalf("XL Object upload failed: %s", err) + t.Fatalf("Erasure Object upload failed: %s", err) } } - toObjectNames := func(testCases []testCaseType) []string { - names := make([]string, len(testCases)) + toObjectNames := func(testCases []testCaseType) []ObjectToDelete { + names := make([]ObjectToDelete, len(testCases)) for i := range testCases { - names[i] = testCases[i].object + names[i] = ObjectToDelete{ObjectName: testCases[i].object} } return names } objectNames := toObjectNames(testCases) - delErrs, err := xlSets.DeleteObjects(GlobalContext, bucketName, objectNames) - if err != nil { - t.Errorf("Failed to call DeleteObjects with the error: `%v`", err) - } + _, delErrs := erasureSets.DeleteObjects(ctx, bucketName, objectNames, ObjectOptions{}) for i := range delErrs { if delErrs[i] != nil { @@ -186,7 +181,7 @@ func TestXLDeleteObjectsXLSet(t *testing.T) { } for _, test := range testCases { - _, statErr := xlSets.GetObjectInfo(GlobalContext, test.bucket, test.object, ObjectOptions{}) + _, statErr := erasureSets.GetObjectInfo(ctx, test.bucket, test.object, ObjectOptions{}) switch statErr.(type) { case ObjectNotFound: default: @@ -195,23 +190,23 @@ func TestXLDeleteObjectsXLSet(t *testing.T) { } } -func TestXLDeleteObjectDiskNotFound(t *testing.T) { +func TestErasureDeleteObjectDiskNotFound(t *testing.T) { 
ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Create an instance of xl backend. - obj, fsDirs, err := prepareXL16(ctx) + obj, fsDirs, err := prepareErasure16(ctx) if err != nil { t.Fatal(err) } // Cleanup backend directories defer removeRoots(fsDirs) - z := obj.(*xlZones) + z := obj.(*erasureZones) xl := z.zones[0].sets[0] // Create "bucket" - err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) + err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) if err != nil { t.Fatal(err) } @@ -226,16 +221,17 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) { } // for a 16 disk setup, quorum is 9. To simulate disks not found yet // quorum is available, we remove disks leaving quorum disks behind. - xlDisks := xl.getDisks() - z.zones[0].xlDisksMu.Lock() + erasureDisks := xl.getDisks() + z.zones[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { - for i := range xlDisks[:7] { - xlDisks[i] = newNaughtyDisk(xlDisks[i], nil, errFaultyDisk) + for i := range erasureDisks[:7] { + erasureDisks[i] = newNaughtyDisk(erasureDisks[i], nil, errFaultyDisk) } - return xlDisks + return erasureDisks } - z.zones[0].xlDisksMu.Unlock() - err = obj.DeleteObject(ctx, bucket, object) + + z.zones[0].erasureDisksMu.Unlock() + _, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{}) if err != nil { t.Fatal(err) } @@ -247,18 +243,19 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) { } // Remove one more disk to 'lose' quorum, by setting it to nil. - xlDisks = xl.getDisks() - z.zones[0].xlDisksMu.Lock() + erasureDisks = xl.getDisks() + z.zones[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { - xlDisks[7] = nil - xlDisks[8] = nil - return xlDisks + erasureDisks[7] = nil + erasureDisks[8] = nil + return erasureDisks } - z.zones[0].xlDisksMu.Unlock() - err = obj.DeleteObject(ctx, bucket, object) - // since majority of disks are not available, metaquorum is not achieved and hence errXLReadQuorum error - if err != toObjectErr(errXLReadQuorum, bucket, object) { - t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errXLReadQuorum, bucket, object), err) + + z.zones[0].erasureDisksMu.Unlock() + _, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{}) + // since majority of disks are not available, metaquorum is not achieved and hence errErasureWriteQuorum error + if err != toObjectErr(errErasureWriteQuorum, bucket, object) { + t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err) } } @@ -267,18 +264,18 @@ func TestGetObjectNoQuorum(t *testing.T) { defer cancel() // Create an instance of xl backend. - obj, fsDirs, err := prepareXL16(ctx) + obj, fsDirs, err := prepareErasure16(ctx) if err != nil { t.Fatal(err) } // Cleanup backend directories. defer removeRoots(fsDirs) - z := obj.(*xlZones) + z := obj.(*erasureZones) xl := z.zones[0].sets[0] // Create "bucket" - err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) + err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) if err != nil { t.Fatal(err) } @@ -293,7 +290,7 @@ func TestGetObjectNoQuorum(t *testing.T) { } // Make 9 disks offline, which leaves less than quorum number of disks - // in a 16 disk XL setup. The original disks are 'replaced' with + // in a 16 disk Erasure setup. 
The original disks are 'replaced' with // naughtyDisks that fail after 'f' successful StorageAPI method // invocations, where f - [0,2) for f := 0; f < 2; f++ { @@ -301,24 +298,24 @@ func TestGetObjectNoQuorum(t *testing.T) { for i := 0; i <= f; i++ { diskErrors[i] = nil } - xlDisks := xl.getDisks() - for i := range xlDisks[:9] { - switch diskType := xlDisks[i].(type) { + erasureDisks := xl.getDisks() + for i := range erasureDisks[:9] { + switch diskType := erasureDisks[i].(type) { case *naughtyDisk: - xlDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk) + erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk) default: - xlDisks[i] = newNaughtyDisk(xlDisks[i], diskErrors, errFaultyDisk) + erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk) } } - z.zones[0].xlDisksMu.Lock() + z.zones[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { - return xlDisks + return erasureDisks } - z.zones[0].xlDisksMu.Unlock() + z.zones[0].erasureDisksMu.Unlock() // Fetch object from store. err = xl.GetObject(ctx, bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts) - if err != toObjectErr(errXLReadQuorum, bucket, object) { - t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err) + if err != toObjectErr(errErasureReadQuorum, bucket, object) { + t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err) } } } @@ -328,7 +325,7 @@ func TestPutObjectNoQuorum(t *testing.T) { defer cancel() // Create an instance of xl backend. - obj, fsDirs, err := prepareXL16(ctx) + obj, fsDirs, err := prepareErasure16(ctx) if err != nil { t.Fatal(err) } @@ -336,11 +333,11 @@ func TestPutObjectNoQuorum(t *testing.T) { // Cleanup backend directories. defer removeRoots(fsDirs) - z := obj.(*xlZones) + z := obj.(*erasureZones) xl := z.zones[0].sets[0] // Create "bucket" - err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) + err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) if err != nil { t.Fatal(err) } @@ -355,7 +352,7 @@ func TestPutObjectNoQuorum(t *testing.T) { } // Make 9 disks offline, which leaves less than quorum number of disks - // in a 16 disk XL setup. The original disks are 'replaced' with + // in a 16 disk Erasure setup. 
The original disks are 'replaced' with // naughtyDisks that fail after 'f' successful StorageAPI method // invocations, where f - [0,3) for f := 0; f < 3; f++ { @@ -363,143 +360,38 @@ func TestPutObjectNoQuorum(t *testing.T) { for i := 0; i <= f; i++ { diskErrors[i] = nil } - xlDisks := xl.getDisks() - for i := range xlDisks[:9] { - switch diskType := xlDisks[i].(type) { + erasureDisks := xl.getDisks() + for i := range erasureDisks[:9] { + switch diskType := erasureDisks[i].(type) { case *naughtyDisk: - xlDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk) + erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk) default: - xlDisks[i] = newNaughtyDisk(xlDisks[i], diskErrors, errFaultyDisk) + erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk) } } - z.zones[0].xlDisksMu.Lock() + z.zones[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { - return xlDisks + return erasureDisks } - z.zones[0].xlDisksMu.Unlock() + z.zones[0].erasureDisksMu.Unlock() // Upload new content to same object "object" _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts) - if err != toObjectErr(errXLWriteQuorum, bucket, object) { - t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err) + if err != toObjectErr(errErasureWriteQuorum, bucket, object) { + t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err) } } } -// Tests both object and bucket healing. -func TestHealing(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - obj, fsDirs, err := prepareXL16(ctx) - if err != nil { - t.Fatal(err) - } - defer removeRoots(fsDirs) - - z := obj.(*xlZones) - xl := z.zones[0].sets[0] - - // Create "bucket" - err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) - if err != nil { - t.Fatal(err) - } - - bucket := "bucket" - object := "object" - - data := make([]byte, 1*humanize.MiByte) - length := int64(len(data)) - _, err = rand.Read(data) - if err != nil { - t.Fatal(err) - } - - _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), length, "", ""), ObjectOptions{}) - if err != nil { - t.Fatal(err) - } - - disk := xl.getDisks()[0] - xlMetaPreHeal, err := readXLMeta(ctx, disk, bucket, object) - if err != nil { - t.Fatal(err) - } - - // Remove the object - to simulate the case where the disk was down when the object - // was created. - err = os.RemoveAll(path.Join(fsDirs[0], bucket, object)) - if err != nil { - t.Fatal(err) - } - - _, err = xl.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) - if err != nil { - t.Fatal(err) - } - - xlMetaPostHeal, err := readXLMeta(ctx, disk, bucket, object) - if err != nil { - t.Fatal(err) - } - - // After heal the meta file should be as expected. - if !reflect.DeepEqual(xlMetaPreHeal, xlMetaPostHeal) { - t.Fatal("HealObject failed") - } - - err = os.RemoveAll(path.Join(fsDirs[0], bucket, object, "xl.json")) - if err != nil { - t.Fatal(err) - } - - // Write xl.json with different modtime to simulate the case where a disk had - // gone down when an object was replaced by a new object. 
- xlMetaOutDated := xlMetaPreHeal - xlMetaOutDated.Stat.ModTime = time.Now() - err = writeXLMetadata(ctx, disk, bucket, object, xlMetaOutDated) - if err != nil { - t.Fatal(err) - } - - _, err = xl.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealDeepScan}) - if err != nil { - t.Fatal(err) - } - - xlMetaPostHeal, err = readXLMeta(ctx, disk, bucket, object) - if err != nil { - t.Fatal(err) - } - - // After heal the meta file should be as expected. - if !reflect.DeepEqual(xlMetaPreHeal, xlMetaPostHeal) { - t.Fatal("HealObject failed") - } - - // Remove the bucket - to simulate the case where bucket was - // created when the disk was down. - err = os.RemoveAll(path.Join(fsDirs[0], bucket)) - if err != nil { - t.Fatal(err) - } - // This would create the bucket. - _, err = xl.HealBucket(ctx, bucket, false, false) - if err != nil { - t.Fatal(err) - } - // Stat the bucket to make sure that it was created. - _, err = xl.getDisks()[0].StatVol(bucket) - if err != nil { - t.Fatal(err) - } -} - func TestObjectQuorumFromMeta(t *testing.T) { ExecObjectLayerTestWithDirs(t, testObjectQuorumFromMeta) } func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string, t TestErrHandler) { + restoreGlobalStorageClass := globalStorageClass + defer func() { + globalStorageClass = restoreGlobalStorageClass + }() + bucket := getRandomBucketName() var opts ObjectOptions @@ -507,45 +399,48 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin partCount := 3 data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount) - z := obj.(*xlZones) + z := obj.(*erasureZones) xl := z.zones[0].sets[0] - xlDisks := xl.getDisks() + erasureDisks := xl.getDisks() - err := obj.MakeBucketWithLocation(GlobalContext, bucket, globalMinioDefaultRegion, false) + ctx, cancel := context.WithCancel(GlobalContext) + defer cancel() + + err := obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{}) if err != nil { t.Fatalf("Failed to make a bucket %v", err) } // Object for test case 1 - No StorageClass defined, no MetaData in PutObject object1 := "object1" - _, err = obj.PutObject(GlobalContext, bucket, object1, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts) + _, err = obj.PutObject(ctx, bucket, object1, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts) if err != nil { t.Fatalf("Failed to putObject %v", err) } - parts1, errs1 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object1) + parts1, errs1 := readAllFileInfo(ctx, erasureDisks, bucket, object1, "") // Object for test case 2 - No StorageClass defined, MetaData in PutObject requesting RRS Class object2 := "object2" metadata2 := make(map[string]string) metadata2["x-amz-storage-class"] = storageclass.RRS - _, err = obj.PutObject(GlobalContext, bucket, object2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata2}) + _, err = obj.PutObject(ctx, bucket, object2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata2}) if err != nil { t.Fatalf("Failed to putObject %v", err) } - parts2, errs2 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object2) + parts2, errs2 := readAllFileInfo(ctx, erasureDisks, bucket, object2, "") // Object for test case 3 - No StorageClass defined, MetaData in PutObject requesting Standard Storage Class object3 := "object3" metadata3 := make(map[string]string) metadata3["x-amz-storage-class"] = storageclass.STANDARD - _, err = 
obj.PutObject(GlobalContext, bucket, object3, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata3}) + _, err = obj.PutObject(ctx, bucket, object3, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata3}) if err != nil { t.Fatalf("Failed to putObject %v", err) } - parts3, errs3 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object3) + parts3, errs3 := readAllFileInfo(ctx, erasureDisks, bucket, object3, "") // Object for test case 4 - Standard StorageClass defined as Parity 6, MetaData in PutObject requesting Standard Storage Class object4 := "object4" @@ -557,12 +452,12 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin }, } - _, err = obj.PutObject(GlobalContext, bucket, object4, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata4}) + _, err = obj.PutObject(ctx, bucket, object4, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata4}) if err != nil { t.Fatalf("Failed to putObject %v", err) } - parts4, errs4 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object4) + parts4, errs4 := readAllFileInfo(ctx, erasureDisks, bucket, object4, "") // Object for test case 5 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting RRS Class // Reset global storage class flags @@ -575,12 +470,12 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin }, } - _, err = obj.PutObject(GlobalContext, bucket, object5, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata5}) + _, err = obj.PutObject(ctx, bucket, object5, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata5}) if err != nil { t.Fatalf("Failed to putObject %v", err) } - parts5, errs5 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object5) + parts5, errs5 := readAllFileInfo(ctx, erasureDisks, bucket, object5, "") // Object for test case 6 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting Standard Storage Class object6 := "object6" @@ -592,12 +487,12 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin }, } - _, err = obj.PutObject(GlobalContext, bucket, object6, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata6}) + _, err = obj.PutObject(ctx, bucket, object6, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata6}) if err != nil { t.Fatalf("Failed to putObject %v", err) } - parts6, errs6 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object6) + parts6, errs6 := readAllFileInfo(ctx, erasureDisks, bucket, object6, "") // Object for test case 7 - Standard StorageClass defined as Parity 5, MetaData in PutObject requesting RRS Class // Reset global storage class flags @@ -610,15 +505,15 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin }, } - _, err = obj.PutObject(GlobalContext, bucket, object7, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata7}) + _, err = obj.PutObject(ctx, bucket, object7, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata7}) if err != nil { t.Fatalf("Failed to putObject %v", err) } - 
parts7, errs7 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object7) + parts7, errs7 := readAllFileInfo(ctx, erasureDisks, bucket, object7, "") tests := []struct { - parts []xlMetaV1 + parts []FileInfo errs []error expectedReadQuorum int expectedWriteQuorum int @@ -632,23 +527,22 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin {parts6, errs6, 8, 9, nil}, {parts7, errs7, 14, 15, nil}, } - for i, tt := range tests { - actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(GlobalContext, *xl, tt.parts, tt.errs) - if tt.expectedError != nil && err == nil { - t.Errorf("Test %d, Expected %s, got %s", i+1, tt.expectedError, err) - return - } - if tt.expectedError == nil && err != nil { - t.Errorf("Test %d, Expected %s, got %s", i+1, tt.expectedError, err) - return - } - if tt.expectedReadQuorum != actualReadQuorum { - t.Errorf("Test %d, Expected Read Quorum %d, got %d", i+1, tt.expectedReadQuorum, actualReadQuorum) - return - } - if tt.expectedWriteQuorum != actualWriteQuorum { - t.Errorf("Test %d, Expected Write Quorum %d, got %d", i+1, tt.expectedWriteQuorum, actualWriteQuorum) - return - } + for _, tt := range tests { + tt := tt + t.(*testing.T).Run("", func(t *testing.T) { + actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(ctx, *xl, tt.parts, tt.errs) + if tt.expectedError != nil && err == nil { + t.Errorf("Expected %s, got %s", tt.expectedError, err) + } + if tt.expectedError == nil && err != nil { + t.Errorf("Expected %s, got %s", tt.expectedError, err) + } + if tt.expectedReadQuorum != actualReadQuorum { + t.Errorf("Expected Read Quorum %d, got %d", tt.expectedReadQuorum, actualReadQuorum) + } + if tt.expectedWriteQuorum != actualWriteQuorum { + t.Errorf("Expected Write Quorum %d, got %d", tt.expectedWriteQuorum, actualWriteQuorum) + } + }) } } diff --git a/cmd/xl-sets.go b/cmd/erasure-sets.go similarity index 66% rename from cmd/xl-sets.go rename to cmd/erasure-sets.go index f06f5b27c..2cbaa72f9 100644 --- a/cmd/xl-sets.go +++ b/cmd/erasure-sets.go @@ -23,13 +23,13 @@ import ( "io" "net/http" "sort" - "strings" "sync" "time" + "github.com/dchest/siphash" + "github.com/google/uuid" "github.com/minio/minio-go/v6/pkg/tags" "github.com/minio/minio/cmd/config/storageclass" - xhttp "github.com/minio/minio/cmd/http" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/bpool" "github.com/minio/minio/pkg/dsync" @@ -45,25 +45,25 @@ type diskConnectInfo struct { setIndex int } -// xlSets implements ObjectLayer combining a static list of erasure coded +// erasureSets implements ObjectLayer combining a static list of erasure coded // object sets. NOTE: There is no dynamic scaling allowed or intended in // current design. -type xlSets struct { +type erasureSets struct { GatewayUnsupported - sets []*xlObjects + sets []*erasureObjects // Reference format. - format *formatXLV3 + format *formatErasureV3 - // xlDisks mutex to lock xlDisks. - xlDisksMu sync.RWMutex + // erasureDisks mutex to lock erasureDisks. + erasureDisksMu sync.RWMutex // Re-ordered list of disks per set. - xlDisks [][]StorageAPI + erasureDisks [][]StorageAPI // Distributed locker clients. - xlLockers setsDsyncLockers + erasureLockers setsDsyncLockers // List of endpoints provided on the command line. endpoints Endpoints @@ -83,15 +83,17 @@ type xlSets struct { // Distribution algorithm of choice. 
distributionAlgo string + deploymentID [16]byte disksStorageInfoCache timedValue // Merge tree walk - pool *MergeWalkPool - poolSplunk *MergeWalkPool + pool *MergeWalkPool + poolSplunk *MergeWalkPool + poolVersions *MergeWalkVersionsPool mrfMU sync.Mutex - mrfUploads map[string]int + mrfUploads map[healSource]int } func isEndpointConnected(diskMap map[string]StorageAPI, endpoint string) bool { @@ -102,15 +104,15 @@ func isEndpointConnected(diskMap map[string]StorageAPI, endpoint string) bool { return disk.IsOnline() } -func (s *xlSets) getDiskMap() map[string]StorageAPI { +func (s *erasureSets) getDiskMap() map[string]StorageAPI { diskMap := make(map[string]StorageAPI) - s.xlDisksMu.RLock() - defer s.xlDisksMu.RUnlock() + s.erasureDisksMu.RLock() + defer s.erasureDisksMu.RUnlock() for i := 0; i < s.setCount; i++ { for j := 0; j < s.drivesPerSet; j++ { - disk := s.xlDisks[i][j] + disk := s.erasureDisks[i][j] if disk == nil { continue } @@ -125,13 +127,13 @@ func (s *xlSets) getDiskMap() map[string]StorageAPI { // Initializes a new StorageAPI from the endpoint argument, returns // StorageAPI and also `format` which exists on the disk. -func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatXLV3, error) { +func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatErasureV3, error) { disk, err := newStorageAPI(endpoint) if err != nil { return nil, nil, err } - format, err := loadFormatXL(disk) + format, err := loadFormatErasure(disk) if err != nil { // Close the internal connection to avoid connection leaks. disk.Close() @@ -145,13 +147,13 @@ func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatXLV3, error) { // format, after successful validation. // - i'th position is the set index // - j'th position is the disk index in the current set -func findDiskIndexByDiskID(refFormat *formatXLV3, diskID string) (int, int, error) { +func findDiskIndexByDiskID(refFormat *formatErasureV3, diskID string) (int, int, error) { if diskID == offlineDiskUUID { return -1, -1, fmt.Errorf("diskID: %s is offline", diskID) } - for i := 0; i < len(refFormat.XL.Sets); i++ { - for j := 0; j < len(refFormat.XL.Sets[0]); j++ { - if refFormat.XL.Sets[i][j] == diskID { + for i := 0; i < len(refFormat.Erasure.Sets); i++ { + for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ { + if refFormat.Erasure.Sets[i][j] == diskID { return i, j, nil } } @@ -164,29 +166,29 @@ func findDiskIndexByDiskID(refFormat *formatXLV3, diskID string) (int, int, erro // format, after successful validation. 
// - i'th position is the set index // - j'th position is the disk index in the current set -func findDiskIndex(refFormat, format *formatXLV3) (int, int, error) { - if err := formatXLV3Check(refFormat, format); err != nil { +func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) { + if err := formatErasureV3Check(refFormat, format); err != nil { return 0, 0, err } - if format.XL.This == offlineDiskUUID { - return -1, -1, fmt.Errorf("diskID: %s is offline", format.XL.This) + if format.Erasure.This == offlineDiskUUID { + return -1, -1, fmt.Errorf("diskID: %s is offline", format.Erasure.This) } - for i := 0; i < len(refFormat.XL.Sets); i++ { - for j := 0; j < len(refFormat.XL.Sets[0]); j++ { - if refFormat.XL.Sets[i][j] == format.XL.This { + for i := 0; i < len(refFormat.Erasure.Sets); i++ { + for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ { + if refFormat.Erasure.Sets[i][j] == format.Erasure.This { return i, j, nil } } } - return -1, -1, fmt.Errorf("diskID: %s not found", format.XL.This) + return -1, -1, fmt.Errorf("diskID: %s not found", format.Erasure.This) } // connectDisks - attempt to connect all the endpoints, loads format // and re-arranges the disks in proper position. -func (s *xlSets) connectDisks() { +func (s *erasureSets) connectDisks() { var wg sync.WaitGroup diskMap := s.getDiskMap() for _, endpoint := range s.endpoints { @@ -212,14 +214,14 @@ func (s *xlSets) connectDisks() { printEndpointError(endpoint, err) return } - disk.SetDiskID(format.XL.This) - s.xlDisksMu.Lock() - if s.xlDisks[setIndex][diskIndex] != nil { - s.xlDisks[setIndex][diskIndex].Close() + disk.SetDiskID(format.Erasure.This) + s.erasureDisksMu.Lock() + if s.erasureDisks[setIndex][diskIndex] != nil { + s.erasureDisks[setIndex][diskIndex].Close() } - s.xlDisks[setIndex][diskIndex] = disk + s.erasureDisks[setIndex][diskIndex] = disk s.endpointStrings[setIndex*s.drivesPerSet+diskIndex] = disk.String() - s.xlDisksMu.Unlock() + s.erasureDisksMu.Unlock() go func(setIndex int) { // Send a new disk connect event with a timeout select { @@ -235,7 +237,7 @@ func (s *xlSets) connectDisks() { // monitorAndConnectEndpoints this is a monitoring loop to keep track of disconnected // endpoints by reconnecting them and making sure to place them into right position in // the set topology, this monitoring happens at a given monitoring interval. 
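The monitoring loop described above depends on `findDiskIndexByDiskID`/`findDiskIndex` to slot a reconnecting endpoint back into its original (set, disk) position, keyed by the UUID recorded in the on-disk format. Reduced to its core, the lookup is a scan of the reference format's set layout (a sketch; the real functions also validate the format and special-case `offlineDiskUUID`):

```go
package main

import "fmt"

// findDiskIndexByID scans the reference format's set layout for a disk
// UUID and returns its (set, disk) slot, as findDiskIndexByDiskID does.
func findDiskIndexByID(sets [][]string, diskID string) (int, int, error) {
	for i := range sets {
		for j := range sets[i] {
			if sets[i][j] == diskID {
				return i, j, nil
			}
		}
	}
	return -1, -1, fmt.Errorf("diskID: %s not found", diskID)
}

func main() {
	sets := [][]string{
		{"uuid-0", "uuid-1", "uuid-2", "uuid-3"},
		{"uuid-4", "uuid-5", "uuid-6", "uuid-7"},
	}
	fmt.Println(findDiskIndexByID(sets, "uuid-6")) // 1 2 <nil>
}
```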
-func (s *xlSets) monitorAndConnectEndpoints(ctx context.Context, monitorInterval time.Duration) { +func (s *erasureSets) monitorAndConnectEndpoints(ctx context.Context, monitorInterval time.Duration) { for { select { case <-ctx.Done(): @@ -248,18 +250,18 @@ func (s *xlSets) monitorAndConnectEndpoints(ctx context.Context, monitorInterval } } -func (s *xlSets) GetLockers(setIndex int) func() []dsync.NetLocker { +func (s *erasureSets) GetLockers(setIndex int) func() []dsync.NetLocker { return func() []dsync.NetLocker { lockers := make([]dsync.NetLocker, s.drivesPerSet) - copy(lockers, s.xlLockers[setIndex]) + copy(lockers, s.erasureLockers[setIndex]) return lockers } } -func (s *xlSets) GetEndpoints(setIndex int) func() []string { +func (s *erasureSets) GetEndpoints(setIndex int) func() []string { return func() []string { - s.xlDisksMu.RLock() - defer s.xlDisksMu.RUnlock() + s.erasureDisksMu.RLock() + defer s.erasureDisksMu.RUnlock() eps := make([]string, s.drivesPerSet) for i := 0; i < s.drivesPerSet; i++ { @@ -270,12 +272,12 @@ func (s *xlSets) GetEndpoints(setIndex int) func() []string { } // GetDisks returns a closure for a given set, which provides list of disks per set. -func (s *xlSets) GetDisks(setIndex int) func() []StorageAPI { +func (s *erasureSets) GetDisks(setIndex int) func() []StorageAPI { return func() []StorageAPI { - s.xlDisksMu.RLock() - defer s.xlDisksMu.RUnlock() + s.erasureDisksMu.RLock() + defer s.erasureDisksMu.RUnlock() disks := make([]StorageAPI, s.drivesPerSet) - copy(disks, s.xlDisks[setIndex]) + copy(disks, s.erasureDisks[setIndex]) return disks } } @@ -283,46 +285,47 @@ func (s *xlSets) GetDisks(setIndex int) func() []StorageAPI { const defaultMonitorConnectEndpointInterval = time.Second * 10 // Set to 10 secs. // Initialize new set of erasure coded sets. -func newXLSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageAPI, format *formatXLV3) (*xlSets, error) { - setCount := len(format.XL.Sets) - drivesPerSet := len(format.XL.Sets[0]) +func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageAPI, format *formatErasureV3) (*erasureSets, error) { + setCount := len(format.Erasure.Sets) + drivesPerSet := len(format.Erasure.Sets[0]) endpointStrings := make([]string, len(endpoints)) - // Initialize the XL sets instance. - s := &xlSets{ - sets: make([]*xlObjects, setCount), - xlDisks: make([][]StorageAPI, setCount), - xlLockers: make([][]dsync.NetLocker, setCount), + // Initialize the erasure sets instance. + s := &erasureSets{ + sets: make([]*erasureObjects, setCount), + erasureDisks: make([][]StorageAPI, setCount), + erasureLockers: make([][]dsync.NetLocker, setCount), + endpoints: endpoints, + endpointStrings: endpointStrings, setCount: setCount, drivesPerSet: drivesPerSet, format: format, - endpoints: endpoints, - endpointStrings: endpointStrings, disksConnectEvent: make(chan diskConnectInfo), disksConnectDoneCh: make(chan struct{}), - distributionAlgo: format.XL.DistributionAlgo, + distributionAlgo: format.Erasure.DistributionAlgo, + deploymentID: uuid.MustParse(format.ID), pool: NewMergeWalkPool(globalMergeLookupTimeout), poolSplunk: NewMergeWalkPool(globalMergeLookupTimeout), - mrfUploads: make(map[string]int), + poolVersions: NewMergeWalkVersionsPool(globalMergeLookupTimeout), + mrfUploads: make(map[healSource]int), } - mutex := newNSLock(globalIsDistXL) + mutex := newNSLock(globalIsDistErasure) // Initialize byte pool once for all sets, bpool size is set to // setCount * drivesPerSet with each memory upto blockSizeV1. 
bp := bpool.NewBytePoolCap(setCount*drivesPerSet, blockSizeV1, blockSizeV1*2) for i := 0; i < setCount; i++ { - s.xlDisks[i] = make([]StorageAPI, drivesPerSet) - s.xlLockers[i] = make([]dsync.NetLocker, drivesPerSet) + s.erasureDisks[i] = make([]StorageAPI, drivesPerSet) + s.erasureLockers[i] = make([]dsync.NetLocker, drivesPerSet) } for i := 0; i < setCount; i++ { for j := 0; j < drivesPerSet; j++ { endpoint := endpoints[i*drivesPerSet+j] // Rely on endpoints list to initialize, init lockers and available disks. - s.xlLockers[i][j] = newLockAPI(endpoint) - + s.erasureLockers[i][j] = newLockAPI(endpoint) disk := storageDisks[i*drivesPerSet+j] if disk == nil { continue @@ -338,11 +341,11 @@ func newXLSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageA continue } s.endpointStrings[m*drivesPerSet+n] = disk.String() - s.xlDisks[m][n] = disk + s.erasureDisks[m][n] = disk } - // Initialize xl objects for a given set. - s.sets[i] = &xlObjects{ + // Initialize erasure objects for a given set. + s.sets[i] = &erasureObjects{ getDisks: s.GetDisks(i), getLockers: s.GetLockers(i), getEndpoints: s.GetEndpoints(i), @@ -350,9 +353,6 @@ func newXLSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageA bp: bp, mrfUploadCh: make(chan partialUpload, 10000), } - - go s.sets[i].cleanupStaleMultipartUploads(ctx, - GlobalMultipartCleanupInterval, GlobalMultipartExpiry, ctx.Done()) } // Start the disk monitoring and connect routine. @@ -364,7 +364,7 @@ func newXLSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageA } // NewNSLock - initialize a new namespace RWLocker instance. -func (s *xlSets) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { +func (s *erasureSets) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { if len(objects) == 1 { return s.getHashedSet(objects[0]).NewNSLock(ctx, bucket, objects...) } @@ -375,7 +375,7 @@ func (s *xlSets) NewNSLock(ctx context.Context, bucket string, objects ...string // This only returns disk usage info for Zones to perform placement decision, this call // is not implemented in Object interface and is not meant to be used by other object // layer implementations. -func (s *xlSets) StorageUsageInfo(ctx context.Context) StorageInfo { +func (s *erasureSets) StorageUsageInfo(ctx context.Context) StorageInfo { storageUsageInfo := func() StorageInfo { var storageInfo StorageInfo storageInfos := make([]StorageInfo, len(s.sets)) @@ -418,7 +418,7 @@ func (s *xlSets) StorageUsageInfo(ctx context.Context) StorageInfo { } // StorageInfo - combines output of StorageInfo across all erasure coded object sets. -func (s *xlSets) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { +func (s *erasureSets) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { var storageInfo StorageInfo storageInfos := make([]StorageInfo, len(s.sets)) @@ -516,14 +516,14 @@ func (s *xlSets) StorageInfo(ctx context.Context, local bool) (StorageInfo, []er return storageInfo, errs } -func (s *xlSets) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { +func (s *erasureSets) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { // Use the zone-level implementation instead. - return NotImplemented{} + return NotImplemented{API: "CrawlAndGetDataUsage"} } // Shutdown shutsdown all erasure coded sets in parallel // returns error upon first error. 
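`Shutdown` below, like `MakeBucketWithLocation` and `DeleteBucket` further down, fans the call out to all sets with `errgroup.WithNErrs` and collects one error per set index. A minimal re-implementation of that pattern, assuming only what the call sites show (a sketch, not the actual `minio/pkg/sync/errgroup` package):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// group runs N indexed goroutines and keeps one error per index,
// mirroring how errgroup.WithNErrs is used in this file.
type group struct {
	wg   sync.WaitGroup
	errs []error
}

func withNErrs(n int) *group { return &group{errs: make([]error, n)} }

func (g *group) Go(fn func() error, index int) {
	g.wg.Add(1)
	go func() {
		defer g.wg.Done()
		g.errs[index] = fn()
	}()
}

func (g *group) Wait() []error { g.wg.Wait(); return g.errs }

func main() {
	g := withNErrs(3)
	for i := 0; i < 3; i++ {
		i := i
		g.Go(func() error {
			if i == 1 {
				return errors.New("set 1: shutdown failed")
			}
			return nil
		}, i)
	}
	fmt.Println(g.Wait()) // [<nil> set 1: shutdown failed <nil>]
}
```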
-func (s *xlSets) Shutdown(ctx context.Context) error { +func (s *erasureSets) Shutdown(ctx context.Context) error { g := errgroup.WithNErrs(len(s.sets)) for index := range s.sets { @@ -544,14 +544,14 @@ func (s *xlSets) Shutdown(ctx context.Context) error { // MakeBucketLocation - creates a new bucket across all sets simultaneously, // then return the first encountered error -func (s *xlSets) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { +func (s *erasureSets) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error { g := errgroup.WithNErrs(len(s.sets)) // Create buckets in parallel across all sets. for index := range s.sets { index := index g.Go(func() error { - return s.sets[index].MakeBucketWithLocation(ctx, bucket, location, lockEnabled) + return s.sets[index].MakeBucketWithLocation(ctx, bucket, opts) }, index) } @@ -571,7 +571,17 @@ func (s *xlSets) MakeBucketWithLocation(ctx context.Context, bucket, location st // hashes the key returning an integer based on the input algorithm. // This function currently supports // - CRCMOD +// - SIPMOD // - all new algos. +func sipHashMod(key string, cardinality int, id [16]byte) int { + if cardinality <= 0 { + return -1 + } + sip := siphash.New(id[:]) + sip.Write([]byte(key)) + return int(sip.Sum64() % uint64(cardinality)) +} + func crcHashMod(key string, cardinality int) int { if cardinality <= 0 { return -1 @@ -580,10 +590,12 @@ func crcHashMod(key string, cardinality int) int { return int(keyCrc % uint32(cardinality)) } -func hashKey(algo string, key string, cardinality int) int { +func hashKey(algo string, key string, cardinality int, id [16]byte) int { switch algo { - case formatXLVersionV2DistributionAlgo: + case formatErasureVersionV2DistributionAlgoLegacy: return crcHashMod(key, cardinality) + case formatErasureVersionV3DistributionAlgo: + return sipHashMod(key, cardinality, id) default: // Unknown algorithm returns -1, also if cardinality is lesser than 0. return -1 @@ -591,70 +603,53 @@ func hashKey(algo string, key string, cardinality int) int { } // Returns always a same erasure coded set for a given input. -func (s *xlSets) getHashedSetIndex(input string) int { - return hashKey(s.distributionAlgo, input, len(s.sets)) +func (s *erasureSets) getHashedSetIndex(input string) int { + return hashKey(s.distributionAlgo, input, len(s.sets), s.deploymentID) } // Returns always a same erasure coded set for a given input. -func (s *xlSets) getHashedSet(input string) (set *xlObjects) { +func (s *erasureSets) getHashedSet(input string) (set *erasureObjects) { return s.sets[s.getHashedSetIndex(input)] } // GetBucketInfo - returns bucket info from one of the erasure coded set. 
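`GetBucketInfo` below, like every object operation on `erasureSets`, routes through `getHashedSet`, and this change introduces a second placement algorithm: SIPMOD hashes the object name with SipHash keyed by the deployment ID, so placement stays stable within a deployment but is not identical across deployments (legacy CRCMOD remains for V2 formats). A runnable excerpt, with `sipHashMod` copied from the hunk above and an arbitrary demo key:

```go
package main

import (
	"fmt"

	"github.com/dchest/siphash"
)

// sipHashMod, as added above: a SipHash of the object name, keyed by the
// deployment ID, modulo the number of sets.
func sipHashMod(key string, cardinality int, id [16]byte) int {
	if cardinality <= 0 {
		return -1
	}
	sip := siphash.New(id[:])
	sip.Write([]byte(key))
	return int(sip.Sum64() % uint64(cardinality))
}

func main() {
	var id [16]byte
	copy(id[:], "0123456789abcdef") // arbitrary demo deployment ID
	for _, obj := range []string{"photos/a.jpg", "photos/b.jpg"} {
		fmt.Println(obj, "-> set", sipHashMod(obj, 16, id))
	}
}
```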
-func (s *xlSets) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) { +func (s *erasureSets) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) { return s.getHashedSet("").GetBucketInfo(ctx, bucket) } // ListObjectsV2 lists all objects in bucket filtered by prefix -func (s *xlSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { - marker := continuationToken - if marker == "" { - marker = startAfter - } - - loi, err := s.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) - if err != nil { - return result, err - } - - listObjectsV2Info := ListObjectsV2Info{ - IsTruncated: loi.IsTruncated, - ContinuationToken: continuationToken, - NextContinuationToken: loi.NextMarker, - Objects: loi.Objects, - Prefixes: loi.Prefixes, - } - return listObjectsV2Info, err +func (s *erasureSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { + return result, NotImplemented{} } // IsNotificationSupported returns whether bucket notification is applicable for this layer. -func (s *xlSets) IsNotificationSupported() bool { +func (s *erasureSets) IsNotificationSupported() bool { return s.getHashedSet("").IsNotificationSupported() } // IsListenBucketSupported returns whether listen bucket notification is applicable for this layer. -func (s *xlSets) IsListenBucketSupported() bool { +func (s *erasureSets) IsListenBucketSupported() bool { return true } // IsEncryptionSupported returns whether server side encryption is implemented for this layer. -func (s *xlSets) IsEncryptionSupported() bool { +func (s *erasureSets) IsEncryptionSupported() bool { return s.getHashedSet("").IsEncryptionSupported() } // IsCompressionSupported returns whether compression is applicable for this layer. -func (s *xlSets) IsCompressionSupported() bool { +func (s *erasureSets) IsCompressionSupported() bool { return s.getHashedSet("").IsCompressionSupported() } -func (s *xlSets) IsTaggingSupported() bool { +func (s *erasureSets) IsTaggingSupported() bool { return true } // DeleteBucket - deletes a bucket on all sets simultaneously, // even if one of the sets fail to delete buckets, we proceed to // undo a successful operation. -func (s *xlSets) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { +func (s *erasureSets) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { g := errgroup.WithNErrs(len(s.sets)) // Delete buckets in parallel across all sets. @@ -670,7 +665,7 @@ func (s *xlSets) DeleteBucket(ctx context.Context, bucket string, forceDelete bo // by creating buckets again on all sets which were successfully deleted. for _, err := range errs { if err != nil { - undoDeleteBucketSets(bucket, s.sets, errs) + undoDeleteBucketSets(ctx, bucket, s.sets, errs) return err } } @@ -683,7 +678,7 @@ func (s *xlSets) DeleteBucket(ctx context.Context, bucket string, forceDelete bo } // This function is used to undo a successful DeleteBucket operation. -func undoDeleteBucketSets(bucket string, sets []*xlObjects, errs []error) { +func undoDeleteBucketSets(ctx context.Context, bucket string, sets []*erasureObjects, errs []error) { g := errgroup.WithNErrs(len(sets)) // Undo previous delete bucket on all underlying sets. 
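`undoDeleteBucketSets`, whose body continues in the next hunk, is a compensation step: `DeleteBucket` runs on all sets in parallel, and if any set fails, the bucket is re-created on every set where deletion had succeeded. The pattern stripped of the storage specifics (hypothetical helper, for illustration only):

```go
package main

import (
	"errors"
	"fmt"
)

// undoOnPartialFailure applies the inverse action wherever the original
// action succeeded, but only if at least one index failed.
func undoOnPartialFailure(errs []error, undo func(i int)) bool {
	failed := false
	for _, err := range errs {
		if err != nil {
			failed = true
			break
		}
	}
	if failed {
		for i, err := range errs {
			if err == nil {
				undo(i) // this set succeeded; roll it back
			}
		}
	}
	return failed
}

func main() {
	errs := []error{nil, errors.New("set 1: timeout"), nil}
	var rolled []int
	if undoOnPartialFailure(errs, func(i int) { rolled = append(rolled, i) }) {
		fmt.Println("rolled back sets:", rolled) // [0 2]
	}
}
```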
@@ -691,7 +686,7 @@ func undoDeleteBucketSets(bucket string, sets []*xlObjects, errs []error) { index := index g.Go(func() error { if errs[index] == nil { - return sets[index].MakeBucketWithLocation(GlobalContext, bucket, "", false) + return sets[index].MakeBucketWithLocation(ctx, bucket, BucketOptions{}) } return nil }, index) @@ -703,7 +698,7 @@ func undoDeleteBucketSets(bucket string, sets []*xlObjects, errs []error) { // List all buckets from one of the set, we are not doing merge // sort here just for simplification. As per design it is assumed // that all buckets are present on all sets. -func (s *xlSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) { +func (s *erasureSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) { // Always lists from the same set signified by the empty string. return s.getHashedSet("").ListBuckets(ctx) } @@ -711,83 +706,86 @@ func (s *xlSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err err // --- Object Operations --- // GetObjectNInfo - returns object info and locked object ReadCloser -func (s *xlSets) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { +func (s *erasureSets) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { return s.getHashedSet(object).GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts) } // GetObject - reads an object from the hashedSet based on the object name. -func (s *xlSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { +func (s *erasureSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { return s.getHashedSet(object).GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts) } // PutObject - writes an object to hashedSet based on the object name. -func (s *xlSets) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { +func (s *erasureSets) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { return s.getHashedSet(object).PutObject(ctx, bucket, object, data, opts) } // GetObjectInfo - reads object metadata from the hashedSet based on the object name. -func (s *xlSets) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { +func (s *erasureSets) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { return s.getHashedSet(object).GetObjectInfo(ctx, bucket, object, opts) } // DeleteObject - deletes an object from the hashedSet based on the object name. -func (s *xlSets) DeleteObject(ctx context.Context, bucket string, object string) (err error) { - return s.getHashedSet(object).DeleteObject(ctx, bucket, object) +func (s *erasureSets) DeleteObject(ctx context.Context, bucket string, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { + return s.getHashedSet(object).DeleteObject(ctx, bucket, object, opts) } // DeleteObjects - bulk delete of objects // Bulk delete is only possible within one set. 
For that purpose // objects are group by set first, and then bulk delete is invoked // for each set, the error response of each delete will be returned -func (s *xlSets) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (s *erasureSets) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { type delObj struct { // Set index associated to this object setIndex int // Original index from the list of arguments // where this object is passed origIndex int - // Object name - name string + // object to delete + object ObjectToDelete } // Transform []delObj to the list of object names - toNames := func(delObjs []delObj) []string { - names := make([]string, len(delObjs)) + toNames := func(delObjs []delObj) []ObjectToDelete { + objs := make([]ObjectToDelete, len(delObjs)) for i, obj := range delObjs { - names[i] = obj.name + objs[i] = obj.object } - return names + return objs } // The result of delete operation on all passed objects var delErrs = make([]error, len(objects)) + // The result of delete objects + var delObjects = make([]DeletedObject, len(objects)) + // A map between a set and its associated objects var objSetMap = make(map[int][]delObj) // Group objects by set index for i, object := range objects { - index := s.getHashedSetIndex(object) - objSetMap[index] = append(objSetMap[index], delObj{setIndex: index, origIndex: i, name: object}) + index := s.getHashedSetIndex(object.ObjectName) + objSetMap[index] = append(objSetMap[index], delObj{setIndex: index, origIndex: i, object: object}) } // Invoke bulk delete on objects per set and save // the result of the delete operation for _, objsGroup := range objSetMap { - errs, err := s.getHashedSet(objsGroup[0].name).DeleteObjects(ctx, bucket, toNames(objsGroup)) - if err != nil { - return nil, err - } + dobjects, errs := s.getHashedSet(objsGroup[0].object.ObjectName).DeleteObjects(ctx, bucket, toNames(objsGroup), opts) for i, obj := range objsGroup { delErrs[obj.origIndex] = errs[i] + if delErrs[obj.origIndex] == nil { + delObjects[obj.origIndex] = dobjects[i] + } } } - return delErrs, nil + return delObjects, delErrs } // CopyObject - copies objects from one hashedSet to another hashedSet, on server side. -func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) { +func (s *erasureSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) { srcSet := s.getHashedSet(srcObject) dstSet := s.getHashedSet(dstObject) @@ -800,6 +798,29 @@ func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket return dstSet.putObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts) } +// FileInfoVersionsCh - file info versions channel +type FileInfoVersionsCh struct { + Ch chan FileInfoVersions + Prev FileInfoVersions + Valid bool +} + +// Pop - pops a cached entry if any, or from the cached channel. +func (f *FileInfoVersionsCh) Pop() (fi FileInfoVersions, ok bool) { + if f.Valid { + f.Valid = false + return f.Prev, true + } // No cached entries found, read from channel + f.Prev, ok = <-f.Ch + return f.Prev, ok +} + +// Push - cache an entry, for Pop() later. 
+func (f *FileInfoVersionsCh) Push(fi FileInfoVersions) { + f.Prev = fi + f.Valid = true +} + // FileInfoCh - file info channel type FileInfoCh struct { Ch chan FileInfo @@ -830,7 +851,7 @@ func (f *FileInfoCh) Push(fi FileInfo) { // again to list the next entry. It is callers responsibility // if the caller wishes to list N entries to call lexicallySortedEntry // N times until this boolean is 'false'. -func lexicallySortedEntry(entryChs []FileInfoCh, entries []FileInfo, entriesValid []bool) (FileInfo, int, bool) { +func lexicallySortedEntryVersions(entryChs []FileInfoVersionsCh, entries []FileInfoVersions, entriesValid []bool) (FileInfoVersions, int, bool) { for i := range entryChs { entries[i], entriesValid[i] = entryChs[i].Pop() } @@ -844,7 +865,7 @@ func lexicallySortedEntry(entryChs []FileInfoCh, entries []FileInfo, entriesVali break } - var lentry FileInfo + var lentry FileInfoVersions var found bool for i, valid := range entriesValid { if !valid { @@ -874,7 +895,7 @@ func lexicallySortedEntry(entryChs []FileInfoCh, entries []FileInfo, entriesVali // Entries are duplicated across disks, // we should simply skip such entries. - if lentry.Name == entries[i].Name && lentry.ModTime.Equal(entries[i].ModTime) { + if lentry.Name == entries[i].Name && lentry.LatestModTime.Equal(entries[i].LatestModTime) { lexicallySortedEntryCount++ continue } @@ -887,61 +908,47 @@ func lexicallySortedEntry(entryChs []FileInfoCh, entries []FileInfo, entriesVali return lentry, lexicallySortedEntryCount, isTruncated } -// mergeEntriesCh - merges FileInfo channel to entries upto maxKeys. -func mergeEntriesCh(entryChs []FileInfoCh, maxKeys int, ndisks int) (entries FilesInfo) { - var i = 0 - entriesInfos := make([]FileInfo, len(entryChs)) - entriesValid := make([]bool, len(entryChs)) - for { - fi, quorumCount, valid := lexicallySortedEntry(entryChs, entriesInfos, entriesValid) - if !valid { - // We have reached EOF across all entryChs, break the loop. - break - } - - if quorumCount < ndisks-1 { - // Skip entries which are not found on upto ndisks. - continue - } - - entries.Files = append(entries.Files, fi) - i++ - if i == maxKeys { - entries.IsTruncated = isTruncated(entryChs, entriesInfos, entriesValid) - break - } - } - return entries -} - -func isTruncated(entryChs []FileInfoCh, entries []FileInfo, entriesValid []bool) bool { - for i := range entryChs { - entries[i], entriesValid[i] = entryChs[i].Pop() - } - - var isTruncated = false - for _, valid := range entriesValid { - if !valid { - continue - } - isTruncated = true - break - } - for i := range entryChs { - if entriesValid[i] { - entryChs[i].Push(entries[i]) - } - } - return isTruncated -} - -func (s *xlSets) startMergeWalks(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}) []FileInfoCh { +func (s *erasureSets) startMergeWalks(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}) []FileInfoCh { return s.startMergeWalksN(ctx, bucket, prefix, marker, recursive, endWalkCh, -1) } -// Starts a walk channel across all disks and returns a slice of -// FileInfo channels which can be read from. 
-func (s *xlSets) startMergeWalksN(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}, ndisks int) []FileInfoCh {
+func (s *erasureSets) startMergeWalksVersions(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}) []FileInfoVersionsCh {
+	return s.startMergeWalksVersionsN(ctx, bucket, prefix, marker, recursive, endWalkCh, -1)
+}
+
+// Starts a walk versions channel across n disks and returns a slice of
+// FileInfoVersionsCh which can be read from.
+func (s *erasureSets) startMergeWalksVersionsN(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}, ndisks int) []FileInfoVersionsCh {
+	var entryChs []FileInfoVersionsCh
+	var success int
+	for _, set := range s.sets {
+		// Reset for the next erasure set.
+		success = ndisks
+		for _, disk := range set.getLoadBalancedDisks() {
+			if disk == nil {
+				// Disk can be offline
+				continue
+			}
+			entryCh, err := disk.WalkVersions(bucket, prefix, marker, recursive, endWalkCh)
+			if err != nil {
+				// Disk walk returned error, ignore it.
+				continue
+			}
+			entryChs = append(entryChs, FileInfoVersionsCh{
+				Ch: entryCh,
+			})
+			success--
+			if success == 0 {
+				break
+			}
+		}
+	}
+	return entryChs
+}
+
+// Starts a walk channel across n disks and returns a slice of
+// FileInfoCh which can be read from.
+func (s *erasureSets) startMergeWalksN(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}, ndisks int) []FileInfoCh {
 	var entryChs []FileInfoCh
 	var success int
 	for _, set := range s.sets {
@@ -952,7 +959,7 @@ func (s *xlSets) startMergeWalksN(ctx context.Context, bucket, prefix, marker st
 			// Disk can be offline
 			continue
 		}
-		entryCh, err := disk.Walk(bucket, prefix, marker, recursive, xlMetaJSONFile, readMetadata, endWalkCh)
+		entryCh, err := disk.Walk(bucket, prefix, marker, recursive, endWalkCh)
 		if err != nil {
 			// Disk walk returned error, ignore it.
 			continue
@@ -969,9 +976,9 @@ func (s *xlSets) startMergeWalksN(ctx context.Context, bucket, prefix, marker st
 	return entryChs
 }
 
-// Starts a walk channel across all disks and returns a slice of
+// Starts a walk channel across n disks and returns a slice of
 // FileInfo channels which can be read from.
-func (s *xlSets) startSplunkMergeWalksN(ctx context.Context, bucket, prefix, marker string, endWalkCh <-chan struct{}, ndisks int) []FileInfoCh {
+func (s *erasureSets) startSplunkMergeWalksN(ctx context.Context, bucket, prefix, marker string, endWalkCh <-chan struct{}, ndisks int) []FileInfoCh {
 	var entryChs []FileInfoCh
 	var success int
 	for _, set := range s.sets {
@@ -999,207 +1006,35 @@ func (s *xlSets) startSplunkMergeWalksN(ctx context.Context, bucket, prefix, mar
 	return entryChs
 }
 
-func (s *xlSets) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
-	endWalkCh := make(chan struct{})
-	defer close(endWalkCh)
-
-	const ndisks = 3
-	entryChs := s.startMergeWalksN(GlobalContext, bucket, prefix, "", true, endWalkCh, ndisks)
-
-	var objInfos []ObjectInfo
-	var eof bool
-	var prevPrefix string
-
-	entriesValid := make([]bool, len(entryChs))
-	entries := make([]FileInfo, len(entryChs))
-	for {
-		if len(objInfos) == maxKeys {
-			break
-		}
-
-		result, quorumCount, ok := lexicallySortedEntry(entryChs, entries, entriesValid)
-		if !ok {
-			eof = true
-			break
-		}
-
-		if quorumCount < ndisks-1 {
-			// Skip entries which are not found on upto ndisks.
- continue - } - - var objInfo ObjectInfo - - index := strings.Index(strings.TrimPrefix(result.Name, prefix), delimiter) - if index == -1 { - objInfo = ObjectInfo{ - IsDir: false, - Bucket: bucket, - Name: result.Name, - ModTime: result.ModTime, - Size: result.Size, - ContentType: result.Metadata["content-type"], - ContentEncoding: result.Metadata["content-encoding"], - } - - // Extract etag from metadata. - objInfo.ETag = extractETag(result.Metadata) - - // All the parts per object. - objInfo.Parts = result.Parts - - // etag/md5Sum has already been extracted. We need to - // remove to avoid it from appearing as part of - // response headers. e.g, X-Minio-* or X-Amz-*. - objInfo.UserDefined = cleanMetadata(result.Metadata) - - // Update storage class - if sc, ok := result.Metadata[xhttp.AmzStorageClass]; ok { - objInfo.StorageClass = sc - } else { - objInfo.StorageClass = globalMinioDefaultStorageClass - } - } else { - index = len(prefix) + index + len(delimiter) - currPrefix := result.Name[:index] - if currPrefix == prevPrefix { - continue - } - prevPrefix = currPrefix - - objInfo = ObjectInfo{ - Bucket: bucket, - Name: currPrefix, - IsDir: true, - } - } - - if objInfo.Name <= marker { - continue - } - - objInfos = append(objInfos, objInfo) - } - - result := ListObjectsInfo{} - for _, objInfo := range objInfos { - if objInfo.IsDir { - result.Prefixes = append(result.Prefixes, objInfo.Name) - continue - } - result.Objects = append(result.Objects, objInfo) - } - - if !eof { - result.IsTruncated = true - if len(objInfos) > 0 { - result.NextMarker = objInfos[len(objInfos)-1].Name - } - } - - return result, nil -} - -// ListObjects - implements listing of objects across disks, each disk is independently +// ListObjectVersions - implements listing of objects across disks, each disk is indepenently // walked and merged at this layer. Resulting value through the merge process sends // the data in lexically sorted order. -// If partialQuorumOnly is set only objects that does not have full quorum is returned. -func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { - if err = checkListObjsArgs(ctx, bucket, prefix, marker, s); err != nil { - return loi, err - } - - // Marker is set validate pre-condition. - if marker != "" { - // Marker not common with prefix is not implemented. Send an empty response - if !HasPrefix(marker, prefix) { - return loi, nil - } - } - - // With max keys of zero we have reached eof, return right here. - if maxKeys == 0 { - return loi, nil - } - - // For delimiter and prefix as '/' we do not list anything at all - // since according to s3 spec we stop at the 'delimiter' - // along // with the prefix. On a flat namespace with 'prefix' - // as '/' we don't have any entries, since all the keys are - // of form 'keyName/...' - if delimiter == SlashSeparator && prefix == SlashSeparator { - return loi, nil - } - - // Over flowing count - reset to maxObjectList. - if maxKeys < 0 || maxKeys > maxObjectList { - maxKeys = maxObjectList - } - - if delimiter != SlashSeparator && delimiter != "" { - // "heal" option passed can be ignored as the heal-listing does not send non-standard delimiter. - return s.listObjectsNonSlash(ctx, bucket, prefix, marker, delimiter, maxKeys) - } - - // Default is recursive, if delimiter is set then list non recursive. 
- recursive := true - if delimiter == SlashSeparator { - recursive = false - } - - const ndisks = 3 - - entryChs, endWalkCh := s.pool.Release(listParams{bucket: bucket, recursive: recursive, marker: marker, prefix: prefix}) - if entryChs == nil { - endWalkCh = make(chan struct{}) - // start file tree walk across at most randomly 3 disks in a set. - entryChs = s.startMergeWalksN(GlobalContext, bucket, prefix, marker, recursive, endWalkCh, ndisks) - } - - entries := mergeEntriesCh(entryChs, maxKeys, ndisks) - if len(entries.Files) == 0 { - return loi, nil - } - - loi.IsTruncated = entries.IsTruncated - if loi.IsTruncated { - loi.NextMarker = entries.Files[len(entries.Files)-1].Name - } - - for _, entry := range entries.Files { - objInfo := entry.ToObjectInfo() - if HasSuffix(objInfo.Name, SlashSeparator) && !recursive { - loi.Prefixes = append(loi.Prefixes, entry.Name) - continue - } - loi.Objects = append(loi.Objects, objInfo) - } - if loi.IsTruncated { - s.pool.Set(listParams{bucket, recursive, loi.NextMarker, prefix}, entryChs, endWalkCh) - } - return loi, nil +func (s *erasureSets) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionIDMarker, delimiter string, maxKeys int) (loi ListObjectVersionsInfo, err error) { + // Shouldn't be called directly, caller Zones already has an implementation + return loi, NotImplemented{} } // ListObjects - implements listing of objects across disks, each disk is indepenently // walked and merged at this layer. Resulting value through the merge process sends // the data in lexically sorted order. -func (s *xlSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { - return s.listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) +func (s *erasureSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { + // Shouldn't be called directly, caller Zones already has an implementation + return loi, NotImplemented{} } -func (s *xlSets) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) { +func (s *erasureSets) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) { // In list multipart uploads we are going to treat input prefix as the object, // this means that we are not supporting directory navigation. return s.getHashedSet(prefix).ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) } // Initiate a new multipart upload on a hashedSet based on object name. -func (s *xlSets) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) { +func (s *erasureSets) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) { return s.getHashedSet(object).NewMultipartUpload(ctx, bucket, object, opts) } // Copies a part of an object from source hashedSet to destination hashedSet. 
-func (s *xlSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, +func (s *erasureSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (partInfo PartInfo, err error) { destSet := s.getHashedSet(destObject) @@ -1207,27 +1042,27 @@ func (s *xlSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destB } // PutObjectPart - writes part of an object to hashedSet based on the object name. -func (s *xlSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) { +func (s *erasureSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) { return s.getHashedSet(object).PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts) } // GetMultipartInfo - return multipart metadata info uploaded at hashedSet. -func (s *xlSets) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (result MultipartInfo, err error) { +func (s *erasureSets) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (result MultipartInfo, err error) { return s.getHashedSet(object).GetMultipartInfo(ctx, bucket, object, uploadID, opts) } // ListObjectParts - lists all uploaded parts to an object in hashedSet. -func (s *xlSets) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) { +func (s *erasureSets) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) { return s.getHashedSet(object).ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts) } // Aborts an in-progress multipart operation on hashedSet based on the object name. -func (s *xlSets) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { +func (s *erasureSets) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { return s.getHashedSet(object).AbortMultipartUpload(ctx, bucket, object, uploadID) } // CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name. -func (s *xlSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) { +func (s *erasureSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) { return s.getHashedSet(object).CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts) } @@ -1284,7 +1119,7 @@ else fi */ -func formatsToDrivesInfo(endpoints Endpoints, formats []*formatXLV3, sErrs []error) (beforeDrives []madmin.DriveInfo) { +func formatsToDrivesInfo(endpoints Endpoints, formats []*formatErasureV3, sErrs []error) (beforeDrives []madmin.DriveInfo) { beforeDrives = make([]madmin.DriveInfo, len(endpoints)) // Existing formats are available (i.e. ok), so save it in // result, also populate disks to be healed. 
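Every multipart handler above is a one-line forward: the object name alone decides which erasure set serves the request via s.getHashedSet(object), so NewMultipartUpload, PutObjectPart, ListObjectParts and CompleteMultipartUpload for one object all land on the same set. A sketch of how such deterministic placement can work under the CRCMOD distribution algorithm exercised by the tests below; the helper name and the exact hash details here are assumptions, not MinIO's verbatim code.

package main

import (
	"fmt"
	"hash/crc32"
)

// hashedSetIndex maps an object name onto one of setCount sets.
// Same name, same set - which is what makes the per-object forwarding
// in the handlers above consistent across requests.
func hashedSetIndex(object string, setCount int) int {
	if setCount <= 0 {
		return -1 // mirrors the -1 the hash helpers return on bad cardinality
	}
	return int(crc32.Checksum([]byte(object), crc32.IEEETable) % uint32(setCount))
}

func main() {
	for _, name := range []string{"object", "a/b/c/", "/a/b/c"} {
		fmt.Printf("%q -> set %d\n", name, hashedSetIndex(name, 16))
	}
}

Because the mapping depends only on the name and the set count, no lookup table has to be maintained or replicated between nodes.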
@@ -1302,7 +1137,7 @@ func formatsToDrivesInfo(endpoints Endpoints, formats []*formatXLV3, sErrs []err beforeDrives[i] = madmin.DriveInfo{ UUID: func() string { if format != nil { - return format.XL.This + return format.Erasure.This } return "" }(), @@ -1316,7 +1151,7 @@ func formatsToDrivesInfo(endpoints Endpoints, formats []*formatXLV3, sErrs []err // Reloads the format from the disk, usually called by a remote peer notifier while // healing in a distributed setup. -func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { +func (s *erasureSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { storageDisks, errs := initStorageDisksWithErrors(s.endpoints) for i, err := range errs { if err != nil && err != errDiskNotFound { @@ -1329,8 +1164,8 @@ func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { } }(storageDisks) - formats, sErrs := loadFormatXLAll(storageDisks, false) - if err = checkFormatXLValues(formats, s.drivesPerSet); err != nil { + formats, sErrs := loadFormatErasureAll(storageDisks, false) + if err = checkFormatErasureValues(formats, s.drivesPerSet); err != nil { return err } @@ -1344,7 +1179,7 @@ func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { } } - refFormat, err := getFormatXLInQuorum(formats) + refFormat, err := getFormatErasureInQuorum(formats) if err != nil { return err } @@ -1358,7 +1193,7 @@ func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { s.format = refFormat // Close all existing disks and reconnect all the disks. - s.xlDisksMu.Lock() + s.erasureDisksMu.Lock() for _, disk := range storageDisks { if disk == nil { continue @@ -1376,14 +1211,14 @@ func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { continue } - if s.xlDisks[m][n] != nil { - s.xlDisks[m][n].Close() + if s.erasureDisks[m][n] != nil { + s.erasureDisks[m][n].Close() } s.endpointStrings[m*s.drivesPerSet+n] = disk.String() - s.xlDisks[m][n] = disk + s.erasureDisks[m][n] = disk } - s.xlDisksMu.Unlock() + s.erasureDisksMu.Unlock() // Restart monitoring loop to monitor reformatted disks again. go s.monitorAndConnectEndpoints(GlobalContext, defaultMonitorConnectEndpointInterval) @@ -1391,7 +1226,7 @@ func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { return nil } -// If it is a single node XL and all disks are root disks, it is most likely a test setup, else it is a production setup. +// If it is a single node Erasure and all disks are root disks, it is most likely a test setup, else it is a production setup. // On a test setup we allow creation of format.json on root disks to help with dev/testing. func isTestSetup(infos []DiskInfo, errs []error) bool { rootDiskCount := 0 @@ -1450,7 +1285,7 @@ func markRootDisksAsDown(storageDisks []StorageAPI) { } // HealFormat - heals missing `format.json` on fresh unformatted disks. 
-func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealResultItem, err error) { +func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealResultItem, err error) { storageDisks, errs := initStorageDisksWithErrors(s.endpoints) for i, derr := range errs { if derr != nil && derr != errDiskNotFound { @@ -1466,8 +1301,8 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe markRootDisksAsDown(storageDisks) - formats, sErrs := loadFormatXLAll(storageDisks, true) - if err = checkFormatXLValues(formats, s.drivesPerSet); err != nil { + formats, sErrs := loadFormatErasureAll(storageDisks, true) + if err = checkFormatErasureValues(formats, s.drivesPerSet); err != nil { return madmin.HealResultItem{}, err } @@ -1506,7 +1341,7 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe return res, errNoHealRequired } - refFormat, err := getFormatXLInQuorum(formats) + refFormat, err := getFormatErasureInQuorum(formats) if err != nil { return res, err } @@ -1522,19 +1357,19 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe // such that we can fill them up with new UUIDs, this looping also // ensures that the replaced disks allocated evenly across all sets. // Making sure that the redundancy is not lost. - for i := range refFormat.XL.Sets { - for j := range refFormat.XL.Sets[i] { - if refFormat.XL.Sets[i][j] == offlineDiskUUID { + for i := range refFormat.Erasure.Sets { + for j := range refFormat.Erasure.Sets[i] { + if refFormat.Erasure.Sets[i][j] == offlineDiskUUID { for l := range newFormatSets[i] { if newFormatSets[i][l] == nil { continue } - if newFormatSets[i][l].XL.This == "" { - newFormatSets[i][l].XL.This = mustGetUUID() - refFormat.XL.Sets[i][j] = newFormatSets[i][l].XL.This + if newFormatSets[i][l].Erasure.This == "" { + newFormatSets[i][l].Erasure.This = mustGetUUID() + refFormat.Erasure.Sets[i][j] = newFormatSets[i][l].Erasure.This for m, v := range res.After.Drives { if v.Endpoint == s.endpoints.GetString(i*s.drivesPerSet+l) { - res.After.Drives[m].UUID = newFormatSets[i][l].XL.This + res.After.Drives[m].UUID = newFormatSets[i][l].Erasure.This res.After.Drives[m].State = madmin.DriveStateOk } } @@ -1546,19 +1381,19 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe } if !dryRun { - var tmpNewFormats = make([]*formatXLV3, s.setCount*s.drivesPerSet) + var tmpNewFormats = make([]*formatErasureV3, s.setCount*s.drivesPerSet) for i := range newFormatSets { for j := range newFormatSets[i] { if newFormatSets[i][j] == nil { continue } tmpNewFormats[i*s.drivesPerSet+j] = newFormatSets[i][j] - tmpNewFormats[i*s.drivesPerSet+j].XL.Sets = refFormat.XL.Sets + tmpNewFormats[i*s.drivesPerSet+j].Erasure.Sets = refFormat.Erasure.Sets } } // Save formats `format.json` across all disks. - if err = saveFormatXLAll(ctx, storageDisks, tmpNewFormats); err != nil { + if err = saveFormatErasureAll(ctx, storageDisks, tmpNewFormats); err != nil { return madmin.HealResultItem{}, err } @@ -1571,7 +1406,7 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe s.format = refFormat // Disconnect/relinquish all existing disks, lockers and reconnect the disks, lockers. 
- s.xlDisksMu.Lock() + s.erasureDisksMu.Lock() for _, disk := range storageDisks { if disk == nil { continue @@ -1589,14 +1424,14 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe continue } - if s.xlDisks[m][n] != nil { - s.xlDisks[m][n].Close() + if s.erasureDisks[m][n] != nil { + s.erasureDisks[m][n].Close() } s.endpointStrings[m*s.drivesPerSet+n] = disk.String() - s.xlDisks[m][n] = disk + s.erasureDisks[m][n] = disk } - s.xlDisksMu.Unlock() + s.erasureDisksMu.Unlock() // Restart our monitoring loop to start monitoring newly formatted disks. go s.monitorAndConnectEndpoints(GlobalContext, defaultMonitorConnectEndpointInterval) @@ -1606,7 +1441,7 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe } // HealBucket - heals inconsistent buckets and bucket metadata on all sets. -func (s *xlSets) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (result madmin.HealResultItem, err error) { +func (s *erasureSets) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (result madmin.HealResultItem, err error) { // Initialize heal result info result = madmin.HealResultItem{ Type: madmin.HealItemBucket, @@ -1628,21 +1463,21 @@ func (s *xlSets) HealBucket(ctx context.Context, bucket string, dryRun, remove b // Check if we had quorum to write, if not return an appropriate error. _, afterDriveOnline := result.GetOnlineCounts() if afterDriveOnline < ((s.setCount*s.drivesPerSet)/2)+1 { - return result, toObjectErr(errXLWriteQuorum, bucket) + return result, toObjectErr(errErasureWriteQuorum, bucket) } return result, nil } // HealObject - heals inconsistent object on a hashedSet based on object name. -func (s *xlSets) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) { - return s.getHashedSet(object).HealObject(ctx, bucket, object, opts) +func (s *erasureSets) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) { + return s.getHashedSet(object).HealObject(ctx, bucket, object, versionID, opts) } // Lists all buckets which need healing. -func (s *xlSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { +func (s *erasureSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { var listBuckets []BucketInfo - var healBuckets = make(map[string]VolInfo) + var healBuckets = map[string]VolInfo{} for _, set := range s.sets { // lists all unique buckets across drives. if err := listAllBuckets(set.getDisks(), healBuckets); err != nil { @@ -1661,29 +1496,35 @@ func (s *xlSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { // to allocate a receive channel for ObjectInfo, upon any unhandled // error walker returns error. Optionally if context.Done() is received // then Walk() stops the walker. -func (s *xlSets) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error { +func (s *erasureSets) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error { if err := checkListObjsArgs(ctx, bucket, prefix, "", s); err != nil { // Upon error close the channel. 
close(results) return err } - entryChs := s.startMergeWalks(ctx, bucket, prefix, "", true, ctx.Done()) + entryChs := s.startMergeWalksVersions(ctx, bucket, prefix, "", true, ctx.Done()) entriesValid := make([]bool, len(entryChs)) - entries := make([]FileInfo, len(entryChs)) + entries := make([]FileInfoVersions, len(entryChs)) go func() { defer close(results) for { - entry, quorumCount, ok := lexicallySortedEntry(entryChs, entries, entriesValid) + entry, quorumCount, ok := lexicallySortedEntryVersions(entryChs, entries, entriesValid) if !ok { return } if quorumCount >= s.drivesPerSet/2 { - results <- entry.ToObjectInfo() // Read quorum exists proceed + // Read quorum exists proceed + for _, version := range entry.Versions { + results <- version.ToObjectInfo(bucket, version.Name) + } + for _, deleted := range entry.Deleted { + results <- deleted.ToObjectInfo(bucket, deleted.Name) + } } // skip entries which do not have quorum } @@ -1694,16 +1535,16 @@ func (s *xlSets) Walk(ctx context.Context, bucket, prefix string, results chan<- // HealObjects - Heal all objects recursively at a specified prefix, any // dangling objects deleted as well automatically. -func (s *xlSets) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject healObjectFn) error { +func (s *erasureSets) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject HealObjectFn) error { endWalkCh := make(chan struct{}) defer close(endWalkCh) - entryChs := s.startMergeWalks(ctx, bucket, prefix, "", true, endWalkCh) + entryChs := s.startMergeWalksVersions(ctx, bucket, prefix, "", true, endWalkCh) entriesValid := make([]bool, len(entryChs)) - entries := make([]FileInfo, len(entryChs)) + entries := make([]FileInfoVersions, len(entryChs)) for { - entry, quorumCount, ok := lexicallySortedEntry(entryChs, entries, entriesValid) + entry, quorumCount, ok := lexicallySortedEntryVersions(entryChs, entries, entriesValid) if !ok { break } @@ -1716,8 +1557,10 @@ func (s *xlSets) HealObjects(ctx context.Context, bucket, prefix string, opts ma // Wait and proceed if there are active requests waitForLowHTTPReq(int32(s.drivesPerSet)) - if err := healObject(bucket, entry.Name); err != nil { - return toObjectErr(err, bucket, entry.Name) + for _, version := range entry.Versions { + if err := healObject(bucket, version.Name, version.VersionID); err != nil { + return toObjectErr(err, bucket, version.Name) + } } } @@ -1725,32 +1568,37 @@ func (s *xlSets) HealObjects(ctx context.Context, bucket, prefix string, opts ma } // PutObjectTags - replace or add tags to an existing object -func (s *xlSets) PutObjectTags(ctx context.Context, bucket, object string, tags string) error { - return s.getHashedSet(object).PutObjectTags(ctx, bucket, object, tags) +func (s *erasureSets) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error { + return s.getHashedSet(object).PutObjectTags(ctx, bucket, object, tags, opts) } // DeleteObjectTags - delete object tags from an existing object -func (s *xlSets) DeleteObjectTags(ctx context.Context, bucket, object string) error { - return s.getHashedSet(object).DeleteObjectTags(ctx, bucket, object) +func (s *erasureSets) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error { + return s.getHashedSet(object).DeleteObjectTags(ctx, bucket, object, opts) } // GetObjectTags - get object tags from an existing object -func (s *xlSets) GetObjectTags(ctx context.Context, bucket, object string) 
(*tags.Tags, error) {
-	return s.getHashedSet(object).GetObjectTags(ctx, bucket, object)
+func (s *erasureSets) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
+	return s.getHashedSet(object).GetObjectTags(ctx, bucket, object, opts)
 }
 
 // GetMetrics - no op
-func (s *xlSets) GetMetrics(ctx context.Context) (*Metrics, error) {
+func (s *erasureSets) GetMetrics(ctx context.Context) (*Metrics, error) {
 	logger.LogIf(ctx, NotImplemented{})
 	return &Metrics{}, NotImplemented{}
 }
 
+// IsReady - Returns true if at least n/2 disks (read quorum) are online
+func (s *erasureSets) IsReady(_ context.Context) bool {
+	return false
+}
+
 // maintainMRFList gathers the list of successful partial uploads
-// from all underlying xl sets and puts them in a global map which
+// from all underlying erasure sets and puts them in a global map which
 // should not have more than 10000 entries.
-func (s *xlSets) maintainMRFList() {
+func (s *erasureSets) maintainMRFList() {
 	var agg = make(chan partialUpload, 10000)
-	for i, xl := range s.sets {
+	for i, er := range s.sets {
 		go func(c <-chan partialUpload, setIndex int) {
 			for msg := range c {
 				msg.failedSet = setIndex
@@ -1759,7 +1607,7 @@ func (s *xlSets) maintainMRFList() {
 				default:
 				}
 			}
-		}(xl.mrfUploadCh, i)
+		}(er.mrfUploadCh, i)
 	}
 
 	for fUpload := range agg {
@@ -1768,14 +1616,17 @@ func (s *xlSets) maintainMRFList() {
 			s.mrfMU.Unlock()
 			continue
 		}
-		s.mrfUploads[pathJoin(fUpload.bucket, fUpload.object)] = fUpload.failedSet
+		s.mrfUploads[healSource{
+			bucket: fUpload.bucket,
+			object: fUpload.object,
+		}] = fUpload.failedSet
 		s.mrfMU.Unlock()
 	}
 }
 
 // healMRFRoutine monitors new disks connection, sweep the MRF list
 // to find objects related to the new disk that needs to be healed.
-func (s *xlSets) healMRFRoutine() {
+func (s *erasureSets) healMRFRoutine() {
 	// Wait until background heal state is initialized
 	var bgSeq *healSequence
 	for {
@@ -1792,9 +1643,9 @@ func (s *xlSets) healMRFRoutine() {
 	}
 
 	for e := range s.disksConnectEvent {
-		// Get the list of objects related the xl set
+		// Get the list of objects related to the erasure set
 		// to which the connected disk belongs.
-		var mrfUploads []string
+		var mrfUploads []healSource
 		s.mrfMU.Lock()
 		for k, v := range s.mrfUploads {
 			if v == e.setIndex {
@@ -1807,7 +1658,7 @@ func (s *xlSets) healMRFRoutine() {
 		for _, u := range mrfUploads {
 			// Send an object to be healed with a timeout
 			select {
-			case bgSeq.sourceCh <- healSource{path: u}:
+			case bgSeq.sourceCh <- u:
 			case <-time.After(100 * time.Millisecond):
 			}
 
diff --git a/cmd/erasure-sets_test.go b/cmd/erasure-sets_test.go
new file mode 100644
index 000000000..7a66c614f
--- /dev/null
+++ b/cmd/erasure-sets_test.go
@@ -0,0 +1,245 @@
+/*
+ * MinIO Cloud Storage, (C) 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package cmd + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/google/uuid" +) + +var testUUID = uuid.MustParse("f5c58c61-7175-4018-ab5e-a94fe9c2de4e") + +func BenchmarkCrcHash(b *testing.B) { + cases := []struct { + key int + }{ + {16}, + {64}, + {128}, + {256}, + {512}, + {1024}, + } + for _, testCase := range cases { + testCase := testCase + key := randString(testCase.key) + b.Run("", func(b *testing.B) { + b.SetBytes(1024) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + crcHashMod(key, 16) + } + }) + } +} + +func BenchmarkSipHash(b *testing.B) { + cases := []struct { + key int + }{ + {16}, + {64}, + {128}, + {256}, + {512}, + {1024}, + } + for _, testCase := range cases { + testCase := testCase + key := randString(testCase.key) + b.Run("", func(b *testing.B) { + b.SetBytes(1024) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + sipHashMod(key, 16, testUUID) + } + }) + } +} + +// TestSipHashMod - test sip hash. +func TestSipHashMod(t *testing.T) { + testCases := []struct { + objectName string + sipHash int + }{ + // cases which should pass the test. + // passing in valid object name. + {"object", 37}, + {"The Shining Script .pdf", 38}, + {"Cost Benefit Analysis (2009-2010).pptx", 59}, + {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", 35}, + {"SHØRT", 49}, + {"There are far too many object names, and far too few bucket names!", 8}, + {"a/b/c/", 159}, + {"/a/b/c", 96}, + {string([]byte{0xff, 0xfe, 0xfd}), 147}, + } + + // Tests hashing order to be consistent. + for i, testCase := range testCases { + if sipHashElement := hashKey("SIPMOD", testCase.objectName, 200, testUUID); sipHashElement != testCase.sipHash { + t.Errorf("Test case %d: Expected \"%v\" but failed \"%v\"", i+1, testCase.sipHash, sipHashElement) + } + } + + if sipHashElement := hashKey("SIPMOD", "This will fail", -1, testUUID); sipHashElement != -1 { + t.Errorf("Test: Expected \"-1\" but got \"%v\"", sipHashElement) + } + + if sipHashElement := hashKey("SIPMOD", "This will fail", 0, testUUID); sipHashElement != -1 { + t.Errorf("Test: Expected \"-1\" but got \"%v\"", sipHashElement) + } + + if sipHashElement := hashKey("UNKNOWN", "This will fail", 0, testUUID); sipHashElement != -1 { + t.Errorf("Test: Expected \"-1\" but got \"%v\"", sipHashElement) + } +} + +// TestCrcHashMod - test crc hash. +func TestCrcHashMod(t *testing.T) { + testCases := []struct { + objectName string + crcHash int + }{ + // cases which should pass the test. + // passing in valid object name. + {"object", 28}, + {"The Shining Script .pdf", 142}, + {"Cost Benefit Analysis (2009-2010).pptx", 133}, + {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", 185}, + {"SHØRT", 97}, + {"There are far too many object names, and far too few bucket names!", 101}, + {"a/b/c/", 193}, + {"/a/b/c", 116}, + {string([]byte{0xff, 0xfe, 0xfd}), 61}, + } + + // Tests hashing order to be consistent. 
+ for i, testCase := range testCases { + if crcHashElement := hashKey("CRCMOD", testCase.objectName, 200, testUUID); crcHashElement != testCase.crcHash { + t.Errorf("Test case %d: Expected \"%v\" but failed \"%v\"", i+1, testCase.crcHash, crcHashElement) + } + } + + if crcHashElement := hashKey("CRCMOD", "This will fail", -1, testUUID); crcHashElement != -1 { + t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) + } + + if crcHashElement := hashKey("CRCMOD", "This will fail", 0, testUUID); crcHashElement != -1 { + t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) + } + + if crcHashElement := hashKey("UNKNOWN", "This will fail", 0, testUUID); crcHashElement != -1 { + t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) + } +} + +// TestNewErasure - tests initialization of all input disks +// and constructs a valid `Erasure` object +func TestNewErasureSets(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var nDisks = 16 // Maximum disks. + var erasureDisks []string + for i := 0; i < nDisks; i++ { + // Do not attempt to create this path, the test validates + // so that newErasureSets initializes non existing paths + // and successfully returns initialized object layer. + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) + erasureDisks = append(erasureDisks, disk) + defer os.RemoveAll(disk) + } + + endpoints := mustGetNewEndpoints(erasureDisks...) + _, _, err := waitForFormatErasure(true, endpoints, 1, 0, 16, "") + if err != errInvalidArgument { + t.Fatalf("Expecting error, got %s", err) + } + + _, _, err = waitForFormatErasure(true, nil, 1, 1, 16, "") + if err != errInvalidArgument { + t.Fatalf("Expecting error, got %s", err) + } + + // Initializes all erasure disks + storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "") + if err != nil { + t.Fatalf("Unable to format disks for erasure, %s", err) + } + + if _, err := newErasureSets(ctx, endpoints, storageDisks, format); err != nil { + t.Fatalf("Unable to initialize erasure") + } +} + +// TestHashedLayer - tests the hashed layer which will be returned +// consistently for a given object name. +func TestHashedLayer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var objs []*erasureObjects + for i := 0; i < 16; i++ { + obj, fsDirs, err := prepareErasure16(ctx) + if err != nil { + t.Fatal("Unable to initialize 'Erasure' object layer.", err) + } + + // Remove all dirs. + for _, dir := range fsDirs { + defer os.RemoveAll(dir) + } + + z := obj.(*erasureZones) + objs = append(objs, z.zones[0].sets[0]) + } + + sets := &erasureSets{sets: objs, distributionAlgo: "CRCMOD"} + + testCases := []struct { + objectName string + expectedObj *erasureObjects + }{ + // cases which should pass the test. + // passing in valid object name. + {"object", objs[12]}, + {"The Shining Script .pdf", objs[14]}, + {"Cost Benefit Analysis (2009-2010).pptx", objs[13]}, + {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", objs[1]}, + {"SHØRT", objs[9]}, + {"There are far too many object names, and far too few bucket names!", objs[13]}, + {"a/b/c/", objs[1]}, + {"/a/b/c", objs[4]}, + {string([]byte{0xff, 0xfe, 0xfd}), objs[13]}, + } + + // Tests hashing order to be consistent. 
+ for i, testCase := range testCases { + gotObj := sets.getHashedSet(testCase.objectName) + if gotObj != testCase.expectedObj { + t.Errorf("Test case %d: Expected \"%#v\" but failed \"%#v\"", i+1, testCase.expectedObj, gotObj) + } + } +} diff --git a/cmd/erasure-utils.go b/cmd/erasure-utils.go index 2f844da87..ad2e197d7 100644 --- a/cmd/erasure-utils.go +++ b/cmd/erasure-utils.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/xl-zones.go b/cmd/erasure-zones.go similarity index 69% rename from cmd/xl-zones.go rename to cmd/erasure-zones.go index 12d6b7ef1..8f0efa13f 100644 --- a/cmd/xl-zones.go +++ b/cmd/erasure-zones.go @@ -34,25 +34,25 @@ import ( "github.com/minio/minio/pkg/sync/errgroup" ) -type xlZones struct { +type erasureZones struct { GatewayUnsupported - zones []*xlSets + zones []*erasureSets } -func (z *xlZones) SingleZone() bool { +func (z *erasureZones) SingleZone() bool { return len(z.zones) == 1 } // Initialize new zone of erasure sets. -func newXLZones(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, error) { +func newErasureZones(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, error) { var ( deploymentID string err error - formats = make([]*formatXLV3, len(endpointZones)) + formats = make([]*formatErasureV3, len(endpointZones)) storageDisks = make([][]StorageAPI, len(endpointZones)) - z = &xlZones{zones: make([]*xlSets, len(endpointZones))} + z = &erasureZones{zones: make([]*erasureSets, len(endpointZones))} ) var localDrives []string @@ -64,7 +64,7 @@ func newXLZones(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, localDrives = append(localDrives, endpoint.Path) } } - storageDisks[i], formats[i], err = waitForFormatXL(local, ep.Endpoints, i+1, + storageDisks[i], formats[i], err = waitForFormatErasure(local, ep.Endpoints, i+1, ep.SetCount, ep.DrivesPerSet, deploymentID) if err != nil { return nil, err @@ -72,7 +72,7 @@ func newXLZones(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, if deploymentID == "" { deploymentID = formats[i].ID } - z.zones[i], err = newXLSets(ctx, ep.Endpoints, storageDisks[i], formats[i]) + z.zones[i], err = newErasureSets(ctx, ep.Endpoints, storageDisks[i], formats[i]) if err != nil { return nil, err } @@ -82,7 +82,7 @@ func newXLZones(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, return z, nil } -func (z *xlZones) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { +func (z *erasureZones) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { return z.zones[0].NewNSLock(ctx, bucket, objects...) 
} @@ -102,7 +102,7 @@ func (p zonesAvailableSpace) TotalAvailable() uint64 { return total } -func (z *xlZones) getAvailableZoneIdx(ctx context.Context) int { +func (z *erasureZones) getAvailableZoneIdx(ctx context.Context) int { zones := z.getZonesAvailableSpace(ctx) total := zones.TotalAvailable() if total == 0 { @@ -122,7 +122,7 @@ func (z *xlZones) getAvailableZoneIdx(ctx context.Context) int { panic(fmt.Errorf("reached end of zones (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose)) } -func (z *xlZones) getZonesAvailableSpace(ctx context.Context) zonesAvailableSpace { +func (z *erasureZones) getZonesAvailableSpace(ctx context.Context) zonesAvailableSpace { var zones = make(zonesAvailableSpace, len(z.zones)) storageInfos := make([]StorageInfo, len(z.zones)) @@ -151,7 +151,7 @@ func (z *xlZones) getZonesAvailableSpace(ctx context.Context) zonesAvailableSpac return zones } -func (z *xlZones) Shutdown(ctx context.Context) error { +func (z *erasureZones) Shutdown(ctx context.Context) error { if z.SingleZone() { return z.zones[0].Shutdown(ctx) } @@ -175,7 +175,7 @@ func (z *xlZones) Shutdown(ctx context.Context) error { return nil } -func (z *xlZones) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { +func (z *erasureZones) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { if z.SingleZone() { return z.zones[0].StorageInfo(ctx, local) } @@ -219,9 +219,10 @@ func (z *xlZones) StorageInfo(ctx context.Context, local bool) (StorageInfo, []e return storageInfo, errs } -func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { +func (z *erasureZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { ctx, cancel := context.WithCancel(ctx) defer cancel() + var wg sync.WaitGroup var mu sync.Mutex var results []dataUsageCache @@ -231,9 +232,9 @@ func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, upd // Collect for each set in zones. for _, z := range z.zones { - for _, xlObj := range z.sets { + for _, erObj := range z.sets { // Add new buckets. - buckets, err := xlObj.ListBuckets(ctx) + buckets, err := erObj.ListBuckets(ctx) if err != nil { return err } @@ -246,7 +247,7 @@ func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, upd } wg.Add(1) results = append(results, dataUsageCache{}) - go func(i int, xl *xlObjects) { + go func(i int, erObj *erasureObjects) { updates := make(chan dataUsageCache, 1) defer close(updates) // Start update collector. @@ -259,7 +260,7 @@ func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, upd } }() // Start crawler. Blocks until done. - err := xl.crawlAndGetDataUsage(ctx, buckets, bf, updates) + err := erObj.crawlAndGetDataUsage(ctx, buckets, bf, updates) if err != nil { logger.LogIf(ctx, err) mu.Lock() @@ -271,7 +272,7 @@ func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, upd mu.Unlock() return } - }(len(results)-1, xlObj) + }(len(results)-1, erObj) } } updateCloser := make(chan chan struct{}) @@ -325,15 +326,16 @@ func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, upd // MakeBucketWithLocation - creates a new bucket across all zones simultaneously // even if one of the sets fail to create buckets, we proceed all the successful // operations. 
-func (z *xlZones) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { +func (z *erasureZones) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error { if z.SingleZone() { - if err := z.zones[0].MakeBucketWithLocation(ctx, bucket, location, lockEnabled); err != nil { + if err := z.zones[0].MakeBucketWithLocation(ctx, bucket, opts); err != nil { return err } // If it doesn't exist we get a new, so ignore errors meta := newBucketMetadata(bucket) - if lockEnabled { + if opts.LockEnabled { + meta.VersioningConfigXML = enabledBucketVersioningConfig meta.ObjectLockConfigXML = enabledBucketObjectLockConfig } if err := meta.Save(ctx, z); err != nil { @@ -349,7 +351,7 @@ func (z *xlZones) MakeBucketWithLocation(ctx context.Context, bucket, location s for index := range z.zones { index := index g.Go(func() error { - return z.zones[index].MakeBucketWithLocation(ctx, bucket, location, lockEnabled) + return z.zones[index].MakeBucketWithLocation(ctx, bucket, opts) }, index) } @@ -363,12 +365,15 @@ func (z *xlZones) MakeBucketWithLocation(ctx context.Context, bucket, location s // If it doesn't exist we get a new, so ignore errors meta := newBucketMetadata(bucket) - if lockEnabled { + if opts.LockEnabled { + meta.VersioningConfigXML = enabledBucketVersioningConfig meta.ObjectLockConfigXML = enabledBucketObjectLockConfig } + if err := meta.Save(ctx, z); err != nil { return toObjectErr(err, bucket) } + globalBucketMetadataSys.Set(bucket, meta) // Success. @@ -376,7 +381,7 @@ func (z *xlZones) MakeBucketWithLocation(ctx context.Context, bucket, location s } -func (z *xlZones) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { +func (z *erasureZones) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { var nsUnlocker = func() {} // Acquire lock @@ -412,7 +417,7 @@ func (z *xlZones) GetObjectNInfo(ctx context.Context, bucket, object string, rs return nil, ObjectNotFound{Bucket: bucket, Object: object} } -func (z *xlZones) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { +func (z *erasureZones) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { // Lock the object before reading. lk := z.NewNSLock(ctx, bucket, object) if err := lk.GetRLock(globalObjectTimeout); err != nil { @@ -435,7 +440,7 @@ func (z *xlZones) GetObject(ctx context.Context, bucket, object string, startOff return ObjectNotFound{Bucket: bucket, Object: object} } -func (z *xlZones) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) { +func (z *erasureZones) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) { // Lock the object before reading. lk := z.NewNSLock(ctx, bucket, object) if err := lk.GetRLock(globalObjectTimeout); err != nil { @@ -460,7 +465,7 @@ func (z *xlZones) GetObjectInfo(ctx context.Context, bucket, object string, opts } // PutObject - writes an object to least used erasure zone. 
-func (z *xlZones) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (ObjectInfo, error) { +func (z *erasureZones) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (ObjectInfo, error) { // Lock the object. lk := z.NewNSLock(ctx, bucket, object) if err := lk.GetLock(globalObjectTimeout); err != nil { @@ -487,56 +492,65 @@ func (z *xlZones) PutObject(ctx context.Context, bucket string, object string, d return z.zones[z.getAvailableZoneIdx(ctx)].PutObject(ctx, bucket, object, data, opts) } -func (z *xlZones) DeleteObject(ctx context.Context, bucket string, object string) error { +func (z *erasureZones) DeleteObject(ctx context.Context, bucket string, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { // Acquire a write lock before deleting the object. lk := z.NewNSLock(ctx, bucket, object) - if err := lk.GetLock(globalOperationTimeout); err != nil { - return err + if err = lk.GetLock(globalOperationTimeout); err != nil { + return ObjectInfo{}, err } defer lk.Unlock() if z.SingleZone() { - return z.zones[0].DeleteObject(ctx, bucket, object) + return z.zones[0].DeleteObject(ctx, bucket, object, opts) } for _, zone := range z.zones { - err := zone.DeleteObject(ctx, bucket, object) + objInfo, err = zone.DeleteObject(ctx, bucket, object, opts) + if err == nil { + return objInfo, nil + } if err != nil && !isErrObjectNotFound(err) { - return err + break } } - return nil + return objInfo, err } -func (z *xlZones) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (z *erasureZones) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { derrs := make([]error, len(objects)) + dobjects := make([]DeletedObject, len(objects)) + objNames := make([]string, len(objects)) for i := range derrs { - derrs[i] = checkDelObjArgs(ctx, bucket, objects[i]) + derrs[i] = checkDelObjArgs(ctx, bucket, objects[i].ObjectName) + objNames[i] = objects[i].ObjectName } // Acquire a bulk write lock across 'objects' - multiDeleteLock := z.NewNSLock(ctx, bucket, objects...) + multiDeleteLock := z.NewNSLock(ctx, bucket, objNames...) if err := multiDeleteLock.GetLock(globalOperationTimeout); err != nil { - return nil, err + for i := range derrs { + derrs[i] = err + } + return nil, derrs } defer multiDeleteLock.Unlock() for _, zone := range z.zones { - errs, err := zone.DeleteObjects(ctx, bucket, objects) - if err != nil { - return nil, err - } + deletedObjects, errs := zone.DeleteObjects(ctx, bucket, objects, opts) for i, derr := range errs { if derrs[i] == nil { if derr != nil && !isErrObjectNotFound(derr) { derrs[i] = derr } } + if derrs[i] == nil { + dobjects[i] = deletedObjects[i] + } } } - return derrs, nil + return dobjects, derrs } -func (z *xlZones) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) { +func (z *erasureZones) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) { // Check if this request is only metadata update. 
cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject)) if !cpSrcDstSame { @@ -574,10 +588,7 @@ func (z *xlZones) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucke return z.zones[z.getAvailableZoneIdx(ctx)].PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts) } -func (z *xlZones) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) { - if z.SingleZone() { - return z.zones[0].ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, maxKeys, fetchOwner, startAfter) - } +func (z *erasureZones) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) { marker := continuationToken if marker == "" { marker = startAfter @@ -598,7 +609,7 @@ func (z *xlZones) ListObjectsV2(ctx context.Context, bucket, prefix, continuatio return listObjectsV2Info, err } -func (z *xlZones) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { +func (z *erasureZones) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { var zonesEntryChs [][]FileInfoCh @@ -710,7 +721,7 @@ func (z *xlZones) listObjectsNonSlash(ctx context.Context, bucket, prefix, marke return result, nil } -func (z *xlZones) listObjectsSplunk(ctx context.Context, bucket, prefix, marker string, maxKeys int) (loi ListObjectsInfo, err error) { +func (z *erasureZones) listObjectsSplunk(ctx context.Context, bucket, prefix, marker string, maxKeys int) (loi ListObjectsInfo, err error) { if strings.Contains(prefix, guidSplunk) { logger.LogIf(ctx, NotImplemented{}) return loi, NotImplemented{} @@ -743,7 +754,7 @@ func (z *xlZones) listObjectsSplunk(ctx context.Context, bucket, prefix, marker } for _, entry := range entries.Files { - objInfo := entry.ToObjectInfo() + objInfo := entry.ToObjectInfo(bucket, entry.Name) splits := strings.Split(objInfo.Name, guidSplunk) if len(splits) == 0 { loi.Objects = append(loi.Objects, objInfo) @@ -762,7 +773,7 @@ func (z *xlZones) listObjectsSplunk(ctx context.Context, bucket, prefix, marker return loi, nil } -func (z *xlZones) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { +func (z *erasureZones) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { loi := ListObjectsInfo{} if err := checkListObjsArgs(ctx, bucket, prefix, marker, z); err != nil { @@ -834,7 +845,7 @@ func (z *xlZones) listObjects(ctx context.Context, bucket, prefix, marker, delim } for _, entry := range entries.Files { - objInfo := entry.ToObjectInfo() + objInfo := entry.ToObjectInfo(entry.Volume, entry.Name) if HasSuffix(objInfo.Name, SlashSeparator) && !recursive { loi.Prefixes = append(loi.Prefixes, objInfo.Name) continue @@ -881,6 +892,8 @@ func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileI var lentry FileInfo var found bool var zoneIndex = -1 + // TODO: following loop can be merged with above + // loop, explore this possibility. 
 	for i, entriesValid := range zoneEntriesValid {
 		for j, valid := range entriesValid {
 			if !valid {
 				continue
 			}
@@ -928,6 +941,115 @@ func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileI
 	return lentry, lexicallySortedEntryCount, zoneIndex, isTruncated
 }
 
+// Calculate the least entry across zones and across multiple FileInfoVersions
+// channels; returns the least common entry and the total number of times
+// we found this entry. It additionally returns a boolean to indicate if the
+// caller needs to call this function again to list the next entry. If the
+// caller wishes to list N entries, it is the caller's responsibility to call
+// lexicallySortedEntryZoneVersions N times until this boolean is 'false'.
+func lexicallySortedEntryZoneVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneEntries [][]FileInfoVersions, zoneEntriesValid [][]bool) (FileInfoVersions, int, int, bool) {
+	for i, entryChs := range zoneEntryChs {
+		for j := range entryChs {
+			zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
+		}
+	}
+
+	var isTruncated = false
+	for _, entriesValid := range zoneEntriesValid {
+		for _, valid := range entriesValid {
+			if !valid {
+				continue
+			}
+			isTruncated = true
+			break
+		}
+		if isTruncated {
+			break
+		}
+	}
+
+	var lentry FileInfoVersions
+	var found bool
+	var zoneIndex = -1
+	for i, entriesValid := range zoneEntriesValid {
+		for j, valid := range entriesValid {
+			if !valid {
+				continue
+			}
+			if !found {
+				lentry = zoneEntries[i][j]
+				found = true
+				zoneIndex = i
+				continue
+			}
+			if zoneEntries[i][j].Name < lentry.Name {
+				lentry = zoneEntries[i][j]
+				zoneIndex = i
+			}
+		}
+	}
+
+	// We haven't been able to find any least entry,
+	// this would mean that we don't have a valid entry.
+	if !found {
+		return lentry, 0, zoneIndex, isTruncated
+	}
+
+	lexicallySortedEntryCount := 0
+	for i, entriesValid := range zoneEntriesValid {
+		for j, valid := range entriesValid {
+			if !valid {
+				continue
+			}
+
+			// Entries are duplicated across disks,
+			// we should simply skip such entries.
+			if lentry.Name == zoneEntries[i][j].Name && lentry.LatestModTime.Equal(zoneEntries[i][j].LatestModTime) {
+				lexicallySortedEntryCount++
+				continue
+			}
+
+			// Push all entries which are lexically higher
+			// and will be returned later in Pop()
+			zoneEntryChs[i][j].Push(zoneEntries[i][j])
+		}
+	}
+
+	return lentry, lexicallySortedEntryCount, zoneIndex, isTruncated
+}
+
+// mergeZonesEntriesVersionsCh - merges FileInfoVersions channels into entries up to maxKeys.
+func mergeZonesEntriesVersionsCh(zonesEntryChs [][]FileInfoVersionsCh, maxKeys int, ndisks int) (entries FilesInfoVersions) {
+	var i = 0
+	var zonesEntriesInfos [][]FileInfoVersions
+	var zonesEntriesValid [][]bool
+	for _, entryChs := range zonesEntryChs {
+		zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfoVersions, len(entryChs)))
+		zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs)))
+	}
+	for {
+		fi, quorumCount, _, ok := lexicallySortedEntryZoneVersions(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid)
+		if !ok {
+			// We have reached EOF across all entryChs, break the loop.
+			break
+		}
+
+		if quorumCount < ndisks-1 {
+			// Skip entries that are not found on at least ndisks-1 disks.
+			continue
+		}
+
+		entries.FilesVersions = append(entries.FilesVersions, fi)
+		i++
+		if i == maxKeys {
+			entries.IsTruncated = isTruncatedZonesVersions(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid)
+			break
+		}
+	}
+	return entries
+}
+
 // mergeZonesEntriesCh - merges FileInfo channel to entries upto maxKeys.
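+// See lexicallySortedEntryZoneVersions and mergeZonesEntriesVersionsCh above
+// for the FileInfoVersions variants of this merge.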
 func mergeZonesEntriesCh(zonesEntryChs [][]FileInfoCh, maxKeys int, ndisks int) (entries FilesInfo) {
 	var i = 0
@@ -966,6 +1088,35 @@ func isTruncatedZones(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileInfo, zon
 		}
 	}
 
+	var isTruncated = false
+	for _, entriesValid := range zoneEntriesValid {
+		for _, valid := range entriesValid {
+			if valid {
+				isTruncated = true
+				break
+			}
+		}
+		if isTruncated {
+			break
+		}
+	}
+	for i, entryChs := range zoneEntryChs {
+		for j := range entryChs {
+			if zoneEntriesValid[i][j] {
+				zoneEntryChs[i][j].Push(zoneEntries[i][j])
+			}
+		}
+	}
+	return isTruncated
+}
+
+func isTruncatedZonesVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneEntries [][]FileInfoVersions, zoneEntriesValid [][]bool) bool {
+	for i, entryChs := range zoneEntryChs {
+		for j := range entryChs {
+			zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
+		}
+	}
+
+	var isTruncated = false
 	for _, entriesValid := range zoneEntriesValid {
 		for _, valid := range entriesValid {
@@ -989,15 +1140,116 @@ func isTruncatedZones(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileInfo, zon
 	return isTruncated
 }
 
-func (z *xlZones) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
-	if z.SingleZone() {
-		return z.zones[0].ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
+func (z *erasureZones) listObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) {
+	loi := ListObjectVersionsInfo{}
+
+	if err := checkListObjsArgs(ctx, bucket, prefix, marker, z); err != nil {
+		return loi, err
 	}
+
+	// Marker is set, validate the pre-condition.
+	if marker != "" {
+		// A marker that does not share the prefix is not implemented, send an empty response.
+		if !HasPrefix(marker, prefix) {
+			return loi, nil
+		}
+	}
+
+	if marker == "" && versionMarker != "" {
+		return loi, NotImplemented{}
+	}
+
+	// With max keys of zero we have reached EOF, return right here.
+	if maxKeys == 0 {
+		return loi, nil
+	}
+
+	// For delimiter and prefix as '/' we do not list anything at all
+	// since according to the S3 spec we stop at the 'delimiter'
+	// along with the prefix. On a flat namespace with 'prefix'
+	// as '/' we don't have any entries, since all the keys are
+	// of the form 'keyName/...'
+	if delimiter == SlashSeparator && prefix == SlashSeparator {
+		return loi, nil
+	}
+
+	// Overflowing count - reset to maxObjectList.
+	if maxKeys < 0 || maxKeys > maxObjectList {
+		maxKeys = maxObjectList
+	}
+
+	if delimiter != SlashSeparator && delimiter != "" {
+		return loi, NotImplemented{}
+	}
+
+	// Default is recursive, if delimiter is set then list non-recursively.
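+	// e.g. with '/' as the delimiter only a single directory level is listed.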
+ recursive := true + if delimiter == SlashSeparator { + recursive = false + } + + var zonesEntryChs [][]FileInfoVersionsCh + var zonesEndWalkCh []chan struct{} + + const ndisks = 3 + for _, zone := range z.zones { + entryChs, endWalkCh := zone.poolVersions.Release(listParams{bucket, recursive, marker, prefix}) + if entryChs == nil { + endWalkCh = make(chan struct{}) + entryChs = zone.startMergeWalksVersionsN(ctx, bucket, prefix, marker, recursive, endWalkCh, ndisks) + } + zonesEntryChs = append(zonesEntryChs, entryChs) + zonesEndWalkCh = append(zonesEndWalkCh, endWalkCh) + } + + entries := mergeZonesEntriesVersionsCh(zonesEntryChs, maxKeys, ndisks) + if len(entries.FilesVersions) == 0 { + return loi, nil + } + + loi.IsTruncated = entries.IsTruncated + if loi.IsTruncated { + loi.NextMarker = entries.FilesVersions[len(entries.FilesVersions)-1].Name + } + + for _, entry := range entries.FilesVersions { + for _, version := range entry.Versions { + objInfo := version.ToObjectInfo(bucket, entry.Name) + if HasSuffix(objInfo.Name, SlashSeparator) && !recursive { + loi.Prefixes = append(loi.Prefixes, objInfo.Name) + continue + } + loi.Objects = append(loi.Objects, objInfo) + } + for _, deleted := range entry.Deleted { + loi.DeleteObjects = append(loi.DeleteObjects, DeletedObjectInfo{ + Bucket: bucket, + Name: entry.Name, + VersionID: deleted.VersionID, + ModTime: deleted.ModTime, + IsLatest: deleted.IsLatest, + }) + } + + } + if loi.IsTruncated { + for i, zone := range z.zones { + zone.poolVersions.Set(listParams{bucket, recursive, loi.NextMarker, prefix}, zonesEntryChs[i], + zonesEndWalkCh[i]) + } + } + return loi, nil +} + +func (z *erasureZones) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) { + return z.listObjectVersions(ctx, bucket, prefix, marker, versionMarker, delimiter, maxKeys) +} + +func (z *erasureZones) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { return z.listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) } -func (z *xlZones) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { +func (z *erasureZones) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { if err := checkListMultipartArgs(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, z); err != nil { return ListMultipartsInfo{}, err } @@ -1023,7 +1275,7 @@ func (z *xlZones) ListMultipartUploads(ctx context.Context, bucket, prefix, keyM } // Initiate a new multipart upload on a hashedSet based on object name. -func (z *xlZones) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) { +func (z *erasureZones) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) { if err := checkNewMultipartArgs(ctx, bucket, object, z); err != nil { return "", err } @@ -1035,7 +1287,7 @@ func (z *xlZones) NewMultipartUpload(ctx context.Context, bucket, object string, } // Copies a part of an object from source hashedSet to destination hashedSet. 
-func (z *xlZones) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (PartInfo, error) { +func (z *erasureZones) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (PartInfo, error) { if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, z); err != nil { return PartInfo{}, err } @@ -1045,7 +1297,7 @@ func (z *xlZones) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dest } // PutObjectPart - writes part of an object to hashedSet based on the object name. -func (z *xlZones) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (PartInfo, error) { +func (z *erasureZones) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (PartInfo, error) { if err := checkPutObjectPartArgs(ctx, bucket, object, z); err != nil { return PartInfo{}, err } @@ -1081,7 +1333,7 @@ func (z *xlZones) PutObjectPart(ctx context.Context, bucket, object, uploadID st } } -func (z *xlZones) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { +func (z *erasureZones) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { if err := checkListPartsArgs(ctx, bucket, object, z); err != nil { return MultipartInfo{}, err } @@ -1117,7 +1369,7 @@ func (z *xlZones) GetMultipartInfo(ctx context.Context, bucket, object, uploadID } // ListObjectParts - lists all uploaded parts to an object in hashedSet. -func (z *xlZones) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (ListPartsInfo, error) { +func (z *erasureZones) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (ListPartsInfo, error) { if err := checkListPartsArgs(ctx, bucket, object, z); err != nil { return ListPartsInfo{}, err } @@ -1150,7 +1402,7 @@ func (z *xlZones) ListObjectParts(ctx context.Context, bucket, object, uploadID } // Aborts an in-progress multipart operation on hashedSet based on the object name. -func (z *xlZones) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { +func (z *erasureZones) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { if err := checkAbortMultipartArgs(ctx, bucket, object, z); err != nil { return err } @@ -1185,7 +1437,7 @@ func (z *xlZones) AbortMultipartUpload(ctx context.Context, bucket, object, uplo } // CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name. 
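+// Any existing object under the same name is purged from all zones before
+// the completed upload is committed (see the DeleteObject loop below).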
-func (z *xlZones) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) { +func (z *erasureZones) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) { if err = checkCompleteMultipartArgs(ctx, bucket, object, z); err != nil { return objInfo, err } @@ -1212,7 +1464,7 @@ func (z *xlZones) CompleteMultipartUpload(ctx context.Context, bucket, object, u // Purge any existing object. for _, zone := range z.zones { - zone.DeleteObject(ctx, bucket, object) + zone.DeleteObject(ctx, bucket, object, opts) } for _, zone := range z.zones { @@ -1232,7 +1484,7 @@ func (z *xlZones) CompleteMultipartUpload(ctx context.Context, bucket, object, u } // GetBucketInfo - returns bucket info from one of the erasure coded zones. -func (z *xlZones) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) { +func (z *erasureZones) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) { if z.SingleZone() { bucketInfo, err = z.zones[0].GetBucketInfo(ctx, bucket) if err != nil { @@ -1264,33 +1516,33 @@ func (z *xlZones) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo } // IsNotificationSupported returns whether bucket notification is applicable for this layer. -func (z *xlZones) IsNotificationSupported() bool { +func (z *erasureZones) IsNotificationSupported() bool { return true } // IsListenBucketSupported returns whether listen bucket notification is applicable for this layer. -func (z *xlZones) IsListenBucketSupported() bool { +func (z *erasureZones) IsListenBucketSupported() bool { return true } // IsEncryptionSupported returns whether server side encryption is implemented for this layer. -func (z *xlZones) IsEncryptionSupported() bool { +func (z *erasureZones) IsEncryptionSupported() bool { return true } // IsCompressionSupported returns whether compression is applicable for this layer. -func (z *xlZones) IsCompressionSupported() bool { +func (z *erasureZones) IsCompressionSupported() bool { return true } -func (z *xlZones) IsTaggingSupported() bool { +func (z *erasureZones) IsTaggingSupported() bool { return true } // DeleteBucket - deletes a bucket on all zones simultaneously, // even if one of the zones fail to delete buckets, we proceed to // undo a successful operation. -func (z *xlZones) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { +func (z *erasureZones) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { if z.SingleZone() { return z.zones[0].DeleteBucket(ctx, bucket, forceDelete) } @@ -1311,7 +1563,7 @@ func (z *xlZones) DeleteBucket(ctx context.Context, bucket string, forceDelete b for _, err := range errs { if err != nil { if _, ok := err.(InsufficientWriteQuorum); ok { - undoDeleteBucketZones(bucket, z.zones, errs) + undoDeleteBucketZones(ctx, bucket, z.zones, errs) } return err @@ -1323,7 +1575,7 @@ func (z *xlZones) DeleteBucket(ctx context.Context, bucket string, forceDelete b } // This function is used to undo a successful DeleteBucket operation. -func undoDeleteBucketZones(bucket string, zones []*xlSets, errs []error) { +func undoDeleteBucketZones(ctx context.Context, bucket string, zones []*erasureSets, errs []error) { g := errgroup.WithNErrs(len(zones)) // Undo previous delete bucket on all underlying zones. 
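DeleteBucket above fans the delete out to every zone and, if any zone comes back with InsufficientWriteQuorum, re-creates the bucket on the zones where the delete had succeeded. A minimal self-contained sketch of that delete-then-undo pattern, using made-up zone names and a plain sentinel error rather than MinIO's types:

package main

import (
	"errors"
	"fmt"
)

var errQuorum = errors.New("insufficient write quorum")

// deleteAll deletes a bucket on every replica and rolls back the
// successful deletes if any replica loses write quorum.
func deleteAll(replicas []string) error {
	errs := make([]error, len(replicas))
	for i, r := range replicas {
		if i == len(replicas)-1 {
			errs[i] = errQuorum // pretend the last replica fails
			continue
		}
		fmt.Println("deleted bucket on", r)
	}
	for _, err := range errs {
		if errors.Is(err, errQuorum) {
			// Undo only the replicas where the delete succeeded.
			for j, uerr := range errs {
				if uerr == nil {
					fmt.Println("recreated bucket on", replicas[j])
				}
			}
			return err
		}
	}
	return nil
}

func main() {
	fmt.Println(deleteAll([]string{"zone-0", "zone-1", "zone-2"}))
}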
@@ -1331,7 +1583,7 @@ func undoDeleteBucketZones(bucket string, zones []*xlSets, errs []error) { index := index g.Go(func() error { if errs[index] == nil { - return zones[index].MakeBucketWithLocation(GlobalContext, bucket, "", false) + return zones[index].MakeBucketWithLocation(ctx, bucket, BucketOptions{}) } return nil }, index) @@ -1343,7 +1595,7 @@ func undoDeleteBucketZones(bucket string, zones []*xlSets, errs []error) { // List all buckets from one of the zones, we are not doing merge // sort here just for simplification. As per design it is assumed // that all buckets are present on all zones. -func (z *xlZones) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) { +func (z *erasureZones) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) { if z.SingleZone() { buckets, err = z.zones[0].ListBuckets(ctx) } else { @@ -1368,7 +1620,7 @@ func (z *xlZones) ListBuckets(ctx context.Context) (buckets []BucketInfo, err er return buckets, nil } -func (z *xlZones) ReloadFormat(ctx context.Context, dryRun bool) error { +func (z *erasureZones) ReloadFormat(ctx context.Context, dryRun bool) error { // Acquire lock on format.json formatLock := z.NewNSLock(ctx, minioMetaBucket, formatConfigFile) if err := formatLock.GetRLock(globalHealingTimeout); err != nil { @@ -1384,7 +1636,7 @@ func (z *xlZones) ReloadFormat(ctx context.Context, dryRun bool) error { return nil } -func (z *xlZones) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { +func (z *erasureZones) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { // Acquire lock on format.json formatLock := z.NewNSLock(ctx, minioMetaBucket, formatConfigFile) if err := formatLock.GetLock(globalHealingTimeout); err != nil { @@ -1421,7 +1673,7 @@ func (z *xlZones) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResul return r, nil } -func (z *xlZones) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, error) { +func (z *erasureZones) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, error) { var r = madmin.HealResultItem{ Type: madmin.HealItemBucket, Bucket: bucket, @@ -1449,18 +1701,16 @@ func (z *xlZones) HealBucket(ctx context.Context, bucket string, dryRun, remove // to allocate a receive channel for ObjectInfo, upon any unhandled // error walker returns error. Optionally if context.Done() is received // then Walk() stops the walker. -func (z *xlZones) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error { +func (z *erasureZones) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error { if err := checkListObjsArgs(ctx, bucket, prefix, "", z); err != nil { // Upon error close the channel. 
 		close(results)
 		return err
 	}
 
-	var zonesEntryChs [][]FileInfoCh
-
+	var zonesEntryChs [][]FileInfoVersionsCh
 	for _, zone := range z.zones {
-		zonesEntryChs = append(zonesEntryChs,
-			zone.startMergeWalks(ctx, bucket, prefix, "", true, ctx.Done()))
+		zonesEntryChs = append(zonesEntryChs, zone.startMergeWalksVersions(ctx, bucket, prefix, "", true, ctx.Done()))
 	}
 
 	var zoneDrivesPerSet []int
@@ -1468,10 +1718,10 @@ func (z *xlZones) Walk(ctx context.Context, bucket, prefix string, results chan<
 		zoneDrivesPerSet = append(zoneDrivesPerSet, zone.drivesPerSet)
 	}
 
-	var zonesEntriesInfos [][]FileInfo
+	var zonesEntriesInfos [][]FileInfoVersions
 	var zonesEntriesValid [][]bool
 	for _, entryChs := range zonesEntryChs {
-		zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfo, len(entryChs)))
+		zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfoVersions, len(entryChs)))
 		zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs)))
 	}
 
@@ -1479,14 +1729,20 @@ func (z *xlZones) Walk(ctx context.Context, bucket, prefix string, results chan<
 		defer close(results)
 
 		for {
-			entry, quorumCount, zoneIndex, ok := lexicallySortedEntryZone(zonesEntryChs,
-				zonesEntriesInfos, zonesEntriesValid)
+			entry, quorumCount, zoneIndex, ok := lexicallySortedEntryZoneVersions(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid)
 			if !ok {
+				// We have reached EOF across all entryChs, stop the walk.
				return
 			}
 
 			if quorumCount >= zoneDrivesPerSet[zoneIndex]/2 {
-				results <- entry.ToObjectInfo() // Read quorum exists proceed
+				// Read quorum exists, proceed
+				for _, version := range entry.Versions {
+					results <- version.ToObjectInfo(bucket, version.Name)
+				}
+				for _, deleted := range entry.Deleted {
+					results <- deleted.ToObjectInfo(bucket, deleted.Name)
+				}
 			}
 
 			// skip entries which do not have quorum
@@ -1496,17 +1752,18 @@ func (z *xlZones) Walk(ctx context.Context, bucket, prefix string, results chan<
 	return nil
 }
 
-type healObjectFn func(string, string) error
+// HealObjectFn is the closure invoked to heal an object.
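+// Its arguments are bucket, object and versionID, in that order.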
+type HealObjectFn func(string, string, string) error -func (z *xlZones) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject healObjectFn) error { - var zonesEntryChs [][]FileInfoCh +func (z *erasureZones) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject HealObjectFn) error { + var zonesEntryChs [][]FileInfoVersionsCh endWalkCh := make(chan struct{}) defer close(endWalkCh) for _, zone := range z.zones { zonesEntryChs = append(zonesEntryChs, - zone.startMergeWalks(ctx, bucket, prefix, "", true, endWalkCh)) + zone.startMergeWalksVersions(ctx, bucket, prefix, "", true, endWalkCh)) } var zoneDrivesPerSet []int @@ -1514,15 +1771,15 @@ func (z *xlZones) HealObjects(ctx context.Context, bucket, prefix string, opts m zoneDrivesPerSet = append(zoneDrivesPerSet, zone.drivesPerSet) } - var zonesEntriesInfos [][]FileInfo + var zonesEntriesInfos [][]FileInfoVersions var zonesEntriesValid [][]bool for _, entryChs := range zonesEntryChs { - zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfo, len(entryChs))) + zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfoVersions, len(entryChs))) zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs))) } for { - entry, quorumCount, zoneIndex, ok := lexicallySortedEntryZone(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid) + entry, quorumCount, zoneIndex, ok := lexicallySortedEntryZoneVersions(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid) if !ok { break } @@ -1535,17 +1792,19 @@ func (z *xlZones) HealObjects(ctx context.Context, bucket, prefix string, opts m // Wait and proceed if there are active requests waitForLowHTTPReq(int32(zoneDrivesPerSet[zoneIndex])) - if err := healObject(bucket, entry.Name); err != nil { - return toObjectErr(err, bucket, entry.Name) + for _, version := range entry.Versions { + if err := healObject(bucket, version.Name, version.VersionID); err != nil { + return toObjectErr(err, bucket, version.Name) + } } } return nil } -func (z *xlZones) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) { +func (z *erasureZones) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) { // Lock the object before healing. Use read lock since healing - // will only regenerate parts & xl.json of outdated disks. + // will only regenerate parts & xl.meta of outdated disks. 
lk := z.NewNSLock(ctx, bucket, object) if err := lk.GetRLock(globalHealingTimeout); err != nil { return madmin.HealResultItem{}, err @@ -1553,10 +1812,10 @@ func (z *xlZones) HealObject(ctx context.Context, bucket, object string, opts ma defer lk.RUnlock() if z.SingleZone() { - return z.zones[0].HealObject(ctx, bucket, object, opts) + return z.zones[0].HealObject(ctx, bucket, object, versionID, opts) } for _, zone := range z.zones { - result, err := zone.HealObject(ctx, bucket, object, opts) + result, err := zone.HealObject(ctx, bucket, object, versionID, opts) if err != nil { if isErrObjectNotFound(err) { continue @@ -1571,7 +1830,7 @@ func (z *xlZones) HealObject(ctx context.Context, bucket, object string, opts ma } } -func (z *xlZones) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { +func (z *erasureZones) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { var healBuckets []BucketInfo for _, zone := range z.zones { bucketsInfo, err := zone.ListBucketsHeal(ctx) @@ -1592,15 +1851,15 @@ func (z *xlZones) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { } // GetMetrics - no op -func (z *xlZones) GetMetrics(ctx context.Context) (*Metrics, error) { +func (z *erasureZones) GetMetrics(ctx context.Context) (*Metrics, error) { logger.LogIf(ctx, NotImplemented{}) return &Metrics{}, NotImplemented{} } -func (z *xlZones) getZoneAndSet(id string) (int, int, error) { +func (z *erasureZones) getZoneAndSet(id string) (int, int, error) { for zoneIdx := range z.zones { format := z.zones[zoneIdx].format - for setIdx, set := range format.XL.Sets { + for setIdx, set := range format.Erasure.Sets { for _, diskID := range set { if diskID == id { return zoneIdx, setIdx, nil @@ -1611,8 +1870,8 @@ func (z *xlZones) getZoneAndSet(id string) (int, int, error) { return 0, 0, errDiskNotFound } -// IsReady - Returns true all the erasure sets are writable. -func (z *xlZones) IsReady(ctx context.Context) bool { +// IsReady - Returns true, when all the erasure sets are writable. 
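+// A set counts as writable when enough of its drives are online relative to
+// the configured (or default) parity for the standard storage class.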
+func (z *erasureZones) IsReady(ctx context.Context) bool { erasureSetUpCount := make([][]int, len(z.zones)) for i := range z.zones { erasureSetUpCount[i] = make([]int, len(z.zones[i].sets)) @@ -1632,7 +1891,7 @@ func (z *xlZones) IsReady(ctx context.Context) bool { for zoneIdx := range erasureSetUpCount { parityDrives := globalStorageClass.GetParityForSC(storageclass.STANDARD) - diskCount := len(z.zones[zoneIdx].format.XL.Sets[0]) + diskCount := len(z.zones[zoneIdx].format.Erasure.Sets[0]) if parityDrives == 0 { parityDrives = getDefaultParityBlocks(diskCount) } @@ -1651,12 +1910,12 @@ func (z *xlZones) IsReady(ctx context.Context) bool { } // PutObjectTags - replace or add tags to an existing object -func (z *xlZones) PutObjectTags(ctx context.Context, bucket, object string, tags string) error { +func (z *erasureZones) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error { if z.SingleZone() { - return z.zones[0].PutObjectTags(ctx, bucket, object, tags) + return z.zones[0].PutObjectTags(ctx, bucket, object, tags, opts) } for _, zone := range z.zones { - err := zone.PutObjectTags(ctx, bucket, object, tags) + err := zone.PutObjectTags(ctx, bucket, object, tags, opts) if err != nil { if isErrBucketNotFound(err) { continue @@ -1671,12 +1930,12 @@ func (z *xlZones) PutObjectTags(ctx context.Context, bucket, object string, tags } // DeleteObjectTags - delete object tags from an existing object -func (z *xlZones) DeleteObjectTags(ctx context.Context, bucket, object string) error { +func (z *erasureZones) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error { if z.SingleZone() { - return z.zones[0].DeleteObjectTags(ctx, bucket, object) + return z.zones[0].DeleteObjectTags(ctx, bucket, object, opts) } for _, zone := range z.zones { - err := zone.DeleteObjectTags(ctx, bucket, object) + err := zone.DeleteObjectTags(ctx, bucket, object, opts) if err != nil { if isErrBucketNotFound(err) { continue @@ -1691,12 +1950,12 @@ func (z *xlZones) DeleteObjectTags(ctx context.Context, bucket, object string) e } // GetObjectTags - get object tags from an existing object -func (z *xlZones) GetObjectTags(ctx context.Context, bucket, object string) (*tags.Tags, error) { +func (z *erasureZones) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) { if z.SingleZone() { - return z.zones[0].GetObjectTags(ctx, bucket, object) + return z.zones[0].GetObjectTags(ctx, bucket, object, opts) } for _, zone := range z.zones { - tags, err := zone.GetObjectTags(ctx, bucket, object) + tags, err := zone.GetObjectTags(ctx, bucket, object, opts) if err != nil { if isErrBucketNotFound(err) { continue diff --git a/cmd/erasure.go b/cmd/erasure.go index 4a6f31a6a..22e2484f0 100644 --- a/cmd/erasure.go +++ b/cmd/erasure.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,136 +18,373 @@ package cmd import ( "context" + "fmt" + "sort" "sync" + "time" - "github.com/klauspost/reedsolomon" "github.com/minio/minio/cmd/logger" + "github.com/minio/minio/pkg/bpool" + "github.com/minio/minio/pkg/dsync" + "github.com/minio/minio/pkg/madmin" + "github.com/minio/minio/pkg/sync/errgroup" ) -// Erasure - erasure encoding details. 
-type Erasure struct { - encoder func() reedsolomon.Encoder - dataBlocks, parityBlocks int - blockSize int64 +// OfflineDisk represents an unavailable disk. +var OfflineDisk StorageAPI // zero value is nil + +// partialUpload is a successful upload of an object +// but not written in all disks (having quorum) +type partialUpload struct { + bucket string + object string + failedSet int } -// NewErasure creates a new ErasureStorage. -func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (e Erasure, err error) { - e = Erasure{ - dataBlocks: dataBlocks, - parityBlocks: parityBlocks, - blockSize: blockSize, - } +// erasureObjects - Implements ER object layer. +type erasureObjects struct { + GatewayUnsupported - // Check the parameters for sanity now. - if dataBlocks <= 0 || parityBlocks <= 0 { - return e, reedsolomon.ErrInvShardNum - } + // getDisks returns list of storageAPIs. + getDisks func() []StorageAPI - if dataBlocks+parityBlocks > 256 { - return e, reedsolomon.ErrMaxShardNum - } + // getLockers returns list of remote and local lockers. + getLockers func() []dsync.NetLocker - // Encoder when needed. - var enc reedsolomon.Encoder - var once sync.Once - e.encoder = func() reedsolomon.Encoder { - once.Do(func() { - e, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(int(e.ShardSize()))) - if err != nil { - // Error conditions should be checked above. - panic(err) - } - enc = e - }) - return enc - } - return + // getEndpoints returns list of endpoint strings belonging this set. + // some may be local and some remote. + getEndpoints func() []string + + // Locker mutex map. + nsMutex *nsLockMap + + // Byte pools used for temporary i/o buffers. + bp *bpool.BytePoolCap + + mrfUploadCh chan partialUpload } -// EncodeData encodes the given data and returns the erasure-coded data. -// It returns an error if the erasure coding failed. -func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error) { - if len(data) == 0 { - return make([][]byte, e.dataBlocks+e.parityBlocks), nil - } - encoded, err := e.encoder().Split(data) - if err != nil { - logger.LogIf(ctx, err) - return nil, err - } - if err = e.encoder().Encode(encoded); err != nil { - logger.LogIf(ctx, err) - return nil, err - } - return encoded, nil +// NewNSLock - initialize a new namespace RWLocker instance. +func (er erasureObjects) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { + return er.nsMutex.NewNSLock(ctx, er.getLockers, bucket, objects...) } -// DecodeDataBlocks decodes the given erasure-coded data. -// It only decodes the data blocks but does not verify them. -// It returns an error if the decoding failed. -func (e *Erasure) DecodeDataBlocks(data [][]byte) error { - var isZero = 0 - for _, b := range data[:] { - if len(b) == 0 { - isZero++ - break - } - } - if isZero == 0 || isZero == len(data) { - // If all are zero, payload is 0 bytes. - return nil - } - return e.encoder().ReconstructData(data) -} - -// DecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it. -// It returns an error if the decoding failed. 
-func (e *Erasure) DecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error { - needsReconstruction := false - for _, b := range data { - if b == nil { - needsReconstruction = true - break - } - } - if !needsReconstruction { - return nil - } - if err := e.encoder().Reconstruct(data); err != nil { - logger.LogIf(ctx, err) - return err - } +// Shutdown function for object storage interface. +func (er erasureObjects) Shutdown(ctx context.Context) error { + // Add any object layer shutdown activities here. + closeStorageDisks(er.getDisks()) return nil } -// ShardSize - returns actual shared size from erasure blockSize. -func (e *Erasure) ShardSize() int64 { - return ceilFrac(e.blockSize, int64(e.dataBlocks)) +// byDiskTotal is a collection satisfying sort.Interface. +type byDiskTotal []DiskInfo + +func (d byDiskTotal) Len() int { return len(d) } +func (d byDiskTotal) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d byDiskTotal) Less(i, j int) bool { + return d[i].Total < d[j].Total } -// ShardFileSize - returns final erasure size from original size. -func (e *Erasure) ShardFileSize(totalLength int64) int64 { - if totalLength == 0 { - return 0 +// getDisksInfo - fetch disks info across all other storage API. +func getDisksInfo(disks []StorageAPI, local bool) (disksInfo []DiskInfo, errs []error, onlineDisks, offlineDisks madmin.BackendDisks) { + disksInfo = make([]DiskInfo, len(disks)) + onlineDisks = make(madmin.BackendDisks) + offlineDisks = make(madmin.BackendDisks) + + for _, disk := range disks { + if disk == OfflineDisk { + continue + } + peerAddr := disk.Hostname() + if _, ok := offlineDisks[peerAddr]; !ok { + offlineDisks[peerAddr] = 0 + } + if _, ok := onlineDisks[peerAddr]; !ok { + onlineDisks[peerAddr] = 0 + } } - if totalLength == -1 { - return -1 + + g := errgroup.WithNErrs(len(disks)) + for index := range disks { + index := index + g.Go(func() error { + if disks[index] == OfflineDisk { + // Storage disk is empty, perhaps ignored disk or not available. + return errDiskNotFound + } + info, err := disks[index].DiskInfo() + if err != nil { + if !IsErr(err, baseErrs...) { + reqInfo := (&logger.ReqInfo{}).AppendTags("disk", disks[index].String()) + ctx := logger.SetReqInfo(GlobalContext, reqInfo) + logger.LogIf(ctx, err) + } + return err + } + disksInfo[index] = info + return nil + }, index) } - numShards := totalLength / e.blockSize - lastBlockSize := totalLength % int64(e.blockSize) - lastShardSize := ceilFrac(lastBlockSize, int64(e.dataBlocks)) - return numShards*e.ShardSize() + lastShardSize + + errs = g.Wait() + // Wait for the routines. + for i, diskInfoErr := range errs { + if disks[i] == OfflineDisk { + continue + } + if diskInfoErr != nil { + offlineDisks[disks[i].Hostname()]++ + continue + } + onlineDisks[disks[i].Hostname()]++ + } + + // Iterate over the passed endpoints arguments and check + // if there are still disks missing from the offline/online lists + // and update them accordingly. + missingOfflineDisks := make(map[string]int) + for _, zone := range globalEndpoints { + for _, endpoint := range zone.Endpoints { + // if local is set and endpoint is not local + // we are not interested in remote disks. + if local && !endpoint.IsLocal { + continue + } + + if _, ok := offlineDisks[endpoint.Host]; !ok { + missingOfflineDisks[endpoint.Host]++ + } + } + } + for missingDisk, n := range missingOfflineDisks { + onlineDisks[missingDisk] = 0 + offlineDisks[missingDisk] = n + } + + // Success. 
+ return disksInfo, errs, onlineDisks, offlineDisks } -// ShardFileTillOffset - returns the effectiv eoffset where erasure reading begins. -func (e *Erasure) ShardFileTillOffset(startOffset, length, totalLength int64) int64 { - shardSize := e.ShardSize() - shardFileSize := e.ShardFileSize(totalLength) - endShard := (startOffset + int64(length)) / e.blockSize - tillOffset := endShard*shardSize + shardSize - if tillOffset > shardFileSize { - tillOffset = shardFileSize +// Get an aggregated storage info across all disks. +func getStorageInfo(disks []StorageAPI, local bool) (StorageInfo, []error) { + disksInfo, errs, onlineDisks, offlineDisks := getDisksInfo(disks, local) + + // Sort so that the first element is the smallest. + sort.Sort(byDiskTotal(disksInfo)) + + // Combine all disks to get total usage + usedList := make([]uint64, len(disksInfo)) + totalList := make([]uint64, len(disksInfo)) + availableList := make([]uint64, len(disksInfo)) + mountPaths := make([]string, len(disksInfo)) + + for i, di := range disksInfo { + usedList[i] = di.Used + totalList[i] = di.Total + availableList[i] = di.Free + mountPaths[i] = di.MountPath } - return tillOffset + + storageInfo := StorageInfo{ + Used: usedList, + Total: totalList, + Available: availableList, + MountPaths: mountPaths, + } + + storageInfo.Backend.Type = BackendErasure + storageInfo.Backend.OnlineDisks = onlineDisks + storageInfo.Backend.OfflineDisks = offlineDisks + + return storageInfo, errs +} + +// StorageInfo - returns underlying storage statistics. +func (er erasureObjects) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { + disks := er.getDisks() + if local { + var localDisks []StorageAPI + for _, disk := range disks { + if disk != nil { + if disk.IsLocal() { + // Append this local disk since local flag is true + localDisks = append(localDisks, disk) + } + } + } + disks = localDisks + } + return getStorageInfo(disks, local) +} + +// GetMetrics - is not implemented and shouldn't be called. +func (er erasureObjects) GetMetrics(ctx context.Context) (*Metrics, error) { + logger.LogIf(ctx, NotImplemented{}) + return &Metrics{}, NotImplemented{} +} + +// CrawlAndGetDataUsage collects usage from all buckets. +// updates are sent as different parts of the underlying +// structure has been traversed. +func (er erasureObjects) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { + return NotImplemented{API: "CrawlAndGetDataUsage"} +} + +// CrawlAndGetDataUsage will start crawling buckets and send updated totals as they are traversed. +// Updates are sent on a regular basis and the caller *must* consume them. +func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []BucketInfo, bf *bloomFilter, updates chan<- dataUsageCache) error { + var disks []StorageAPI + + for _, d := range er.getLoadBalancedDisks() { + if d == nil || !d.IsOnline() { + continue + } + disks = append(disks, d) + } + if len(disks) == 0 || len(buckets) == 0 { + return nil + } + + // Load bucket totals + oldCache := dataUsageCache{} + err := oldCache.load(ctx, er, dataUsageCacheName) + if err != nil { + return err + } + + // New cache.. + cache := dataUsageCache{ + Info: dataUsageCacheInfo{ + Name: dataUsageRoot, + NextCycle: oldCache.Info.NextCycle, + }, + Cache: make(map[string]dataUsageEntry, len(oldCache.Cache)), + } + + // Put all buckets into channel. 
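+	// New buckets (absent from the old cache) are queued first so they are
+	// crawled ahead of already-known buckets.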
+ bucketCh := make(chan BucketInfo, len(buckets)) + // Add new buckets first + for _, b := range buckets { + if oldCache.find(b.Name) == nil { + bucketCh <- b + } + } + // Add existing buckets. + for _, b := range buckets { + e := oldCache.find(b.Name) + if e != nil { + bucketCh <- b + cache.replace(b.Name, dataUsageRoot, *e) + } + } + + close(bucketCh) + bucketResults := make(chan dataUsageEntryInfo, len(disks)) + + // Start async collector/saver. + // This goroutine owns the cache. + var saverWg sync.WaitGroup + saverWg.Add(1) + go func() { + const updateTime = 30 * time.Second + t := time.NewTicker(updateTime) + defer t.Stop() + defer saverWg.Done() + var lastSave time.Time + + saveLoop: + for { + select { + case <-ctx.Done(): + // Return without saving. + return + case <-t.C: + if cache.Info.LastUpdate.Equal(lastSave) { + continue + } + logger.LogIf(ctx, cache.save(ctx, er, dataUsageCacheName)) + updates <- cache.clone() + lastSave = cache.Info.LastUpdate + case v, ok := <-bucketResults: + if !ok { + break saveLoop + } + cache.replace(v.Name, v.Parent, v.Entry) + cache.Info.LastUpdate = time.Now() + } + } + // Save final state... + cache.Info.NextCycle++ + cache.Info.LastUpdate = time.Now() + logger.LogIf(ctx, cache.save(ctx, er, dataUsageCacheName)) + updates <- cache + }() + + // Start one crawler per disk + var wg sync.WaitGroup + wg.Add(len(disks)) + for i := range disks { + go func(i int) { + defer wg.Done() + disk := disks[i] + + for bucket := range bucketCh { + select { + case <-ctx.Done(): + return + default: + } + + // Load cache for bucket + cacheName := pathJoin(bucket.Name, dataUsageCacheName) + cache := dataUsageCache{} + logger.LogIf(ctx, cache.load(ctx, er, cacheName)) + if cache.Info.Name == "" { + cache.Info.Name = bucket.Name + } + if cache.Info.Name != bucket.Name { + logger.LogIf(ctx, fmt.Errorf("cache name mismatch: %s != %s", cache.Info.Name, bucket.Name)) + cache.Info = dataUsageCacheInfo{ + Name: bucket.Name, + LastUpdate: time.Time{}, + NextCycle: 0, + } + } + + // Calc usage + before := cache.Info.LastUpdate + cache, err = disk.CrawlAndGetDataUsage(ctx, cache) + if err != nil { + logger.LogIf(ctx, err) + if cache.Info.LastUpdate.After(before) { + logger.LogIf(ctx, cache.save(ctx, er, cacheName)) + } + continue + } + + var root dataUsageEntry + if r := cache.root(); r != nil { + root = cache.flatten(*r) + } + bucketResults <- dataUsageEntryInfo{ + Name: cache.Info.Name, + Parent: dataUsageRoot, + Entry: root, + } + // Save cache + logger.LogIf(ctx, cache.save(ctx, er, cacheName)) + } + }(i) + } + wg.Wait() + close(bucketResults) + saverWg.Wait() + + return nil +} + +// IsReady - shouldn't be called will panic. +func (er erasureObjects) IsReady(ctx context.Context) bool { + logger.CriticalIf(ctx, NotImplemented{}) + return true } diff --git a/cmd/erasure_test.go b/cmd/erasure_test.go index 4caa6f53c..71a88d574 100644 --- a/cmd/erasure_test.go +++ b/cmd/erasure_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
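crawlAndGetDataUsage above runs one crawler goroutine per online disk and funnels per-bucket results into a single collector goroutine that owns the cache, saving it on a ticker and once more at the end. A minimal runnable sketch of that fan-out/fan-in shape, where a map and fmt.Println stand in for the data-usage cache and its save method:

package main

import (
	"fmt"
	"sync"
	"time"
)

type result struct {
	bucket  string
	objects int
}

func main() {
	// Shared queue of work, one entry per bucket.
	buckets := make(chan string, 3)
	for _, b := range []string{"alpha", "beta", "gamma"} {
		buckets <- b
	}
	close(buckets)

	results := make(chan result)
	done := make(chan struct{})

	// Collector: the only goroutine that touches the aggregated state.
	totals := map[string]int{}
	go func() {
		defer close(done)
		t := time.NewTicker(50 * time.Millisecond)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				fmt.Println("periodic save:", totals) // stands in for cache.save()
			case r, ok := <-results:
				if !ok {
					fmt.Println("final save:", totals)
					return
				}
				totals[r.bucket] = r.objects
			}
		}
	}()

	// Workers: one per "disk", all draining the same bucket queue.
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for b := range buckets {
				// Stands in for disk.CrawlAndGetDataUsage.
				results <- result{bucket: b, objects: len(b)}
			}
		}()
	}
	wg.Wait()
	close(results)
	<-done
}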
@@ -130,7 +130,7 @@ func newErasureTestSetup(dataBlocks int, parityBlocks int, blockSize int64) (*er disks := make([]StorageAPI, len(diskPaths)) var err error for i := range diskPaths { - disks[i], diskPaths[i], err = newPosixTestSetup() + disks[i], diskPaths[i], err = newXLStorageTestSetup() if err != nil { return nil, err } diff --git a/cmd/format-disk-cache.go b/cmd/format-disk-cache.go index 3dec6e56a..ad98c319e 100644 --- a/cmd/format-disk-cache.go +++ b/cmd/format-disk-cache.go @@ -366,7 +366,7 @@ func loadAndValidateCacheFormat(ctx context.Context, drives []string) (formats [ func migrateCacheData(ctx context.Context, c *diskCache, bucket, object, oldfile, destDir string, metadata map[string]string) error { st, err := os.Stat(oldfile) if err != nil { - err = osErrToFSFileErr(err) + err = osErrToFileErr(err) return err } readCloser, err := readCacheFileStream(oldfile, 0, st.Size()) diff --git a/cmd/format-xl.go b/cmd/format-erasure.go similarity index 66% rename from cmd/format-xl.go rename to cmd/format-erasure.go index 8d7ad9f24..3c3a6d1b9 100644 --- a/cmd/format-xl.go +++ b/cmd/format-erasure.go @@ -36,20 +36,23 @@ import ( ) const ( - // Represents XL backend. - formatBackendXL = "xl" + // Represents Erasure backend. + formatBackendErasure = "xl" - // formatXLV1.XL.Version - version '1'. - formatXLVersionV1 = "1" + // formatErasureV1.Erasure.Version - version '1'. + formatErasureVersionV1 = "1" - // formatXLV2.XL.Version - version '2'. - formatXLVersionV2 = "2" + // formatErasureV2.Erasure.Version - version '2'. + formatErasureVersionV2 = "2" - // formatXLV3.XL.Version - version '3'. - formatXLVersionV3 = "3" + // formatErasureV3.Erasure.Version - version '3'. + formatErasureVersionV3 = "3" - // Distribution algorithm used. - formatXLVersionV2DistributionAlgo = "CRCMOD" + // Distribution algorithm used, legacy + formatErasureVersionV2DistributionAlgoLegacy = "CRCMOD" + + // Distributed algorithm used, current + formatErasureVersionV3DistributionAlgo = "SIPMOD" ) // Offline disk UUID represents an offline disk. @@ -68,34 +71,34 @@ var formatCriticalErrors = map[error]struct{}{ } // Used to detect the version of "xl" format. -type formatXLVersionDetect struct { - XL struct { +type formatErasureVersionDetect struct { + Erasure struct { Version string `json:"version"` } `json:"xl"` } // Represents the V1 backend disk structure version // under `.minio.sys` and actual data namespace. -// formatXLV1 - structure holds format config version '1'. -type formatXLV1 struct { +// formatErasureV1 - structure holds format config version '1'. +type formatErasureV1 struct { formatMetaV1 - XL struct { + Erasure struct { Version string `json:"version"` // Version of 'xl' format. Disk string `json:"disk"` // Disk field carries assigned disk uuid. // JBOD field carries the input disk order generated the first // time when fresh disks were supplied. JBOD []string `json:"jbod"` - } `json:"xl"` // XL field holds xl format. + } `json:"xl"` // Erasure field holds xl format. } // Represents the V2 backend disk structure version // under `.minio.sys` and actual data namespace. -// formatXLV2 - structure holds format config version '2'. +// formatErasureV2 - structure holds format config version '2'. // The V2 format to support "large bucket" support where a bucket // can span multiple erasure sets. -type formatXLV2 struct { +type formatErasureV2 struct { formatMetaV1 - XL struct { + Erasure struct { Version string `json:"version"` // Version of 'xl' format. 
This string `json:"this"` // This field carries assigned disk uuid. // Sets field carries the input disk order generated the first @@ -108,13 +111,13 @@ type formatXLV2 struct { } `json:"xl"` } -// formatXLV3 struct is same as formatXLV2 struct except that formatXLV3.XL.Version is "3" indicating +// formatErasureV3 struct is same as formatErasureV2 struct except that formatErasureV3.Erasure.Version is "3" indicating // the simplified multipart backend which is a flat hierarchy now. // In .minio.sys/multipart we have: -// sha256(bucket/object)/uploadID/[xl.json, part.1, part.2 ....] -type formatXLV3 struct { +// sha256(bucket/object)/uploadID/[xl.meta, part.1, part.2 ....] +type formatErasureV3 struct { formatMetaV1 - XL struct { + Erasure struct { Version string `json:"version"` // Version of 'xl' format. This string `json:"this"` // This field carries assigned disk uuid. // Sets field carries the input disk order generated the first @@ -127,40 +130,40 @@ type formatXLV3 struct { } `json:"xl"` } -func (f *formatXLV3) Clone() *formatXLV3 { +func (f *formatErasureV3) Clone() *formatErasureV3 { b, err := json.Marshal(f) if err != nil { panic(err) } - var dst formatXLV3 + var dst formatErasureV3 if err = json.Unmarshal(b, &dst); err != nil { panic(err) } return &dst } -// Returns formatXL.XL.Version -func newFormatXLV3(numSets int, setLen int) *formatXLV3 { - format := &formatXLV3{} +// Returns formatErasure.Erasure.Version +func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 { + format := &formatErasureV3{} format.Version = formatMetaVersionV1 - format.Format = formatBackendXL + format.Format = formatBackendErasure format.ID = mustGetUUID() - format.XL.Version = formatXLVersionV3 - format.XL.DistributionAlgo = formatXLVersionV2DistributionAlgo - format.XL.Sets = make([][]string, numSets) + format.Erasure.Version = formatErasureVersionV3 + format.Erasure.DistributionAlgo = formatErasureVersionV3DistributionAlgo + format.Erasure.Sets = make([][]string, numSets) for i := 0; i < numSets; i++ { - format.XL.Sets[i] = make([]string, setLen) + format.Erasure.Sets[i] = make([]string, setLen) for j := 0; j < setLen; j++ { - format.XL.Sets[i][j] = mustGetUUID() + format.Erasure.Sets[i][j] = mustGetUUID() } } return format } -// Returns format XL version after reading `format.json`, returns -// successfully the version only if the backend is XL. -func formatGetBackendXLVersion(formatPath string) (string, error) { +// Returns format Erasure version after reading `format.json`, returns +// successfully the version only if the backend is Erasure. +func formatGetBackendErasureVersion(formatPath string) (string, error) { meta := &formatMetaV1{} b, err := ioutil.ReadFile(formatPath) if err != nil { @@ -172,42 +175,42 @@ func formatGetBackendXLVersion(formatPath string) (string, error) { if meta.Version != formatMetaVersionV1 { return "", fmt.Errorf(`format.Version expected: %s, got: %s`, formatMetaVersionV1, meta.Version) } - if meta.Format != formatBackendXL { - return "", fmt.Errorf(`found backend %s, expected %s`, meta.Format, formatBackendXL) + if meta.Format != formatBackendErasure { + return "", fmt.Errorf(`found backend %s, expected %s`, meta.Format, formatBackendErasure) } - // XL backend found, proceed to detect version. - format := &formatXLVersionDetect{} + // Erasure backend found, proceed to detect version. 
+ format := &formatErasureVersionDetect{} if err = json.Unmarshal(b, format); err != nil { return "", err } - return format.XL.Version, nil + return format.Erasure.Version, nil } // Migrates all previous versions to latest version of `format.json`, // this code calls migration in sequence, such as V1 is migrated to V2 // first before it V2 migrates to V3. -func formatXLMigrate(export string) error { +func formatErasureMigrate(export string) error { formatPath := pathJoin(export, minioMetaBucket, formatConfigFile) - version, err := formatGetBackendXLVersion(formatPath) + version, err := formatGetBackendErasureVersion(formatPath) if err != nil { return err } switch version { - case formatXLVersionV1: - if err = formatXLMigrateV1ToV2(export, version); err != nil { + case formatErasureVersionV1: + if err = formatErasureMigrateV1ToV2(export, version); err != nil { return err } // Migrate successful v1 => v2, proceed to v2 => v3 - version = formatXLVersionV2 + version = formatErasureVersionV2 fallthrough - case formatXLVersionV2: - if err = formatXLMigrateV2ToV3(export, version); err != nil { + case formatErasureVersionV2: + if err = formatErasureMigrateV2ToV3(export, version); err != nil { return err } // Migrate successful v2 => v3, v3 is latest // version = formatXLVersionV3 fallthrough - case formatXLVersionV3: + case formatErasureVersionV3: // v3 is the latest version, return. return nil } @@ -216,14 +219,14 @@ func formatXLMigrate(export string) error { // Migrates version V1 of format.json to version V2 of format.json, // migration fails upon any error. -func formatXLMigrateV1ToV2(export, version string) error { - if version != formatXLVersionV1 { - return fmt.Errorf(`Disk %s: format version expected %s, found %s`, export, formatXLVersionV1, version) +func formatErasureMigrateV1ToV2(export, version string) error { + if version != formatErasureVersionV1 { + return fmt.Errorf(`Disk %s: format version expected %s, found %s`, export, formatErasureVersionV1, version) } formatPath := pathJoin(export, minioMetaBucket, formatConfigFile) - formatV1 := &formatXLV1{} + formatV1 := &formatErasureV1{} b, err := ioutil.ReadFile(formatPath) if err != nil { return err @@ -232,15 +235,15 @@ func formatXLMigrateV1ToV2(export, version string) error { return err } - formatV2 := &formatXLV2{} + formatV2 := &formatErasureV2{} formatV2.Version = formatMetaVersionV1 - formatV2.Format = formatBackendXL - formatV2.XL.Version = formatXLVersionV2 - formatV2.XL.DistributionAlgo = formatXLVersionV2DistributionAlgo - formatV2.XL.This = formatV1.XL.Disk - formatV2.XL.Sets = make([][]string, 1) - formatV2.XL.Sets[0] = make([]string, len(formatV1.XL.JBOD)) - copy(formatV2.XL.Sets[0], formatV1.XL.JBOD) + formatV2.Format = formatBackendErasure + formatV2.Erasure.Version = formatErasureVersionV2 + formatV2.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoLegacy + formatV2.Erasure.This = formatV1.Erasure.Disk + formatV2.Erasure.Sets = make([][]string, 1) + formatV2.Erasure.Sets[0] = make([]string, len(formatV1.Erasure.JBOD)) + copy(formatV2.Erasure.Sets[0], formatV1.Erasure.JBOD) b, err = json.Marshal(formatV2) if err != nil { @@ -250,13 +253,13 @@ func formatXLMigrateV1ToV2(export, version string) error { } // Migrates V2 for format.json to V3 (Flat hierarchy for multipart) -func formatXLMigrateV2ToV3(export, version string) error { - if version != formatXLVersionV2 { - return fmt.Errorf(`Disk %s: format version expected %s, found %s`, export, formatXLVersionV2, version) +func 
formatErasureMigrateV2ToV3(export, version string) error { + if version != formatErasureVersionV2 { + return fmt.Errorf(`Disk %s: format version expected %s, found %s`, export, formatErasureVersionV2, version) } formatPath := pathJoin(export, minioMetaBucket, formatConfigFile) - formatV2 := &formatXLV2{} + formatV2 := &formatErasureV2{} b, err := ioutil.ReadFile(formatPath) if err != nil { return err @@ -276,13 +279,13 @@ func formatXLMigrateV2ToV3(export, version string) error { // format-V2 struct is exactly same as format-V1 except that version is "3" // which indicates the simplified multipart backend. - formatV3 := formatXLV3{} + formatV3 := formatErasureV3{} formatV3.Version = formatV2.Version formatV3.Format = formatV2.Format - formatV3.XL = formatV2.XL + formatV3.Erasure = formatV2.Erasure - formatV3.XL.Version = formatXLVersionV3 + formatV3.Erasure.Version = formatErasureVersionV3 b, err = json.Marshal(formatV3) if err != nil { @@ -303,7 +306,7 @@ func countErrs(errs []error, err error) int { } // Does all errors indicate we need to initialize all disks?. -func shouldInitXLDisks(errs []error) bool { +func shouldInitErasureDisks(errs []error) bool { return countErrs(errs, errUnformattedDisk) == len(errs) } @@ -312,13 +315,13 @@ func quorumUnformattedDisks(errs []error) bool { return countErrs(errs, errUnformattedDisk) >= (len(errs)/2)+1 } -// loadFormatXLAll - load all format config from all input disks in parallel. -func loadFormatXLAll(storageDisks []StorageAPI, heal bool) ([]*formatXLV3, []error) { +// loadFormatErasureAll - load all format config from all input disks in parallel. +func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasureV3, []error) { // Initialize list of errors. g := errgroup.WithNErrs(len(storageDisks)) // Initialize format configs. - var formats = make([]*formatXLV3, len(storageDisks)) + var formats = make([]*formatErasureV3, len(storageDisks)) // Load format from each disk in parallel for index := range storageDisks { @@ -327,7 +330,7 @@ func loadFormatXLAll(storageDisks []StorageAPI, heal bool) ([]*formatXLV3, []err if storageDisks[index] == nil { return errDiskNotFound } - format, err := loadFormatXL(storageDisks[index]) + format, err := loadFormatErasure(storageDisks[index]) if err != nil { return err } @@ -335,7 +338,7 @@ func loadFormatXLAll(storageDisks []StorageAPI, heal bool) ([]*formatXLV3, []err if !heal { // If no healing required, make the disks valid and // online. - storageDisks[index].SetDiskID(format.XL.This) + storageDisks[index].SetDiskID(format.Erasure.This) } return nil }, index) @@ -345,12 +348,12 @@ func loadFormatXLAll(storageDisks []StorageAPI, heal bool) ([]*formatXLV3, []err return formats, g.Wait() } -func saveFormatXL(disk StorageAPI, format interface{}, diskID string) error { +func saveFormatErasure(disk StorageAPI, format interface{}, diskID string) error { if format == nil || disk == nil { return errDiskNotFound } - if err := makeFormatXLMetaVolumes(disk); err != nil { + if err := makeFormatErasureMetaVolumes(disk); err != nil { return err } @@ -398,8 +401,8 @@ func isHiddenDirectories(vols ...VolInfo) bool { return true } -// loadFormatXL - loads format.json from disk. -func loadFormatXL(disk StorageAPI) (format *formatXLV3, err error) { +// loadFormatErasure - loads format.json from disk. 
+func loadFormatErasure(disk StorageAPI) (format *formatErasureV3, err error) { buf, err := disk.ReadAll(minioMetaBucket, formatConfigFile) if err != nil { // 'file not found' and 'volume not found' as @@ -421,7 +424,7 @@ func loadFormatXL(disk StorageAPI) (format *formatXLV3, err error) { } // Try to decode format json into formatConfigV1 struct. - format = &formatXLV3{} + format = &formatErasureV3{} if err = json.Unmarshal(buf, format); err != nil { return nil, err } @@ -430,56 +433,56 @@ func loadFormatXL(disk StorageAPI) (format *formatXLV3, err error) { return format, nil } -// Valid formatXL basic versions. -func checkFormatXLValue(formatXL *formatXLV3) error { +// Valid formatErasure basic versions. +func checkFormatErasureValue(formatErasure *formatErasureV3) error { // Validate format version and format type. - if formatXL.Version != formatMetaVersionV1 { - return fmt.Errorf("Unsupported version of backend format [%s] found", formatXL.Version) + if formatErasure.Version != formatMetaVersionV1 { + return fmt.Errorf("Unsupported version of backend format [%s] found", formatErasure.Version) } - if formatXL.Format != formatBackendXL { - return fmt.Errorf("Unsupported backend format [%s] found", formatXL.Format) + if formatErasure.Format != formatBackendErasure { + return fmt.Errorf("Unsupported backend format [%s] found", formatErasure.Format) } - if formatXL.XL.Version != formatXLVersionV3 { - return fmt.Errorf("Unsupported XL backend format found [%s]", formatXL.XL.Version) + if formatErasure.Erasure.Version != formatErasureVersionV3 { + return fmt.Errorf("Unsupported Erasure backend format found [%s]", formatErasure.Erasure.Version) } return nil } // Check all format values. -func checkFormatXLValues(formats []*formatXLV3, drivesPerSet int) error { - for i, formatXL := range formats { - if formatXL == nil { +func checkFormatErasureValues(formats []*formatErasureV3, drivesPerSet int) error { + for i, formatErasure := range formats { + if formatErasure == nil { continue } - if err := checkFormatXLValue(formatXL); err != nil { + if err := checkFormatErasureValue(formatErasure); err != nil { return err } - if len(formats) != len(formatXL.XL.Sets)*len(formatXL.XL.Sets[0]) { + if len(formats) != len(formatErasure.Erasure.Sets)*len(formatErasure.Erasure.Sets[0]) { return fmt.Errorf("%s disk is already being used in another erasure deployment. (Number of disks specified: %d but the number of disks found in the %s disk's format.json: %d)", - humanize.Ordinal(i+1), len(formats), humanize.Ordinal(i+1), len(formatXL.XL.Sets)*len(formatXL.XL.Sets[0])) + humanize.Ordinal(i+1), len(formats), humanize.Ordinal(i+1), len(formatErasure.Erasure.Sets)*len(formatErasure.Erasure.Sets[0])) } // Only if custom erasure drive count is set, // we should fail here other proceed to honor what // is present on the disk. - if globalCustomErasureDriveCount && len(formatXL.XL.Sets[0]) != drivesPerSet { - return fmt.Errorf("%s disk is already formatted with %d drives per erasure set. This cannot be changed to %d, please revert your MINIO_ERASURE_SET_DRIVE_COUNT setting", humanize.Ordinal(i+1), len(formatXL.XL.Sets[0]), drivesPerSet) + if globalCustomErasureDriveCount && len(formatErasure.Erasure.Sets[0]) != drivesPerSet { + return fmt.Errorf("%s disk is already formatted with %d drives per erasure set. 
This cannot be changed to %d, please revert your MINIO_ERASURE_SET_DRIVE_COUNT setting", humanize.Ordinal(i+1), len(formatErasure.Erasure.Sets[0]), drivesPerSet) } } return nil } -// Get Deployment ID for the XL sets from format.json. +// Get Deployment ID for the Erasure sets from format.json. // This need not be in quorum. Even if one of the format.json // file has this value, we assume it is valid. // If more than one format.json's have different id, it is considered a corrupt // backend format. -func formatXLGetDeploymentID(refFormat *formatXLV3, formats []*formatXLV3) (string, error) { +func formatErasureGetDeploymentID(refFormat *formatErasureV3, formats []*formatErasureV3) (string, error) { var deploymentID string for _, format := range formats { if format == nil || format.ID == "" { continue } - if reflect.DeepEqual(format.XL.Sets, refFormat.XL.Sets) { + if reflect.DeepEqual(format.Erasure.Sets, refFormat.Erasure.Sets) { // Found an ID in one of the format.json file // Set deploymentID for the first time. if deploymentID == "" { @@ -494,11 +497,11 @@ func formatXLGetDeploymentID(refFormat *formatXLV3, formats []*formatXLV3) (stri return deploymentID, nil } -// formatXLFixDeploymentID - Add deployment id if it is not present. -func formatXLFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatXLV3) (err error) { +// formatErasureFixDeploymentID - Add deployment id if it is not present. +func formatErasureFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatErasureV3) (err error) { // Attempt to load all `format.json` from all disks. var sErrs []error - formats, sErrs := loadFormatXLAll(storageDisks, false) + formats, sErrs := loadFormatErasureAll(storageDisks, false) for i, sErr := range sErrs { if _, ok := formatCriticalErrors[sErr]; ok { return config.ErrCorruptedBackend(err).Hint(fmt.Sprintf("Clear any pre-existing content on %s", endpoints[i])) @@ -506,13 +509,13 @@ func formatXLFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, ref } for index := range formats { - // If the XL sets do not match, set those formats to nil, + // If the Erasure sets do not match, set those formats to nil, // We do not have to update the ID on those format.json file. - if formats[index] != nil && !reflect.DeepEqual(formats[index].XL.Sets, refFormat.XL.Sets) { + if formats[index] != nil && !reflect.DeepEqual(formats[index].Erasure.Sets, refFormat.Erasure.Sets) { formats[index] = nil } } - refFormat.ID, err = formatXLGetDeploymentID(refFormat, formats) + refFormat.ID, err = formatErasureGetDeploymentID(refFormat, formats) if err != nil { return err } @@ -534,12 +537,12 @@ func formatXLFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, ref } // Deployment ID needs to be set on all the disks. // Save `format.json` across all disks. - return saveFormatXLAll(GlobalContext, storageDisks, formats) + return saveFormatErasureAll(GlobalContext, storageDisks, formats) } // Update only the valid local disks which have not been updated before. -func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatXLV3) error { +func formatErasureFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatErasureV3) error { // If this server was down when the deploymentID was updated // then we make sure that we update the local disks with the deploymentID. 
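formatErasureGetDeploymentID deliberately works without quorum: one surviving format.json carrying an ID is enough, and any disagreement is treated as a corrupt backend. A stripped-down sketch of that reconciliation over bare ID strings; the real function also matches Erasure.Sets before trusting an ID, and errCorruptedFormat here is a local stand-in for MinIO's error:

package main

import (
	"errors"
	"fmt"
)

var errCorruptedFormat = errors.New("corrupted backend format")

// getDeploymentID mirrors the quorum-free scan in the diff: the first
// non-empty ID wins, and any conflicting ID means a corrupt backend.
func getDeploymentID(ids []string) (string, error) {
	deploymentID := ""
	for _, id := range ids {
		if id == "" { // disk offline, or format.json not yet updated
			continue
		}
		if deploymentID == "" {
			deploymentID = id
			continue
		}
		if deploymentID != id {
			return "", errCorruptedFormat
		}
	}
	return deploymentID, nil
}

func main() {
	id, err := getDeploymentID([]string{"", "6f4a...", "", "6f4a..."})
	fmt.Println(id, err) // 6f4a... <nil>
}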
@@ -550,7 +553,7 @@ func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI index := index g.Go(func() error { if endpoints[index].IsLocal && storageDisks[index] != nil && storageDisks[index].IsOnline() { - format, err := loadFormatXL(storageDisks[index]) + format, err := loadFormatErasure(storageDisks[index]) if err != nil { // Disk can be offline etc. // ignore the errors seen here. @@ -559,11 +562,11 @@ func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI if format.ID != "" { return nil } - if !reflect.DeepEqual(format.XL.Sets, refFormat.XL.Sets) { + if !reflect.DeepEqual(format.Erasure.Sets, refFormat.Erasure.Sets) { return nil } format.ID = refFormat.ID - if err := saveFormatXL(storageDisks[index], format, format.XL.This); err != nil { + if err := saveFormatErasure(storageDisks[index], format, format.Erasure.This); err != nil { logger.LogIf(GlobalContext, err) return fmt.Errorf("Unable to save format.json, %w", err) } @@ -579,15 +582,15 @@ func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI return nil } -// Get backend XL format in quorum `format.json`. -func getFormatXLInQuorum(formats []*formatXLV3) (*formatXLV3, error) { +// Get backend Erasure format in quorum `format.json`. +func getFormatErasureInQuorum(formats []*formatErasureV3) (*formatErasureV3, error) { formatHashes := make([]string, len(formats)) for i, format := range formats { if format == nil { continue } h := sha256.New() - for _, set := range format.XL.Sets { + for _, set := range format.Erasure.Sets { for _, diskID := range set { h.Write([]byte(diskID)) } @@ -613,55 +616,55 @@ func getFormatXLInQuorum(formats []*formatXLV3) (*formatXLV3, error) { } if maxCount < len(formats)/2 { - return nil, errXLReadQuorum + return nil, errErasureReadQuorum } for i, hash := range formatHashes { if hash == maxHash { format := formats[i].Clone() - format.XL.This = "" + format.Erasure.This = "" return format, nil } } - return nil, errXLReadQuorum + return nil, errErasureReadQuorum } -func formatXLV3Check(reference *formatXLV3, format *formatXLV3) error { +func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) error { tmpFormat := format.Clone() - this := tmpFormat.XL.This - tmpFormat.XL.This = "" - if len(reference.XL.Sets) != len(format.XL.Sets) { - return fmt.Errorf("Expected number of sets %d, got %d", len(reference.XL.Sets), len(format.XL.Sets)) + this := tmpFormat.Erasure.This + tmpFormat.Erasure.This = "" + if len(reference.Erasure.Sets) != len(format.Erasure.Sets) { + return fmt.Errorf("Expected number of sets %d, got %d", len(reference.Erasure.Sets), len(format.Erasure.Sets)) } // Make sure that the sets match. 
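getFormatErasureInQuorum reduces each format to a hash of its set layout and keeps the most common one, failing with errErasureReadQuorum when fewer than half the disks agree. A condensed sketch of the counting step, with plain string keys standing in for the sha256 digests the diff computes:

package main

import (
	"fmt"
	"strings"
)

var errErasureReadQuorum = fmt.Errorf("insufficient number of disks online for read quorum")

// layoutKey flattens a set layout to a comparable key; in the diff this is a
// sha256 over the concatenated disk IDs.
func layoutKey(sets [][]string) string {
	var b strings.Builder
	for _, set := range sets {
		for _, diskID := range set {
			b.WriteString(diskID)
		}
	}
	return b.String()
}

// mostCommonLayout picks the set layout shared by the most disks, failing
// when fewer than half agree; a nil entry models an offline disk.
func mostCommonLayout(layouts [][][]string) ([][]string, error) {
	counts := map[string]int{}
	for _, l := range layouts {
		if l == nil {
			continue
		}
		counts[layoutKey(l)]++
	}
	maxCount, maxKey := 0, ""
	for k, n := range counts {
		if n > maxCount {
			maxCount, maxKey = n, k
		}
	}
	if maxCount < len(layouts)/2 {
		return nil, errErasureReadQuorum
	}
	for _, l := range layouts {
		if l != nil && layoutKey(l) == maxKey {
			return l, nil
		}
	}
	return nil, errErasureReadQuorum
}

func main() {
	a := [][]string{{"d1", "d2"}}
	layouts := [][][]string{a, a, a, nil} // 3 of 4 disks agree
	l, err := mostCommonLayout(layouts)
	fmt.Println(len(l), err) // 1 <nil>
}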
- for i := range reference.XL.Sets { - if len(reference.XL.Sets[i]) != len(format.XL.Sets[i]) { + for i := range reference.Erasure.Sets { + if len(reference.Erasure.Sets[i]) != len(format.Erasure.Sets[i]) { return fmt.Errorf("Each set should be of same size, expected %d got %d", - len(reference.XL.Sets[i]), len(format.XL.Sets[i])) + len(reference.Erasure.Sets[i]), len(format.Erasure.Sets[i])) } - for j := range reference.XL.Sets[i] { - if reference.XL.Sets[i][j] != format.XL.Sets[i][j] { + for j := range reference.Erasure.Sets[i] { + if reference.Erasure.Sets[i][j] != format.Erasure.Sets[i][j] { return fmt.Errorf("UUID on positions %d:%d do not match with, expected %s got %s", - i, j, reference.XL.Sets[i][j], format.XL.Sets[i][j]) + i, j, reference.Erasure.Sets[i][j], format.Erasure.Sets[i][j]) } } } // Make sure that the diskID is found in the set. - for i := 0; i < len(tmpFormat.XL.Sets); i++ { - for j := 0; j < len(tmpFormat.XL.Sets[i]); j++ { - if this == tmpFormat.XL.Sets[i][j] { + for i := 0; i < len(tmpFormat.Erasure.Sets); i++ { + for j := 0; j < len(tmpFormat.Erasure.Sets[i]); j++ { + if this == tmpFormat.Erasure.Sets[i][j] { return nil } } } - return fmt.Errorf("Disk ID %s not found in any disk sets %s", this, format.XL.Sets) + return fmt.Errorf("Disk ID %s not found in any disk sets %s", this, format.Erasure.Sets) } // Initializes meta volume only on local storage disks. -func initXLMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatXLV3) error { +func initErasureMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatErasureV3) error { // Compute the local disks eligible for meta volumes (re)initialization var disksToInit []StorageAPI @@ -682,7 +685,7 @@ func initXLMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatX // goroutine will return its own instance of index variable. index := index g.Go(func() error { - return makeFormatXLMetaVolumes(disksToInit[index]) + return makeFormatErasureMetaVolumes(disksToInit[index]) }, index) } @@ -698,15 +701,15 @@ func initXLMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatX return nil } -// saveFormatXLAll - populates `format.json` on disks in its order. -func saveFormatXLAll(ctx context.Context, storageDisks []StorageAPI, formats []*formatXLV3) error { +// saveFormatErasureAll - populates `format.json` on disks in its order. +func saveFormatErasureAll(ctx context.Context, storageDisks []StorageAPI, formats []*formatErasureV3) error { g := errgroup.WithNErrs(len(storageDisks)) // Write `format.json` to all disks. for index := range storageDisks { index := index g.Go(func() error { - return saveFormatXL(storageDisks[index], formats[index], formats[index].XL.This) + return saveFormatErasure(storageDisks[index], formats[index], formats[index].Erasure.This) }, index) } @@ -745,9 +748,9 @@ func initStorageDisksWithErrors(endpoints Endpoints) ([]StorageAPI, []error) { return storageDisks, g.Wait() } -// formatXLV3ThisEmpty - find out if '.This' field is empty +// formatErasureV3ThisEmpty - find out if '.This' field is empty // in any of the input `formats`, if yes return true. 
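saveFormatErasureAll and the other *All helpers share one fan-out shape: a goroutine per disk, with errors collected positionally through minio's internal errgroup.WithNErrs. A dependency-free sketch of that shape using only sync.WaitGroup:

package main

import (
	"fmt"
	"sync"
)

// forEachDisk runs fn once per index in parallel and returns the errors in
// input order, the same shape errgroup.WithNErrs provides in the diff.
func forEachDisk(n int, fn func(index int) error) []error {
	errs := make([]error, n)
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func(index int) {
			defer wg.Done()
			errs[index] = fn(index)
		}(i)
	}
	wg.Wait()
	return errs
}

func main() {
	errs := forEachDisk(4, func(index int) error {
		if index == 2 {
			return fmt.Errorf("disk %d offline", index)
		}
		return nil // pretend format.json was written
	})
	fmt.Println(errs) // [<nil> <nil> disk 2 offline <nil>]
}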
-func formatXLV3ThisEmpty(formats []*formatXLV3) bool { +func formatErasureV3ThisEmpty(formats []*formatErasureV3) bool { for _, format := range formats { if format == nil { continue @@ -756,18 +759,18 @@ func formatXLV3ThisEmpty(formats []*formatXLV3) bool { // V1 to V2 to V3, in a scenario such as this we only need to handle // single sets since we never used to support multiple sets in releases // with V1 format version. - if len(format.XL.Sets) > 1 { + if len(format.Erasure.Sets) > 1 { continue } - if format.XL.This == "" { + if format.Erasure.This == "" { return true } } return false } -// fixFormatXLV3 - fix format XL configuration on all disks. -func fixFormatXLV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatXLV3) error { +// fixFormatErasureV3 - fix format Erasure configuration on all disks. +func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatErasureV3) error { g := errgroup.WithNErrs(len(formats)) for i := range formats { i := i @@ -779,12 +782,12 @@ func fixFormatXLV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*fo // V1 to V2 to V3, in a scenario such as this we only need to handle // single sets since we never used to support multiple sets in releases // with V1 format version. - if len(formats[i].XL.Sets) > 1 { + if len(formats[i].Erasure.Sets) > 1 { return nil } - if formats[i].XL.This == "" { - formats[i].XL.This = formats[i].XL.Sets[0][i] - if err := saveFormatXL(storageDisks[i], formats[i], formats[i].XL.This); err != nil { + if formats[i].Erasure.This == "" { + formats[i].Erasure.This = formats[i].Erasure.Sets[0][i] + if err := saveFormatErasure(storageDisks[i], formats[i], formats[i].Erasure.This); err != nil { return err } } @@ -800,10 +803,10 @@ func fixFormatXLV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*fo } -// initFormatXL - save XL format configuration on all disks. -func initFormatXL(ctx context.Context, storageDisks []StorageAPI, setCount, drivesPerSet int, deploymentID string) (*formatXLV3, error) { - format := newFormatXLV3(setCount, drivesPerSet) - formats := make([]*formatXLV3, len(storageDisks)) +// initFormatErasure - save Erasure format configuration on all disks. +func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, drivesPerSet int, deploymentID string) (*formatErasureV3, error) { + format := newFormatErasureV3(setCount, drivesPerSet) + formats := make([]*formatErasureV3, len(storageDisks)) wantAtMost := ecDrivesNoConfig(drivesPerSet) for i := 0; i < setCount; i++ { @@ -811,7 +814,7 @@ func initFormatXL(ctx context.Context, storageDisks []StorageAPI, setCount, driv for j := 0; j < drivesPerSet; j++ { disk := storageDisks[i*drivesPerSet+j] newFormat := format.Clone() - newFormat.XL.This = format.XL.Sets[i][j] + newFormat.Erasure.This = format.Erasure.Sets[i][j] if deploymentID != "" { newFormat.ID = deploymentID } @@ -843,11 +846,11 @@ func initFormatXL(ctx context.Context, storageDisks []StorageAPI, setCount, driv } // Save formats `format.json` across all disks. - if err := saveFormatXLAll(ctx, storageDisks, formats); err != nil { + if err := saveFormatErasureAll(ctx, storageDisks, formats); err != nil { return nil, err } - return getFormatXLInQuorum(formats) + return getFormatErasureInQuorum(formats) } // ecDrivesNoConfig returns the erasure coded drives in a set if no config has been set. @@ -866,8 +869,8 @@ func ecDrivesNoConfig(drivesPerSet int) int { return ecDrives } -// Make XL backend meta volumes. 
-func makeFormatXLMetaVolumes(disk StorageAPI) error { +// Make Erasure backend meta volumes. +func makeFormatErasureMetaVolumes(disk StorageAPI) error { if disk == nil { return errDiskNotFound } @@ -878,14 +881,14 @@ func makeFormatXLMetaVolumes(disk StorageAPI) error { // Get all UUIDs from the reference format which are also // claimed by one of the formats provided; those are considered // online UUIDs. -func getOnlineUUIDs(refFormat *formatXLV3, formats []*formatXLV3) (onlineUUIDs []string) { +func getOnlineUUIDs(refFormat *formatErasureV3, formats []*formatErasureV3) (onlineUUIDs []string) { for _, format := range formats { if format == nil { continue } - for _, set := range refFormat.XL.Sets { + for _, set := range refFormat.Erasure.Sets { for _, uuid := range set { - if format.XL.This == uuid { + if format.Erasure.This == uuid { onlineUUIDs = append(onlineUUIDs, uuid) } } @@ -897,13 +900,13 @@ func getOnlineUUIDs(refFormat *formatXLV3, formats []*formatXLV3) (onlineUUIDs [ // Look for all UUIDs which are present in the reference format // but missing from the onlineUUIDs list, and construct a list of // such offline UUIDs. -func getOfflineUUIDs(refFormat *formatXLV3, formats []*formatXLV3) (offlineUUIDs []string) { +func getOfflineUUIDs(refFormat *formatErasureV3, formats []*formatErasureV3) (offlineUUIDs []string) { onlineUUIDs := getOnlineUUIDs(refFormat, formats) - for i, set := range refFormat.XL.Sets { + for i, set := range refFormat.Erasure.Sets { for j, uuid := range set { var found bool for _, onlineUUID := range onlineUUIDs { - if refFormat.XL.Sets[i][j] == onlineUUID { + if refFormat.Erasure.Sets[i][j] == onlineUUID { found = true } } @@ -916,13 +919,13 @@ func getOfflineUUIDs(refFormat *formatXLV3, formats []*formatXLV3) (offlineUUIDs } // Mark all UUIDs that are offline. -func markUUIDsOffline(refFormat *formatXLV3, formats []*formatXLV3) { +func markUUIDsOffline(refFormat *formatErasureV3, formats []*formatErasureV3) { offlineUUIDs := getOfflineUUIDs(refFormat, formats) - for i, set := range refFormat.XL.Sets { + for i, set := range refFormat.Erasure.Sets { for j := range set { for _, offlineUUID := range offlineUUIDs { - if refFormat.XL.Sets[i][j] == offlineUUID { - refFormat.XL.Sets[i][j] = offlineDiskUUID + if refFormat.Erasure.Sets[i][j] == offlineUUID { + refFormat.Erasure.Sets[i][j] = offlineDiskUUID } } } @@ -930,29 +933,29 @@ func markUUIDsOffline(refFormat *formatXLV3, formats []*formatXLV3) { } // Initialize a new set of per-set formats which will be written to all disks.
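The online/offline bookkeeping above is a set difference over disk UUIDs: any UUID in the reference layout that no loaded format claims as Erasure.This is rewritten to the offline sentinel. A compact sketch; the sentinel value here is illustrative, MinIO defines its own offlineDiskUUID constant:

package main

import "fmt"

// offlineDiskUUID is an assumed stand-in for MinIO's sentinel constant.
const offlineDiskUUID = "ffffffff-ffff-ffff-ffff-ffffffffffff"

// markOffline replaces every UUID in sets that is absent from online with
// the offline sentinel, mirroring markUUIDsOffline in the diff.
func markOffline(sets [][]string, online map[string]bool) {
	for i := range sets {
		for j := range sets[i] {
			if !online[sets[i][j]] {
				sets[i][j] = offlineDiskUUID
			}
		}
	}
}

func main() {
	sets := [][]string{{"uuid-a", "uuid-b"}, {"uuid-c", "uuid-d"}}
	online := map[string]bool{"uuid-a": true, "uuid-c": true, "uuid-d": true}
	markOffline(sets, online)
	fmt.Println(sets) // uuid-b is replaced by the offline sentinel
}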
-func newHealFormatSets(refFormat *formatXLV3, setCount, drivesPerSet int, formats []*formatXLV3, errs []error) [][]*formatXLV3 { - newFormats := make([][]*formatXLV3, setCount) - for i := range refFormat.XL.Sets { - newFormats[i] = make([]*formatXLV3, drivesPerSet) +func newHealFormatSets(refFormat *formatErasureV3, setCount, drivesPerSet int, formats []*formatErasureV3, errs []error) [][]*formatErasureV3 { + newFormats := make([][]*formatErasureV3, setCount) + for i := range refFormat.Erasure.Sets { + newFormats[i] = make([]*formatErasureV3, drivesPerSet) } - for i := range refFormat.XL.Sets { - for j := range refFormat.XL.Sets[i] { + for i := range refFormat.Erasure.Sets { + for j := range refFormat.Erasure.Sets[i] { if errs[i*drivesPerSet+j] == errUnformattedDisk || errs[i*drivesPerSet+j] == nil { - newFormats[i][j] = &formatXLV3{} + newFormats[i][j] = &formatErasureV3{} newFormats[i][j].Version = refFormat.Version newFormats[i][j].ID = refFormat.ID newFormats[i][j].Format = refFormat.Format - newFormats[i][j].XL.Version = refFormat.XL.Version - newFormats[i][j].XL.DistributionAlgo = refFormat.XL.DistributionAlgo + newFormats[i][j].Erasure.Version = refFormat.Erasure.Version + newFormats[i][j].Erasure.DistributionAlgo = refFormat.Erasure.DistributionAlgo } if errs[i*drivesPerSet+j] == errUnformattedDisk { - newFormats[i][j].XL.This = "" - newFormats[i][j].XL.Sets = nil + newFormats[i][j].Erasure.This = "" + newFormats[i][j].Erasure.Sets = nil continue } if errs[i*drivesPerSet+j] == nil { - newFormats[i][j].XL.This = formats[i*drivesPerSet+j].XL.This - newFormats[i][j].XL.Sets = nil + newFormats[i][j].Erasure.This = formats[i*drivesPerSet+j].Erasure.This + newFormats[i][j].Erasure.Sets = nil } } } diff --git a/cmd/format-xl_test.go b/cmd/format-erasure_test.go similarity index 61% rename from cmd/format-xl_test.go rename to cmd/format-erasure_test.go index 10f5adde8..656d59a63 100644 --- a/cmd/format-xl_test.go +++ b/cmd/format-erasure_test.go @@ -26,13 +26,13 @@ import ( // Test get offline/online uuids. func TestGetUUIDs(t *testing.T) { - fmtV2 := newFormatXLV3(4, 16) - formats := make([]*formatXLV3, 64) + fmtV2 := newFormatErasureV3(4, 16) + formats := make([]*formatErasureV3, 64) for i := 0; i < 4; i++ { for j := 0; j < 16; j++ { newFormat := *fmtV2 - newFormat.XL.This = fmtV2.XL.Sets[i][j] + newFormat.Erasure.This = fmtV2.Erasure.Sets[i][j] formats[i*16+j] = &newFormat } } @@ -62,9 +62,9 @@ func TestGetUUIDs(t *testing.T) { markUUIDsOffline(fmtV2, formats) gotCount = 0 - for i := range fmtV2.XL.Sets { - for j := range fmtV2.XL.Sets[i] { - if fmtV2.XL.Sets[i][j] == offlineDiskUUID { + for i := range fmtV2.Erasure.Sets { + for j := range fmtV2.Erasure.Sets[i] { + if fmtV2.Erasure.Sets[i][j] == offlineDiskUUID { gotCount++ } } @@ -74,16 +74,16 @@ func TestGetUUIDs(t *testing.T) { } } -// tests fixFormatXLV3 - fix format.json on all disks. +// tests fixFormatErasureV3 - fix format.json on all disks. func TestFixFormatV3(t *testing.T) { - xlDirs, err := getRandomDisks(8) + erasureDirs, err := getRandomDisks(8) if err != nil { t.Fatal(err) } - for _, xlDir := range xlDirs { - defer os.RemoveAll(xlDir) + for _, erasureDir := range erasureDirs { + defer os.RemoveAll(erasureDir) } - endpoints := mustGetNewEndpoints(xlDirs...) + endpoints := mustGetNewEndpoints(erasureDirs...) 
storageDisks, errs := initStorageDisksWithErrors(endpoints) for _, err := range errs { @@ -92,46 +92,46 @@ func TestFixFormatV3(t *testing.T) { } } - format := newFormatXLV3(1, 8) - formats := make([]*formatXLV3, 8) + format := newFormatErasureV3(1, 8) + formats := make([]*formatErasureV3, 8) for j := 0; j < 8; j++ { newFormat := format.Clone() - newFormat.XL.This = format.XL.Sets[0][j] + newFormat.Erasure.This = format.Erasure.Sets[0][j] formats[j] = newFormat } - if err = initXLMetaVolumesInLocalDisks(storageDisks, formats); err != nil { + if err = initErasureMetaVolumesInLocalDisks(storageDisks, formats); err != nil { t.Fatal(err) } formats[1] = nil - expThis := formats[2].XL.This - formats[2].XL.This = "" - if err := fixFormatXLV3(storageDisks, endpoints, formats); err != nil { + expThis := formats[2].Erasure.This + formats[2].Erasure.This = "" + if err := fixFormatErasureV3(storageDisks, endpoints, formats); err != nil { t.Fatal(err) } - newFormats, errs := loadFormatXLAll(storageDisks, false) + newFormats, errs := loadFormatErasureAll(storageDisks, false) for _, err := range errs { if err != nil && err != errUnformattedDisk { t.Fatal(err) } } - gotThis := newFormats[2].XL.This + gotThis := newFormats[2].Erasure.This if expThis != gotThis { t.Fatalf("expected uuid %s, got %s", expThis, gotThis) } } -// tests formatXLV3ThisEmpty conditions. -func TestFormatXLEmpty(t *testing.T) { - format := newFormatXLV3(1, 16) - formats := make([]*formatXLV3, 16) +// tests formatErasureV3ThisEmpty conditions. +func TestFormatErasureEmpty(t *testing.T) { + format := newFormatErasureV3(1, 16) + formats := make([]*formatErasureV3, 16) for j := 0; j < 16; j++ { newFormat := format.Clone() - newFormat.XL.This = format.XL.Sets[0][j] + newFormat.Erasure.This = format.Erasure.Sets[0][j] formats[j] = newFormat } @@ -139,18 +139,18 @@ func TestFormatXLEmpty(t *testing.T) { // empty should return false. formats[0] = nil - if ok := formatXLV3ThisEmpty(formats); ok { + if ok := formatErasureV3ThisEmpty(formats); ok { t.Fatalf("expected value false, got %t", ok) } - formats[2].XL.This = "" - if ok := formatXLV3ThisEmpty(formats); !ok { + formats[2].Erasure.This = "" + if ok := formatErasureV3ThisEmpty(formats); !ok { t.Fatalf("expected value true, got %t", ok) } } // Tests xl format migration. -func TestFormatXLMigrate(t *testing.T) { +func TestFormatErasureMigrate(t *testing.T) { // Get test root. 
rootPath, err := getTestRoot() if err != nil { @@ -158,12 +158,12 @@ func TestFormatXLMigrate(t *testing.T) { } defer os.RemoveAll(rootPath) - m := &formatXLV1{} - m.Format = formatBackendXL + m := &formatErasureV1{} + m.Format = formatBackendErasure m.Version = formatMetaVersionV1 - m.XL.Version = formatXLVersionV1 - m.XL.Disk = mustGetUUID() - m.XL.JBOD = []string{m.XL.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} + m.Erasure.Version = formatErasureVersionV1 + m.Erasure.Disk = mustGetUUID() + m.Erasure.JBOD = []string{m.Erasure.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} b, err := json.Marshal(m) if err != nil { @@ -178,43 +178,43 @@ func TestFormatXLMigrate(t *testing.T) { t.Fatal(err) } - if err = formatXLMigrate(rootPath); err != nil { + if err = formatErasureMigrate(rootPath); err != nil { t.Fatal(err) } - migratedVersion, err := formatGetBackendXLVersion(pathJoin(rootPath, minioMetaBucket, formatConfigFile)) + migratedVersion, err := formatGetBackendErasureVersion(pathJoin(rootPath, minioMetaBucket, formatConfigFile)) if err != nil { t.Fatal(err) } - if migratedVersion != formatXLVersionV3 { - t.Fatalf("expected version: %s, got: %s", formatXLVersionV3, migratedVersion) + if migratedVersion != formatErasureVersionV3 { + t.Fatalf("expected version: %s, got: %s", formatErasureVersionV3, migratedVersion) } b, err = ioutil.ReadFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile)) if err != nil { t.Fatal(err) } - formatV3 := &formatXLV3{} + formatV3 := &formatErasureV3{} if err = json.Unmarshal(b, formatV3); err != nil { t.Fatal(err) } - if formatV3.XL.This != m.XL.Disk { - t.Fatalf("expected disk uuid: %s, got: %s", m.XL.Disk, formatV3.XL.This) + if formatV3.Erasure.This != m.Erasure.Disk { + t.Fatalf("expected disk uuid: %s, got: %s", m.Erasure.Disk, formatV3.Erasure.This) } - if len(formatV3.XL.Sets) != 1 { - t.Fatalf("expected single set after migrating from v1 to v3, but found %d", len(formatV3.XL.Sets)) + if len(formatV3.Erasure.Sets) != 1 { + t.Fatalf("expected single set after migrating from v1 to v3, but found %d", len(formatV3.Erasure.Sets)) } - if !reflect.DeepEqual(formatV3.XL.Sets[0], m.XL.JBOD) { - t.Fatalf("expected disk uuid: %v, got: %v", m.XL.JBOD, formatV3.XL.Sets[0]) + if !reflect.DeepEqual(formatV3.Erasure.Sets[0], m.Erasure.JBOD) { + t.Fatalf("expected disk uuid: %v, got: %v", m.Erasure.JBOD, formatV3.Erasure.Sets[0]) } - m = &formatXLV1{} + m = &formatErasureV1{} m.Format = "unknown" m.Version = formatMetaVersionV1 - m.XL.Version = formatXLVersionV1 - m.XL.Disk = mustGetUUID() - m.XL.JBOD = []string{m.XL.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} + m.Erasure.Version = formatErasureVersionV1 + m.Erasure.Disk = mustGetUUID() + m.Erasure.JBOD = []string{m.Erasure.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} b, err = json.Marshal(m) if err != nil { @@ -225,16 +225,16 @@ func TestFormatXLMigrate(t *testing.T) { t.Fatal(err) } - if err = formatXLMigrate(rootPath); err == nil { + if err = formatErasureMigrate(rootPath); err == nil { t.Fatal("Expected to fail with unexpected backend format") } - m = &formatXLV1{} - m.Format = formatBackendXL + m = &formatErasureV1{} + m.Format = formatBackendErasure m.Version = formatMetaVersionV1 - m.XL.Version = "30" - m.XL.Disk = mustGetUUID() - m.XL.JBOD = []string{m.XL.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} + m.Erasure.Version = "30" + m.Erasure.Disk = mustGetUUID() + m.Erasure.JBOD = []string{m.Erasure.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} b, err = json.Marshal(m) if err != nil 
{ @@ -245,25 +245,25 @@ func TestFormatXLMigrate(t *testing.T) { t.Fatal(err) } - if err = formatXLMigrate(rootPath); err == nil { + if err = formatErasureMigrate(rootPath); err == nil { t.Fatal("Expected to fail with unexpected backend format version number") } } // Tests check format xl value. -func TestCheckFormatXLValue(t *testing.T) { +func TestCheckFormatErasureValue(t *testing.T) { testCases := []struct { - format *formatXLV3 + format *formatErasureV3 success bool }{ - // Invalid XL format version "2". + // Invalid Erasure format version "2". { - &formatXLV3{ + &formatErasureV3{ formatMetaV1: formatMetaV1{ Version: "2", - Format: "XL", + Format: "Erasure", }, - XL: struct { + Erasure: struct { Version string `json:"version"` This string `json:"this"` Sets [][]string `json:"sets"` @@ -274,14 +274,14 @@ func TestCheckFormatXLValue(t *testing.T) { }, false, }, - // Invalid XL format "Unknown". + // Invalid Erasure format "Unknown". { - &formatXLV3{ + &formatErasureV3{ formatMetaV1: formatMetaV1{ Version: "1", Format: "Unknown", }, - XL: struct { + Erasure: struct { Version string `json:"version"` This string `json:"this"` Sets [][]string `json:"sets"` @@ -292,14 +292,14 @@ func TestCheckFormatXLValue(t *testing.T) { }, false, }, - // Invalid XL format version "0". + // Invalid Erasure format version "0". { - &formatXLV3{ + &formatErasureV3{ formatMetaV1: formatMetaV1{ Version: "1", - Format: "XL", + Format: "Erasure", }, - XL: struct { + Erasure: struct { Version string `json:"version"` This string `json:"this"` Sets [][]string `json:"sets"` @@ -314,65 +314,65 @@ func TestCheckFormatXLValue(t *testing.T) { // Valid all test cases. for i, testCase := range testCases { - if err := checkFormatXLValue(testCase.format); err != nil && testCase.success { + if err := checkFormatErasureValue(testCase.format); err != nil && testCase.success { t.Errorf("Test %d: Expected failure %s", i+1, err) } } } -// Tests getFormatXLInQuorum() -func TestGetFormatXLInQuorumCheck(t *testing.T) { +// Tests getFormatErasureInQuorum() +func TestGetFormatErasureInQuorumCheck(t *testing.T) { setCount := 2 drivesPerSet := 16 - format := newFormatXLV3(setCount, drivesPerSet) - formats := make([]*formatXLV3, 32) + format := newFormatErasureV3(setCount, drivesPerSet) + formats := make([]*formatErasureV3, 32) for i := 0; i < setCount; i++ { for j := 0; j < drivesPerSet; j++ { newFormat := format.Clone() - newFormat.XL.This = format.XL.Sets[i][j] + newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*drivesPerSet+j] = newFormat } } // Return a format from list of formats in quorum. - quorumFormat, err := getFormatXLInQuorum(formats) + quorumFormat, err := getFormatErasureInQuorum(formats) if err != nil { t.Fatal(err) } // Check if the reference format and input formats are same. - if err = formatXLV3Check(quorumFormat, formats[0]); err != nil { + if err = formatErasureV3Check(quorumFormat, formats[0]); err != nil { t.Fatal(err) } // QuorumFormat has .This field empty on purpose, expect a failure. 
- if err = formatXLV3Check(formats[0], quorumFormat); err == nil { + if err = formatErasureV3Check(formats[0], quorumFormat); err == nil { t.Fatal("Unexpected success") } formats[0] = nil - quorumFormat, err = getFormatXLInQuorum(formats) + quorumFormat, err = getFormatErasureInQuorum(formats) if err != nil { t.Fatal(err) } badFormat := *quorumFormat - badFormat.XL.Sets = nil - if err = formatXLV3Check(quorumFormat, &badFormat); err == nil { + badFormat.Erasure.Sets = nil + if err = formatErasureV3Check(quorumFormat, &badFormat); err == nil { t.Fatal("Unexpected success") } badFormatUUID := *quorumFormat - badFormatUUID.XL.Sets[0][0] = "bad-uuid" - if err = formatXLV3Check(quorumFormat, &badFormatUUID); err == nil { + badFormatUUID.Erasure.Sets[0][0] = "bad-uuid" + if err = formatErasureV3Check(quorumFormat, &badFormatUUID); err == nil { t.Fatal("Unexpected success") } badFormatSetSize := *quorumFormat - badFormatSetSize.XL.Sets[0] = nil - if err = formatXLV3Check(quorumFormat, &badFormatSetSize); err == nil { + badFormatSetSize.Erasure.Sets[0] = nil + if err = formatErasureV3Check(quorumFormat, &badFormatSetSize); err == nil { t.Fatal("Unexpected success") } @@ -381,36 +381,36 @@ func TestGetFormatXLInQuorumCheck(t *testing.T) { formats[i] = nil } } - if _, err = getFormatXLInQuorum(formats); err == nil { + if _, err = getFormatErasureInQuorum(formats); err == nil { t.Fatal("Unexpected success") } } -// Tests formatXLGetDeploymentID() -func TestGetXLID(t *testing.T) { +// Tests formatErasureGetDeploymentID() +func TestGetErasureID(t *testing.T) { setCount := 2 drivesPerSet := 8 - format := newFormatXLV3(setCount, drivesPerSet) - formats := make([]*formatXLV3, 16) + format := newFormatErasureV3(setCount, drivesPerSet) + formats := make([]*formatErasureV3, 16) for i := 0; i < setCount; i++ { for j := 0; j < drivesPerSet; j++ { newFormat := format.Clone() - newFormat.XL.This = format.XL.Sets[i][j] + newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*drivesPerSet+j] = newFormat } } // Return a format from list of formats in quorum. - quorumFormat, err := getFormatXLInQuorum(formats) + quorumFormat, err := getFormatErasureInQuorum(formats) if err != nil { t.Fatal(err) } // Check if the reference format and input formats are same. 
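One subtlety worth noting in the test above: badFormat := *quorumFormat is a shallow struct copy, so badFormat.Erasure.Sets = nil touches only the copy, while badFormatUUID.Erasure.Sets[0][0] = "bad-uuid" writes through the shared slice backing into quorumFormat as well. The assertions still hold because each check runs right after its mutation, but the aliasing is easy to trip over when extending the test. A minimal demonstration with simplified types:

package main

import "fmt"

type inner struct{ Sets [][]string }
type format struct{ Erasure inner }

func main() {
	orig := &format{Erasure: inner{Sets: [][]string{{"uuid-1"}}}}

	// Shallow copy: the struct is copied, the slice backing array is shared.
	bad := *orig
	bad.Erasure.Sets = nil                // affects only the copy
	fmt.Println(orig.Erasure.Sets != nil) // true

	badUUID := *orig
	badUUID.Erasure.Sets[0][0] = "bad-uuid" // writes through shared backing
	fmt.Println(orig.Erasure.Sets[0][0])    // "bad-uuid": orig mutated too
}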
var id string - if id, err = formatXLGetDeploymentID(quorumFormat, formats); err != nil { + if id, err = formatErasureGetDeploymentID(quorumFormat, formats); err != nil { t.Fatal(err) } @@ -419,15 +419,15 @@ func TestGetXLID(t *testing.T) { } formats[0] = nil - if id, err = formatXLGetDeploymentID(quorumFormat, formats); err != nil { + if id, err = formatErasureGetDeploymentID(quorumFormat, formats); err != nil { t.Fatal(err) } if id == "" { t.Fatal("ID cannot be empty.") } - formats[1].XL.Sets[0][0] = "bad-uuid" - if id, err = formatXLGetDeploymentID(quorumFormat, formats); err != nil { + formats[1].Erasure.Sets[0][0] = "bad-uuid" + if id, err = formatErasureGetDeploymentID(quorumFormat, formats); err != nil { t.Fatal(err) } @@ -436,7 +436,7 @@ func TestGetXLID(t *testing.T) { } formats[2].ID = "bad-id" - if _, err = formatXLGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat { + if _, err = formatErasureGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat { t.Fatal("Unexpected Success") } } @@ -446,19 +446,19 @@ func TestNewFormatSets(t *testing.T) { setCount := 2 drivesPerSet := 16 - format := newFormatXLV3(setCount, drivesPerSet) - formats := make([]*formatXLV3, 32) + format := newFormatErasureV3(setCount, drivesPerSet) + formats := make([]*formatErasureV3, 32) errs := make([]error, 32) for i := 0; i < setCount; i++ { for j := 0; j < drivesPerSet; j++ { newFormat := format.Clone() - newFormat.XL.This = format.XL.Sets[i][j] + newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*drivesPerSet+j] = newFormat } } - quorumFormat, err := getFormatXLInQuorum(formats) + quorumFormat, err := getFormatErasureInQuorum(formats) if err != nil { t.Fatal(err) } diff --git a/cmd/format-fs.go b/cmd/format-fs.go index 1fab6dc62..13faff731 100644 --- a/cmd/format-fs.go +++ b/cmd/format-fs.go @@ -75,7 +75,7 @@ func newFormatFSV1() (format *formatFSV1) { } // Returns the field formatMetaV1.Format i.e the string "fs" which is never likely to change. -// We do not use this function in XL to get the format as the file is not fcntl-locked on XL. +// We do not use this function in Erasure to get the format as the file is not fcntl-locked on Erasure. func formatMetaGetFormatBackendFS(r io.ReadSeeker) (string, error) { format := &formatMetaV1{} if err := jsonLoad(r, format); err != nil { diff --git a/cmd/fs-v1-helpers.go b/cmd/fs-v1-helpers.go index b312a5a92..9aed0a29c 100644 --- a/cmd/fs-v1-helpers.go +++ b/cmd/fs-v1-helpers.go @@ -42,7 +42,7 @@ func fsRemoveFile(ctx context.Context, filePath string) (err error) { } if err = os.Remove((filePath)); err != nil { - err = osErrToFSFileErr(err) + err = osErrToFileErr(err) if err != errFileNotFound { logger.LogIf(ctx, err) } @@ -186,37 +186,11 @@ func fsStatVolume(ctx context.Context, volume string) (os.FileInfo, error) { return fi, nil } -// Is a one place function which converts all os.PathError -// into a more FS object layer friendly form, converts -// known errors into their typed form for top level -// interpretation. -func osErrToFSFileErr(err error) error { - if err == nil { - return nil - } - if os.IsNotExist(err) { - return errFileNotFound - } - if os.IsPermission(err) { - return errFileAccessDenied - } - if isSysErrNotDir(err) { - return errFileNotFound - } - if isSysErrPathNotFound(err) { - return errFileNotFound - } - if isSysErrTooManyFiles(err) { - return errTooManyOpenFiles - } - return err -} - // Lookup if directory exists, returns directory attributes upon success. 
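The deleted osErrToFSFileErr is folded into a shared osErrToFileErr used at every call site above. A hedged sketch of the consolidated mapping, using local sentinels in place of MinIO's typed errors:

package main

import (
	"errors"
	"fmt"
	"os"
)

// Typed sentinels standing in for MinIO's errFileNotFound and friends.
var (
	errFileNotFound     = errors.New("file not found")
	errFileAccessDenied = errors.New("file access denied")
)

// osErrToFileErr sketches the mapping the diff switches to: os-level
// failures become the object layer's typed errors. The real helper also
// covers platform-specific syscall errors (not-a-dir, path not found,
// too many open files).
func osErrToFileErr(err error) error {
	switch {
	case err == nil:
		return nil
	case errors.Is(err, os.ErrNotExist):
		return errFileNotFound
	case errors.Is(err, os.ErrPermission):
		return errFileAccessDenied
	}
	return err
}

func main() {
	_, err := os.Open("/no/such/path")
	fmt.Println(osErrToFileErr(err)) // file not found
}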
func fsStatDir(ctx context.Context, statDir string) (os.FileInfo, error) { fi, err := fsStat(ctx, statDir) if err != nil { - err = osErrToFSFileErr(err) + err = osErrToFileErr(err) if err != errFileNotFound { logger.LogIf(ctx, err) } @@ -232,7 +206,7 @@ func fsStatDir(ctx context.Context, statDir string) (os.FileInfo, error) { func fsStatFile(ctx context.Context, statFile string) (os.FileInfo, error) { fi, err := fsStat(ctx, statFile) if err != nil { - err = osErrToFSFileErr(err) + err = osErrToFileErr(err) if err != errFileNotFound { logger.LogIf(ctx, err) } @@ -267,13 +241,13 @@ func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadClos fr, err := os.Open(readPath) if err != nil { - return nil, 0, osErrToFSFileErr(err) + return nil, 0, osErrToFileErr(err) } // Stat to get the size of the file at path. st, err := fr.Stat() if err != nil { - err = osErrToFSFileErr(err) + err = osErrToFileErr(err) if err != errFileNotFound { logger.LogIf(ctx, err) } @@ -327,7 +301,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, buf [] } writer, err := lock.Open(filePath, flags, 0666) if err != nil { - return 0, osErrToFSFileErr(err) + return 0, osErrToFileErr(err) } defer writer.Close() @@ -399,7 +373,7 @@ func fsSimpleRenameFile(ctx context.Context, sourcePath, destPath string) error if err := os.Rename(sourcePath, destPath); err != nil { logger.LogIf(ctx, err) - return osErrToFSFileErr(err) + return osErrToFileErr(err) } return nil diff --git a/cmd/fs-v1-helpers_test.go b/cmd/fs-v1-helpers_test.go index 972616ce2..ed0d906f7 100644 --- a/cmd/fs-v1-helpers_test.go +++ b/cmd/fs-v1-helpers_test.go @@ -28,10 +28,10 @@ import ( ) func TestFSRenameFile(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -53,10 +53,10 @@ func TestFSRenameFile(t *testing.T) { } func TestFSStats(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -170,11 +170,11 @@ func TestFSStats(t *testing.T) { if testCase.srcPath != "" { if _, err := fsStatFile(GlobalContext, pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); err != testCase.expectedErr { - t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + t.Fatalf("TestErasureStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } else { if _, err := fsStatVolume(GlobalContext, pathJoin(testCase.srcFSPath, testCase.srcVol)); err != testCase.expectedErr { - t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + t.Fatalf("TestFS case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } } @@ -182,9 +182,9 @@ func TestFSStats(t *testing.T) { func TestFSCreateAndOpen(t *testing.T) { // Setup test environment. 
- _, path, err := newPosixTestSetup() + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -246,10 +246,10 @@ func TestFSCreateAndOpen(t *testing.T) { } func TestFSDeletes(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -349,10 +349,10 @@ func TestFSDeletes(t *testing.T) { } func BenchmarkFSDeleteFile(b *testing.B) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - b.Fatalf("Unable to create posix test setup, %s", err) + b.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -383,10 +383,10 @@ func BenchmarkFSDeleteFile(b *testing.B) { // Tests fs removes. func TestFSRemoves(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -500,10 +500,10 @@ func TestFSRemoves(t *testing.T) { } func TestFSRemoveMeta(t *testing.T) { - // create posix test setup - _, fsPath, err := newPosixTestSetup() + // create xlStorage test setup + _, fsPath, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(fsPath) diff --git a/cmd/fs-v1-metadata_test.go b/cmd/fs-v1-metadata_test.go index 1d8cb7ba5..71c9a2fa2 100644 --- a/cmd/fs-v1-metadata_test.go +++ b/cmd/fs-v1-metadata_test.go @@ -31,7 +31,7 @@ func TestFSV1MetadataObjInfo(t *testing.T) { if objInfo.Size != 0 { t.Fatal("Unexpected object info value for Size", objInfo.Size) } - if objInfo.ModTime != timeSentinel { + if !objInfo.ModTime.Equal(timeSentinel) { t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime) } if objInfo.IsDir { @@ -53,7 +53,7 @@ func TestReadFSMetadata(t *testing.T) { bucketName := "bucket" objectName := "object" - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Unexpected err: ", err) } if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil { @@ -88,7 +88,7 @@ func TestWriteFSMetadata(t *testing.T) { bucketName := "bucket" objectName := "object" - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Unexpected err: ", err) } if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil { diff --git a/cmd/fs-v1-multipart.go b/cmd/fs-v1-multipart.go index 9cf8c3755..37b78a82e 100644 --- a/cmd/fs-v1-multipart.go +++ b/cmd/fs-v1-multipart.go @@ -252,6 
+252,14 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) { + if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID { + return pi, VersionNotFound{ + Bucket: srcBucket, + Object: srcObject, + VersionID: srcOpts.VersionID, + } + } + if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, fs); err != nil { return pi, toObjectErr(err) } @@ -269,6 +277,14 @@ func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d // written to '.minio.sys/tmp' location and safely renamed to // '.minio.sys/multipart' for reach parts. func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return pi, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } + data := r.Reader if err := checkPutObjectPartArgs(ctx, bucket, object, fs); err != nil { return pi, toObjectErr(err, bucket) diff --git a/cmd/fs-v1-multipart_test.go b/cmd/fs-v1-multipart_test.go index 91d947f55..bb1a662f7 100644 --- a/cmd/fs-v1-multipart_test.go +++ b/cmd/fs-v1-multipart_test.go @@ -40,7 +40,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) { // Create a context we can cancel. ctx, cancel := context.WithCancel(GlobalContext) - obj.MakeBucketWithLocation(ctx, bucketName, "", false) + obj.MakeBucketWithLocation(ctx, bucketName, BucketOptions{}) uploadID, err := obj.NewMultipartUpload(ctx, bucketName, objectName, ObjectOptions{}) if err != nil { @@ -81,7 +81,7 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) { bucketName := "bucket" objectName := "object" - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Cannot create bucket, err: ", err) } @@ -106,7 +106,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) { data := []byte("12345") dataLen := int64(len(data)) - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Cannot create bucket, err: ", err) } @@ -139,7 +139,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) { objectName := "object" data := []byte("12345") - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Cannot create bucket, err: ", err) } @@ -172,7 +172,7 @@ func TestCompleteMultipartUpload(t *testing.T) { objectName := "object" data := []byte("12345") - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Cannot create bucket, err: ", err) } @@ -204,7 +204,7 @@ func TestAbortMultipartUpload(t *testing.T) { objectName := "object" data := []byte("12345") - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != 
nil { t.Fatal("Cannot create bucket, err: ", err) } @@ -235,7 +235,7 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) { bucketName := "bucket" objectName := "object" - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Cannot create bucket, err: ", err) } diff --git a/cmd/fs-v1-rwpool_test.go b/cmd/fs-v1-rwpool_test.go index f648ab852..5788f6c72 100644 --- a/cmd/fs-v1-rwpool_test.go +++ b/cmd/fs-v1-rwpool_test.go @@ -46,10 +46,10 @@ func TestRWPoolLongPath(t *testing.T) { // Tests all RWPool methods. func TestRWPool(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index 07c385ec2..784a623cf 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -346,7 +346,7 @@ func (fs *FSObjects) crawlBucket(ctx context.Context, bucket string, cache dataU } oi := fsMeta.ToObjectInfo(bucket, object, fi) - sz := item.applyActions(ctx, fs, actionMeta{oi: oi, meta: fsMeta.Meta}) + sz := item.applyActions(ctx, fs, actionMeta{oi: oi}) if sz >= 0 { return sz, nil } @@ -382,10 +382,9 @@ func (fs *FSObjects) statBucketDir(ctx context.Context, bucket string) (os.FileI return st, nil } -// MakeBucketWithLocation - create a new bucket, returns if it -// already exists. -func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { - if lockEnabled { +// MakeBucketWithLocation - create a new bucket, returns if it already exists. +func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error { + if opts.LockEnabled || opts.VersioningEnabled { return NotImplemented{} } @@ -581,6 +580,14 @@ func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string, forceDelet // if source object and destination object are same we only // update metadata. func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) { + if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID { + return oi, VersionNotFound{ + Bucket: srcBucket, + Object: srcObject, + VersionID: srcOpts.VersionID, + } + } + cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject)) defer ObjectPathUpdated(path.Join(dstBucket, dstObject)) @@ -649,6 +656,13 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu // GetObjectNInfo - returns object info and a reader for object // content. func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return nil, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } if err = checkGetObjArgs(ctx, bucket, object); err != nil { return nil, err } @@ -746,6 +760,14 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, // startOffset indicates the starting read location of the object. // length indicates the total length of the object. 
func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } + if err = checkGetObjArgs(ctx, bucket, object); err != nil { return err } @@ -948,6 +970,13 @@ func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object s // GetObjectInfo - reads object metadata and replies back ObjectInfo. func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return oi, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } atomic.AddInt64(&fs.activeIOCount, 1) defer func() { @@ -998,6 +1027,10 @@ func (fs *FSObjects) parentDirIsObject(ctx context.Context, bucket, parent strin // Additionally writes `fs.json` which carries the necessary metadata // for future object operations. func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, retErr error) { + if opts.Versioned { + return objInfo, NotImplemented{} + } + if err := checkPutObjectArgs(ctx, bucket, object, fs, r.Size()); err != nil { return ObjectInfo{}, err } @@ -1146,26 +1179,45 @@ func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string // DeleteObjects - deletes an object from a bucket, this operation is destructive // and there are no rollbacks supported. -func (fs *FSObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (fs *FSObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { errs := make([]error, len(objects)) + dobjects := make([]DeletedObject, len(objects)) for idx, object := range objects { - errs[idx] = fs.DeleteObject(ctx, bucket, object) + if object.VersionID != "" { + errs[idx] = NotImplemented{} + continue + } + _, errs[idx] = fs.DeleteObject(ctx, bucket, object.ObjectName, opts) + if errs[idx] == nil || isErrObjectNotFound(errs[idx]) { + dobjects[idx] = DeletedObject{ + ObjectName: object.ObjectName, + } + errs[idx] = nil + } } - return errs, nil + return dobjects, errs } // DeleteObject - deletes an object from a bucket, this operation is destructive // and there are no rollbacks supported. -func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) error { +func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return objInfo, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } + // Acquire a write lock before deleting the object. 
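The same guard now fronts every versioned entry point in FS mode: the backend keeps no version history, so anything other than the implicit ("") or explicit null version is rejected up front with VersionNotFound, and versioned writes with NotImplemented. A distilled sketch of the check these hunks repeat inline; the "null" value is assumed here per S3's null-version semantics:

package main

import "fmt"

const nullVersionID = "null"

// VersionNotFound stands in for MinIO's typed API error.
type VersionNotFound struct{ Bucket, Object, VersionID string }

func (v VersionNotFound) Error() string {
	return fmt.Sprintf("%s/%s: version %s not found", v.Bucket, v.Object, v.VersionID)
}

// checkNullVersion is the guard the FS handlers inline: only the implicit
// ("") and explicit null version are valid on an unversioned backend.
func checkNullVersion(bucket, object, versionID string) error {
	if versionID != "" && versionID != nullVersionID {
		return VersionNotFound{Bucket: bucket, Object: object, VersionID: versionID}
	}
	return nil
}

func main() {
	fmt.Println(checkNullVersion("b", "o", ""))     // <nil>
	fmt.Println(checkNullVersion("b", "o", "null")) // <nil>
	fmt.Println(checkNullVersion("b", "o", "abc4")) // version abc4 not found
}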
lk := fs.NewNSLock(ctx, bucket, object) - if err := lk.GetLock(globalOperationTimeout); err != nil { - return err + if err = lk.GetLock(globalOperationTimeout); err != nil { + return objInfo, err } defer lk.Unlock() - if err := checkDelObjArgs(ctx, bucket, object); err != nil { - return err + if err = checkDelObjArgs(ctx, bucket, object); err != nil { + return objInfo, err } defer ObjectPathUpdated(path.Join(bucket, object)) @@ -1175,8 +1227,8 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) er atomic.AddInt64(&fs.activeIOCount, -1) }() - if _, err := fs.statBucketDir(ctx, bucket); err != nil { - return toObjectErr(err, bucket) + if _, err = fs.statBucketDir(ctx, bucket); err != nil { + return objInfo, toObjectErr(err, bucket) } minioMetaBucketDir := pathJoin(fs.fsPath, minioMetaBucket) @@ -1189,23 +1241,23 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) er } if lerr != nil && lerr != errFileNotFound { logger.LogIf(ctx, lerr) - return toObjectErr(lerr, bucket, object) + return objInfo, toObjectErr(lerr, bucket, object) } } // Delete the object. - if err := fsDeleteFile(ctx, pathJoin(fs.fsPath, bucket), pathJoin(fs.fsPath, bucket, object)); err != nil { - return toObjectErr(err, bucket, object) + if err = fsDeleteFile(ctx, pathJoin(fs.fsPath, bucket), pathJoin(fs.fsPath, bucket, object)); err != nil { + return objInfo, toObjectErr(err, bucket, object) } if bucket != minioMetaBucket { // Delete the metadata object. - err := fsDeleteFile(ctx, minioMetaBucketDir, fsMetaPath) + err = fsDeleteFile(ctx, minioMetaBucketDir, fsMetaPath) if err != nil && err != errFileNotFound { - return toObjectErr(err, bucket, object) + return objInfo, toObjectErr(err, bucket, object) } } - return nil + return ObjectInfo{Bucket: bucket, Name: object}, nil } // Returns function "listDir" of the type listDirFunc. @@ -1313,6 +1365,11 @@ func (fs *FSObjects) getObjectETag(ctx context.Context, bucket, entry string, lo return extractETag(fsMeta.Meta), nil } +// ListObjectVersions not implemented for FS mode. +func (fs *FSObjects) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (loi ListObjectVersionsInfo, e error) { + return loi, NotImplemented{} +} + // ListObjects - list all objects at prefix upto maxKeys., optionally delimited by '/'. Maintains the list pool // state for future re-entrant list requests. 
func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { @@ -1327,7 +1384,14 @@ func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, de } // GetObjectTags - get object tags from an existing object -func (fs *FSObjects) GetObjectTags(ctx context.Context, bucket, object string) (*tags.Tags, error) { +func (fs *FSObjects) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return nil, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } oi, err := fs.GetObjectInfo(ctx, bucket, object, ObjectOptions{}) if err != nil { return nil, err @@ -1337,7 +1401,15 @@ func (fs *FSObjects) GetObjectTags(ctx context.Context, bucket, object string) ( } // PutObjectTags - replace or add tags to an existing object -func (fs *FSObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string) error { +func (fs *FSObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } + fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile) fsMeta := fsMetaV1{} wlk, err := fs.rwPool.Write(fsMetaPath) @@ -1369,30 +1441,30 @@ func (fs *FSObjects) PutObjectTags(ctx context.Context, bucket, object string, t } // DeleteObjectTags - delete object tags from an existing object -func (fs *FSObjects) DeleteObjectTags(ctx context.Context, bucket, object string) error { - return fs.PutObjectTags(ctx, bucket, object, "") +func (fs *FSObjects) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error { + return fs.PutObjectTags(ctx, bucket, object, "", opts) } -// ReloadFormat - no-op for fs, Valid only for XL. +// ReloadFormat - no-op for fs, Valid only for Erasure. func (fs *FSObjects) ReloadFormat(ctx context.Context, dryRun bool) error { logger.LogIf(ctx, NotImplemented{}) return NotImplemented{} } -// HealFormat - no-op for fs, Valid only for XL. +// HealFormat - no-op for fs, Valid only for Erasure. func (fs *FSObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { logger.LogIf(ctx, NotImplemented{}) return madmin.HealResultItem{}, NotImplemented{} } -// HealObject - no-op for fs. Valid only for XL. -func (fs *FSObjects) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) ( +// HealObject - no-op for fs. Valid only for Erasure. +func (fs *FSObjects) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) ( res madmin.HealResultItem, err error) { logger.LogIf(ctx, NotImplemented{}) return res, NotImplemented{} } -// HealBucket - no-op for fs, Valid only for XL. +// HealBucket - no-op for fs, Valid only for Erasure. func (fs *FSObjects) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, error) { logger.LogIf(ctx, NotImplemented{}) @@ -1408,13 +1480,13 @@ func (fs *FSObjects) Walk(ctx context.Context, bucket, prefix string, results ch return fsWalk(ctx, fs, bucket, prefix, fs.listDirFactory(), results, fs.getObjectInfo, fs.getObjectInfo) } -// HealObjects - no-op for fs. Valid only for XL. 
-func (fs *FSObjects) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn healObjectFn) (e error) { +// HealObjects - no-op for fs. Valid only for Erasure. +func (fs *FSObjects) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) (e error) { logger.LogIf(ctx, NotImplemented{}) return NotImplemented{} } -// ListBucketsHeal - list all buckets to be healed. Valid only for XL +// ListBucketsHeal - list all buckets to be healed. Valid only for Erasure func (fs *FSObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { logger.LogIf(ctx, NotImplemented{}) return []BucketInfo{}, NotImplemented{} diff --git a/cmd/fs-v1_test.go b/cmd/fs-v1_test.go index 62da095fc..ae55fb7ac 100644 --- a/cmd/fs-v1_test.go +++ b/cmd/fs-v1_test.go @@ -36,7 +36,7 @@ func TestFSParentDirIsObject(t *testing.T) { bucketName := "testbucket" objectName := "object" - if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal(err) } objectContent := "12345" @@ -124,7 +124,7 @@ func TestFSShutdown(t *testing.T) { fs := obj.(*FSObjects) objectContent := "12345" - obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) + obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), ObjectOptions{}) return fs, disk } @@ -138,7 +138,7 @@ func TestFSShutdown(t *testing.T) { // Test Shutdown with faulty disk fs, disk = prepareTest() - fs.DeleteObject(GlobalContext, bucketName, objectName) + fs.DeleteObject(GlobalContext, bucketName, objectName, ObjectOptions{}) os.RemoveAll(disk) if err := fs.Shutdown(GlobalContext); err != nil { t.Fatal("Got unexpected fs shutdown error: ", err) @@ -155,12 +155,12 @@ func TestFSGetBucketInfo(t *testing.T) { fs := obj.(*FSObjects) bucketName := "bucket" - err := obj.MakeBucketWithLocation(GlobalContext, "a", "", false) + err := obj.MakeBucketWithLocation(GlobalContext, "a", BucketOptions{}) if !isSameType(err, BucketNameInvalid{}) { t.Fatal("BucketNameInvalid error not returned") } - err = obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) + err = obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) if err != nil { t.Fatal(err) } @@ -199,7 +199,7 @@ func TestFSPutObject(t *testing.T) { bucketName := "bucket" objectName := "1/2/3/4/object" - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal(err) } @@ -267,33 +267,33 @@ func TestFSDeleteObject(t *testing.T) { bucketName := "bucket" objectName := "object" - obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) + obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}) // Test with invalid bucket name - if err := fs.DeleteObject(GlobalContext, "fo", objectName); !isSameType(err, BucketNameInvalid{}) { + if _, err := fs.DeleteObject(GlobalContext, "fo", objectName, ObjectOptions{}); !isSameType(err, BucketNameInvalid{}) { t.Fatal("Unexpected error: ", err) } // Test with bucket does not exist - if err := 
fs.DeleteObject(GlobalContext, "foobucket", "fooobject"); !isSameType(err, BucketNotFound{}) { + if _, err := fs.DeleteObject(GlobalContext, "foobucket", "fooobject", ObjectOptions{}); !isSameType(err, BucketNotFound{}) { t.Fatal("Unexpected error: ", err) } // Test with invalid object name - if err := fs.DeleteObject(GlobalContext, bucketName, "\\"); !(isSameType(err, ObjectNotFound{}) || isSameType(err, ObjectNameInvalid{})) { + if _, err := fs.DeleteObject(GlobalContext, bucketName, "\\", ObjectOptions{}); !(isSameType(err, ObjectNotFound{}) || isSameType(err, ObjectNameInvalid{})) { t.Fatal("Unexpected error: ", err) } // Test with object does not exist. - if err := fs.DeleteObject(GlobalContext, bucketName, "foooobject"); !isSameType(err, ObjectNotFound{}) { + if _, err := fs.DeleteObject(GlobalContext, bucketName, "foooobject", ObjectOptions{}); !isSameType(err, ObjectNotFound{}) { t.Fatal("Unexpected error: ", err) } // Test with valid condition - if err := fs.DeleteObject(GlobalContext, bucketName, objectName); err != nil { + if _, err := fs.DeleteObject(GlobalContext, bucketName, objectName, ObjectOptions{}); err != nil { t.Fatal("Unexpected error: ", err) } // Delete object should err disk not found. os.RemoveAll(disk) - if err := fs.DeleteObject(GlobalContext, bucketName, objectName); err != nil { + if _, err := fs.DeleteObject(GlobalContext, bucketName, objectName, ObjectOptions{}); err != nil { if !isSameType(err, BucketNotFound{}) { t.Fatal("Unexpected error: ", err) } @@ -311,7 +311,7 @@ func TestFSDeleteBucket(t *testing.T) { fs := obj.(*FSObjects) bucketName := "bucket" - err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) + err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) if err != nil { t.Fatal("Unexpected error: ", err) } @@ -330,7 +330,7 @@ func TestFSDeleteBucket(t *testing.T) { t.Fatal("Unexpected error: ", err) } - obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) + obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) // Delete bucket should get error disk not found. os.RemoveAll(disk) @@ -351,7 +351,7 @@ func TestFSListBuckets(t *testing.T) { fs := obj.(*FSObjects) bucketName := "bucket" - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Unexpected error: ", err) } @@ -389,7 +389,7 @@ func TestFSHealObject(t *testing.T) { defer os.RemoveAll(disk) obj := initFSObjects(disk, t) - _, err := obj.HealObject(GlobalContext, "bucket", "object", madmin.HealOpts{}) + _, err := obj.HealObject(GlobalContext, "bucket", "object", "", madmin.HealOpts{}) if err == nil || !isSameType(err, NotImplemented{}) { t.Fatalf("Heal Object should return NotImplemented error ") } diff --git a/cmd/gateway-common.go b/cmd/gateway-common.go index 0c31952f3..fbbc1a89b 100644 --- a/cmd/gateway-common.go +++ b/cmd/gateway-common.go @@ -55,42 +55,6 @@ var ( IsStringEqual = isStringEqual ) -// StatInfo - alias for statInfo -type StatInfo struct { - statInfo -} - -// AnonErrToObjectErr - converts standard http codes into meaningful object layer errors. 
-func AnonErrToObjectErr(statusCode int, params ...string) error {
-	bucket := ""
-	object := ""
-	if len(params) >= 1 {
-		bucket = params[0]
-	}
-	if len(params) == 2 {
-		object = params[1]
-	}
-
-	switch statusCode {
-	case http.StatusNotFound:
-		if object != "" {
-			return ObjectNotFound{bucket, object}
-		}
-		return BucketNotFound{Bucket: bucket}
-	case http.StatusBadRequest:
-		if object != "" {
-			return ObjectNameInvalid{bucket, object}
-		}
-		return BucketNameInvalid{Bucket: bucket}
-	case http.StatusForbidden:
-		fallthrough
-	case http.StatusUnauthorized:
-		return AllAccessDisabled{bucket, object}
-	}
-
-	return errUnexpected
-}
-
 // FromMinioClientMetadata converts minio metadata to map[string]string
 func FromMinioClientMetadata(metadata map[string][]string) map[string]string {
 	mm := map[string]string{}
diff --git a/cmd/gateway-unsupported.go b/cmd/gateway-unsupported.go
index 927a83c89..9823fec26 100644
--- a/cmd/gateway-unsupported.go
+++ b/cmd/gateway-unsupported.go
@@ -26,6 +26,7 @@ import (
 	bucketsse "github.com/minio/minio/pkg/bucket/encryption"
 	"github.com/minio/minio/pkg/bucket/lifecycle"
 	"github.com/minio/minio/pkg/bucket/policy"
+	"github.com/minio/minio/pkg/bucket/versioning"
 	"github.com/minio/minio/pkg/madmin"
 )

@@ -88,6 +89,12 @@ func (a GatewayUnsupported) GetMultipartInfo(ctx context.Context, bucket string,
 	return MultipartInfo{}, NotImplemented{}
 }

+// ListObjectVersions lists all versions of objects in the specified bucket
+func (a GatewayUnsupported) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) {
+	logger.LogIf(ctx, NotImplemented{})
+	return ListObjectVersionsInfo{}, NotImplemented{}
+}
+
 // ListObjectParts returns all object parts for specified object in specified bucket
 func (a GatewayUnsupported) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (lpi ListPartsInfo, err error) {
 	logger.LogIf(ctx, NotImplemented{})
@@ -121,33 +128,45 @@ func (a GatewayUnsupported) DeleteBucketPolicy(ctx context.Context, bucket strin
 	return NotImplemented{}
 }

-// SetBucketLifecycle sets lifecycle on bucket
+// SetBucketVersioning enables versioning on a bucket.
+func (a GatewayUnsupported) SetBucketVersioning(ctx context.Context, bucket string, v *versioning.Versioning) error {
+	logger.LogIf(ctx, NotImplemented{})
+	return NotImplemented{}
+}
+
+// GetBucketVersioning retrieves versioning configuration of a bucket.
+func (a GatewayUnsupported) GetBucketVersioning(ctx context.Context, bucket string) (*versioning.Versioning, error) {
+	logger.LogIf(ctx, NotImplemented{})
+	return nil, NotImplemented{}
+}
+
+// SetBucketLifecycle enables lifecycle policies on a bucket.
 func (a GatewayUnsupported) SetBucketLifecycle(ctx context.Context, bucket string, lifecycle *lifecycle.Lifecycle) error {
 	logger.LogIf(ctx, NotImplemented{})
 	return NotImplemented{}
 }

-// GetBucketLifecycle will get lifecycle on bucket
+// GetBucketLifecycle retrieves lifecycle configuration of a bucket.
func (a GatewayUnsupported) GetBucketLifecycle(ctx context.Context, bucket string) (*lifecycle.Lifecycle, error) { return nil, NotImplemented{} } -// DeleteBucketLifecycle deletes all lifecycle on bucket +// DeleteBucketLifecycle deletes all lifecycle policies on a bucket func (a GatewayUnsupported) DeleteBucketLifecycle(ctx context.Context, bucket string) error { return NotImplemented{} } -// GetBucketSSEConfig returns bucket encryption config on given bucket +// GetBucketSSEConfig returns bucket encryption config on a bucket func (a GatewayUnsupported) GetBucketSSEConfig(ctx context.Context, bucket string) (*bucketsse.BucketSSEConfig, error) { return nil, NotImplemented{} } -// SetBucketSSEConfig sets bucket encryption config on given bucket +// SetBucketSSEConfig sets bucket encryption config on a bucket func (a GatewayUnsupported) SetBucketSSEConfig(ctx context.Context, bucket string, config *bucketsse.BucketSSEConfig) error { return NotImplemented{} } -// DeleteBucketSSEConfig deletes bucket encryption config on given bucket +// DeleteBucketSSEConfig deletes bucket encryption config on a bucket func (a GatewayUnsupported) DeleteBucketSSEConfig(ctx context.Context, bucket string) error { return NotImplemented{} } @@ -173,7 +192,7 @@ func (a GatewayUnsupported) ListBucketsHeal(ctx context.Context) (buckets []Buck } // HealObject - Not implemented stub -func (a GatewayUnsupported) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (h madmin.HealResultItem, e error) { +func (a GatewayUnsupported) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (h madmin.HealResultItem, e error) { return h, NotImplemented{} } @@ -188,7 +207,7 @@ func (a GatewayUnsupported) Walk(ctx context.Context, bucket, prefix string, res } // HealObjects - Not implemented stub -func (a GatewayUnsupported) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn healObjectFn) (e error) { +func (a GatewayUnsupported) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) (e error) { return NotImplemented{} } @@ -205,19 +224,19 @@ func (a GatewayUnsupported) GetMetrics(ctx context.Context) (*Metrics, error) { } // PutObjectTags - not implemented. -func (a GatewayUnsupported) PutObjectTags(ctx context.Context, bucket, object string, tags string) error { +func (a GatewayUnsupported) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error { logger.LogIf(ctx, NotImplemented{}) return NotImplemented{} } // GetObjectTags - not implemented. -func (a GatewayUnsupported) GetObjectTags(ctx context.Context, bucket, object string) (*tags.Tags, error) { +func (a GatewayUnsupported) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) { logger.LogIf(ctx, NotImplemented{}) return nil, NotImplemented{} } // DeleteObjectTags - not implemented. 
-func (a GatewayUnsupported) DeleteObjectTags(ctx context.Context, bucket, object string) error { +func (a GatewayUnsupported) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error { logger.LogIf(ctx, NotImplemented{}) return NotImplemented{} } diff --git a/cmd/gateway/azure/gateway-azure.go b/cmd/gateway/azure/gateway-azure.go index b2ebf49c1..d5534a753 100644 --- a/cmd/gateway/azure/gateway-azure.go +++ b/cmd/gateway/azure/gateway-azure.go @@ -553,8 +553,8 @@ func (a *azureObjects) StorageInfo(ctx context.Context, _ bool) (si minio.Storag } // MakeBucketWithLocation - Create a new container on azure backend. -func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { - if lockEnabled { +func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error { + if opts.LockEnabled || opts.VersioningEnabled { return minio.NotImplemented{} } @@ -966,21 +966,30 @@ func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, des // DeleteObject - Deletes a blob on azure container, uses Azure // equivalent `BlobURL.Delete`. -func (a *azureObjects) DeleteObject(ctx context.Context, bucket, object string) error { +func (a *azureObjects) DeleteObject(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { blob := a.client.NewContainerURL(bucket).NewBlobURL(object) _, err := blob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) if err != nil { - return azureToObjectError(err, bucket, object) + return minio.ObjectInfo{}, azureToObjectError(err, bucket, object) } - return nil + return minio.ObjectInfo{ + Bucket: bucket, + Name: object, + }, nil } -func (a *azureObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (a *azureObjects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) { errs := make([]error, len(objects)) + dobjects := make([]minio.DeletedObject, len(objects)) for idx, object := range objects { - errs[idx] = a.DeleteObject(ctx, bucket, object) + _, errs[idx] = a.DeleteObject(ctx, bucket, object.ObjectName, opts) + if errs[idx] == nil { + dobjects[idx] = minio.DeletedObject{ + ObjectName: object.ObjectName, + } + } } - return errs, nil + return dobjects, errs } // ListMultipartUploads - It's decided not to support List Multipart Uploads, hence returning empty result. 
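Note: the gateway DeleteObjects signature above now returns two parallel slices, one minio.DeletedObject and one error per requested object, instead of the old ([]error, error) pair. A minimal sketch of how a caller might consume the new contract; the stand-in types and the reportDeletes helper below are illustrative assumptions, not part of this patch:

package main

import "fmt"

// Stand-ins for the patched gateway types (illustrative only).
type ObjectToDelete struct{ ObjectName string }
type DeletedObject struct{ ObjectName string }

// reportDeletes walks the parallel result slices: errs[i] and dobjects[i]
// both describe the outcome for objects[i].
func reportDeletes(objects []ObjectToDelete, dobjects []DeletedObject, errs []error) {
	for i := range objects {
		if errs[i] != nil {
			// A per-object failure does not abort the rest of the batch.
			fmt.Printf("delete %s failed: %v\n", objects[i].ObjectName, errs[i])
			continue
		}
		fmt.Printf("deleted %s\n", dobjects[i].ObjectName)
	}
}

func main() {
	objs := []ObjectToDelete{{"a.txt"}, {"b.txt"}}
	reportDeletes(objs,
		[]DeletedObject{{"a.txt"}, {}},
		[]error{nil, fmt.Errorf("access denied")})
}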
diff --git a/cmd/gateway/azure/gateway-azure_test.go b/cmd/gateway/azure/gateway-azure_test.go index 449aa7d63..3fd55e2df 100644 --- a/cmd/gateway/azure/gateway-azure_test.go +++ b/cmd/gateway/azure/gateway-azure_test.go @@ -243,43 +243,6 @@ func TestAzureCodesToObjectError(t *testing.T) { } } -func TestAnonErrToObjectErr(t *testing.T) { - testCases := []struct { - name string - statusCode int - params []string - wantErr error - }{ - {"ObjectNotFound", - http.StatusNotFound, - []string{"testBucket", "testObject"}, - minio.ObjectNotFound{Bucket: "testBucket", Object: "testObject"}, - }, - {"BucketNotFound", - http.StatusNotFound, - []string{"testBucket", ""}, - minio.BucketNotFound{Bucket: "testBucket"}, - }, - {"ObjectNameInvalid", - http.StatusBadRequest, - []string{"testBucket", "testObject"}, - minio.ObjectNameInvalid{Bucket: "testBucket", Object: "testObject"}, - }, - {"BucketNameInvalid", - http.StatusBadRequest, - []string{"testBucket", ""}, - minio.BucketNameInvalid{Bucket: "testBucket"}, - }, - } - for _, test := range testCases { - t.Run(test.name, func(t *testing.T) { - if err := minio.AnonErrToObjectErr(test.statusCode, test.params...); !reflect.DeepEqual(err, test.wantErr) { - t.Errorf("anonErrToObjectErr() error = %v, wantErr %v", err, test.wantErr) - } - }) - } -} - func TestCheckAzureUploadID(t *testing.T) { invalidUploadIDs := []string{ "123456789abcdefg", diff --git a/cmd/gateway/gcs/gateway-gcs.go b/cmd/gateway/gcs/gateway-gcs.go index 00df5c1df..33437b547 100644 --- a/cmd/gateway/gcs/gateway-gcs.go +++ b/cmd/gateway/gcs/gateway-gcs.go @@ -421,14 +421,15 @@ func (l *gcsGateway) StorageInfo(ctx context.Context, _ bool) (si minio.StorageI } // MakeBucketWithLocation - Create a new container on GCS backend. -func (l *gcsGateway) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { - if lockEnabled { +func (l *gcsGateway) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error { + if opts.LockEnabled || opts.VersioningEnabled { return minio.NotImplemented{} } bkt := l.client.Bucket(bucket) // we'll default to the us multi-region in case of us-east-1 + location := opts.Location if location == "us-east-1" { location = "us" } @@ -958,22 +959,31 @@ func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject } // DeleteObject - Deletes a blob in bucket -func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string) error { +func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { err := l.client.Bucket(bucket).Object(object).Delete(ctx) if err != nil { logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket, object) + return minio.ObjectInfo{}, gcsToObjectError(err, bucket, object) } - return nil + return minio.ObjectInfo{ + Bucket: bucket, + Name: object, + }, nil } -func (l *gcsGateway) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (l *gcsGateway) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) { errs := make([]error, len(objects)) + dobjects := make([]minio.DeletedObject, len(objects)) for idx, object := range objects { - errs[idx] = l.DeleteObject(ctx, bucket, object) + _, errs[idx] = l.DeleteObject(ctx, bucket, object.ObjectName, opts) + if errs[idx] == nil { + dobjects[idx] = minio.DeletedObject{ + ObjectName: object.ObjectName, + } + 
} } - return errs, nil + return dobjects, errs } // NewMultipartUpload - upload object in multiple parts diff --git a/cmd/gateway/hdfs/gateway-hdfs.go b/cmd/gateway/hdfs/gateway-hdfs.go index d7c74734d..eb2fe40ad 100644 --- a/cmd/gateway/hdfs/gateway-hdfs.go +++ b/cmd/gateway/hdfs/gateway-hdfs.go @@ -75,7 +75,7 @@ EXAMPLES: {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4" {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*,*.png" - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90 + {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90 {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3 {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75 {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85 @@ -283,8 +283,8 @@ func (n *hdfsObjects) DeleteBucket(ctx context.Context, bucket string, forceDele return hdfsToObjectErr(ctx, n.clnt.Remove(minio.PathJoin(hdfsSeparator, bucket)), bucket) } -func (n *hdfsObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { - if lockEnabled { +func (n *hdfsObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error { + if opts.LockEnabled || opts.VersioningEnabled { return minio.NotImplemented{} } @@ -439,16 +439,26 @@ func (n *hdfsObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continu }, nil } -func (n *hdfsObjects) DeleteObject(ctx context.Context, bucket, object string) error { - return hdfsToObjectErr(ctx, n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), minio.PathJoin(hdfsSeparator, bucket, object)), bucket, object) +func (n *hdfsObjects) DeleteObject(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { + err := hdfsToObjectErr(ctx, n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), minio.PathJoin(hdfsSeparator, bucket, object)), bucket, object) + return minio.ObjectInfo{ + Bucket: bucket, + Name: object, + }, err } -func (n *hdfsObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (n *hdfsObjects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) { errs := make([]error, len(objects)) + dobjects := make([]minio.DeletedObject, len(objects)) for idx, object := range objects { - errs[idx] = n.DeleteObject(ctx, bucket, object) + _, errs[idx] = n.DeleteObject(ctx, bucket, object.ObjectName, opts) + if errs[idx] == nil { + dobjects[idx] = minio.DeletedObject{ + ObjectName: object.ObjectName, + } + } } - return errs, nil + return dobjects, errs } func (n *hdfsObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) { diff --git a/cmd/gateway/s3/gateway-s3-sse.go b/cmd/gateway/s3/gateway-s3-sse.go index 7cb172da1..a89af2a0e 100644 --- a/cmd/gateway/s3/gateway-s3-sse.go +++ b/cmd/gateway/s3/gateway-s3-sse.go @@ -258,8 +258,8 @@ func getPartMetaPath(object, uploadID string, partID int) string { } // deletes the custom dare metadata file saved at the backend -func (l *s3EncObjects) deleteGWMetadata(ctx 
context.Context, bucket, metaFileName string) error { - return l.s3Objects.DeleteObject(ctx, bucket, metaFileName) +func (l *s3EncObjects) deleteGWMetadata(ctx context.Context, bucket, metaFileName string) (minio.ObjectInfo, error) { + return l.s3Objects.DeleteObject(ctx, bucket, metaFileName, minio.ObjectOptions{}) } func (l *s3EncObjects) getObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error { @@ -381,14 +381,14 @@ func (l *s3EncObjects) CopyObject(ctx context.Context, srcBucket string, srcObje // DeleteObject deletes a blob in bucket // For custom gateway encrypted large objects, cleans up encrypted content and metadata files // from the backend. -func (l *s3EncObjects) DeleteObject(ctx context.Context, bucket string, object string) error { - +func (l *s3EncObjects) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { // Get dare meta json if _, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(object)); err != nil { - return l.s3Objects.DeleteObject(ctx, bucket, object) + logger.LogIf(minio.GlobalContext, err) + return l.s3Objects.DeleteObject(ctx, bucket, object, opts) } // delete encrypted object - l.s3Objects.DeleteObject(ctx, bucket, getGWContentPath(object)) + l.s3Objects.DeleteObject(ctx, bucket, getGWContentPath(object), opts) return l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object)) } @@ -446,7 +446,7 @@ func (l *s3EncObjects) PutObject(ctx context.Context, bucket string, object stri } if opts.ServerSideEncryption == nil { defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object)) - defer l.DeleteObject(ctx, bucket, getGWContentPath(object)) + defer l.DeleteObject(ctx, bucket, getGWContentPath(object), opts) return l.s3Objects.PutObject(ctx, bucket, object, data, minio.ObjectOptions{UserDefined: opts.UserDefined}) } @@ -470,7 +470,7 @@ func (l *s3EncObjects) PutObject(ctx context.Context, bucket string, object stri } objInfo = gwMeta.ToObjectInfo(bucket, object) // delete any unencrypted content of the same name created previously - l.s3Objects.DeleteObject(ctx, bucket, object) + l.s3Objects.DeleteObject(ctx, bucket, object, opts) return objInfo, nil } @@ -586,7 +586,7 @@ func (l *s3EncObjects) AbortMultipartUpload(ctx context.Context, bucket string, return minio.InvalidUploadID{UploadID: uploadID} } for _, obj := range loi.Objects { - if err := l.s3Objects.DeleteObject(ctx, bucket, obj.Name); err != nil { + if _, err := l.s3Objects.DeleteObject(ctx, bucket, obj.Name, minio.ObjectOptions{}); err != nil { return minio.ErrorRespToObjectError(err) } startAfter = obj.Name @@ -608,7 +608,7 @@ func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje if e == nil { // delete any encrypted version of object that might exist defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object)) - defer l.DeleteObject(ctx, bucket, getGWContentPath(object)) + defer l.DeleteObject(ctx, bucket, getGWContentPath(object), opts) } return oi, e } @@ -640,7 +640,7 @@ func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje } //delete any unencrypted version of object that might be on the backend - defer l.s3Objects.DeleteObject(ctx, bucket, object) + defer l.s3Objects.DeleteObject(ctx, bucket, object, opts) // Save the final object size and modtime. 
gwMeta.Stat.Size = objectSize @@ -665,7 +665,7 @@ func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje break } startAfter = obj.Name - l.s3Objects.DeleteObject(ctx, bucket, obj.Name) + l.s3Objects.DeleteObject(ctx, bucket, obj.Name, opts) } continuationToken = loi.NextContinuationToken if !loi.IsTruncated || done { @@ -716,7 +716,7 @@ func (l *s3EncObjects) cleanupStaleEncMultipartUploadsOnGW(ctx context.Context, for _, b := range buckets { expParts := l.getStalePartsForBucket(ctx, b.Name, expiry) for k := range expParts { - l.s3Objects.DeleteObject(ctx, b.Name, k) + l.s3Objects.DeleteObject(ctx, b.Name, k, minio.ObjectOptions{}) } } } @@ -783,7 +783,7 @@ func (l *s3EncObjects) DeleteBucket(ctx context.Context, bucket string, forceDel } } for k := range expParts { - l.s3Objects.DeleteObject(ctx, bucket, k) + l.s3Objects.DeleteObject(ctx, bucket, k, minio.ObjectOptions{}) } err := l.Client.RemoveBucket(bucket) if err != nil { diff --git a/cmd/gateway/s3/gateway-s3.go b/cmd/gateway/s3/gateway-s3.go index 61ab1ee2e..4f3d7887d 100644 --- a/cmd/gateway/s3/gateway-s3.go +++ b/cmd/gateway/s3/gateway-s3.go @@ -287,8 +287,8 @@ func (l *s3Objects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageIn } // MakeBucket creates a new container on S3 backend. -func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { - if lockEnabled { +func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error { + if opts.LockEnabled || opts.VersioningEnabled { return minio.NotImplemented{} } @@ -302,7 +302,7 @@ func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location if s3utils.CheckValidBucketName(bucket) != nil { return minio.BucketNameInvalid{Bucket: bucket} } - err := l.Client.MakeBucket(bucket, location) + err := l.Client.MakeBucket(bucket, opts.Location) if err != nil { return minio.ErrorRespToObjectError(err, bucket) } @@ -518,21 +518,30 @@ func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject } // DeleteObject deletes a blob in bucket -func (l *s3Objects) DeleteObject(ctx context.Context, bucket string, object string) error { +func (l *s3Objects) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { err := l.Client.RemoveObject(bucket, object) if err != nil { - return minio.ErrorRespToObjectError(err, bucket, object) + return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object) } - return nil + return minio.ObjectInfo{ + Bucket: bucket, + Name: object, + }, nil } -func (l *s3Objects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (l *s3Objects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) { errs := make([]error, len(objects)) + dobjects := make([]minio.DeletedObject, len(objects)) for idx, object := range objects { - errs[idx] = l.DeleteObject(ctx, bucket, object) + _, errs[idx] = l.DeleteObject(ctx, bucket, object.ObjectName, opts) + if errs[idx] == nil { + dobjects[idx] = minio.DeletedObject{ + ObjectName: object.ObjectName, + } + } } - return errs, nil + return dobjects, errs } // ListMultipartUploads lists all multipart uploads. 
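Note: every gateway backend in this patch (azure, gcs, hdfs, s3) now takes minio.BucketOptions in MakeBucketWithLocation and refuses creation when object locking or versioning is requested, since neither can be honored by a gateway. A minimal sketch of that shared guard, using a local stand-in for the BucketOptions shape assumed here:

package main

import (
	"errors"
	"fmt"
)

// Stand-in for minio.BucketOptions as introduced by this patch (assumed shape).
type BucketOptions struct {
	Location          string
	LockEnabled       bool
	VersioningEnabled bool
}

var errNotImplemented = errors.New("Not Implemented")

// makeBucket mirrors the guard each gateway applies before creating a bucket:
// lock and versioning semantics cannot be provided, so both are rejected up front.
func makeBucket(bucket string, opts BucketOptions) error {
	if opts.LockEnabled || opts.VersioningEnabled {
		return errNotImplemented
	}
	// ... create the bucket at opts.Location on the backend ...
	return nil
}

func main() {
	fmt.Println(makeBucket("photos", BucketOptions{Location: "us-east-1", LockEnabled: true}))
}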
@@ -700,11 +709,10 @@ func (l *s3Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error } // GetObjectTags gets the tags set on the object -func (l *s3Objects) GetObjectTags(ctx context.Context, bucket string, object string) (*tags.Tags, error) { +func (l *s3Objects) GetObjectTags(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (*tags.Tags, error) { var err error var tagObj *tags.Tags var tagStr string - var opts minio.ObjectOptions if _, err = l.GetObjectInfo(ctx, bucket, object, opts); err != nil { return nil, minio.ErrorRespToObjectError(err, bucket, object) @@ -721,7 +729,7 @@ func (l *s3Objects) GetObjectTags(ctx context.Context, bucket string, object str } // PutObjectTags attaches the tags to the object -func (l *s3Objects) PutObjectTags(ctx context.Context, bucket, object string, tagStr string) error { +func (l *s3Objects) PutObjectTags(ctx context.Context, bucket, object string, tagStr string, opts minio.ObjectOptions) error { tagObj, err := tags.Parse(tagStr, true) if err != nil { return minio.ErrorRespToObjectError(err, bucket, object) @@ -733,7 +741,7 @@ func (l *s3Objects) PutObjectTags(ctx context.Context, bucket, object string, ta } // DeleteObjectTags removes the tags attached to the object -func (l *s3Objects) DeleteObjectTags(ctx context.Context, bucket, object string) error { +func (l *s3Objects) DeleteObjectTags(ctx context.Context, bucket, object string, opts minio.ObjectOptions) error { if err := l.Client.RemoveObjectTagging(bucket, object); err != nil { return minio.ErrorRespToObjectError(err, bucket, object) } diff --git a/cmd/generic-handlers.go b/cmd/generic-handlers.go index 0b07ac62f..85953c171 100644 --- a/cmd/generic-handlers.go +++ b/cmd/generic-handlers.go @@ -103,7 +103,7 @@ func isHTTPHeaderSizeTooLarge(header http.Header) bool { length := len(key) + len(header.Get(key)) size += length for _, prefix := range userMetadataKeyPrefixes { - if HasPrefix(key, prefix) { + if strings.HasPrefix(strings.ToLower(key), prefix) { usersize += length break } @@ -444,74 +444,75 @@ func setIgnoreResourcesHandler(h http.Handler) http.Handler { return resourceHandler{h} } +var supportedDummyBucketAPIs = map[string][]string{ + "acl": {http.MethodPut, http.MethodGet}, + "cors": {http.MethodGet}, + "website": {http.MethodGet, http.MethodDelete}, + "logging": {http.MethodGet}, + "accelerate": {http.MethodGet}, + "replication": {http.MethodGet}, + "requestPayment": {http.MethodGet}, +} + +// List of not implemented bucket queries +var notImplementedBucketResourceNames = map[string]struct{}{ + "cors": {}, + "metrics": {}, + "website": {}, + "logging": {}, + "inventory": {}, + "accelerate": {}, + "replication": {}, + "requestPayment": {}, +} + // Checks requests for not implemented Bucket resources func ignoreNotImplementedBucketResources(req *http.Request) bool { for name := range req.URL.Query() { - // Enable PutBucketACL, GetBucketACL, GetBucketCors, - // GetBucketWebsite, GetBucketAcccelerate, - // GetBucketRequestPayment, GetBucketLogging, - // GetBucketLifecycle, GetBucketReplication, - // GetBucketTagging, GetBucketVersioning, - // DeleteBucketTagging, and DeleteBucketWebsite - // dummy calls specifically. 
- if name == "acl" && req.Method == http.MethodPut { - return false - } - if ((name == "acl" || - name == "cors" || - name == "website" || - name == "accelerate" || - name == "requestPayment" || - name == "logging" || - name == "lifecycle" || - name == "replication" || - name == "tagging" || - name == "versioning") && req.Method == http.MethodGet) || - ((name == "tagging" || - name == "website") && req.Method == http.MethodDelete) { - return false + methods, ok := supportedDummyBucketAPIs[name] + if ok { + for _, method := range methods { + if method == req.Method { + return false + } + } } - if notImplementedBucketResourceNames[name] { + if _, ok := notImplementedBucketResourceNames[name]; ok { return true } } return false } +var supportedDummyObjectAPIs = map[string][]string{ + "acl": {http.MethodGet, http.MethodPut}, +} + +// List of not implemented object APIs +var notImplementedObjectResourceNames = map[string]struct{}{ + "restore": {}, + "torrent": {}, +} + // Checks requests for not implemented Object resources func ignoreNotImplementedObjectResources(req *http.Request) bool { for name := range req.URL.Query() { - // Enable Get/PutObjectACL dummy call specifically. - if name == "acl" && (req.Method == http.MethodGet || req.Method == http.MethodPut) { - return false + methods, ok := supportedDummyObjectAPIs[name] + if ok { + for _, method := range methods { + if method == req.Method { + return false + } + } } - if notImplementedObjectResourceNames[name] { + if _, ok := notImplementedObjectResourceNames[name]; ok { return true } } return false } -// List of not implemented bucket queries -var notImplementedBucketResourceNames = map[string]bool{ - "accelerate": true, - "cors": true, - "inventory": true, - "logging": true, - "metrics": true, - "replication": true, - "requestPayment": true, - "versioning": true, - "website": true, -} - -// List of not implemented object queries -var notImplementedObjectResourceNames = map[string]bool{ - "restore": true, - "torrent": true, -} - // Resource handler ServeHTTP() wrapper func (h resourceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { bucketName, objectName := request2BucketObjectName(r) diff --git a/cmd/generic-handlers_test.go b/cmd/generic-handlers_test.go index b91ce282c..1a6f4f11f 100644 --- a/cmd/generic-handlers_test.go +++ b/cmd/generic-handlers_test.go @@ -199,12 +199,16 @@ var containsReservedMetadataTests = []struct { } func TestContainsReservedMetadata(t *testing.T) { - for i, test := range containsReservedMetadataTests { - if contains := containsReservedMetadata(test.header); contains && !test.shouldFail { - t.Errorf("Test %d: contains reserved header but should not fail", i) - } else if !contains && test.shouldFail { - t.Errorf("Test %d: does not contain reserved header but failed", i) - } + for _, test := range containsReservedMetadataTests { + test := test + t.Run("", func(t *testing.T) { + contains := containsReservedMetadata(test.header) + if contains && !test.shouldFail { + t.Errorf("contains reserved header but should not fail") + } else if !contains && test.shouldFail { + t.Errorf("does not contain reserved header but failed") + } + }) } } diff --git a/cmd/global-heal.go b/cmd/global-heal.go index 7309eb7db..d11f591c8 100644 --- a/cmd/global-heal.go +++ b/cmd/global-heal.go @@ -79,7 +79,7 @@ func getLocalBackgroundHealStatus() madmin.BgHealState { } // healErasureSet lists and heals all objects in a specific erasure set -func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects, drivesPerSet int) 
error { +func healErasureSet(ctx context.Context, setIndex int, xlObj *erasureObjects, drivesPerSet int) error { buckets, err := xlObj.ListBuckets(ctx) if err != nil { return err @@ -105,32 +105,34 @@ func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects, drivesP for _, bucket := range buckets { // Heal current bucket bgSeq.sourceCh <- healSource{ - path: bucket.Name, + bucket: bucket.Name, } - var entryChs []FileInfoCh + var entryChs []FileInfoVersionsCh for _, disk := range xlObj.getLoadBalancedDisks() { if disk == nil { // Disk can be offline continue } - entryCh, err := disk.Walk(bucket.Name, "", "", true, xlMetaJSONFile, readMetadata, ctx.Done()) + + entryCh, err := disk.WalkVersions(bucket.Name, "", "", true, ctx.Done()) if err != nil { // Disk walk returned error, ignore it. continue } - entryChs = append(entryChs, FileInfoCh{ + + entryChs = append(entryChs, FileInfoVersionsCh{ Ch: entryCh, }) } entriesValid := make([]bool, len(entryChs)) - entries := make([]FileInfo, len(entryChs)) + entries := make([]FileInfoVersions, len(entryChs)) for { - entry, quorumCount, ok := lexicallySortedEntry(entryChs, entries, entriesValid) + entry, quorumCount, ok := lexicallySortedEntryVersions(entryChs, entries, entriesValid) if !ok { - return nil + break } if quorumCount == drivesPerSet { @@ -138,8 +140,12 @@ func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects, drivesP continue } - bgSeq.sourceCh <- healSource{ - path: pathJoin(bucket.Name, entry.Name), + for _, version := range entry.Versions { + bgSeq.sourceCh <- healSource{ + bucket: bucket.Name, + object: version.Name, + versionID: version.VersionID, + } } } } @@ -148,13 +154,15 @@ func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects, drivesP } // deepHealObject heals given object path in deep to fix bitrot. -func deepHealObject(objectPath string) { +func deepHealObject(bucket, object, versionID string) { // Get background heal sequence to send elements to heal bgSeq, _ := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID) bgSeq.sourceCh <- healSource{ - path: objectPath, - opts: &madmin.HealOpts{ScanMode: madmin.HealDeepScan}, + bucket: bucket, + object: object, + versionID: versionID, + opts: &madmin.HealOpts{ScanMode: madmin.HealDeepScan}, } } @@ -172,7 +180,7 @@ func durationToNextHealRound(lastHeal time.Time) time.Duration { } // Healing leader will take the charge of healing all erasure sets -func execLeaderTasks(ctx context.Context, z *xlZones) { +func execLeaderTasks(ctx context.Context, z *erasureZones) { // So that we don't heal immediately, but after one month. lastScanTime := UTCNow() // Get background heal sequence to send elements to heal @@ -211,7 +219,7 @@ func execLeaderTasks(ctx context.Context, z *xlZones) { } func startGlobalHeal(ctx context.Context, objAPI ObjectLayer) { - zones, ok := objAPI.(*xlZones) + zones, ok := objAPI.(*erasureZones) if !ok { return } diff --git a/cmd/globals.go b/cmd/globals.go index fae732fc9..585380222 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -61,8 +61,8 @@ const ( globalNetBSDOSName = "netbsd" globalMacOSName = "darwin" globalMinioModeFS = "mode-server-fs" - globalMinioModeXL = "mode-server-xl" - globalMinioModeDistXL = "mode-server-distributed-xl" + globalMinioModeErasure = "mode-server-xl" + globalMinioModeDistErasure = "mode-server-distributed-xl" globalMinioModeGatewayPrefix = "mode-gateway-" // Add new global values here. @@ -107,13 +107,13 @@ var globalCLIContext = struct { var ( // Indicates set drive count. 
- globalXLSetDriveCount int + globalErasureSetDriveCount int // Indicates if the running minio server is distributed setup. - globalIsDistXL = false + globalIsDistErasure = false // Indicates if the running minio server is an erasure-code backend. - globalIsXL = false + globalIsErasure = false // Indicates if the running minio is in gateway mode. globalIsGateway = false @@ -215,6 +215,7 @@ var ( globalBucketObjectLockSys *BucketObjectLockSys globalBucketQuotaSys *BucketQuotaSys + globalBucketVersioningSys *BucketVersioningSys // Disk cache drives globalCacheConfig cache.Config diff --git a/cmd/handler-utils.go b/cmd/handler-utils.go index cb2baab90..f013ea614 100644 --- a/cmd/handler-utils.go +++ b/cmd/handler-utils.go @@ -445,7 +445,7 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) { // gets host name for current node func getHostName(r *http.Request) (hostName string) { - if globalIsDistXL { + if globalIsDistErasure { hostName = GetLocalPeer(globalEndpoints) } else { hostName = r.Host diff --git a/cmd/http-tracer.go b/cmd/http-tracer.go index 01ac76a26..fe97dcaa1 100644 --- a/cmd/http-tracer.go +++ b/cmd/http-tracer.go @@ -114,7 +114,7 @@ func Trace(f http.HandlerFunc, logBody bool, w http.ResponseWriter, r *http.Requ reqBodyRecorder = &recordRequest{Reader: r.Body, logBody: logBody, headers: reqHeaders} r.Body = ioutil.NopCloser(reqBodyRecorder) t.NodeName = r.Host - if globalIsDistXL { + if globalIsDistErasure { t.NodeName = GetLocalPeer(globalEndpoints) } // strip port from the host address diff --git a/cmd/http/headers.go b/cmd/http/headers.go index 969db491b..a9c40185b 100644 --- a/cmd/http/headers.go +++ b/cmd/http/headers.go @@ -56,6 +56,10 @@ const ( // S3 storage class AmzStorageClass = "x-amz-storage-class" + // S3 object version ID + AmzVersionID = "x-amz-version-id" + AmzDeleteMarker = "x-amz-delete-marker" + // S3 object tagging AmzObjectTagging = "X-Amz-Tagging" AmzTagCount = "x-amz-tagging-count" diff --git a/cmd/iam.go b/cmd/iam.go index 18081ae68..dfbc0a01e 100644 --- a/cmd/iam.go +++ b/cmd/iam.go @@ -469,7 +469,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) { } // These messages only meant primarily for distributed setup, so only log during distributed setup. - if globalIsDistXL { + if globalIsDistErasure { logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. lock acquired") } diff --git a/cmd/lock-rest-server.go b/cmd/lock-rest-server.go index 147994b86..d925aeef3 100644 --- a/cmd/lock-rest-server.go +++ b/cmd/lock-rest-server.go @@ -270,10 +270,10 @@ func lockMaintenance(ctx context.Context, interval time.Duration) error { } // Read locks we assume quorum for be N/2 success - quorum := globalXLSetDriveCount / 2 + quorum := globalErasureSetDriveCount / 2 if nlrip.lri.Writer { // For write locks we need N/2+1 success - quorum = globalXLSetDriveCount/2 + 1 + quorum = globalErasureSetDriveCount/2 + 1 } // less than the quorum, we have locks expired. diff --git a/cmd/merge-walk-pool.go b/cmd/merge-walk-pool.go index b30be139b..309c1bca9 100644 --- a/cmd/merge-walk-pool.go +++ b/cmd/merge-walk-pool.go @@ -26,6 +26,13 @@ const ( globalMergeLookupTimeout = time.Minute * 1 // 1 minutes. ) +// mergeWalkVersions - represents the go routine that does the merge walk versions. +type mergeWalkVersions struct { + entryChs []FileInfoVersionsCh + endWalkCh chan struct{} // To signal when mergeWalk go-routine should end. + endTimerCh chan<- struct{} // To signal when timer go-routine should end. 
+}
+
 // mergeWalk - represents the go routine that does the merge walk.
 type mergeWalk struct {
 	entryChs   []FileInfoCh
@@ -33,6 +40,103 @@ type mergeWalk struct {
 	endTimerCh chan<- struct{} // To signal when timer go-routine should end.
 }

+// MergeWalkVersionsPool - pool of mergeWalk go routines.
+// A mergeWalk is added to the pool by Set() and removed either by
+// doing a Release() or if the concerned timer goes off.
+// MergeWalkVersionsPool's purpose is to maintain active mergeWalk go-routines in a map
+// so that they can be looked up across related list calls.
+type MergeWalkVersionsPool struct {
+	sync.Mutex
+	pool    map[listParams][]mergeWalkVersions
+	timeOut time.Duration
+}
+
+// NewMergeWalkVersionsPool - initialize new merge walk pool for versions.
+func NewMergeWalkVersionsPool(timeout time.Duration) *MergeWalkVersionsPool {
+	tPool := &MergeWalkVersionsPool{
+		pool:    make(map[listParams][]mergeWalkVersions),
+		timeOut: timeout,
+	}
+	return tPool
+}
+
+// Release - similar to mergeWalkPool.Release but for versions.
+func (t *MergeWalkVersionsPool) Release(params listParams) ([]FileInfoVersionsCh, chan struct{}) {
+	t.Lock()
+	defer t.Unlock()
+	walks, ok := t.pool[params] // Pick the valid walks.
+	if !ok || len(walks) == 0 {
+		// Release returns nil if params not found.
+		return nil, nil
+	}
+
+	// Pop out the first valid walk entry.
+	walk := walks[0]
+	walks = walks[1:]
+	if len(walks) > 0 {
+		t.pool[params] = walks
+	} else {
+		delete(t.pool, params)
+	}
+	walk.endTimerCh <- struct{}{}
+	return walk.entryChs, walk.endWalkCh
+}
+
+// Set - similar to mergeWalkPool.Set but for file versions
+func (t *MergeWalkVersionsPool) Set(params listParams, resultChs []FileInfoVersionsCh, endWalkCh chan struct{}) {
+	t.Lock()
+	defer t.Unlock()
+
+	// Should be a buffered channel so that Release() never blocks.
+	endTimerCh := make(chan struct{}, 1)
+
+	walkInfo := mergeWalkVersions{
+		entryChs:   resultChs,
+		endWalkCh:  endWalkCh,
+		endTimerCh: endTimerCh,
+	}
+
+	// Append new walk info.
+	t.pool[params] = append(t.pool[params], walkInfo)
+
+	// Timer go-routine which times out after t.timeOut.
+	go func(endTimerCh <-chan struct{}, walkInfo mergeWalkVersions) {
+		select {
+		// Wait until timeOut
+		case <-time.After(t.timeOut):
+			// Timeout has expired. Remove the mergeWalk from mergeWalkPool and
+			// end the mergeWalk go-routine.
+			t.Lock()
+			walks, ok := t.pool[params]
+			if ok {
+				// Trick of filtering without allocating
+				// https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating
+				nwalks := walks[:0]
+				// Look for walkInfo, remove it from the walks list.
+				for _, walk := range walks {
+					if !reflect.DeepEqual(walk, walkInfo) {
+						nwalks = append(nwalks, walk)
+					}
+				}
+				if len(nwalks) == 0 {
+					// No more mergeWalk go-routines associated with listParams
+					// hence remove map entry.
+					delete(t.pool, params)
+				} else {
+					// There are more mergeWalk go-routines associated with listParams
+					// hence save the list in the map.
+					t.pool[params] = nwalks
+				}
+			}
+			// Signal the mergeWalk go-routine to die.
+			close(endWalkCh)
+			t.Unlock()
+		case <-endTimerCh:
+			return
+		}
+	}(endTimerCh, walkInfo)
+}
+
 // MergeWalkPool - pool of mergeWalk go routines.
 // A mergeWalk is added to the pool by Set() and removed either by
 // doing a Release() or if the concerned timer goes off.
@@ -84,7 +188,7 @@ func (t *MergeWalkPool) Release(params listParams) ([]FileInfoCh, chan struct{})
 // 1) time.After() expires after t.timeOut seconds.
// The expiration is needed so that the mergeWalk go-routine resources are freed after a timeout // if the S3 client does only partial listing of objects. -// 2) Relase() signals the timer go-routine to end on endTimerCh. +// 2) Release() signals the timer go-routine to end on endTimerCh. // During listing the timer should not timeout and end the mergeWalk go-routine, hence the // timer go-routine should be ended. func (t *MergeWalkPool) Set(params listParams, resultChs []FileInfoCh, endWalkCh chan struct{}) { diff --git a/cmd/metrics.go b/cmd/metrics.go index b534cf8af..b5c8ec87b 100644 --- a/cmd/metrics.go +++ b/cmd/metrics.go @@ -97,7 +97,7 @@ func (c *minioCollector) Collect(ch chan<- prometheus.Metric) { // collects healing specific metrics for MinIO instance in Prometheus specific format // and sends to given channel func healingMetricsPrometheus(ch chan<- prometheus.Metric) { - if !globalIsXL { + if !globalIsErasure { return } bgSeq, exists := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID) diff --git a/cmd/namespace-lock.go b/cmd/namespace-lock.go index 23f0b32a2..42df2a2c5 100644 --- a/cmd/namespace-lock.go +++ b/cmd/namespace-lock.go @@ -45,11 +45,11 @@ type RWLocker interface { } // newNSLock - return a new name space lock map. -func newNSLock(isDistXL bool) *nsLockMap { +func newNSLock(isDistErasure bool) *nsLockMap { nsMutex := nsLockMap{ - isDistXL: isDistXL, + isDistErasure: isDistErasure, } - if isDistXL { + if isDistErasure { return &nsMutex } nsMutex.lockMap = make(map[string]*nsLock) @@ -66,9 +66,9 @@ type nsLock struct { // Unlock, RLock and RUnlock. type nsLockMap struct { // Indicates if namespace is part of a distributed setup. - isDistXL bool - lockMap map[string]*nsLock - lockMapMutex sync.Mutex + isDistErasure bool + lockMap map[string]*nsLock + lockMapMutex sync.Mutex } // Lock the namespace resource. @@ -190,7 +190,7 @@ type localLockInstance struct { // volume, path and operation ID. func (n *nsLockMap) NewNSLock(ctx context.Context, lockersFn func() []dsync.NetLocker, volume string, paths ...string) RWLocker { opsID := mustGetUUID() - if n.isDistXL { + if n.isDistErasure { drwmutex := dsync.NewDRWMutex(ctx, &dsync.Dsync{ GetLockersFn: lockersFn, }, pathsJoinPrefix(volume, paths...)...) 
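Note: the versioned pool introduced above follows the same Set/Release lifecycle as MergeWalkPool. Release pops previously parked walk channels for identical listParams (or returns nil on a miss), and Set parks unfinished channels under a reclaim timer. A hedged sketch of the intended call pattern, written as if it lived in package cmd of this tree; startVersionWalks and drainEntries are assumed helpers for illustration, not part of this patch:

// listVersionsPage resumes or starts merge walks for one page of results.
func listVersionsPage(pool *MergeWalkVersionsPool, params listParams, maxKeys int) {
	entryChs, endWalkCh := pool.Release(params)
	if entryChs == nil {
		// Cache miss: start fresh walks and the channel that stops them.
		endWalkCh = make(chan struct{})
		entryChs = startVersionWalks(params, endWalkCh) // assumed helper
	}

	truncated := drainEntries(entryChs, maxKeys) // assumed helper: read up to maxKeys entries

	if truncated {
		// Park the walks; the pool's timer goroutine reclaims them if no
		// continuation request arrives within the configured timeout.
		pool.Set(params, entryChs, endWalkCh)
	} else {
		close(endWalkCh) // listing finished; stop the producer goroutines
	}
}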
diff --git a/cmd/naughty-disk_test.go b/cmd/naughty-disk_test.go index 4ad887936..98601250b 100644 --- a/cmd/naughty-disk_test.go +++ b/cmd/naughty-disk_test.go @@ -142,18 +142,25 @@ func (d *naughtyDisk) WalkSplunk(volume, path, marker string, endWalkCh <-chan s return d.disk.WalkSplunk(volume, path, marker, endWalkCh) } -func (d *naughtyDisk) Walk(volume, path, marker string, recursive bool, leafFile string, readMetadataFn readMetadataFunc, endWalkCh <-chan struct{}) (chan FileInfo, error) { +func (d *naughtyDisk) WalkVersions(volume, path, marker string, recursive bool, endWalkVersionsCh <-chan struct{}) (chan FileInfoVersions, error) { if err := d.calcError(); err != nil { return nil, err } - return d.disk.Walk(volume, path, marker, recursive, leafFile, readMetadataFn, endWalkCh) + return d.disk.WalkVersions(volume, path, marker, recursive, endWalkVersionsCh) } -func (d *naughtyDisk) ListDir(volume, path string, count int, leafFile string) (entries []string, err error) { +func (d *naughtyDisk) Walk(volume, path, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfo, error) { + if err := d.calcError(); err != nil { + return nil, err + } + return d.disk.Walk(volume, path, marker, recursive, endWalkCh) +} + +func (d *naughtyDisk) ListDir(volume, path string, count int) (entries []string, err error) { if err := d.calcError(); err != nil { return []string{}, err } - return d.disk.ListDir(volume, path, count, leafFile) + return d.disk.ListDir(volume, path, count) } func (d *naughtyDisk) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) { @@ -184,6 +191,13 @@ func (d *naughtyDisk) AppendFile(volume, path string, buf []byte) error { return d.disk.AppendFile(volume, path, buf) } +func (d *naughtyDisk) RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath string) error { + if err := d.calcError(); err != nil { + return err + } + return d.disk.RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath) +} + func (d *naughtyDisk) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error { if err := d.calcError(); err != nil { return err @@ -191,11 +205,18 @@ func (d *naughtyDisk) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) return d.disk.RenameFile(srcVolume, srcPath, dstVolume, dstPath) } -func (d *naughtyDisk) StatFile(volume string, path string) (file FileInfo, err error) { +func (d *naughtyDisk) CheckParts(volume string, path string, fi FileInfo) (err error) { if err := d.calcError(); err != nil { - return FileInfo{}, err + return err } - return d.disk.StatFile(volume, path) + return d.disk.CheckParts(volume, path, fi) +} + +func (d *naughtyDisk) CheckFile(volume string, path string) (err error) { + if err := d.calcError(); err != nil { + return err + } + return d.disk.CheckFile(volume, path) } func (d *naughtyDisk) DeleteFile(volume string, path string) (err error) { @@ -205,19 +226,36 @@ func (d *naughtyDisk) DeleteFile(volume string, path string) (err error) { return d.disk.DeleteFile(volume, path) } -func (d *naughtyDisk) DeleteFileBulk(volume string, paths []string) ([]error, error) { - errs := make([]error, len(paths)) - for idx, path := range paths { - errs[idx] = d.disk.DeleteFile(volume, path) +func (d *naughtyDisk) DeleteVersions(volume string, versions []FileInfo) []error { + if err := d.calcError(); err != nil { + errs := make([]error, len(versions)) + for i := range errs { + errs[i] = err + } + return errs } - return errs, nil + return d.disk.DeleteVersions(volume, versions) } 
-func (d *naughtyDisk) DeletePrefixes(volume string, paths []string) ([]error, error) { +func (d *naughtyDisk) WriteMetadata(volume, path string, fi FileInfo) (err error) { if err := d.calcError(); err != nil { - return nil, err + return err } - return d.disk.DeletePrefixes(volume, paths) + return d.disk.WriteMetadata(volume, path, fi) +} + +func (d *naughtyDisk) DeleteVersion(volume string, path string, fi FileInfo) (err error) { + if err := d.calcError(); err != nil { + return err + } + return d.disk.DeleteVersion(volume, path, fi) +} + +func (d *naughtyDisk) ReadVersion(volume string, path string, versionID string) (fi FileInfo, err error) { + if err := d.calcError(); err != nil { + return FileInfo{}, err + } + return d.disk.ReadVersion(volume, path, versionID) } func (d *naughtyDisk) WriteAll(volume string, path string, reader io.Reader) (err error) { @@ -234,9 +272,9 @@ func (d *naughtyDisk) ReadAll(volume string, path string) (buf []byte, err error return d.disk.ReadAll(volume, path) } -func (d *naughtyDisk) VerifyFile(volume, path string, size int64, algo BitrotAlgorithm, sum []byte, shardSize int64) error { +func (d *naughtyDisk) VerifyFile(volume, path string, fi FileInfo) error { if err := d.calcError(); err != nil { return err } - return d.disk.VerifyFile(volume, path, size, algo, sum, shardSize) + return d.disk.VerifyFile(volume, path, fi) } diff --git a/cmd/notification.go b/cmd/notification.go index 37fa8e323..c68eeabf3 100644 --- a/cmd/notification.go +++ b/cmd/notification.go @@ -628,7 +628,7 @@ func (sys *NotificationSys) load(buckets []BucketInfo, objAPI ObjectLayer) error return nil } -// Init - initializes notification system from notification.xml and listener.json of all buckets. +// Init - initializes notification system from notification.xml and listenxl.meta of all buckets. 
func (sys *NotificationSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error { if objAPI == nil { return errServerNotInitialized @@ -1247,7 +1247,7 @@ func (args eventArgs) ToEvent(escape bool) event.Event { }, Object: event.Object{ Key: keyName, - VersionID: "1", + VersionID: args.Object.VersionID, Sequencer: uniqueID, }, }, diff --git a/cmd/obdinfo.go b/cmd/obdinfo.go index 2278dded2..b57d81a99 100644 --- a/cmd/obdinfo.go +++ b/cmd/obdinfo.go @@ -34,7 +34,7 @@ import ( func getLocalCPUOBDInfo(ctx context.Context, r *http.Request) madmin.ServerCPUOBDInfo { addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(globalEndpoints) } @@ -103,8 +103,9 @@ func getLocalDrivesOBD(ctx context.Context, parallel bool, endpointZones Endpoin } } wg.Wait() + addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(endpointZones) } if parallel { @@ -121,7 +122,7 @@ func getLocalDrivesOBD(ctx context.Context, parallel bool, endpointZones Endpoin func getLocalMemOBD(ctx context.Context, r *http.Request) madmin.ServerMemOBDInfo { addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(globalEndpoints) } @@ -150,7 +151,7 @@ func getLocalMemOBD(ctx context.Context, r *http.Request) madmin.ServerMemOBDInf func getLocalProcOBD(ctx context.Context, r *http.Request) madmin.ServerProcOBDInfo { addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(globalEndpoints) } @@ -371,7 +372,7 @@ func getLocalProcOBD(ctx context.Context, r *http.Request) madmin.ServerProcOBDI func getLocalOsInfoOBD(ctx context.Context, r *http.Request) madmin.ServerOsOBDInfo { addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(globalEndpoints) } diff --git a/cmd/obdinfo_freebsd.go b/cmd/obdinfo_freebsd.go index 845b17ff0..8aab2895f 100644 --- a/cmd/obdinfo_freebsd.go +++ b/cmd/obdinfo_freebsd.go @@ -26,7 +26,7 @@ import ( func getLocalDiskHwOBD(ctx context.Context, r *http.Request) madmin.ServerDiskHwOBDInfo { addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(globalEndpoints) } diff --git a/cmd/obdinfo_other.go b/cmd/obdinfo_other.go index 44645bbe5..0b323b3af 100644 --- a/cmd/obdinfo_other.go +++ b/cmd/obdinfo_other.go @@ -30,7 +30,7 @@ import ( func getLocalDiskHwOBD(ctx context.Context, r *http.Request) madmin.ServerDiskHwOBDInfo { addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(globalEndpoints) } diff --git a/cmd/object-api-common.go b/cmd/object-api-common.go index 6811d851d..55eb73659 100644 --- a/cmd/object-api-common.go +++ b/cmd/object-api-common.go @@ -82,11 +82,11 @@ func dirObjectInfo(bucket, object string, size int64, metadata map[string]string // Depending on the disk type network or local, initialize storage API. func newStorageAPI(endpoint Endpoint) (storage StorageAPI, err error) { if endpoint.IsLocal { - storage, err := newPosix(endpoint.Path, endpoint.Host) + storage, err := newXLStorage(endpoint.Path, endpoint.Host) if err != nil { return nil, err } - return &posixDiskIDCheck{storage: storage}, nil + return &xlStorageDiskIDCheck{storage: storage}, nil } return newStorageRESTClient(endpoint), nil @@ -105,7 +105,7 @@ func cleanupDir(ctx context.Context, storage StorageAPI, volume, dirPath string) } // If it's a directory, list and call delFunc() for each entry. 
-	entries, err := storage.ListDir(volume, entryPath, -1, "")
+	entries, err := storage.ListDir(volume, entryPath, -1)
 	// If entryPath prefix never existed, safe to ignore.
 	if err == errFileNotFound {
 		return nil
@@ -165,7 +165,7 @@ func listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter
 			// ignore quorum error as it might be an entry from an outdated disk.
 			if IsErrIgnored(err, []error{
 				errFileNotFound,
-				errXLReadQuorum,
+				errErasureReadQuorum,
 			}...) {
 				continue
 			}
@@ -358,7 +358,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d
 			// ignore quorum error as it might be an entry from an outdated disk.
 			if IsErrIgnored(err, []error{
 				errFileNotFound,
-				errXLReadQuorum,
+				errErasureReadQuorum,
 			}...) {
 				continue
 			}
diff --git a/cmd/object-api-datatypes.go b/cmd/object-api-datatypes.go
index 926d306ae..815092a9b 100644
--- a/cmd/object-api-datatypes.go
+++ b/cmd/object-api-datatypes.go
@@ -160,6 +160,17 @@ type ObjectInfo struct {
 	// Hex encoded unique entity tag of the object.
 	ETag string

+	// Version ID of this object.
+	VersionID string
+
+	// IsLatest indicates if this is the latest current version;
+	// it can be true for either a regular version or a delete marker.
+	IsLatest bool
+
+	// DeleteMarker indicates if the versionId corresponds
+	// to a delete marker on an object.
+	DeleteMarker bool
+
 	// A standard MIME type describing the format of the object.
 	ContentType string

@@ -317,6 +328,53 @@ type ListMultipartsInfo struct {
 	EncodingType string // Not supported yet.
 }

+// DeletedObjectInfo - container for the deleted objects in a list object versions response.
+type DeletedObjectInfo struct {
+	// Name of the bucket.
+	Bucket string
+
+	// Name of the object.
+	Name string
+
+	// Date and time when the object was last modified.
+	ModTime time.Time
+
+	// Version ID of this object.
+	VersionID string
+
+	// Indicates if the delete marker is the latest version.
+	IsLatest bool
+}
+
+// ListObjectVersionsInfo - container for list objects versions.
+type ListObjectVersionsInfo struct {
+	// Indicates whether the returned list objects response is truncated. A
+	// value of true indicates that the list was truncated. The list can be truncated
+	// if the number of objects exceeds the limit allowed or specified
+	// by max keys.
+	IsTruncated bool
+
+	// When response is truncated (the IsTruncated element value in the response is true),
+	// you can use the key name in this field as marker in the subsequent
+	// request to get next set of objects.
+	//
+	// NOTE: AWS S3 returns NextMarker only if you have delimiter request parameter specified,
+	// MinIO always returns NextMarker.
+	NextMarker string
+
+	// NextVersionIDMarker may be set if IsTruncated is true
+	NextVersionIDMarker string
+
+	// List of objects info for this request.
+	Objects []ObjectInfo
+
+	// List of deleted objects for this request.
+	DeleteObjects []DeletedObjectInfo
+
+	// List of prefixes for this request.
+	Prefixes []string
+}
+
 // ListObjectsInfo - container for list objects.
 type ListObjectsInfo struct {
 	// Indicates whether the returned list objects response is truncated. A
diff --git a/cmd/object-api-deleteobject_test.go b/cmd/object-api-deleteobject_test.go
index c3844fad8..fbf537b80 100644
--- a/cmd/object-api-deleteobject_test.go
+++ b/cmd/object-api-deleteobject_test.go
@@ -24,7 +24,7 @@ import (
 	"testing"
 )

-// Wrapper for calling DeleteObject tests for both XL multiple disks and single node setup.
+// Wrapper for calling DeleteObject tests for both Erasure multiple disks and single node setup.
func TestDeleteObject(t *testing.T) { ExecObjectLayerTest(t, testDeleteObject) } @@ -74,7 +74,7 @@ func testDeleteObject(obj ObjectLayer, instanceType string, t TestErrHandler) { "dir/", []string{"dir/object1", "object0"}, }, - // Test 4: Remove an empty directory and checks it is really removed + // Test 5: Remove an empty directory and check it is really removed { "bucket5", []objectUpload{{"object0", "content"}, {"dir/", ""}}, @@ -84,8 +84,7 @@ } for i, testCase := range testCases { - - err := obj.MakeBucketWithLocation(context.Background(), testCase.bucketName, "", false) + err := obj.MakeBucketWithLocation(context.Background(), testCase.bucketName, BucketOptions{}) if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } @@ -99,16 +98,17 @@ } } - // TODO: check the error in the future - _ = obj.DeleteObject(context.Background(), testCase.bucketName, testCase.pathToDelete) + _, _ = obj.DeleteObject(context.Background(), testCase.bucketName, testCase.pathToDelete, ObjectOptions{}) result, err := obj.ListObjects(context.Background(), testCase.bucketName, "", "", "", 1000) if err != nil { t.Errorf("Test %d: %s: Expected to pass, but failed with: %s", i+1, instanceType, err.Error()) + continue } if len(result.Objects) != len(testCase.objectsAfterDelete) { - t.Errorf("Test %d: %s: mismatch number of objects after delete, expected = %d, found = %d", i+1, instanceType, len(testCase.objectsAfterDelete), len(result.Objects)) + t.Errorf("Test %d: %s: mismatch number of objects after delete, expected = %v, found = %v", i+1, instanceType, testCase.objectsAfterDelete, result.Objects) + continue } for idx := range result.Objects { diff --git a/cmd/object-api-errors.go b/cmd/object-api-errors.go index fdc09c90b..5a7c041a5 100644 --- a/cmd/object-api-errors.go +++ b/cmd/object-api-errors.go @@ -66,6 +66,28 @@ func toObjectErr(err error, params ...string) error { Object: params[1], } } + case errFileVersionNotFound: + switch len(params) { + case 2: + err = VersionNotFound{ + Bucket: params[0], + Object: params[1], + } + case 3: + err = VersionNotFound{ + Bucket: params[0], + Object: params[1], + VersionID: params[2], + } + } + case errMethodNotAllowed: + switch len(params) { + case 2: + err = MethodNotAllowed{ + Bucket: params[0], + Object: params[1], + } + } case errFileNotFound: switch len(params) { case 2: @@ -101,9 +123,9 @@ Object: params[1], } } - case errXLReadQuorum: + case errErasureReadQuorum: err = InsufficientReadQuorum{} - case errXLWriteQuorum: + case errErasureWriteQuorum: err = InsufficientWriteQuorum{} case io.ErrUnexpectedEOF, io.ErrShortWrite: err = IncompleteBody{} @@ -150,8 +172,9 @@ func (e InsufficientWriteQuorum) Error() string { // GenericError - generic object layer error. type GenericError struct { - Bucket string - Object string + Bucket string + Object string + VersionID string } // BucketNotFound bucket does not exist. @@ -182,18 +205,32 @@ func (e BucketNotEmpty) Error() string { return "Bucket not empty: " + e.Bucket } +// VersionNotFound version does not exist. +type VersionNotFound GenericError + +func (e VersionNotFound) Error() string { + return "Version not found: " + e.Bucket + "/" + e.Object + "(" + e.VersionID + ")" +} + // ObjectNotFound object does not exist.
type ObjectNotFound GenericError func (e ObjectNotFound) Error() string { - return "Object not found: " + e.Bucket + "#" + e.Object + return "Object not found: " + e.Bucket + "/" + e.Object +} + +// MethodNotAllowed on the object +type MethodNotAllowed GenericError + +func (e MethodNotAllowed) Error() string { + return "Method not allowed: " + e.Bucket + "/" + e.Object } // ObjectAlreadyExists object already exists. type ObjectAlreadyExists GenericError func (e ObjectAlreadyExists) Error() string { - return "Object: " + e.Bucket + "#" + e.Object + " already exists" + return "Object: " + e.Bucket + "/" + e.Object + " already exists" } // ObjectExistsAsDirectory object already exists as a directory. @@ -323,17 +360,17 @@ type ObjectNamePrefixAsSlash GenericError // Error returns string an error formatted as the given text. func (e ObjectNameInvalid) Error() string { - return "Object name invalid: " + e.Bucket + "#" + e.Object + return "Object name invalid: " + e.Bucket + "/" + e.Object } // Error returns string an error formatted as the given text. func (e ObjectNameTooLong) Error() string { - return "Object name too long: " + e.Bucket + "#" + e.Object + return "Object name too long: " + e.Bucket + "/" + e.Object } // Error returns string an error formatted as the given text. func (e ObjectNamePrefixAsSlash) Error() string { - return "Object name contains forward slash as pefix: " + e.Bucket + "#" + e.Object + return "Object name contains forward slash as prefix: " + e.Bucket + "/" + e.Object } // AllAccessDisabled All access to this object has been disabled @@ -349,7 +386,7 @@ type IncompleteBody GenericError // Error returns string an error formatted as the given text. func (e IncompleteBody) Error() string { - return e.Bucket + "#" + e.Object + "has incomplete body" + return e.Bucket + "/" + e.Object + " has incomplete body" } // InvalidRange - invalid range typed error. @@ -445,9 +482,14 @@ func (e InvalidETag) Error() string { } // NotImplemented If a feature is not implemented -type NotImplemented struct{} +type NotImplemented struct { + API string +} func (e NotImplemented) Error() string { + if e.API != "" { + return e.API + " is Not Implemented" + } return "Not Implemented" } diff --git a/cmd/object-api-getobject_test.go b/cmd/object-api-getobject_test.go index 3cded55fd..ecb10a711 100644 --- a/cmd/object-api-getobject_test.go +++ b/cmd/object-api-getobject_test.go @@ -29,7 +29,7 @@ import ( humanize "github.com/dustin/go-humanize" ) -// Wrapper for calling GetObject tests for both XL multiple disks and single node setup. +// Wrapper for calling GetObject tests for both Erasure multiple disks and single node setup. func TestGetObject(t *testing.T) { ExecObjectLayerTest(t, testGetObject) } @@ -42,7 +42,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) { emptyDirName := "test-empty-dir/" // create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) // Stop the test if creation of the bucket fails. if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } @@ -113,7 +113,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) { {"a", "obj", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Bucket name invalid: a")}, // Test case - 5. // Case with invalid object names.
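For callers, the practical effect of the hunk above is that a missing version surfaces as the typed VersionNotFound error, carrying the version ID whenever toObjectErr receives a third parameter. A trimmed, self-contained sketch of detecting it with errors.As; the two types are copied from the diff, the rest is illustrative.

```go
package main

import (
	"errors"
	"fmt"
)

// Trimmed copies of the typed errors from cmd/object-api-errors.go.
type GenericError struct {
	Bucket    string
	Object    string
	VersionID string
}

// VersionNotFound version does not exist.
type VersionNotFound GenericError

func (e VersionNotFound) Error() string {
	return "Version not found: " + e.Bucket + "/" + e.Object + "(" + e.VersionID + ")"
}

func main() {
	// In the real code this error would come out of
	// toObjectErr(errFileVersionNotFound, bucket, object, versionID).
	err := error(VersionNotFound{Bucket: "photos", Object: "cat.png", VersionID: "null"})

	var vnf VersionNotFound
	if errors.As(err, &vnf) {
		fmt.Println("missing version:", vnf.VersionID) // missing version: null
	}
}
```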
- {bucketName, "", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Object name invalid: "+bucketName+"#")}, + {bucketName, "", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Object name invalid: "+bucketName+"/")}, // Test case - 6. {bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[0], NewEOFWriter(buffers[0], 100), false, []byte{}, io.EOF}, // Test case with start offset set to 0 and length set to size of the object. @@ -194,7 +194,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [ // Setup for the tests. bucketName := getRandomBucketName() // create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) // Stop the test if creation of the bucket fails. if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) @@ -292,19 +292,19 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [ } -// Wrapper for calling GetObject tests for both XL multiple disks and single node setup. +// Wrapper for calling GetObject tests for both Erasure multiple disks and single node setup. func TestGetObjectDiskNotFound(t *testing.T) { ExecObjectLayerDiskAlteredTest(t, testGetObjectDiskNotFound) } // ObjectLayer.GetObject is called with series of cases for valid and erroneous inputs and the result is validated. -// Before the Get Object call XL disks are moved so that the quorum just holds. +// Before the Get Object call Erasure disks are moved so that the quorum just holds. func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []string, t *testing.T) { // Setup for the tests. bucketName := getRandomBucketName() objectName := "test-object" // create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) // Stop the test if creation of the bucket fails. if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) @@ -376,7 +376,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str {"a", "obj", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Bucket name invalid: a")}, // Test case - 5. // Case with invalid object names. - {bucketName, "", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Object name invalid: "+bucketName+"#")}, + {bucketName, "", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Object name invalid: "+bucketName+"/")}, // Test case - 7. {bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[0], NewEOFWriter(buffers[0], 100), false, []byte{}, io.EOF}, // Test case with start offset set to 0 and length set to size of the object. @@ -446,16 +446,16 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str // Benchmarks for ObjectLayer.GetObject(). // The intent is to benchmark GetObject for various sizes ranging from few bytes to 100MB. -// Also each of these Benchmarks are run both XL and FS backends. +// Also each of these Benchmarks are run both Erasure and FS backends. // BenchmarkGetObjectVerySmallFS - Benchmark FS.GetObject() for object size of 10 bytes. func BenchmarkGetObjectVerySmallFS(b *testing.B) { benchmarkGetObject(b, "FS", 10) } -// BenchmarkGetObjectVerySmallXL - Benchmark XL.GetObject() for object size of 10 bytes. 
-func BenchmarkGetObjectVerySmallXL(b *testing.B) { - benchmarkGetObject(b, "XL", 10) +// BenchmarkGetObjectVerySmallErasure - Benchmark Erasure.GetObject() for object size of 10 bytes. +func BenchmarkGetObjectVerySmallErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 10) } // BenchmarkGetObject10KbFS - Benchmark FS.GetObject() for object size of 10KB. @@ -463,9 +463,9 @@ func BenchmarkGetObject10KbFS(b *testing.B) { benchmarkGetObject(b, "FS", 10*humanize.KiByte) } -// BenchmarkGetObject10KbXL - Benchmark XL.GetObject() for object size of 10KB. -func BenchmarkGetObject10KbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 10*humanize.KiByte) +// BenchmarkGetObject10KbErasure - Benchmark Erasure.GetObject() for object size of 10KB. +func BenchmarkGetObject10KbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 10*humanize.KiByte) } // BenchmarkGetObject100KbFS - Benchmark FS.GetObject() for object size of 100KB. @@ -473,9 +473,9 @@ func BenchmarkGetObject100KbFS(b *testing.B) { benchmarkGetObject(b, "FS", 100*humanize.KiByte) } -// BenchmarkGetObject100KbXL - Benchmark XL.GetObject() for object size of 100KB. -func BenchmarkGetObject100KbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 100*humanize.KiByte) +// BenchmarkGetObject100KbErasure - Benchmark Erasure.GetObject() for object size of 100KB. +func BenchmarkGetObject100KbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 100*humanize.KiByte) } // BenchmarkGetObject1MbFS - Benchmark FS.GetObject() for object size of 1MB. @@ -483,9 +483,9 @@ func BenchmarkGetObject1MbFS(b *testing.B) { benchmarkGetObject(b, "FS", 1*humanize.MiByte) } -// BenchmarkGetObject1MbXL - Benchmark XL.GetObject() for object size of 1MB. -func BenchmarkGetObject1MbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 1*humanize.MiByte) +// BenchmarkGetObject1MbErasure - Benchmark Erasure.GetObject() for object size of 1MB. +func BenchmarkGetObject1MbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 1*humanize.MiByte) } // BenchmarkGetObject5MbFS - Benchmark FS.GetObject() for object size of 5MB. @@ -493,9 +493,9 @@ func BenchmarkGetObject5MbFS(b *testing.B) { benchmarkGetObject(b, "FS", 5*humanize.MiByte) } -// BenchmarkGetObject5MbXL - Benchmark XL.GetObject() for object size of 5MB. -func BenchmarkGetObject5MbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 5*humanize.MiByte) +// BenchmarkGetObject5MbErasure - Benchmark Erasure.GetObject() for object size of 5MB. +func BenchmarkGetObject5MbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 5*humanize.MiByte) } // BenchmarkGetObject10MbFS - Benchmark FS.GetObject() for object size of 10MB. @@ -503,9 +503,9 @@ func BenchmarkGetObject10MbFS(b *testing.B) { benchmarkGetObject(b, "FS", 10*humanize.MiByte) } -// BenchmarkGetObject10MbXL - Benchmark XL.GetObject() for object size of 10MB. -func BenchmarkGetObject10MbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 10*humanize.MiByte) +// BenchmarkGetObject10MbErasure - Benchmark Erasure.GetObject() for object size of 10MB. +func BenchmarkGetObject10MbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 10*humanize.MiByte) } // BenchmarkGetObject25MbFS - Benchmark FS.GetObject() for object size of 25MB. @@ -514,9 +514,9 @@ func BenchmarkGetObject25MbFS(b *testing.B) { } -// BenchmarkGetObject25MbXL - Benchmark XL.GetObject() for object size of 25MB. 
-func BenchmarkGetObject25MbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 25*humanize.MiByte) +// BenchmarkGetObject25MbErasure - Benchmark Erasure.GetObject() for object size of 25MB. +func BenchmarkGetObject25MbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 25*humanize.MiByte) } // BenchmarkGetObject50MbFS - Benchmark FS.GetObject() for object size of 50MB. @@ -524,9 +524,9 @@ func BenchmarkGetObject50MbFS(b *testing.B) { benchmarkGetObject(b, "FS", 50*humanize.MiByte) } -// BenchmarkGetObject50MbXL - Benchmark XL.GetObject() for object size of 50MB. -func BenchmarkGetObject50MbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 50*humanize.MiByte) +// BenchmarkGetObject50MbErasure - Benchmark Erasure.GetObject() for object size of 50MB. +func BenchmarkGetObject50MbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 50*humanize.MiByte) } // parallel benchmarks for ObjectLayer.GetObject() . @@ -536,9 +536,9 @@ func BenchmarkGetObjectParallelVerySmallFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 10) } -// BenchmarkGetObjectParallelVerySmallXL - Benchmark XL.GetObject() for object size of 10 bytes. -func BenchmarkGetObjectParallelVerySmallXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 10) +// BenchmarkGetObjectParallelVerySmallErasure - Benchmark Erasure.GetObject() for object size of 10 bytes. +func BenchmarkGetObjectParallelVerySmallErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 10) } // BenchmarkGetObjectParallel10KbFS - Benchmark FS.GetObject() for object size of 10KB. @@ -546,9 +546,9 @@ func BenchmarkGetObjectParallel10KbFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 10*humanize.KiByte) } -// BenchmarkGetObjectParallel10KbXL - Benchmark XL.GetObject() for object size of 10KB. -func BenchmarkGetObjectParallel10KbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 10*humanize.KiByte) +// BenchmarkGetObjectParallel10KbErasure - Benchmark Erasure.GetObject() for object size of 10KB. +func BenchmarkGetObjectParallel10KbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 10*humanize.KiByte) } // BenchmarkGetObjectParallel100KbFS - Benchmark FS.GetObject() for object size of 100KB. @@ -556,9 +556,9 @@ func BenchmarkGetObjectParallel100KbFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 100*humanize.KiByte) } -// BenchmarkGetObjectParallel100KbXL - Benchmark XL.GetObject() for object size of 100KB. -func BenchmarkGetObjectParallel100KbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 100*humanize.KiByte) +// BenchmarkGetObjectParallel100KbErasure - Benchmark Erasure.GetObject() for object size of 100KB. +func BenchmarkGetObjectParallel100KbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 100*humanize.KiByte) } // BenchmarkGetObjectParallel1MbFS - Benchmark FS.GetObject() for object size of 1MB. @@ -566,9 +566,9 @@ func BenchmarkGetObjectParallel1MbFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 1*humanize.MiByte) } -// BenchmarkGetObjectParallel1MbXL - Benchmark XL.GetObject() for object size of 1MB. -func BenchmarkGetObjectParallel1MbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 1*humanize.MiByte) +// BenchmarkGetObjectParallel1MbErasure - Benchmark Erasure.GetObject() for object size of 1MB. +func BenchmarkGetObjectParallel1MbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 1*humanize.MiByte) } // BenchmarkGetObjectParallel5MbFS - Benchmark FS.GetObject() for object size of 5MB. 
@@ -576,9 +576,9 @@ func BenchmarkGetObjectParallel5MbFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 5*humanize.MiByte) } -// BenchmarkGetObjectParallel5MbXL - Benchmark XL.GetObject() for object size of 5MB. -func BenchmarkGetObjectParallel5MbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 5*humanize.MiByte) +// BenchmarkGetObjectParallel5MbErasure - Benchmark Erasure.GetObject() for object size of 5MB. +func BenchmarkGetObjectParallel5MbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 5*humanize.MiByte) } // BenchmarkGetObjectParallel10MbFS - Benchmark FS.GetObject() for object size of 10MB. @@ -586,9 +586,9 @@ func BenchmarkGetObjectParallel10MbFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 10*humanize.MiByte) } -// BenchmarkGetObjectParallel10MbXL - Benchmark XL.GetObject() for object size of 10MB. -func BenchmarkGetObjectParallel10MbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 10*humanize.MiByte) +// BenchmarkGetObjectParallel10MbErasure - Benchmark Erasure.GetObject() for object size of 10MB. +func BenchmarkGetObjectParallel10MbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 10*humanize.MiByte) } // BenchmarkGetObjectParallel25MbFS - Benchmark FS.GetObject() for object size of 25MB. @@ -597,9 +597,9 @@ func BenchmarkGetObjectParallel25MbFS(b *testing.B) { } -// BenchmarkGetObjectParallel25MbXL - Benchmark XL.GetObject() for object size of 25MB. -func BenchmarkGetObjectParallel25MbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 25*humanize.MiByte) +// BenchmarkGetObjectParallel25MbErasure - Benchmark Erasure.GetObject() for object size of 25MB. +func BenchmarkGetObjectParallel25MbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 25*humanize.MiByte) } // BenchmarkGetObjectParallel50MbFS - Benchmark FS.GetObject() for object size of 50MB. @@ -607,7 +607,7 @@ func BenchmarkGetObjectParallel50MbFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 50*humanize.MiByte) } -// BenchmarkGetObjectParallel50MbXL - Benchmark XL.GetObject() for object size of 50MB. -func BenchmarkGetObjectParallel50MbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 50*humanize.MiByte) +// BenchmarkGetObjectParallel50MbErasure - Benchmark Erasure.GetObject() for object size of 50MB. +func BenchmarkGetObjectParallel50MbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 50*humanize.MiByte) } diff --git a/cmd/object-api-getobjectinfo_test.go b/cmd/object-api-getobjectinfo_test.go index ba44d9d1f..73c656162 100644 --- a/cmd/object-api-getobjectinfo_test.go +++ b/cmd/object-api-getobjectinfo_test.go @@ -22,7 +22,7 @@ import ( "testing" ) -// Wrapper for calling GetObjectInfo tests for both XL multiple disks and single node setup. +// Wrapper for calling GetObjectInfo tests for both Erasure multiple disks and single node setup. func TestGetObjectInfo(t *testing.T) { ExecObjectLayerTest(t, testGetObjectInfo) } @@ -30,7 +30,7 @@ func TestGetObjectInfo(t *testing.T) { // Testing GetObjectInfo(). func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) { // This bucket is used for testing getObjectInfo operations. 
- err := obj.MakeBucketWithLocation(context.Background(), "test-getobjectinfo", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "test-getobjectinfo", BucketOptions{}) if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go index 2be7f223d..52f79eca2 100644 --- a/cmd/object-api-interface.go +++ b/cmd/object-api-interface.go @@ -23,7 +23,6 @@ import ( "github.com/minio/minio-go/v6/pkg/encrypt" "github.com/minio/minio-go/v6/pkg/tags" - "github.com/minio/minio/pkg/bucket/policy" "github.com/minio/minio/pkg/madmin" ) @@ -32,16 +31,25 @@ import ( type CheckCopyPreconditionFn func(o ObjectInfo, encETag string) bool // GetObjectInfoFn is the signature of GetObjectInfo function. -type GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) +type GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) -// ObjectOptions represents object options for ObjectLayer operations +// ObjectOptions represents object options for ObjectLayer object operations type ObjectOptions struct { ServerSideEncryption encrypt.ServerSide + Versioned bool + VersionID string UserDefined map[string]string PartNumber int CheckCopyPrecondFn CheckCopyPreconditionFn } +// BucketOptions represents bucket options for ObjectLayer bucket operations +type BucketOptions struct { + Location string + LockEnabled bool + VersioningEnabled bool +} + // LockType represents required locking for ObjectLayer operations type LockType int @@ -62,12 +70,14 @@ type ObjectLayer interface { StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) // local queries only local disks // Bucket operations. - MakeBucketWithLocation(ctx context.Context, bucket string, location string, lockEnabled bool) error + MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) + ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (result ListObjectVersionsInfo, err error) + // Walk lists all objects including versions, delete markers. Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error // Object operations. 
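The BucketOptions struct above replaces the positional location and lock arguments of MakeBucketWithLocation, so new flags such as VersioningEnabled do not ripple through every call site. A small sketch of the call-shape change, using a stub interface in place of ObjectLayer; bucketMaker and fakeLayer are illustrative names, not part of the diff.

```go
package main

import (
	"context"
	"fmt"
)

// BucketOptions mirrors the struct added in cmd/object-api-interface.go.
type BucketOptions struct {
	Location          string
	LockEnabled       bool
	VersioningEnabled bool
}

// bucketMaker is a stub for the relevant slice of ObjectLayer.
type bucketMaker interface {
	MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error
}

type fakeLayer struct{}

func (fakeLayer) MakeBucketWithLocation(_ context.Context, bucket string, opts BucketOptions) error {
	fmt.Printf("created %s (location=%q lock=%v versioning=%v)\n",
		bucket, opts.Location, opts.LockEnabled, opts.VersioningEnabled)
	return nil
}

func main() {
	var objAPI bucketMaker = fakeLayer{}

	// Old call shape: MakeBucketWithLocation(ctx, bucket, "", false).
	// New call shape: all options travel together in one struct.
	_ = objAPI.MakeBucketWithLocation(context.Background(), "test-bucket", BucketOptions{})
	_ = objAPI.MakeBucketWithLocation(context.Background(), "locked-bucket", BucketOptions{LockEnabled: true})
}
```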
@@ -83,8 +93,8 @@ type ObjectLayer interface { GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) - DeleteObject(ctx context.Context, bucket, object string) error - DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) + DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) + DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) // Multipart operations. ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) @@ -101,8 +111,8 @@ type ObjectLayer interface { ReloadFormat(ctx context.Context, dryRun bool) error HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, error) - HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) - HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn healObjectFn) error + HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) + HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) error ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error) // Policy operations @@ -124,7 +134,7 @@ type ObjectLayer interface { IsReady(ctx context.Context) bool // ObjectTagging operations - PutObjectTags(context.Context, string, string, string) error - GetObjectTags(context.Context, string, string) (*tags.Tags, error) - DeleteObjectTags(context.Context, string, string) error + PutObjectTags(context.Context, string, string, string, ObjectOptions) error + GetObjectTags(context.Context, string, string, ObjectOptions) (*tags.Tags, error) + DeleteObjectTags(context.Context, string, string, ObjectOptions) error } diff --git a/cmd/object-api-listobjects_test.go b/cmd/object-api-listobjects_test.go index 46623206b..c60966249 100644 --- a/cmd/object-api-listobjects_test.go +++ b/cmd/object-api-listobjects_test.go @@ -29,7 +29,7 @@ import ( "testing" ) -// Wrapper for calling ListObjects tests for both XL multiple disks and single node setup. +// Wrapper for calling ListObjects tests for both Erasure multiple disks and single node setup. func TestListObjects(t *testing.T) { ExecObjectLayerTest(t, testListObjects) } @@ -49,7 +49,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) { "test-bucket-single-object", } for _, bucket := range testBuckets { - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } @@ -669,7 +669,7 @@ func BenchmarkListObjects(b *testing.B) { bucket := "ls-benchmark-bucket" // Create a bucket. 
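DeleteObject now reports what the delete actually did via the returned ObjectInfo, which under versioning can mean a delete marker was created rather than data removed. A self-contained sketch of the updated caller pattern; the types and fakeLayer here are simplified stand-ins, not the real cmd definitions.

```go
package main

import (
	"context"
	"fmt"
)

// Local stand-ins for the cmd types; the real ObjectOptions and
// ObjectInfo carry more fields than shown here.
type ObjectOptions struct {
	Versioned bool
	VersionID string
}

type ObjectInfo struct {
	Name         string
	VersionID    string
	DeleteMarker bool
}

type objectDeleter interface {
	DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
}

// removeObject sketches the new caller pattern: the target version rides
// in ObjectOptions, and the returned ObjectInfo tells the caller whether
// the delete produced a delete marker instead of removing data.
func removeObject(ctx context.Context, objAPI objectDeleter, bucket, object, versionID string) error {
	objInfo, err := objAPI.DeleteObject(ctx, bucket, object, ObjectOptions{VersionID: versionID})
	if err != nil {
		return err
	}
	if objInfo.DeleteMarker {
		fmt.Println("delete marker created with version", objInfo.VersionID)
	}
	return nil
}

// fakeLayer is an illustrative implementation: deleting without an
// explicit version on a versioned bucket yields a delete marker.
type fakeLayer struct{}

func (fakeLayer) DeleteObject(_ context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	return ObjectInfo{Name: object, VersionID: opts.VersionID, DeleteMarker: opts.VersionID == ""}, nil
}

func main() {
	_ = removeObject(context.Background(), fakeLayer{}, "bucket", "object", "")
}
```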
- err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { b.Fatal(err) } diff --git a/cmd/object-api-multipart_test.go b/cmd/object-api-multipart_test.go index 351a5c944..f621a4cba 100644 --- a/cmd/object-api-multipart_test.go +++ b/cmd/object-api-multipart_test.go @@ -28,7 +28,7 @@ import ( "github.com/minio/minio/pkg/hash" ) -// Wrapper for calling NewMultipartUpload tests for both XL multiple disks and single node setup. +// Wrapper for calling NewMultipartUpload tests for both Erasure multiple disks and single node setup. func TestObjectNewMultipartUpload(t *testing.T) { ExecObjectLayerTest(t, testObjectNewMultipartUpload) } @@ -55,7 +55,7 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr } // Create bucket before intiating NewMultipartUpload. - err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -77,7 +77,7 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr } } -// Wrapper for calling AbortMultipartUpload tests for both XL multiple disks and single node setup. +// Wrapper for calling AbortMultipartUpload tests for both Erasure multiple disks and single node setup. func TestObjectAbortMultipartUpload(t *testing.T) { ExecObjectLayerTest(t, testObjectAbortMultipartUpload) } @@ -89,7 +89,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test object := "minio-object" opts := ObjectOptions{} // Create bucket before intiating NewMultipartUpload. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -124,7 +124,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test } } -// Wrapper for calling isUploadIDExists tests for both XL multiple disks and single node setup. +// Wrapper for calling isUploadIDExists tests for both Erasure multiple disks and single node setup. func TestObjectAPIIsUploadIDExists(t *testing.T) { ExecObjectLayerTest(t, testObjectAPIIsUploadIDExists) } @@ -135,7 +135,7 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE object := "minio-object" // Create bucket before intiating NewMultipartUpload. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -154,7 +154,7 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE } } -// Wrapper for calling PutObjectPart tests for both XL multiple disks and single node setup. +// Wrapper for calling PutObjectPart tests for both Erasure multiple disks and single node setup. func TestObjectAPIPutObjectPart(t *testing.T) { ExecObjectLayerTest(t, testObjectAPIPutObjectPart) } @@ -166,7 +166,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH object := "minio-object" opts := ObjectOptions{} // Create bucket before intiating NewMultipartUpload. 
- err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -178,7 +178,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH t.Fatalf("%s : %s", instanceType, err.Error()) } // Creating a dummy bucket for tests. - err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -210,7 +210,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH {"a", "obj", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: a")}, // Test case - 5. // Case with invalid object names. - {bucket, "", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Object name invalid: minio-bucket#")}, + {bucket, "", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Object name invalid: minio-bucket/")}, // Test case - 6. // Valid object and bucket names but non-existent bucket. {"abc", "def", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: abc")}, @@ -286,7 +286,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH } } -// Wrapper for calling TestListMultipartUploads tests for both XL multiple disks and single node setup. +// Wrapper for calling TestListMultipartUploads tests for both Erasure multiple disks and single node setup. func TestListMultipartUploads(t *testing.T) { ExecObjectLayerTest(t, testListMultipartUploads) } @@ -302,7 +302,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan // objectNames[0]. // uploadIds [0]. // Create bucket before initiating NewMultipartUpload. - err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -320,7 +320,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan // objectNames[0]. // uploadIds [1-3]. // Bucket to test for mutiple upload Id's for a given object. - err = obj.MakeBucketWithLocation(context.Background(), bucketNames[1], "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketNames[1], BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -341,7 +341,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan // bucketnames[2]. // objectNames[0-2]. // uploadIds [4-9]. - err = obj.MakeBucketWithLocation(context.Background(), bucketNames[2], "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketNames[2], BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -1150,7 +1150,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan } } -// Wrapper for calling TestListObjectPartsDiskNotFound tests for both XL multiple disks and single node setup. +// Wrapper for calling TestListObjectPartsDiskNotFound tests for both Erasure multiple disks and single node setup. 
func TestListObjectPartsDiskNotFound(t *testing.T) { ExecObjectLayerDiskAlteredTest(t, testListObjectPartsDiskNotFound) } @@ -1166,7 +1166,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks // objectNames[0]. // uploadIds [0]. // Create bucket before intiating NewMultipartUpload. - err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -1395,7 +1395,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks } } -// Wrapper for calling TestListObjectParts tests for both XL multiple disks and single node setup. +// Wrapper for calling TestListObjectParts tests for both Erasure multiple disks and single node setup. func TestListObjectParts(t *testing.T) { ExecObjectLayerTest(t, testListObjectParts) } @@ -1411,7 +1411,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler) // objectNames[0]. // uploadIds [0]. // Create bucket before intiating NewMultipartUpload. - err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -1657,7 +1657,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T // objectNames[0]. // uploadIds [0]. // Create bucket before intiating NewMultipartUpload. - err = obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketNames[0], BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -1796,7 +1796,6 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T // the case above successfully completes CompleteMultipartUpload, the remaining Parts will be flushed. // Expecting to fail with Invalid UploadID. {bucketNames[0], objectNames[0], uploadIDs[0], inputParts[4].parts, "", InvalidUploadID{UploadID: uploadIDs[0]}, false}, - // Expecting to fail due to bad } for _, testCase := range testCases { @@ -1829,16 +1828,16 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T // Benchmarks for ObjectLayer.PutObjectPart(). // The intent is to benchmark PutObjectPart for various sizes ranging from few bytes to 100MB. -// Also each of these Benchmarks are run both XL and FS backends. +// Also each of these Benchmarks are run both Erasure and FS backends. // BenchmarkPutObjectPart5MbFS - Benchmark FS.PutObjectPart() for object size of 5MB. func BenchmarkPutObjectPart5MbFS(b *testing.B) { benchmarkPutObjectPart(b, "FS", 5*humanize.MiByte) } -// BenchmarkPutObjectPart5MbXL - Benchmark XL.PutObjectPart() for object size of 5MB. -func BenchmarkPutObjectPart5MbXL(b *testing.B) { - benchmarkPutObjectPart(b, "XL", 5*humanize.MiByte) +// BenchmarkPutObjectPart5MbErasure - Benchmark Erasure.PutObjectPart() for object size of 5MB. +func BenchmarkPutObjectPart5MbErasure(b *testing.B) { + benchmarkPutObjectPart(b, "Erasure", 5*humanize.MiByte) } // BenchmarkPutObjectPart10MbFS - Benchmark FS.PutObjectPart() for object size of 10MB. 
@@ -1846,9 +1845,9 @@ func BenchmarkPutObjectPart10MbFS(b *testing.B) { benchmarkPutObjectPart(b, "FS", 10*humanize.MiByte) } -// BenchmarkPutObjectPart10MbXL - Benchmark XL.PutObjectPart() for object size of 10MB. -func BenchmarkPutObjectPart10MbXL(b *testing.B) { - benchmarkPutObjectPart(b, "XL", 10*humanize.MiByte) +// BenchmarkPutObjectPart10MbErasure - Benchmark Erasure.PutObjectPart() for object size of 10MB. +func BenchmarkPutObjectPart10MbErasure(b *testing.B) { + benchmarkPutObjectPart(b, "Erasure", 10*humanize.MiByte) } // BenchmarkPutObjectPart25MbFS - Benchmark FS.PutObjectPart() for object size of 25MB. @@ -1857,9 +1856,9 @@ func BenchmarkPutObjectPart25MbFS(b *testing.B) { } -// BenchmarkPutObjectPart25MbXL - Benchmark XL.PutObjectPart() for object size of 25MB. -func BenchmarkPutObjectPart25MbXL(b *testing.B) { - benchmarkPutObjectPart(b, "XL", 25*humanize.MiByte) +// BenchmarkPutObjectPart25MbErasure - Benchmark Erasure.PutObjectPart() for object size of 25MB. +func BenchmarkPutObjectPart25MbErasure(b *testing.B) { + benchmarkPutObjectPart(b, "Erasure", 25*humanize.MiByte) } // BenchmarkPutObjectPart50MbFS - Benchmark FS.PutObjectPart() for object size of 50MB. @@ -1867,7 +1866,7 @@ func BenchmarkPutObjectPart50MbFS(b *testing.B) { benchmarkPutObjectPart(b, "FS", 50*humanize.MiByte) } -// BenchmarkPutObjectPart50MbXL - Benchmark XL.PutObjectPart() for object size of 50MB. -func BenchmarkPutObjectPart50MbXL(b *testing.B) { - benchmarkPutObjectPart(b, "XL", 50*humanize.MiByte) +// BenchmarkPutObjectPart50MbErasure - Benchmark Erasure.PutObjectPart() for object size of 50MB. +func BenchmarkPutObjectPart50MbErasure(b *testing.B) { + benchmarkPutObjectPart(b, "Erasure", 50*humanize.MiByte) } diff --git a/cmd/object-api-putobject_test.go b/cmd/object-api-putobject_test.go index 9886d31f0..1c6a8aac9 100644 --- a/cmd/object-api-putobject_test.go +++ b/cmd/object-api-putobject_test.go @@ -34,7 +34,7 @@ func md5Header(data []byte) map[string]string { return map[string]string{"etag": getMD5Hash([]byte(data))} } -// Wrapper for calling PutObject tests for both XL multiple disks and single node setup. +// Wrapper for calling PutObject tests for both Erasure multiple disks and single node setup. func TestObjectAPIPutObjectSingle(t *testing.T) { ExecObjectLayerTest(t, testObjectAPIPutObject) } @@ -46,14 +46,14 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl object := "minio-object" // Create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) } // Creating a dummy bucket for tests. - err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -198,7 +198,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl } } -// Wrapper for calling PutObject tests for both XL multiple disks case +// Wrapper for calling PutObject tests for both Erasure multiple disks case // when quorum is not available. 
func TestObjectAPIPutObjectDiskNotFound(t *testing.T) { ExecObjectLayerDiskAlteredTest(t, testObjectAPIPutObjectDiskNotFound) @@ -211,14 +211,14 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di object := "minio-object" // Create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) } // Creating a dummy bucket for tests. - err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -311,7 +311,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di } } -// Wrapper for calling PutObject tests for both XL multiple disks and single node setup. +// Wrapper for calling PutObject tests for both Erasure multiple disks and single node setup. func TestObjectAPIPutObjectStaleFiles(t *testing.T) { ExecObjectLayerStaleFilesTest(t, testObjectAPIPutObjectStaleFiles) } @@ -323,7 +323,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk object := "minio-object" // Create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -345,7 +345,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk } } -// Wrapper for calling Multipart PutObject tests for both XL multiple disks and single node setup. +// Wrapper for calling Multipart PutObject tests for both Erasure multiple disks and single node setup. func TestObjectAPIMultipartPutObjectStaleFiles(t *testing.T) { ExecObjectLayerStaleFilesTest(t, testObjectAPIMultipartPutObjectStaleFiles) } @@ -357,7 +357,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str object := "minio-object" // Create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -425,16 +425,16 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str // Benchmarks for ObjectLayer.PutObject(). // The intent is to benchmark PutObject for various sizes ranging from few bytes to 100MB. -// Also each of these Benchmarks are run both XL and FS backends. +// Also each of these Benchmarks are run both Erasure and FS backends. // BenchmarkPutObjectVerySmallFS - Benchmark FS.PutObject() for object size of 10 bytes. func BenchmarkPutObjectVerySmallFS(b *testing.B) { benchmarkPutObject(b, "FS", 10) } -// BenchmarkPutObjectVerySmallXL - Benchmark XL.PutObject() for object size of 10 bytes. -func BenchmarkPutObjectVerySmallXL(b *testing.B) { - benchmarkPutObject(b, "XL", 10) +// BenchmarkPutObjectVerySmallErasure - Benchmark Erasure.PutObject() for object size of 10 bytes. +func BenchmarkPutObjectVerySmallErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 10) } // BenchmarkPutObject10KbFS - Benchmark FS.PutObject() for object size of 10KB. 
@@ -442,9 +442,9 @@ func BenchmarkPutObject10KbFS(b *testing.B) { benchmarkPutObject(b, "FS", 10*humanize.KiByte) } -// BenchmarkPutObject10KbXL - Benchmark XL.PutObject() for object size of 10KB. -func BenchmarkPutObject10KbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 10*humanize.KiByte) +// BenchmarkPutObject10KbErasure - Benchmark Erasure.PutObject() for object size of 10KB. +func BenchmarkPutObject10KbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 10*humanize.KiByte) } // BenchmarkPutObject100KbFS - Benchmark FS.PutObject() for object size of 100KB. @@ -452,9 +452,9 @@ func BenchmarkPutObject100KbFS(b *testing.B) { benchmarkPutObject(b, "FS", 100*humanize.KiByte) } -// BenchmarkPutObject100KbXL - Benchmark XL.PutObject() for object size of 100KB. -func BenchmarkPutObject100KbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 100*humanize.KiByte) +// BenchmarkPutObject100KbErasure - Benchmark Erasure.PutObject() for object size of 100KB. +func BenchmarkPutObject100KbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 100*humanize.KiByte) } // BenchmarkPutObject1MbFS - Benchmark FS.PutObject() for object size of 1MB. @@ -462,9 +462,9 @@ func BenchmarkPutObject1MbFS(b *testing.B) { benchmarkPutObject(b, "FS", 1*humanize.MiByte) } -// BenchmarkPutObject1MbXL - Benchmark XL.PutObject() for object size of 1MB. -func BenchmarkPutObject1MbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 1*humanize.MiByte) +// BenchmarkPutObject1MbErasure - Benchmark Erasure.PutObject() for object size of 1MB. +func BenchmarkPutObject1MbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 1*humanize.MiByte) } // BenchmarkPutObject5MbFS - Benchmark FS.PutObject() for object size of 5MB. @@ -472,9 +472,9 @@ func BenchmarkPutObject5MbFS(b *testing.B) { benchmarkPutObject(b, "FS", 5*humanize.MiByte) } -// BenchmarkPutObject5MbXL - Benchmark XL.PutObject() for object size of 5MB. -func BenchmarkPutObject5MbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 5*humanize.MiByte) +// BenchmarkPutObject5MbErasure - Benchmark Erasure.PutObject() for object size of 5MB. +func BenchmarkPutObject5MbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 5*humanize.MiByte) } // BenchmarkPutObject10MbFS - Benchmark FS.PutObject() for object size of 10MB. @@ -482,9 +482,9 @@ func BenchmarkPutObject10MbFS(b *testing.B) { benchmarkPutObject(b, "FS", 10*humanize.MiByte) } -// BenchmarkPutObject10MbXL - Benchmark XL.PutObject() for object size of 10MB. -func BenchmarkPutObject10MbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 10*humanize.MiByte) +// BenchmarkPutObject10MbErasure - Benchmark Erasure.PutObject() for object size of 10MB. +func BenchmarkPutObject10MbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 10*humanize.MiByte) } // BenchmarkPutObject25MbFS - Benchmark FS.PutObject() for object size of 25MB. @@ -493,9 +493,9 @@ func BenchmarkPutObject25MbFS(b *testing.B) { } -// BenchmarkPutObject25MbXL - Benchmark XL.PutObject() for object size of 25MB. -func BenchmarkPutObject25MbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 25*humanize.MiByte) +// BenchmarkPutObject25MbErasure - Benchmark Erasure.PutObject() for object size of 25MB. +func BenchmarkPutObject25MbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 25*humanize.MiByte) } // BenchmarkPutObject50MbFS - Benchmark FS.PutObject() for object size of 50MB. 
@@ -503,9 +503,9 @@ func BenchmarkPutObject50MbFS(b *testing.B) { benchmarkPutObject(b, "FS", 50*humanize.MiByte) } -// BenchmarkPutObject50MbXL - Benchmark XL.PutObject() for object size of 50MB. -func BenchmarkPutObject50MbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 50*humanize.MiByte) +// BenchmarkPutObject50MbErasure - Benchmark Erasure.PutObject() for object size of 50MB. +func BenchmarkPutObject50MbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 50*humanize.MiByte) } // parallel benchmarks for ObjectLayer.PutObject() . @@ -515,9 +515,9 @@ func BenchmarkParallelPutObjectVerySmallFS(b *testing.B) { benchmarkPutObjectParallel(b, "FS", 10) } -// BenchmarkParallelPutObjectVerySmallXL - BenchmarkParallel XL.PutObject() for object size of 10 bytes. -func BenchmarkParallelPutObjectVerySmallXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 10) +// BenchmarkParallelPutObjectVerySmallErasure - BenchmarkParallel Erasure.PutObject() for object size of 10 bytes. +func BenchmarkParallelPutObjectVerySmallErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 10) } // BenchmarkParallelPutObject10KbFS - BenchmarkParallel FS.PutObject() for object size of 10KB. @@ -525,9 +525,9 @@ func BenchmarkParallelPutObject10KbFS(b *testing.B) { benchmarkPutObjectParallel(b, "FS", 10*humanize.KiByte) } -// BenchmarkParallelPutObject10KbXL - BenchmarkParallel XL.PutObject() for object size of 10KB. -func BenchmarkParallelPutObject10KbXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 10*humanize.KiByte) +// BenchmarkParallelPutObject10KbErasure - BenchmarkParallel Erasure.PutObject() for object size of 10KB. +func BenchmarkParallelPutObject10KbErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 10*humanize.KiByte) } // BenchmarkParallelPutObject100KbFS - BenchmarkParallel FS.PutObject() for object size of 100KB. @@ -535,9 +535,9 @@ func BenchmarkParallelPutObject100KbFS(b *testing.B) { benchmarkPutObjectParallel(b, "FS", 100*humanize.KiByte) } -// BenchmarkParallelPutObject100KbXL - BenchmarkParallel XL.PutObject() for object size of 100KB. -func BenchmarkParallelPutObject100KbXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 100*humanize.KiByte) +// BenchmarkParallelPutObject100KbErasure - BenchmarkParallel Erasure.PutObject() for object size of 100KB. +func BenchmarkParallelPutObject100KbErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 100*humanize.KiByte) } // BenchmarkParallelPutObject1MbFS - BenchmarkParallel FS.PutObject() for object size of 1MB. @@ -545,9 +545,9 @@ func BenchmarkParallelPutObject1MbFS(b *testing.B) { benchmarkPutObjectParallel(b, "FS", 1*humanize.MiByte) } -// BenchmarkParallelPutObject1MbXL - BenchmarkParallel XL.PutObject() for object size of 1MB. -func BenchmarkParallelPutObject1MbXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 1*humanize.MiByte) +// BenchmarkParallelPutObject1MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 1MB. +func BenchmarkParallelPutObject1MbErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 1*humanize.MiByte) } // BenchmarkParallelPutObject5MbFS - BenchmarkParallel FS.PutObject() for object size of 5MB. @@ -555,9 +555,9 @@ func BenchmarkParallelPutObject5MbFS(b *testing.B) { benchmarkPutObjectParallel(b, "FS", 5*humanize.MiByte) } -// BenchmarkParallelPutObject5MbXL - BenchmarkParallel XL.PutObject() for object size of 5MB. 
-func BenchmarkParallelPutObject5MbXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 5*humanize.MiByte) +// BenchmarkParallelPutObject5MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 5MB. +func BenchmarkParallelPutObject5MbErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 5*humanize.MiByte) } // BenchmarkParallelPutObject10MbFS - BenchmarkParallel FS.PutObject() for object size of 10MB. @@ -565,9 +565,9 @@ func BenchmarkParallelPutObject10MbFS(b *testing.B) { benchmarkPutObjectParallel(b, "FS", 10*humanize.MiByte) } -// BenchmarkParallelPutObject10MbXL - BenchmarkParallel XL.PutObject() for object size of 10MB. -func BenchmarkParallelPutObject10MbXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 10*humanize.MiByte) +// BenchmarkParallelPutObject10MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 10MB. +func BenchmarkParallelPutObject10MbErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 10*humanize.MiByte) } // BenchmarkParallelPutObject25MbFS - BenchmarkParallel FS.PutObject() for object size of 25MB. @@ -576,7 +576,7 @@ func BenchmarkParallelPutObject25MbFS(b *testing.B) { } -// BenchmarkParallelPutObject25MbXL - BenchmarkParallel XL.PutObject() for object size of 25MB. -func BenchmarkParallelPutObject25MbXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 25*humanize.MiByte) +// BenchmarkParallelPutObject25MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 25MB. +func BenchmarkParallelPutObject25MbErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 25*humanize.MiByte) } diff --git a/cmd/object-handlers-common.go b/cmd/object-handlers-common.go index 9d7ecb1b7..73cc3396b 100644 --- a/cmd/object-handlers-common.go +++ b/cmd/object-handlers-common.go @@ -21,10 +21,12 @@ import ( "fmt" "net/http" "regexp" + "strconv" "time" "github.com/minio/minio/cmd/crypto" xhttp "github.com/minio/minio/cmd/http" + "github.com/minio/minio/pkg/bucket/lifecycle" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/handlers" ) @@ -253,11 +255,35 @@ func isETagEqual(left, right string) bool { return canonicalizeETag(left) == canonicalizeETag(right) } -// setAmzExpirationHeader sets x-amz-expiration header with expiry time -// after analyzing the current bucket lifecycle rules if any. -func setAmzExpirationHeader(w http.ResponseWriter, bucket string, objInfo ObjectInfo) { - if lc, err := globalLifecycleSys.Get(bucket); err == nil { - ruleID, expiryTime := lc.PredictExpiryTime(objInfo.Name, objInfo.ModTime, objInfo.UserTags) +// setPutObjHeaders sets all the necessary headers returned back +// upon a successful Put/Copy/CompleteMultipart/Delete request; +// pass delete as true to emit only the delete-related headers +func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, delete bool) { + // We must not use the http.Header().Set method here because some (broken) + // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). + // Therefore, we have to set the ETag directly as map entry. + if objInfo.ETag != "" && !delete { + w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`} + } + + // Set the relevant version ID as part of the response header.
+ if objInfo.VersionID != "" { + w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID} + // If version is a deleted marker, set this header as well + if objInfo.DeleteMarker && delete { // only returned during delete object + w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)} + } + } + + if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil && !delete { + ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{ + Name: objInfo.Name, + UserTags: objInfo.UserTags, + VersionID: objInfo.VersionID, + ModTime: objInfo.ModTime, + IsLatest: objInfo.IsLatest, + DeleteMarker: objInfo.DeleteMarker, + }) if !expiryTime.IsZero() { w.Header()[xhttp.AmzExpiration] = []string{ fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID), @@ -269,27 +295,37 @@ func setAmzExpirationHeader(w http.ResponseWriter, bucket string, objInfo Object // deleteObject is a convenient wrapper to delete an object, this // is a common function to be called from object handlers and // web handlers. -func deleteObject(ctx context.Context, obj ObjectLayer, cache CacheObjectLayer, bucket, object string, r *http.Request) (err error) { +func deleteObject(ctx context.Context, obj ObjectLayer, cache CacheObjectLayer, bucket, object string, r *http.Request, opts ObjectOptions) (objInfo ObjectInfo, err error) { deleteObject := obj.DeleteObject if cache != nil { deleteObject = cache.DeleteObject } // Proceed to delete the object. - if err = deleteObject(ctx, bucket, object); err != nil { - return err + if objInfo, err = deleteObject(ctx, bucket, object, opts); err != nil { + return objInfo, err } - // Notify object deleted event. - sendEvent(eventArgs{ - EventName: event.ObjectRemovedDelete, - BucketName: bucket, - Object: ObjectInfo{ - Name: object, - }, - ReqParams: extractReqParams(r), - UserAgent: r.UserAgent(), - Host: handlers.GetSourceIP(r), - }) - - return nil + // Requesting only a delete marker which was successfully attempted. + if objInfo.DeleteMarker { + // Notify object deleted marker event. + sendEvent(eventArgs{ + EventName: event.ObjectRemovedDeleteMarkerCreated, + BucketName: bucket, + Object: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: handlers.GetSourceIP(r), + }) + } else { + // Notify object deleted event. 
+ sendEvent(eventArgs{ + EventName: event.ObjectRemovedDelete, + BucketName: bucket, + Object: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: handlers.GetSourceIP(r), + }) + } + return objInfo, nil } diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index d8d7404e9..c6a09bc4f 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -31,6 +31,7 @@ import ( "time" + "github.com/google/uuid" "github.com/gorilla/mux" miniogo "github.com/minio/minio-go/v6" "github.com/minio/minio-go/v6/pkg/encrypt" @@ -309,11 +310,6 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req return } - if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } - // get gateway encryption options opts, err := getOpts(ctx, r, bucket, object) if err != nil { @@ -432,7 +428,6 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req } setHeadGetRespHeaders(w, r.URL.Query()) - setAmzExpirationHeader(w, bucket, objInfo) statusCodeWritten := false httpWriter := ioutil.WriteOnClose(w) @@ -496,11 +491,6 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re return } - if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchVersion)) - return - } - getObjectInfo := objectAPI.GetObjectInfo if api.CacheAPI() != nil { getObjectInfo = api.CacheAPI().GetObjectInfo @@ -617,9 +607,6 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re // Set any additional requested response headers. setHeadGetRespHeaders(w, r.URL.Query()) - // Set the expiration header - setAmzExpirationHeader(w, bucket, objInfo) - // Successful response. if rs != nil { w.WriteHeader(http.StatusPartialContent) @@ -773,35 +760,16 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re return } - // TODO: Reject requests where body/payload is present, for now we don't even read it. - // Read escaped copy source path to check for parameters. cpSrcPath := r.Header.Get(xhttp.AmzCopySource) - - // Check https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html - // Regardless of whether you have enabled versioning, each object in your bucket - // has a version ID. If you have not enabled versioning, Amazon S3 sets the value - // of the version ID to null. If you have enabled versioning, Amazon S3 assigns a - // unique version ID value for the object. + var vid string if u, err := url.Parse(cpSrcPath); err == nil { - // Check if versionId query param was added, if yes then check if - // its non "null" value, we should error out since we do not support - // any versions other than "null". - if vid := u.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } + vid = strings.TrimSpace(u.Query().Get("versionId")) // Note that url.Parse does the unescaping cpSrcPath = u.Path } - if vid := r.Header.Get(xhttp.AmzCopySourceVersionID); vid != "" { - // Check if versionId header was added, if yes then check if - // its non "null" value, we should error out since we do not support - // any versions other than "null". 
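The vid handling in the copy handlers boils down to: take the versionId query parameter from the copy source if present, otherwise the X-Amz-Copy-Source-Version-Id header, and accept only an empty value, "null", or a well-formed UUID. A self-contained sketch of that flow, assuming nullVersionID is the literal "null" as used elsewhere in the codebase; copySourceVersionID is an illustrative helper, not a function in this diff.

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/google/uuid"
)

const nullVersionID = "null" // assumed to match the cmd package constant

// copySourceVersionID mirrors the extraction order in CopyObjectHandler:
// the versionId query parameter on the copy source wins, falling back to
// the X-Amz-Copy-Source-Version-Id header.
func copySourceVersionID(r *http.Request) (string, error) {
	var vid string
	if u, err := url.Parse(r.Header.Get("X-Amz-Copy-Source")); err == nil {
		vid = strings.TrimSpace(u.Query().Get("versionId"))
	}
	if vid == "" {
		vid = strings.TrimSpace(r.Header.Get("X-Amz-Copy-Source-Version-Id"))
	}
	// Anything other than "" or "null" must be a well-formed UUID.
	if vid != "" && vid != nullVersionID {
		if _, err := uuid.Parse(vid); err != nil {
			return "", fmt.Errorf("version not found: %s", vid)
		}
	}
	return vid, nil
}

func main() {
	r, _ := http.NewRequest(http.MethodPut, "/bucket/object", nil)
	r.Header.Set("X-Amz-Copy-Source", "/srcbucket/srcobject?versionId=null")
	vid, err := copySourceVersionID(r)
	fmt.Println(vid, err) // null <nil>
}
```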
- if vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } + if vid == "" { + vid = strings.TrimSpace(r.Header.Get(xhttp.AmzCopySourceVersionID)) } srcBucket, srcObject := path2BucketObject(cpSrcPath) @@ -811,6 +779,18 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re return } + if vid != "" && vid != nullVersionID { + _, err := uuid.Parse(vid) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, VersionNotFound{ + Bucket: srcBucket, + Object: srcObject, + VersionID: vid, + }), r.URL, guessIsBrowserReq(r)) + return + } + } + if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, srcBucket, srcObject); s3Error != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r)) return @@ -849,12 +829,15 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } + srcOpts.VersionID = vid + // convert copy src encryption options for GET calls - var getOpts = ObjectOptions{} + var getOpts = ObjectOptions{VersionID: srcOpts.VersionID, Versioned: srcOpts.Versioned} getSSE := encrypt.SSE(srcOpts.ServerSideEncryption) if getSSE != srcOpts.ServerSideEncryption { getOpts.ServerSideEncryption = getSSE } + dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, nil) if err != nil { logger.LogIf(ctx, err) @@ -1193,11 +1176,12 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re } } - setAmzExpirationHeader(w, dstBucket, objInfo) - - response := generateCopyObjectResponse(getDecryptedETag(r.Header, objInfo, false), objInfo.ModTime) + objInfo.ETag = getDecryptedETag(r.Header, objInfo, false) + response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime) encodedSuccessResponse := encodeResponse(response) + setPutObjHeaders(w, objInfo, false) + // Write success response. writeSuccessResponseXML(w, encodedSuccessResponse) @@ -1376,6 +1360,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req sha256hex = getContentSha256Cksum(r, serviceS3) } } + if err := enforceBucketQuota(ctx, bucket, size); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return @@ -1487,39 +1472,30 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req return } - etag := objInfo.ETag switch { case objInfo.IsCompressed(): if !strings.HasSuffix(objInfo.ETag, "-1") { - etag = objInfo.ETag + "-1" + objInfo.ETag = objInfo.ETag + "-1" } case crypto.IsEncrypted(objInfo.UserDefined): switch { case crypto.S3.IsEncrypted(objInfo.UserDefined): w.Header().Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256) - etag, _ = DecryptETag(objectEncryptionKey, ObjectInfo{ETag: etag}) + objInfo.ETag, _ = DecryptETag(objectEncryptionKey, ObjectInfo{ETag: objInfo.ETag}) case crypto.SSEC.IsEncrypted(objInfo.UserDefined): w.Header().Set(crypto.SSECAlgorithm, r.Header.Get(crypto.SSECAlgorithm)) w.Header().Set(crypto.SSECKeyMD5, r.Header.Get(crypto.SSECKeyMD5)) - if len(etag) >= 32 && strings.Count(etag, "-") != 1 { - etag = etag[len(etag)-32:] + if len(objInfo.ETag) >= 32 && strings.Count(objInfo.ETag, "-") != 1 { + objInfo.ETag = objInfo.ETag[len(objInfo.ETag)-32:] } } } - // We must not use the http.Header().Set method here because some (broken) - // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). 
- // Therefore, we have to set the ETag directly as map entry. - w.Header()[xhttp.ETag] = []string{`"` + etag + `"`} - - setAmzExpirationHeader(w, bucket, objInfo) + setPutObjHeaders(w, objInfo, false) writeSuccessResponseHeadersOnly(w) - // Set the etag sent to the client as part of the event. - objInfo.ETag = etag - // Notify object created event. sendEvent(eventArgs{ EventName: event.ObjectCreatedPut, @@ -1697,31 +1673,14 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt // Read escaped copy source path to check for parameters. cpSrcPath := r.Header.Get(xhttp.AmzCopySource) - - // Check https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html - // Regardless of whether you have enabled versioning, each object in your bucket - // has a version ID. If you have not enabled versioning, Amazon S3 sets the value - // of the version ID to null. If you have enabled versioning, Amazon S3 assigns a - // unique version ID value for the object. + var vid string if u, err := url.Parse(cpSrcPath); err == nil { - // Check if versionId query param was added, if yes then check if - // its non "null" value, we should error out since we do not support - // any versions other than "null". - if vid := u.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } + vid = strings.TrimSpace(u.Query().Get("versionId")) // Note that url.Parse does the unescaping cpSrcPath = u.Path } - if vid := r.Header.Get(xhttp.AmzCopySourceVersionID); vid != "" { - // Check if X-Amz-Copy-Source-Version-Id header was added, if yes then check if - // its non "null" value, we should error out since we do not support - // any versions other than "null". - if vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } + if vid == "" { + vid = strings.TrimSpace(r.Header.Get(xhttp.AmzCopySourceVersionID)) } srcBucket, srcObject := path2BucketObject(cpSrcPath) @@ -1731,6 +1690,18 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt return } + if vid != "" && vid != nullVersionID { + _, err := uuid.Parse(vid) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, VersionNotFound{ + Bucket: srcBucket, + Object: srcObject, + VersionID: vid, + }), r.URL, guessIsBrowserReq(r)) + return + } + } + if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, srcBucket, srcObject); s3Error != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r)) return @@ -1757,8 +1728,10 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } + srcOpts.VersionID = vid + // convert copy src and dst encryption options for GET/PUT calls - var getOpts = ObjectOptions{} + var getOpts = ObjectOptions{VersionID: srcOpts.VersionID} if srcOpts.ServerSideEncryption != nil { getOpts.ServerSideEncryption = encrypt.SSE(srcOpts.ServerSideEncryption) } @@ -1779,8 +1752,6 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt if rangeHeader != "" { var parseRangeErr error if rs, parseRangeErr = parseCopyPartRangeSpec(rangeHeader); parseRangeErr != nil { - // Handle only errInvalidRange - // Ignore other parse error and treat it as regular Get request like Amazon S3. 
logger.GetReqInfo(ctx).AppendTags("rangeHeader", rangeHeader)
logger.LogIf(ctx, parseRangeErr)
writeCopyPartErr(ctx, w, parseRangeErr, r.URL, guessIsBrowserReq(r))
@@ -2098,7 +2069,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
// get encryption options
var opts ObjectOptions
if crypto.SSEC.IsRequested(r.Header) {
- opts, err = putOpts(ctx, r, bucket, object, nil)
+ opts, err = getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
@@ -2204,6 +2175,10 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
if isEncrypted {
etag = tryDecryptETag(objectEncryptionKey[:], partInfo.ETag, crypto.SSEC.IsRequested(r.Header))
}
+
+ // We must not use the http.Header().Set method here because some (broken)
+ // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
+ // Therefore, we have to set the ETag directly as map entry.
w.Header()[xhttp.ETag] = []string{"\"" + etag + "\""}
writeSuccessResponseHeadersOnly(w)
@@ -2445,8 +2420,8 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
}
var objectEncryptionKey []byte
- var opts ObjectOptions
var isEncrypted, ssec bool
+ var opts ObjectOptions
if objectAPI.IsEncryptionSupported() {
mi, err := objectAPI.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
if err != nil {
@@ -2507,7 +2482,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
writeErrorResponseWithoutXMLHeader := func(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
switch err.Code {
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
// Set retry-after header to indicate user-agents to retry request after 120secs.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
}
@@ -2552,10 +2527,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
}
}
- // Set etag.
- w.Header()[xhttp.ETag] = []string{"\"" + objInfo.ETag + "\""}
-
- setAmzExpirationHeader(w, bucket, objInfo)
+ setPutObjHeaders(w, objInfo, false)
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
@@ -2599,11 +2571,6 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
return
}
- if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" {
- writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r))
- return
- }
-
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
@@ -2617,18 +2584,30 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
} }
+ opts, err := getOpts(ctx, r, bucket, object)
+ if err != nil {
+ writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+ return
+ }
+
apiErr := ErrNone
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
- apiErr = enforceRetentionBypassForDelete(ctx, r, bucket, object, getObjectInfo)
- if apiErr != ErrNone && apiErr != ErrNoSuchKey {
- writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErr), r.URL, guessIsBrowserReq(r))
- return
+ if opts.VersionID != "" {
+ apiErr = enforceRetentionBypassForDelete(ctx, r, bucket, ObjectToDelete{
+ ObjectName: object,
+ VersionID: opts.VersionID,
+ }, getObjectInfo)
+ if apiErr != ErrNone && apiErr != ErrNoSuchKey {
+ writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErr), r.URL, guessIsBrowserReq(r))
+ return
+ }
}
}
if apiErr == ErrNone {
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
- if err := deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r); err != nil {
+ objInfo, err := deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r, opts)
+ if err != nil {
switch err.(type) {
case BucketNotFound:
// When bucket doesn't exist specially handle it.
@@ -2637,6 +2616,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
}
// Ignore delete object errors while replying to client, since we are supposed to reply only 204.
}
+ setPutObjHeaders(w, objInfo, true)
}
writeSuccessNoContent(w)
@@ -2656,11 +2636,6 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r
return
}
- if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" {
- writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r))
- return
- }
-
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
@@ -2697,6 +2672,7 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
+
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
@@ -2713,7 +2689,9 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r
objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
}
objInfo.metadataOnly = true
- if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{}, ObjectOptions{}); err != nil {
+ if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{
+ VersionID: opts.VersionID,
+ }, ObjectOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -2746,11 +2724,6 @@ func (api objectAPIHandlers) GetObjectLegalHoldHandler(w http.ResponseWriter, r
return
}
- if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" {
- writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r))
- return
- }
-
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
@@ -2816,11 +2789,6 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r
return
}
- if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" {
- writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r))
- return
- }
-
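With setPutObjHeaders now called on the delete path, a versioned DELETE advertises its outcome in the response headers. A minimal client-side sketch of what that looks like; the endpoint is a placeholder and request signing is omitted, so this is an illustration rather than a working client:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical unsigned request against a local server; real requests
	// must be SigV4-signed.
	req, err := http.NewRequest(http.MethodDelete,
		"http://localhost:9000/mybucket/myobject", nil)
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Per the handler changes above: 204 on success, plus the affected
	// version and whether a delete marker was created.
	fmt.Println("status:             ", resp.Status)
	fmt.Println("x-amz-version-id:   ", resp.Header.Get("x-amz-version-id"))
	fmt.Println("x-amz-delete-marker:", resp.Header.Get("x-amz-delete-marker"))
}

The legal-hold and retention hunks continue below.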
objectAPI := api.ObjectAPI() if objectAPI == nil { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) @@ -2856,6 +2824,12 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r return } + opts, err := getOpts(ctx, r, bucket, object) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + getObjectInfo := objectAPI.GetObjectInfo if api.CacheAPI() != nil { getObjectInfo = api.CacheAPI().GetObjectInfo @@ -2873,7 +2847,9 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags } objInfo.metadataOnly = true // Perform only metadata updates. - if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{}, ObjectOptions{}); err != nil { + if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{ + VersionID: opts.VersionID, + }, ObjectOptions{}); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } @@ -2904,11 +2880,6 @@ func (api objectAPIHandlers) GetObjectRetentionHandler(w http.ResponseWriter, r return } - if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } - objectAPI := api.ObjectAPI() if objectAPI == nil { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) @@ -2980,8 +2951,14 @@ func (api objectAPIHandlers) GetObjectTaggingHandler(w http.ResponseWriter, r *h return } + opts, err := getOpts(ctx, r, bucket, object) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + // Get object tags - tags, err := objAPI.GetObjectTags(ctx, bucket, object) + tags, err := objAPI.GetObjectTags(ctx, bucket, object, opts) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return @@ -3025,8 +3002,14 @@ func (api objectAPIHandlers) PutObjectTaggingHandler(w http.ResponseWriter, r *h return } + opts, err := getOpts(ctx, r, bucket, object) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + // Put object tags - err = objAPI.PutObjectTags(ctx, bucket, object, tags.String()) + err = objAPI.PutObjectTags(ctx, bucket, object, tags.String(), opts) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return @@ -3064,8 +3047,14 @@ func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r return } + opts, err := getOpts(ctx, r, bucket, object) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + // Delete object tags - if err = objAPI.DeleteObjectTags(ctx, bucket, object); err != nil && err != errConfigNotFound { + if err = objAPI.DeleteObjectTags(ctx, bucket, object, opts); err != nil && err != errConfigNotFound { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index 592029553..3b7c2d71d 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -56,7 +56,7 @@ const ( MissingUploadID ) -// Wrapper for calling HeadObject API handler tests for both XL multiple disks and FS single drive setup. 
+// Wrapper for calling HeadObject API handler tests for both Erasure multiple disks and FS single drive setup. func TestAPIHeadObjectHandler(t *testing.T) { ExecObjectLayerAPITest(t, testAPIHeadObjectHandler, []string{"HeadObject"}) } @@ -322,7 +322,7 @@ func testAPIHeadObjectHandlerWithEncryption(obj ObjectLayer, instanceType, bucke } } -// Wrapper for calling GetObject API handler tests for both XL multiple disks and FS single drive setup. +// Wrapper for calling GetObject API handler tests for both Erasure multiple disks and FS single drive setup. func TestAPIGetObjectHandler(t *testing.T) { globalPolicySys = NewPolicySys() defer func() { globalPolicySys = nil }() @@ -646,7 +646,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq) } -// Wrapper for calling GetObject API handler tests for both XL multiple disks and FS single drive setup. +// Wrapper for calling GetObject API handler tests for both Erasure multiple disks and FS single drive setup. func TestAPIGetObjectWithMPHandler(t *testing.T) { globalPolicySys = NewPolicySys() defer func() { globalPolicySys = nil }() @@ -844,7 +844,7 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str } -// Wrapper for calling PutObject API handler tests using streaming signature v4 for both XL multiple disks and FS single drive setup. +// Wrapper for calling PutObject API handler tests using streaming signature v4 for both Erasure multiple disks and FS single drive setup. func TestAPIPutObjectStreamSigV4Handler(t *testing.T) { defer DetectTestLeak(t)() ExecObjectLayerAPITest(t, testAPIPutObjectStreamSigV4Handler, []string{"PutObject"}) @@ -1162,7 +1162,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam } } -// Wrapper for calling PutObject API handler tests for both XL multiple disks and FS single drive setup. +// Wrapper for calling PutObject API handler tests for both Erasure multiple disks and FS single drive setup. func TestAPIPutObjectHandler(t *testing.T) { defer DetectTestLeak(t)() ExecObjectLayerAPITest(t, testAPIPutObjectHandler, []string{"PutObject"}) @@ -1522,7 +1522,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam } } -// Wrapper for calling Copy Object Part API handler tests for both XL multiple disks and single node setup. +// Wrapper for calling Copy Object Part API handler tests for both Erasure multiple disks and single node setup. 
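The copy handlers above no longer reject every non-"null" versionId outright; a copy-source version is now accepted when it is empty, the literal "null", or a well-formed UUID. A runnable distillation of that rule (nullVersionID is assumed to be defined as "null" elsewhere in the patch):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// Assumed to mirror the nullVersionID constant the handlers reference.
const nullVersionID = "null"

// isValidCopySourceVersionID reproduces the acceptance rule added to
// CopyObjectHandler and CopyObjectPartHandler: empty selects the latest
// version, "null" selects the un-versioned object, and anything else
// must parse as a UUID.
func isValidCopySourceVersionID(vid string) bool {
	if vid == "" || vid == nullVersionID {
		return true
	}
	_, err := uuid.Parse(vid)
	return err == nil
}

func main() {
	for _, vid := range []string{"", "null", "17", "a714cd6d-1b27-4ec1-9a8a-0175a966c9a4"} {
		fmt.Printf("%q -> %v\n", vid, isValidCopySourceVersionID(vid))
	}
}

This is why the test expectations below move from 404 to 400 for the bogus versionId "17". The CopyObjectPart test wrapper that the comment above introduces follows.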
func TestAPICopyObjectPartHandler(t *testing.T) {
defer DetectTestLeak(t)()
ExecObjectLayerAPITest(t, testAPICopyObjectPartHandler, []string{"CopyObjectPart"})
@@ -1766,7 +1766,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=17",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
- expectedRespStatus: http.StatusNotFound,
+ expectedRespStatus: http.StatusBadRequest,
},
// Test case - 16, copy part 1 from newObject1 with null X-Amz-Copy-Source-Version-Id
{
@@ -1783,10 +1783,10 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
bucketName: bucketName,
uploadID: uploadID,
copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
- copySourceVersionID: "17",
+ copySourceVersionID: "17", // invalid id
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
- expectedRespStatus: http.StatusNotFound,
+ expectedRespStatus: http.StatusBadRequest,
},
}
@@ -1816,6 +1816,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
if testCase.copySourceRange != "" {
req.Header.Set("X-Amz-Copy-Source-Range", testCase.copySourceRange)
}
+
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler, `func (api objectAPIHandlers) CopyObjectHandler` handles the request.
apiRouter.ServeHTTP(rec, req)
@@ -1861,7 +1862,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
}
-// Wrapper for calling Copy Object API handler tests for both XL multiple disks and single node setup.
+// Wrapper for calling Copy Object API handler tests for both Erasure multiple disks and single node setup.
func TestAPICopyObjectHandler(t *testing.T) {
defer DetectTestLeak(t)()
ExecObjectLayerAPITest(t, testAPICopyObjectHandler, []string{"CopyObject"})
@@ -2159,7 +2160,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=17",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
- expectedRespStatus: http.StatusNotFound,
+ expectedRespStatus: http.StatusBadRequest,
},
// Test case - 19, copy metadata from newObject1 with null X-Amz-Copy-Source-Version-Id
{
@@ -2179,7 +2180,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
copySourceVersionID: "17",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
- expectedRespStatus: http.StatusNotFound,
+ expectedRespStatus: http.StatusBadRequest,
},
}
@@ -2319,7 +2320,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
}
-// Wrapper for calling NewMultipartUpload tests for both XL multiple disks and single node setup.
+// Wrapper for calling NewMultipartUpload tests for both Erasure multiple disks and single node setup.
// First register the HTTP handler for NewMultipartUpload, then an HTTP request for NewMultipart upload is made.
// The UploadID from the response body is parsed and its existence is asserted with an attempt to ListParts using it.
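Looping back to the lifecycle hunk at the top of this section: the x-amz-expiration value is assembled with fmt.Sprintf from the predicted rule and expiry time. Shown standalone, with a rule name and date invented for the demo:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	ruleID := "expire-logs" // hypothetical lifecycle rule name
	expiryTime := time.Date(2020, time.June, 1, 0, 0, 0, 0, time.UTC)
	v := fmt.Sprintf(`expiry-date="%s", rule-id="%s"`,
		expiryTime.Format(http.TimeFormat), ruleID)
	// Prints: expiry-date="Mon, 01 Jun 2020 00:00:00 GMT", rule-id="expire-logs"
	fmt.Println("x-amz-expiration:", v)
}

The multipart test wrappers continue below.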
func TestAPINewMultipartHandler(t *testing.T) {
@@ -2465,7 +2466,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
}
-// Wrapper for calling NewMultipartUploadParallel tests for both XL multiple disks and single node setup.
+// Wrapper for calling NewMultipartUploadParallel tests for both Erasure multiple disks and single node setup.
// The objective of the test is to initiate multipart upload on the same object 10 times concurrently.
// The UploadID from the response body is parsed and its existence is asserted with an attempt to ListParts using it.
func TestAPINewMultipartHandlerParallel(t *testing.T) {
@@ -3064,7 +3065,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq)
}
-// Wrapper for calling Delete Object API handler tests for both XL multiple disks and FS single drive setup.
+// Wrapper for calling Delete Object API handler tests for both Erasure multiple disks and FS single drive setup.
func TestAPIDeleteObjectHandler(t *testing.T) {
defer DetectTestLeak(t)()
ExecObjectLayerAPITest(t, testAPIDeleteObjectHandler, []string{"DeleteObject"})
diff --git a/cmd/object_api_suite_test.go b/cmd/object_api_suite_test.go
index cd8764397..45939478a 100644
--- a/cmd/object_api_suite_test.go
+++ b/cmd/object_api_suite_test.go
@@ -70,20 +70,20 @@ func (r *testOneByteReadNoEOF) Read(p []byte) (n int, err error) {
type ObjectLayerAPISuite struct{}
-// Wrapper for calling testMakeBucket for both XL and FS.
+// Wrapper for calling testMakeBucket for both Erasure and FS.
func (s *ObjectLayerAPISuite) TestMakeBucket(t *testing.T) {
ExecObjectLayerTest(t, testMakeBucket)
}
// Tests validate bucket creation.
func testMakeBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
- err := obj.MakeBucketWithLocation(context.Background(), "bucket-unknown", "", false)
+ err := obj.MakeBucketWithLocation(context.Background(), "bucket-unknown", BucketOptions{})
if err != nil {
t.Fatalf("%s: %s", instanceType, err)
}
}
-// Wrapper for calling testMultipartObjectCreation for both XL and FS.
+// Wrapper for calling testMultipartObjectCreation for both Erasure and FS.
func (s *ObjectLayerAPISuite) TestMultipartObjectCreation(t *testing.T) {
ExecObjectLayerTest(t, testMultipartObjectCreation)
}
@@ -91,7 +91,7 @@ func (s *ObjectLayerAPISuite) TestMultipartObjectCreation(t *testing.T) {
// Tests validate creation of part files during Multipart operation.
func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
var opts ObjectOptions
- err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false)
+ err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
if err != nil {
t.Fatalf("%s: %s", instanceType, err)
}
@@ -127,7 +127,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErr
}
}
-// Wrapper for calling testMultipartObjectAbort for both XL and FS.
+// Wrapper for calling testMultipartObjectAbort for both Erasure and FS.
func (s *ObjectLayerAPISuite) TestMultipartObjectAbort(t *testing.T) {
ExecObjectLayerTest(t, testMultipartObjectAbort)
}
@@ -135,7 +135,7 @@ func (s *ObjectLayerAPISuite) TestMultipartObjectAbort(t *testing.T) {
// Tests validate aborting a Multipart operation.
func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHandler) { var opts ObjectOptions - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -172,7 +172,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan } } -// Wrapper for calling testMultipleObjectCreation for both XL and FS. +// Wrapper for calling testMultipleObjectCreation for both Erasure and FS. func (s *ObjectLayerAPISuite) TestMultipleObjectCreation(t *testing.T) { ExecObjectLayerTest(t, testMultipleObjectCreation) } @@ -181,7 +181,7 @@ func (s *ObjectLayerAPISuite) TestMultipleObjectCreation(t *testing.T) { func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) { objects := make(map[string][]byte) var opts ObjectOptions - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -229,14 +229,14 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH } } -// Wrapper for calling TestPaging for both XL and FS. +// Wrapper for calling TestPaging for both Erasure and FS. func (s *ObjectLayerAPISuite) TestPaging(t *testing.T) { ExecObjectLayerTest(t, testPaging) } // Tests validate creation of objects and the order of listing using various filters for ListObjects operation. func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { - obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) result, err := obj.ListObjects(context.Background(), "bucket", "", "", "", 0) if err != nil { t.Fatalf("%s: %s", instanceType, err) @@ -433,14 +433,14 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { } } -// Wrapper for calling testObjectOverwriteWorks for both XL and FS. +// Wrapper for calling testObjectOverwriteWorks for both Erasure and FS. func (s *ObjectLayerAPISuite) TestObjectOverwriteWorks(t *testing.T) { ExecObjectLayerTest(t, testObjectOverwriteWorks) } // Tests validate overwriting of an existing object. func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHandler) { - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -470,7 +470,7 @@ func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHan } } -// Wrapper for calling testNonExistantBucketOperations for both XL and FS. +// Wrapper for calling testNonExistantBucketOperations for both Erasure and FS. func (s *ObjectLayerAPISuite) TestNonExistantBucketOperations(t *testing.T) { ExecObjectLayerTest(t, testNonExistantBucketOperations) } @@ -487,18 +487,18 @@ func testNonExistantBucketOperations(obj ObjectLayer, instanceType string, t Tes } } -// Wrapper for calling testBucketRecreateFails for both XL and FS. +// Wrapper for calling testBucketRecreateFails for both Erasure and FS. func (s *ObjectLayerAPISuite) TestBucketRecreateFails(t *testing.T) { ExecObjectLayerTest(t, testBucketRecreateFails) } // Tests validate that recreation of the bucket fails. 
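Every MakeBucketWithLocation call in these suites now takes a BucketOptions value in place of the old positional (location string, lock-enabled bool) pair. The struct is defined outside this section, so the sketch below infers its shape from the two replaced arguments; the field names are assumptions:

package main

import (
	"context"
	"fmt"
)

// BucketOptions: assumed shape, inferred from the replaced arguments.
type BucketOptions struct {
	Location    string // old second argument, "" throughout these tests
	LockEnabled bool   // old third argument, false throughout these tests
}

// makeBucket stands in for an ObjectLayer implementation; hypothetical.
func makeBucket(ctx context.Context, bucket string, opts BucketOptions) error {
	fmt.Printf("creating %q (location=%q, lock=%v)\n", bucket, opts.Location, opts.LockEnabled)
	return nil
}

func main() {
	// The zero value reproduces the old ("", false) behaviour.
	_ = makeBucket(context.Background(), "bucket", BucketOptions{})
	_ = makeBucket(context.Background(), "locked-bucket", BucketOptions{LockEnabled: true})
}

The bucket-recreate test that the comment above introduces follows.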
func testBucketRecreateFails(obj ObjectLayer, instanceType string, t TestErrHandler) { - err := obj.MakeBucketWithLocation(context.Background(), "string", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "string", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } - err = obj.MakeBucketWithLocation(context.Background(), "string", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "string", BucketOptions{}) if err == nil { t.Fatalf("%s: Expected error but found nil.", instanceType) } @@ -508,7 +508,7 @@ func testBucketRecreateFails(obj ObjectLayer, instanceType string, t TestErrHand } } -// Wrapper for calling testPutObject for both XL and FS. +// Wrapper for calling testPutObject for both Erasure and FS. func (s *ObjectLayerAPISuite) TestPutObject(t *testing.T) { ExecObjectLayerTest(t, testPutObject) } @@ -519,7 +519,7 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) { length := int64(len(content)) readerEOF := newTestReaderEOF(content) readerNoEOF := newTestReaderNoEOF(content) - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -552,14 +552,14 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) { } } -// Wrapper for calling testPutObjectInSubdir for both XL and FS. +// Wrapper for calling testPutObjectInSubdir for both Erasure and FS. func (s *ObjectLayerAPISuite) TestPutObjectInSubdir(t *testing.T) { ExecObjectLayerTest(t, testPutObjectInSubdir) } // Tests validate PutObject with subdirectory prefix. func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandler) { - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -584,7 +584,7 @@ func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandle } } -// Wrapper for calling testListBuckets for both XL and FS. +// Wrapper for calling testListBuckets for both Erasure and FS. func (s *ObjectLayerAPISuite) TestListBuckets(t *testing.T) { ExecObjectLayerTest(t, testListBuckets) } @@ -601,7 +601,7 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) { } // add one and test exists. - err = obj.MakeBucketWithLocation(context.Background(), "bucket1", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "bucket1", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -615,7 +615,7 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) { } // add two and test exists. - err = obj.MakeBucketWithLocation(context.Background(), "bucket2", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "bucket2", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -629,7 +629,7 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) { } // add three and test exists + prefix. 
- err = obj.MakeBucketWithLocation(context.Background(), "bucket22", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "bucket22", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -643,7 +643,7 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) { } } -// Wrapper for calling testListBucketsOrder for both XL and FS. +// Wrapper for calling testListBucketsOrder for both Erasure and FS. func (s *ObjectLayerAPISuite) TestListBucketsOrder(t *testing.T) { ExecObjectLayerTest(t, testListBucketsOrder) } @@ -653,11 +653,11 @@ func testListBucketsOrder(obj ObjectLayer, instanceType string, t TestErrHandler // if implementation contains a map, order of map keys will vary. // this ensures they return in the same order each time. // add one and test exists. - err := obj.MakeBucketWithLocation(context.Background(), "bucket1", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket1", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } - err = obj.MakeBucketWithLocation(context.Background(), "bucket2", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "bucket2", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -677,7 +677,7 @@ func testListBucketsOrder(obj ObjectLayer, instanceType string, t TestErrHandler } } -// Wrapper for calling testListObjectsTestsForNonExistantBucket for both XL and FS. +// Wrapper for calling testListObjectsTestsForNonExistantBucket for both Erasure and FS. func (s *ObjectLayerAPISuite) TestListObjectsTestsForNonExistantBucket(t *testing.T) { ExecObjectLayerTest(t, testListObjectsTestsForNonExistantBucket) } @@ -699,14 +699,14 @@ func testListObjectsTestsForNonExistantBucket(obj ObjectLayer, instanceType stri } } -// Wrapper for calling testNonExistantObjectInBucket for both XL and FS. +// Wrapper for calling testNonExistantObjectInBucket for both Erasure and FS. func (s *ObjectLayerAPISuite) TestNonExistantObjectInBucket(t *testing.T) { ExecObjectLayerTest(t, testNonExistantObjectInBucket) } // Tests validate that GetObject fails on a non-existent bucket as expected. func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, t TestErrHandler) { - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -726,7 +726,7 @@ func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, t TestE } } -// Wrapper for calling testGetDirectoryReturnsObjectNotFound for both XL and FS. +// Wrapper for calling testGetDirectoryReturnsObjectNotFound for both Erasure and FS. func (s *ObjectLayerAPISuite) TestGetDirectoryReturnsObjectNotFound(t *testing.T) { ExecObjectLayerTest(t, testGetDirectoryReturnsObjectNotFound) } @@ -734,7 +734,7 @@ func (s *ObjectLayerAPISuite) TestGetDirectoryReturnsObjectNotFound(t *testing.T // Tests validate that GetObject on an existing directory fails as expected. 
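The os-readdir hunks a little further down collapse repeated, hand-rolled error translation into a single osErrToFileErr helper. That helper's body is not part of this section, so the sketch below is an assumption reconstructed from the inline checks it replaces:

package main

import (
	"errors"
	"fmt"
	"os"
)

var (
	errFileNotFound     = errors.New("file not found")
	errFileAccessDenied = errors.New("file access denied")
)

// isSysErrNotDir is simplified here; the repo version inspects syscall.ENOTDIR.
func isSysErrNotDir(err error) bool {
	var pe *os.PathError
	return errors.As(err, &pe) && pe.Err.Error() == "not a directory"
}

// osErrToFileErr maps OS-level errors to the storage layer's sentinel
// errors, standing in for the per-call-site checks deleted below.
func osErrToFileErr(err error) error {
	switch {
	case err == nil:
		return nil
	case os.IsNotExist(err):
		return errFileNotFound
	case os.IsPermission(err):
		return errFileAccessDenied
	case isSysErrNotDir(err):
		// A parent path component is a regular file, not a directory.
		return errFileNotFound
	default:
		return err
	}
}

func main() {
	_, err := os.Open("/definitely/missing/path")
	fmt.Println(osErrToFileErr(err) == errFileNotFound) // true
}

The directory-lookup test that the comment above introduces follows.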
func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string, t TestErrHandler) {
bucketName := "bucket"
- err := obj.MakeBucketWithLocation(context.Background(), bucketName, "", false)
+ err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{})
if err != nil {
t.Fatalf("%s: %s", instanceType, err)
}
@@ -769,14 +769,14 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string,
}
}
-// Wrapper for calling testContentType for both XL and FS.
+// Wrapper for calling testContentType for both Erasure and FS.
func (s *ObjectLayerAPISuite) TestContentType(t *testing.T) {
ExecObjectLayerTest(t, testContentType)
}
// Test content-type.
func testContentType(obj ObjectLayer, instanceType string, t TestErrHandler) {
- err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false)
+ err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
if err != nil {
t.Fatalf("%s: %s", instanceType, err)
}
diff --git a/cmd/os-readdir_other.go b/cmd/os-readdir_other.go
index febd16f48..a7fd75ee2 100644
--- a/cmd/os-readdir_other.go
+++ b/cmd/os-readdir_other.go
@@ -21,7 +21,7 @@ package cmd
import (
"io"
"os"
- "strings"
+ "syscall"
)
// Return all the entries at the directory dirPath.
@@ -34,16 +34,7 @@ func readDir(dirPath string) (entries []string, err error) {
func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) error) error {
d, err := os.Open(dirPath)
if err != nil {
- // File is really not found.
- if os.IsNotExist(err) {
- return errFileNotFound
- }
-
- // File path cannot be verified since one of the parents is a file.
- if strings.Contains(err.Error(), "not a directory") {
- return errFileNotFound
- }
- return err
+ return osErrToFileErr(err)
}
defer d.Close()
@@ -55,7 +46,7 @@ func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) e
if err == io.EOF {
break
}
- return err
+ return osErrToFileErr(err)
}
for _, fi := range fis {
if err = filter(fi.Name(), fi.Mode()); err == errDoneForNow {
@@ -71,16 +62,7 @@ func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) e
func readDirN(dirPath string, count int) (entries []string, err error) {
d, err := os.Open(dirPath)
if err != nil {
- // File is really not found.
- if os.IsNotExist(err) {
- return nil, errFileNotFound
- }
-
- // File path cannot be verified since one of the parents is a file.
- if strings.Contains(err.Error(), "not a directory") {
- return nil, errFileNotFound
- }
- return nil, err
+ return nil, osErrToFileErr(err)
}
defer d.Close()
@@ -99,7 +81,7 @@ func readDirN(dirPath string, count int) (entries []string, err error) {
if err == io.EOF {
break
}
- return nil, err
+ return nil, osErrToFileErr(err)
}
if count > 0 {
if remaining <= len(fis) {
@@ -125,3 +107,8 @@ func readDirN(dirPath string, count int) (entries []string, err error) {
}
return entries, nil
}
+
+func globalSync() {
+ // best effort; syscall.Sync support on plan9/solaris is unverified
+ syscall.Sync()
+}
diff --git a/cmd/os-readdir_test.go b/cmd/os-readdir_test.go
index 2c6afb2b5..64ef1ea3b 100644
--- a/cmd/os-readdir_test.go
+++ b/cmd/os-readdir_test.go
@@ -68,7 +68,7 @@ type result struct {
func mustSetupDir(t *testing.T) string {
// Create unique test directory.
- dir, err := ioutil.TempDir(globalTestTmpDir, "minio-posix-list-dir")
+ dir, err := ioutil.TempDir(globalTestTmpDir, "minio-list-dir")
if err != nil {
t.Fatalf("Unable to setup directory, %s", err)
}
diff --git a/cmd/os-readdir_unix.go b/cmd/os-readdir_unix.go
index 09494f799..ee318ce3f 100644
--- a/cmd/os-readdir_unix.go
+++ b/cmd/os-readdir_unix.go
@@ -87,17 +87,11 @@ func readDir(dirPath string) (entries []string, err error) {
// readDirFilterFn applies the filter function on each entry at dirPath; it doesn't
// recurse into subdirectories.
func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) error) error {
- fd, err := syscall.Open(dirPath, 0, 0)
+ f, err := os.Open(dirPath)
if err != nil {
- if os.IsNotExist(err) || isSysErrNotDir(err) {
- return errFileNotFound
- }
- if os.IsPermission(err) {
- return errFileAccessDenied
- }
- return err
+ return osErrToFileErr(err)
}
- defer syscall.Close(fd)
+ defer f.Close()
buf := make([]byte, blockSize)
boff := 0 // starting read position in buf
@@ -106,7 +100,7 @@ func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) e
for {
if boff >= nbuf {
boff = 0
- nbuf, err = syscall.ReadDirent(fd, buf)
+ nbuf, err = syscall.ReadDirent(int(f.Fd()), buf)
if err != nil {
if isSysErrNotDir(err) {
return errFileNotFound
@@ -140,17 +134,11 @@ func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) e
// Return count entries at the directory dirPath, or all entries
// if count is set to -1
func readDirN(dirPath string, count int) (entries []string, err error) {
- fd, err := syscall.Open(dirPath, 0, 0)
+ f, err := os.Open(dirPath)
if err != nil {
- if os.IsNotExist(err) || isSysErrNotDir(err) {
- return nil, errFileNotFound
- }
- if os.IsPermission(err) {
- return nil, errFileAccessDenied
- }
- return nil, err
+ return nil, osErrToFileErr(err)
}
- defer syscall.Close(fd)
+ defer f.Close()
bufp := direntPool.Get().(*[]byte)
defer direntPool.Put(bufp)
@@ -161,7 +149,7 @@ func readDirN(dirPath string, count int) (entries []string, err error) {
for count != 0 {
if boff >= nbuf {
boff = 0
- nbuf, err = syscall.ReadDirent(fd, *bufp)
+ nbuf, err = syscall.ReadDirent(int(f.Fd()), *bufp)
if err != nil {
if isSysErrNotDir(err) {
return nil, errFileNotFound
@@ -209,3 +197,7 @@ func readDirN(dirPath string, count int) (entries []string, err error) {
}
return
}
+
+func globalSync() {
+ syscall.Sync()
+}
diff --git a/cmd/os-readdir_windows.go b/cmd/os-readdir_windows.go
index 377bf38f6..191ad6a24 100644
--- a/cmd/os-readdir_windows.go
+++ b/cmd/os-readdir_windows.go
@@ -20,7 +20,6 @@ package cmd
import (
"os"
- "strings"
"syscall"
)
@@ -32,43 +31,24 @@ func readDir(dirPath string) (entries []string, err error) {
// readDirFilterFn applies the filter function on each entry at dirPath; it doesn't
// recurse into subdirectories.
func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) error) error {
- d, err := os.Open(dirPath)
+ f, err := os.Open(dirPath)
if err != nil {
- // File is really not found.
- if os.IsNotExist(err) {
- return errFileNotFound
- }
-
- // File path cannot be verified since one of the parents is a file.
- if strings.Contains(err.Error(), "not a directory") {
- return errFileNotFound
- }
- return err
- }
- defer d.Close()
-
- st, err := d.Stat()
- if err != nil {
- return err
- }
- // Not a directory return error.
- if !st.IsDir() { - return errFileAccessDenied + return osErrToFileErr(err) } + defer f.Close() data := &syscall.Win32finddata{} for { - e := syscall.FindNextFile(syscall.Handle(d.Fd()), data) + e := syscall.FindNextFile(syscall.Handle(f.Fd()), data) if e != nil { if e == syscall.ERROR_NO_MORE_FILES { break } else { - err = &os.PathError{ + return osErrToFileErr(&os.PathError{ Op: "FindNextFile", Path: dirPath, Err: e, - } - return err + }) } } name := syscall.UTF16ToString(data.FileName[0:]) @@ -82,7 +62,7 @@ func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) e if data.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { typ = os.ModeDir } - if err = filter(name, typ); err == errDoneForNow { + if e = filter(name, typ); e == errDoneForNow { // filtering requested to return by caller. return nil } @@ -93,43 +73,25 @@ func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) e // Return N entries at the directory dirPath. If count is -1, return all entries func readDirN(dirPath string, count int) (entries []string, err error) { - d, err := os.Open(dirPath) + f, err := os.Open(dirPath) if err != nil { - // File is really not found. - if os.IsNotExist(err) { - return nil, errFileNotFound - } - - // File path cannot be verified since one of the parents is a file. - if strings.Contains(err.Error(), "not a directory") { - return nil, errFileNotFound - } - return nil, err - } - defer d.Close() - - st, err := d.Stat() - if err != nil { - return nil, err - } - // Not a directory return error. - if !st.IsDir() { - return nil, errFileNotFound + return nil, osErrToFileErr(err) } + defer f.Close() data := &syscall.Win32finddata{} for count != 0 { - err = syscall.FindNextFile(syscall.Handle(d.Fd()), data) - if err != nil { - if err == syscall.ERROR_NO_MORE_FILES { + e := syscall.FindNextFile(syscall.Handle(f.Fd()), data) + if e != nil { + if e == syscall.ERROR_NO_MORE_FILES { break } else { - return nil, &os.PathError{ + return nil, osErrToFileErr(&os.PathError{ Op: "FindNextFile", Path: dirPath, - Err: err, - } + Err: e, + }) } } @@ -147,5 +109,10 @@ func readDirN(dirPath string, count int) (entries []string, err error) { } count-- } + return entries, nil } + +func globalSync() { + // no-op on windows +} diff --git a/cmd/os-reliable_test.go b/cmd/os-reliable_test.go index ba1b1a256..c2cee0d27 100644 --- a/cmd/os-reliable_test.go +++ b/cmd/os-reliable_test.go @@ -23,10 +23,10 @@ import ( // Tests - mkdirAll() func TestOSMkdirAll(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -45,10 +45,10 @@ func TestOSMkdirAll(t *testing.T) { // Tests - renameAll() func TestOSRenameAll(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) diff --git a/cmd/peer-rest-server.go b/cmd/peer-rest-server.go index 4799e5ca0..8bd1807fc 100644 --- a/cmd/peer-rest-server.go +++ b/cmd/peer-rest-server.go @@ -667,8 +667,8 @@ func (s *peerRESTServer) CycleServerBloomFilterHandler(w http.ResponseWriter, r s.writeErrorResponse(w, err) return } + 
logger.LogIf(ctx, gob.NewEncoder(w).Encode(bf))
- w.(http.Flusher).Flush()
}
// PutBucketNotificationHandler - Set bucket notification.
@@ -702,7 +702,7 @@ func (s *peerRESTServer) PutBucketNotificationHandler(w http.ResponseWriter, r *
}
// Return disk IDs of all the local disks.
-func getLocalDiskIDs(z *xlZones) []string {
+func getLocalDiskIDs(z *erasureZones) []string {
var ids []string
for zoneIdx := range z.zones {
@@ -746,7 +746,7 @@ func (s *peerRESTServer) GetLocalDiskIDs(w http.ResponseWriter, r *http.Request)
return
}
- z, ok := objLayer.(*xlZones)
+ z, ok := objLayer.(*erasureZones)
if !ok {
s.writeErrorResponse(w, errServerNotInitialized)
return
diff --git a/cmd/posix-diskid-check.go b/cmd/posix-diskid-check.go
deleted file mode 100644
index abe5bd5f1..000000000
--- a/cmd/posix-diskid-check.go
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package cmd
-
-import (
- "context"
- "io"
-)
-
-// Detects change in underlying disk.
-type posixDiskIDCheck struct {
- storage *posix
- diskID string
-}
-
-func (p *posixDiskIDCheck) String() string {
- return p.storage.String()
-}
-
-func (p *posixDiskIDCheck) IsOnline() bool {
- storedDiskID, err := p.storage.GetDiskID()
- if err != nil {
- return false
- }
- return storedDiskID == p.diskID
-}
-
-func (p *posixDiskIDCheck) IsLocal() bool {
- return p.storage.IsLocal()
-}
-
-func (p *posixDiskIDCheck) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) (dataUsageCache, error) {
- return p.storage.CrawlAndGetDataUsage(ctx, cache)
-}
-
-func (p *posixDiskIDCheck) Hostname() string {
- return p.storage.Hostname()
-}
-
-func (p *posixDiskIDCheck) Close() error {
- return p.storage.Close()
-}
-
-func (p *posixDiskIDCheck) GetDiskID() (string, error) {
- return p.storage.GetDiskID()
-}
-
-func (p *posixDiskIDCheck) SetDiskID(id string) {
- p.diskID = id
-}
-
-func (p *posixDiskIDCheck) isDiskStale() bool {
- if p.diskID == "" {
- // For empty disk-id we allow the call as the server might be coming up and trying to read format.json
- // or create format.json
- return false
- }
- storedDiskID, err := p.storage.GetDiskID()
- if err == nil && p.diskID == storedDiskID {
- return false
- }
- return true
-}
-
-func (p *posixDiskIDCheck) DiskInfo() (info DiskInfo, err error) {
- if p.isDiskStale() {
- return info, errDiskNotFound
- }
- return p.storage.DiskInfo()
-}
-
-func (p *posixDiskIDCheck) MakeVolBulk(volumes ...string) (err error) {
- if p.isDiskStale() {
- return errDiskNotFound
- }
- return p.storage.MakeVolBulk(volumes...)
-} - -func (p *posixDiskIDCheck) MakeVol(volume string) (err error) { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.MakeVol(volume) -} - -func (p *posixDiskIDCheck) ListVols() ([]VolInfo, error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.ListVols() -} - -func (p *posixDiskIDCheck) StatVol(volume string) (vol VolInfo, err error) { - if p.isDiskStale() { - return vol, errDiskNotFound - } - return p.storage.StatVol(volume) -} - -func (p *posixDiskIDCheck) DeleteVol(volume string, forceDelete bool) (err error) { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.DeleteVol(volume, forceDelete) -} - -func (p *posixDiskIDCheck) Walk(volume, dirPath string, marker string, recursive bool, leafFile string, readMetadataFn readMetadataFunc, endWalkCh <-chan struct{}) (chan FileInfo, error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.Walk(volume, dirPath, marker, recursive, leafFile, readMetadataFn, endWalkCh) -} - -func (p *posixDiskIDCheck) WalkSplunk(volume, dirPath string, marker string, endWalkCh <-chan struct{}) (chan FileInfo, error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.WalkSplunk(volume, dirPath, marker, endWalkCh) -} - -func (p *posixDiskIDCheck) ListDir(volume, dirPath string, count int, leafFile string) ([]string, error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.ListDir(volume, dirPath, count, leafFile) -} - -func (p *posixDiskIDCheck) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) { - if p.isDiskStale() { - return 0, errDiskNotFound - } - return p.storage.ReadFile(volume, path, offset, buf, verifier) -} - -func (p *posixDiskIDCheck) AppendFile(volume string, path string, buf []byte) (err error) { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.AppendFile(volume, path, buf) -} - -func (p *posixDiskIDCheck) CreateFile(volume, path string, size int64, reader io.Reader) error { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.CreateFile(volume, path, size, reader) -} - -func (p *posixDiskIDCheck) ReadFileStream(volume, path string, offset, length int64) (io.ReadCloser, error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.ReadFileStream(volume, path, offset, length) -} - -func (p *posixDiskIDCheck) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.RenameFile(srcVolume, srcPath, dstVolume, dstPath) -} - -func (p *posixDiskIDCheck) StatFile(volume string, path string) (file FileInfo, err error) { - if p.isDiskStale() { - return file, errDiskNotFound - } - return p.storage.StatFile(volume, path) -} - -func (p *posixDiskIDCheck) DeleteFile(volume string, path string) (err error) { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.DeleteFile(volume, path) -} - -func (p *posixDiskIDCheck) DeleteFileBulk(volume string, paths []string) (errs []error, err error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.DeleteFileBulk(volume, paths) -} - -func (p *posixDiskIDCheck) DeletePrefixes(volume string, paths []string) (errs []error, err error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.DeletePrefixes(volume, paths) -} - -func (p *posixDiskIDCheck) VerifyFile(volume, path string, size int64, algo BitrotAlgorithm, 
sum []byte, shardSize int64) error { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.VerifyFile(volume, path, size, algo, sum, shardSize) -} - -func (p *posixDiskIDCheck) WriteAll(volume string, path string, reader io.Reader) (err error) { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.WriteAll(volume, path, reader) -} - -func (p *posixDiskIDCheck) ReadAll(volume string, path string) (buf []byte, err error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.ReadAll(volume, path) -} diff --git a/cmd/post-policy_test.go b/cmd/post-policy_test.go index 005b22a0a..08baaffb4 100644 --- a/cmd/post-policy_test.go +++ b/cmd/post-policy_test.go @@ -113,7 +113,7 @@ func newPostPolicyBytesV2(bucketName, objectKey string, expiration time.Time) [] return []byte(retStr) } -// Wrapper for calling TestPostPolicyBucketHandler tests for both XL multiple disks and single node setup. +// Wrapper for calling TestPostPolicyBucketHandler tests for both Erasure multiple disks and single node setup. func TestPostPolicyBucketHandler(t *testing.T) { ExecObjectLayerTest(t, testPostPolicyBucketHandler) } @@ -128,7 +128,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr bucketName := getRandomBucketName() var opts ObjectOptions - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestAPIEndPoints(obj, []string{"PostPolicy"}) credentials := globalActiveCred @@ -140,7 +140,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr // objectNames[0]. // uploadIds [0]. // Create bucket before initiating NewMultipartUpload. - err := obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -414,7 +414,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr } -// Wrapper for calling TestPostPolicyBucketHandlerRedirect tests for both XL multiple disks and single node setup. +// Wrapper for calling TestPostPolicyBucketHandlerRedirect tests for both Erasure multiple disks and single node setup. func TestPostPolicyBucketHandlerRedirect(t *testing.T) { ExecObjectLayerTest(t, testPostPolicyBucketHandlerRedirect) } @@ -442,7 +442,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t t.Fatal(err) } - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestAPIEndPoints(obj, []string{"PostPolicy"}) credentials := globalActiveCred @@ -450,7 +450,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t curTime := UTCNow() curTimePlus5Min := curTime.Add(time.Minute * 5) - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) diff --git a/cmd/prepare-storage.go b/cmd/prepare-storage.go index 9c813c96e..d29c58687 100644 --- a/cmd/prepare-storage.go +++ b/cmd/prepare-storage.go @@ -60,7 +60,7 @@ var printEndpointError = func() func(Endpoint, error) { }() // Migrates backend format of local disks. 
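The posix-diskid-check.go file deleted above wraps every storage operation in the same guard: remember the disk ID recorded at format time and refuse calls once the underlying disk stops reporting it. Its renamed xlStorage counterpart is presumably introduced elsewhere in this patch; the pattern itself, as a self-contained sketch:

package main

import (
	"errors"
	"fmt"
)

var errDiskNotFound = errors.New("disk not found")

// diskAPI is the narrow slice of StorageAPI the guard needs.
type diskAPI interface {
	GetDiskID() (string, error)
}

type diskIDCheck struct {
	storage diskAPI
	diskID  string
}

func (p *diskIDCheck) isDiskStale() bool {
	if p.diskID == "" {
		// Empty ID: the server may still be reading or creating format.json.
		return false
	}
	id, err := p.storage.GetDiskID()
	return err != nil || id != p.diskID
}

func (p *diskIDCheck) statVol(volume string) error {
	if p.isDiskStale() {
		return errDiskNotFound
	}
	fmt.Println("stat", volume)
	return nil
}

type fakeDisk struct{ id string }

func (f fakeDisk) GetDiskID() (string, error) { return f.id, nil }

func main() {
	p := &diskIDCheck{storage: fakeDisk{id: "uuid-a"}, diskID: "uuid-a"}
	fmt.Println(p.statVol("bucket"))   // <nil>
	p.storage = fakeDisk{id: "uuid-b"} // simulate a swapped drive
	fmt.Println(p.statVol("bucket"))   // disk not found
}

The format-migration helper that the comment above introduces follows.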
-func formatXLMigrateLocalEndpoints(endpoints Endpoints) error {
+func formatErasureMigrateLocalEndpoints(endpoints Endpoints) error {
g := errgroup.WithNErrs(len(endpoints))
for index, endpoint := range endpoints {
if !endpoint.IsLocal {
@@ -76,7 +76,7 @@ func formatXLMigrateLocalEndpoints(endpoints Endpoints) error {
}
return fmt.Errorf("unable to access (%s) %w", formatPath, err)
}
- return formatXLMigrate(epPath)
+ return formatErasureMigrate(epPath)
}, index)
}
for _, err := range g.Wait() {
@@ -88,7 +88,7 @@
}
// Cleans up tmp directory of local disks.
-func formatXLCleanupTmpLocalEndpoints(endpoints Endpoints) error {
+func formatErasureCleanupTmpLocalEndpoints(endpoints Endpoints) error {
g := errgroup.WithNErrs(len(endpoints))
for index, endpoint := range endpoints {
if !endpoint.IsLocal {
@@ -157,7 +157,7 @@ func formatXLCleanupTmpLocalEndpoints(endpoints Endpoints) error {
// the disk UUID association. Below error message is returned when
// we see this situation in format.json, for more info refer
// https://github.com/minio/minio/issues/5667
-var errXLV3ThisEmpty = fmt.Errorf("XL format version 3 has This field empty")
+var errErasureV3ThisEmpty = fmt.Errorf("Erasure format version 3 has This field empty")
// IsServerResolvable - checks if the endpoint is resolvable
// by sending a naked HTTP request with liveness checks.
@@ -199,10 +199,10 @@ func IsServerResolvable(endpoint Endpoint) error {
return nil
}
-// connect to list of endpoints and load all XL disk formats, validate the formats are correct
+// connect to list of endpoints and load all Erasure disk formats, validate the formats are correct
// and are in quorum, if no formats are found attempt to initialize all of them for the first
// time. additionally make sure to close all the disks used in this attempt.
-func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, zoneCount, setCount, drivesPerSet int, deploymentID string) (storageDisks []StorageAPI, format *formatXLV3, err error) {
+func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, zoneCount, setCount, drivesPerSet int, deploymentID string) (storageDisks []StorageAPI, format *formatErasureV3, err error) {
// Initialize all storage disks
storageDisks, errs := initStorageDisksWithErrors(endpoints)
@@ -224,7 +224,7 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints,
}
// Attempt to load all `format.json` from all disks.
- formatConfigs, sErrs := loadFormatXLAll(storageDisks, false)
+ formatConfigs, sErrs := loadFormatErasureAll(storageDisks, false)
// Check if we have any critical errors loading `format.json`.
for i, sErr := range sErrs {
if _, ok := formatCriticalErrors[sErr]; ok {
@@ -241,19 +241,19 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints,
// Pre-emptively check if one of the formatted disks
// is invalid. This function returns success for the
// most part unless one of the formats is not consistent
- // with expected XL format. For example if a user is
- // trying to pool FS backend into an XL set.
- if err = checkFormatXLValues(formatConfigs, drivesPerSet); err != nil {
+ // with expected Erasure format. For example if a user is
+ // trying to pool FS backend into an Erasure set.
+ if err = checkFormatErasureValues(formatConfigs, drivesPerSet); err != nil {
return nil, nil, err
}
// All disks report unformatted; we should initialize everyone.
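Both local-endpoint helpers above fan out one goroutine per endpoint through the repo's errgroup wrapper and collect a positional error slice. A runnable distillation of that pattern; the import path matches the usage visible in these hunks but should be treated as an assumption:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/sync/errgroup"
)

func main() {
	endpoints := []string{"/disk1", "/disk2", "/disk3"}
	g := errgroup.WithNErrs(len(endpoints))
	for index := range endpoints {
		index := index // capture a per-iteration copy for the closure
		g.Go(func() error {
			fmt.Println("migrating format on", endpoints[index])
			return nil
		}, index)
	}
	// Wait returns one error slot per endpoint, preserving order.
	for i, err := range g.Wait() {
		if err != nil {
			fmt.Println(endpoints[i], "failed:", err)
		}
	}
}

The unformatted-disks branch that the comment above introduces follows.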
- if shouldInitXLDisks(sErrs) && firstDisk { + if shouldInitErasureDisks(sErrs) && firstDisk { logger.Info("Formatting %s zone, %v set(s), %v drives per set.", humanize.Ordinal(zoneCount), setCount, drivesPerSet) // Initialize erasure code format on disks - format, err = initFormatXL(GlobalContext, storageDisks, setCount, drivesPerSet, deploymentID) + format, err = initFormatErasure(GlobalContext, storageDisks, setCount, drivesPerSet, deploymentID) if err != nil { return nil, nil, err } @@ -281,16 +281,16 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, // This migration failed to capture '.This' field properly which indicates // the disk UUID association. Below function is called to handle and fix // this regression, for more info refer https://github.com/minio/minio/issues/5667 - if err = fixFormatXLV3(storageDisks, endpoints, formatConfigs); err != nil { + if err = fixFormatErasureV3(storageDisks, endpoints, formatConfigs); err != nil { return nil, nil, err } // If any of the .This field is still empty, we return error. - if formatXLV3ThisEmpty(formatConfigs) { - return nil, nil, errXLV3ThisEmpty + if formatErasureV3ThisEmpty(formatConfigs) { + return nil, nil, errErasureV3ThisEmpty } - format, err = getFormatXLInQuorum(formatConfigs) + format, err = getFormatErasureInQuorum(formatConfigs) if err != nil { return nil, nil, err } @@ -300,35 +300,35 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, if !firstDisk { return nil, nil, errNotFirstDisk } - if err = formatXLFixDeploymentID(endpoints, storageDisks, format); err != nil { + if err = formatErasureFixDeploymentID(endpoints, storageDisks, format); err != nil { return nil, nil, err } } globalDeploymentID = format.ID - if err = formatXLFixLocalDeploymentID(endpoints, storageDisks, format); err != nil { + if err = formatErasureFixLocalDeploymentID(endpoints, storageDisks, format); err != nil { return nil, nil, err } // The will always recreate some directories inside .minio.sys of // the local disk such as tmp, multipart and background-ops - initXLMetaVolumesInLocalDisks(storageDisks, formatConfigs) + initErasureMetaVolumesInLocalDisks(storageDisks, formatConfigs) return storageDisks, format, nil } // Format disks before initialization of object layer. -func waitForFormatXL(firstDisk bool, endpoints Endpoints, zoneCount, setCount, drivesPerSet int, deploymentID string) ([]StorageAPI, *formatXLV3, error) { +func waitForFormatErasure(firstDisk bool, endpoints Endpoints, zoneCount, setCount, drivesPerSet int, deploymentID string) ([]StorageAPI, *formatErasureV3, error) { if len(endpoints) == 0 || setCount == 0 || drivesPerSet == 0 { return nil, nil, errInvalidArgument } - if err := formatXLMigrateLocalEndpoints(endpoints); err != nil { + if err := formatErasureMigrateLocalEndpoints(endpoints); err != nil { return nil, nil, err } - if err := formatXLCleanupTmpLocalEndpoints(endpoints); err != nil { + if err := formatErasureCleanupTmpLocalEndpoints(endpoints); err != nil { return nil, nil, err } @@ -358,11 +358,11 @@ func waitForFormatXL(firstDisk bool, endpoints Endpoints, zoneCount, setCount, d // Fresh setup, wait for other servers to come up. logger.Info("Waiting for all other servers to be online to format the disks.") continue - case errXLReadQuorum: + case errErasureReadQuorum: // no quorum available continue to wait for minimum number of servers. 
logger.Info("Waiting for a minimum of %d disks to come online (elapsed %s)\n", len(endpoints)/2, getElapsedTime()) continue - case errXLV3ThisEmpty: + case errErasureV3ThisEmpty: // need to wait for this error to be healed, so continue. continue default: diff --git a/cmd/routers.go b/cmd/routers.go index e78e72628..85a137cdc 100644 --- a/cmd/routers.go +++ b/cmd/routers.go @@ -22,8 +22,8 @@ import ( "github.com/gorilla/mux" ) -// Composed function registering routers for only distributed XL setup. -func registerDistXLRouters(router *mux.Router, endpointZones EndpointZones) { +// Composed function registering routers for only distributed Erasure setup. +func registerDistErasureRouters(router *mux.Router, endpointZones EndpointZones) { // Register storage REST router only if its a distributed setup. registerStorageRESTHandlers(router, endpointZones) @@ -87,8 +87,8 @@ func configureServerHandler(endpointZones EndpointZones) (http.Handler, error) { router := mux.NewRouter().SkipClean(true).UseEncodedPath() // Initialize distributed NS lock. - if globalIsDistXL { - registerDistXLRouters(router, endpointZones) + if globalIsDistErasure { + registerDistErasureRouters(router, endpointZones) } // Add STS router always. diff --git a/cmd/server-main.go b/cmd/server-main.go index f02de2528..21eb9af8a 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -116,9 +116,9 @@ func serverHandleCmdArgs(ctx *cli.Context) { globalMinioHost, globalMinioPort = mustSplitHostPort(globalMinioAddr) endpoints := strings.Fields(env.Get(config.EnvEndpoints, "")) if len(endpoints) > 0 { - globalEndpoints, globalXLSetDriveCount, setupType, err = createServerEndpoints(globalCLIContext.Addr, endpoints...) + globalEndpoints, globalErasureSetDriveCount, setupType, err = createServerEndpoints(globalCLIContext.Addr, endpoints...) } else { - globalEndpoints, globalXLSetDriveCount, setupType, err = createServerEndpoints(globalCLIContext.Addr, ctx.Args()...) + globalEndpoints, globalErasureSetDriveCount, setupType, err = createServerEndpoints(globalCLIContext.Addr, ctx.Args()...) } logger.FatalIf(err, "Invalid command line arguments") @@ -128,10 +128,10 @@ func serverHandleCmdArgs(ctx *cli.Context) { // To avoid this error situation we check for port availability. logger.FatalIf(checkPortAvailability(globalMinioHost, globalMinioPort), "Unable to start the server") - globalIsXL = (setupType == XLSetupType) - globalIsDistXL = (setupType == DistXLSetupType) - if globalIsDistXL { - globalIsXL = true + globalIsErasure = (setupType == ErasureSetupType) + globalIsDistErasure = (setupType == DistErasureSetupType) + if globalIsDistErasure { + globalIsErasure = true } } @@ -167,6 +167,9 @@ func newAllSubsystems() { // Create new bucket quota subsystem globalBucketQuotaSys = NewBucketQuotaSys() + + // Create new bucket versioning subsystem + globalBucketVersioningSys = NewBucketVersioningSys() } func initSafeMode(ctx context.Context, newObject ObjectLayer) (err error) { @@ -225,7 +228,7 @@ func initSafeMode(ctx context.Context, newObject ObjectLayer) (err error) { } // These messages only meant primarily for distributed setup, so only log during distributed setup. - if globalIsDistXL { + if globalIsDistErasure { logger.Info("Waiting for all MinIO sub-systems to be initialized.. 
lock acquired") } @@ -237,7 +240,7 @@ func initSafeMode(ctx context.Context, newObject ObjectLayer) (err error) { // if all sub-systems initialized successfully return right away if err = initAllSubsystems(retryCtx, newObject); err == nil { // All successful return. - if globalIsDistXL { + if globalIsDistErasure { // These messages only meant primarily for distributed setup, so only log during distributed setup. logger.Info("All MinIO sub-systems initialized successfully") } @@ -278,7 +281,7 @@ func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) { // you want to add extra context to your error. This // ensures top level retry works accordingly. var buckets []BucketInfo - if globalIsDistXL || globalIsXL { + if globalIsDistErasure || globalIsErasure { // List buckets to heal, and be re-used for loading configs. buckets, err = newObject.ListBucketsHeal(ctx) if err != nil { @@ -289,7 +292,7 @@ func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) { wquorum := &InsufficientWriteQuorum{} rquorum := &InsufficientReadQuorum{} for _, bucket := range buckets { - if err = newObject.MakeBucketWithLocation(ctx, bucket.Name, "", false); err != nil { + if err = newObject.MakeBucketWithLocation(ctx, bucket.Name, BucketOptions{}); err != nil { if errors.As(err, &wquorum) || errors.As(err, &rquorum) { // Return the error upwards for the caller to retry. return fmt.Errorf("Unable to heal bucket: %w", err) @@ -346,7 +349,7 @@ func startBackgroundOps(ctx context.Context, objAPI ObjectLayer) { // No unlock for "leader" lock. } - if globalIsXL { + if globalIsErasure { initGlobalHeal(ctx, objAPI) } @@ -396,7 +399,7 @@ func serverMain(ctx *cli.Context) { }() // Is distributed setup, error out if no certificates are found for HTTPS endpoints. - if globalIsDistXL { + if globalIsDistErasure { if globalEndpoints.HTTPS() && !globalIsSSL { logger.Fatal(config.ErrNoCertsAndHTTPSEndpoints(nil), "Unable to start the server") } @@ -410,7 +413,7 @@ func serverMain(ctx *cli.Context) { checkUpdate(getMinioMode()) } - if !globalActiveCred.IsValid() && globalIsDistXL { + if !globalActiveCred.IsValid() && globalIsDistErasure { logger.Fatal(config.ErrEnvCredentialsMissingDistributed(nil), "Unable to initialize the server in distributed mode") } @@ -418,7 +421,7 @@ func serverMain(ctx *cli.Context) { // Set system resources to maximum. setMaxResources() - if globalIsXL { + if globalIsErasure { // Init global heal state globalAllHealState = initHealState() globalBackgroundHealState = initHealState() @@ -467,7 +470,7 @@ func serverMain(ctx *cli.Context) { globalHTTPServer = httpServer globalObjLayerMutex.Unlock() - if globalIsDistXL && globalEndpoints.FirstLocal() { + if globalIsDistErasure && globalEndpoints.FirstLocal() { for { // Additionally in distributed setup, validate the setup and configuration. 
err := verifyServerSystemConfig(globalEndpoints) @@ -502,7 +505,7 @@ func serverMain(ctx *cli.Context) { newAllSubsystems() // Enable healing to heal drives if possible - if globalIsXL { + if globalIsErasure { initBackgroundHealing(GlobalContext, newObject) initLocalDisksAutoHeal(GlobalContext, newObject) } @@ -549,5 +552,5 @@ func newObjectLayer(ctx context.Context, endpointZones EndpointZones) (newObject return NewFSObjectLayer(endpointZones[0].Endpoints[0].Path) } - return newXLZones(ctx, endpointZones) + return newErasureZones(ctx, endpointZones) } diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index e05bbdcde..07340bc87 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -43,7 +43,7 @@ func TestNewObjectLayer(t *testing.T) { t.Fatal("Unexpected object layer detected", reflect.TypeOf(obj)) } - // Tests for XL object layer initialization. + // Tests for Erasure object layer initialization. // Create temporary backend for the test server. nDisks = 16 @@ -58,7 +58,7 @@ func TestNewObjectLayer(t *testing.T) { t.Fatal("Unexpected object layer initialization error", err) } - _, ok = obj.(*xlZones) + _, ok = obj.(*erasureZones) if !ok { t.Fatal("Unexpected object layer detected", reflect.TypeOf(obj)) } diff --git a/cmd/server_test.go b/cmd/server_test.go index df59bf654..8cf86e7e8 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -36,7 +36,7 @@ import ( "github.com/minio/minio/pkg/bucket/policy" ) -// API suite container common to both FS and XL. +// API suite container common to both FS and Erasure. type TestSuiteCommon struct { serverType string testServer TestServer @@ -124,10 +124,10 @@ func TestServerSuite(t *testing.T) { {serverType: "FS", signer: signerV2}, // Init and run test on FS backend, with tls enabled. {serverType: "FS", signer: signerV4, secure: true}, - // Init and run test on XL backend. - {serverType: "XL", signer: signerV4}, - // Init and run test on XLSet backend. - {serverType: "XLSet", signer: signerV4}, + // Init and run test on Erasure backend. + {serverType: "Erasure", signer: signerV4}, + // Init and run test on ErasureSet backend. + {serverType: "ErasureSet", signer: signerV4}, } for i, testCase := range testCases { t.Run(fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.serverType), func(t *testing.T) { @@ -516,7 +516,7 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) { // assert the status of http response. c.Assert(response.StatusCode, http.StatusOK) // Append all objects. 
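In the assertions that follow, the request and response entries are no longer the same type: with versioning, a delete request entry (ObjectToDelete) can name a specific version, and the response entry (DeletedObject) reports what was actually removed. A minimal sketch of the two shapes as used in this test; the full types in the tree may carry additional fields such as delete-marker details:

    // Request side: one entry per object (and optionally per version).
    type ObjectToDelete struct {
        ObjectName string
        VersionID  string
    }

    // Response side: reported for each successful deletion.
    type DeletedObject struct {
        ObjectName string
        VersionID  string
    }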
- delObjReq.Objects = append(delObjReq.Objects, ObjectIdentifier{ + delObjReq.Objects = append(delObjReq.Objects, ObjectToDelete{ ObjectName: objName, }) } @@ -539,7 +539,10 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) { c.Assert(err, nil) for i := 0; i < 10; i++ { // All the objects should be under deleted list (including non-existent object) - c.Assert(deleteResp.DeletedObjects[i], delObjReq.Objects[i]) + c.Assert(deleteResp.DeletedObjects[i], DeletedObject{ + ObjectName: delObjReq.Objects[i].ObjectName, + VersionID: delObjReq.Objects[i].VersionID, + }) } c.Assert(len(deleteResp.Errors), 0) @@ -559,7 +562,10 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) { c.Assert(err, nil) c.Assert(len(deleteResp.DeletedObjects), len(delObjReq.Objects)) for i := 0; i < 10; i++ { - c.Assert(deleteResp.DeletedObjects[i], delObjReq.Objects[i]) + c.Assert(deleteResp.DeletedObjects[i], DeletedObject{ + ObjectName: delObjReq.Objects[i].ObjectName, + VersionID: delObjReq.Objects[i].VersionID, + }) } c.Assert(len(deleteResp.Errors), 0) } diff --git a/cmd/setup-type.go b/cmd/setup-type.go index 516e58213..df91868dd 100644 --- a/cmd/setup-type.go +++ b/cmd/setup-type.go @@ -26,11 +26,11 @@ const ( // FSSetupType - FS setup type enum. FSSetupType - // XLSetupType - XL setup type enum. - XLSetupType + // ErasureSetupType - Erasure setup type enum. + ErasureSetupType - // DistXLSetupType - Distributed XL setup type enum. - DistXLSetupType + // DistErasureSetupType - Distributed Erasure setup type enum. + DistErasureSetupType // GatewaySetupType - gateway setup type enum. GatewaySetupType @@ -40,10 +40,10 @@ func (setupType SetupType) String() string { switch setupType { case FSSetupType: return globalMinioModeFS - case XLSetupType: - return globalMinioModeXL - case DistXLSetupType: - return globalMinioModeDistXL + case ErasureSetupType: + return globalMinioModeErasure + case DistErasureSetupType: + return globalMinioModeDistErasure case GatewaySetupType: return globalMinioModeGatewayPrefix } diff --git a/cmd/storage-datatypes.go b/cmd/storage-datatypes.go index 12f3ed194..775ffecb7 100644 --- a/cmd/storage-datatypes.go +++ b/cmd/storage-datatypes.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,8 +19,6 @@ package cmd import ( "os" "time" - - xhttp "github.com/minio/minio/cmd/http" ) // VolInfo - represents volume stat information. @@ -39,6 +37,29 @@ type FilesInfo struct { IsTruncated bool } +// FilesInfoVersions represents a list of file versions, +// additionally indicates if the listing is truncated. +type FilesInfoVersions struct { + FilesVersions []FileInfoVersions + IsTruncated bool +} + +// FileInfoVersions represents a list of versions for a given file. +type FileInfoVersions struct { + // Name of the volume. + Volume string + + // Name of the file. + Name string + + // Represents the latest mod time of the + // latest version. + LatestModTime time.Time + + Versions []FileInfo + Deleted []FileInfo +} + // FileInfo - represents file stat information. type FileInfo struct { // Name of the volume. Volume string @@ -47,7 +68,21 @@ type FileInfo struct { // Name of the file. Name string - // Date and time when the file was last modified. + // Version of the file.
+ VersionID string + + // Indicates if the version is the latest + IsLatest bool + + // Deleted is set when this FileInfo represents + // a delete marker for a versioned bucket. + Deleted bool + + // DataDir of the file + DataDir string + + // Date and time when the file was last modified, if Deleted + // is 'true' this value represents when the file was deleted. ModTime time.Time // Total file size. Size int64 @@ -62,49 +97,18 @@ type FileInfo struct { // All the parts per object. Parts []ObjectPartInfo - Quorum int + // Erasure info for all objects. + Erasure ErasureInfo } -// ToObjectInfo converts FileInfo into objectInfo. -func (entry FileInfo) ToObjectInfo() ObjectInfo { - var objInfo ObjectInfo - if HasSuffix(entry.Name, SlashSeparator) { - objInfo = ObjectInfo{ - Bucket: entry.Volume, - Name: entry.Name, - IsDir: true, - } - } else { - objInfo = ObjectInfo{ - IsDir: false, - Bucket: entry.Volume, - Name: entry.Name, - ModTime: entry.ModTime, - Size: entry.Size, - ContentType: entry.Metadata["content-type"], - ContentEncoding: entry.Metadata["content-encoding"], - } - - // Extract object tagging information - objInfo.UserTags = entry.Metadata[xhttp.AmzObjectTagging] - - // Extract etag from metadata. - objInfo.ETag = extractETag(entry.Metadata) - - // All the parts per object. - objInfo.Parts = entry.Parts - - // etag/md5Sum has already been extracted. We need to - // remove to avoid it from appearing as part of - // response headers. e.g, X-Minio-* or X-Amz-*. - objInfo.UserDefined = cleanMetadata(entry.Metadata) - - // Update storage class - if sc, ok := entry.Metadata[xhttp.AmzStorageClass]; ok { - objInfo.StorageClass = sc - } else { - objInfo.StorageClass = globalMinioDefaultStorageClass - } +// newFileInfo - initializes new FileInfo, allocates a fresh erasure info. +func newFileInfo(object string, dataBlocks, parityBlocks int) (fi FileInfo) { + fi.Erasure = ErasureInfo{ + Algorithm: erasureAlgorithm, + DataBlocks: dataBlocks, + ParityBlocks: parityBlocks, + BlockSize: blockSizeV1, + Distribution: hashOrder(object, dataBlocks+parityBlocks), } - return objInfo + return fi } diff --git a/cmd/storage-errors.go b/cmd/storage-errors.go index 6a674e1cc..bcd38f4b0 100644 --- a/cmd/storage-errors.go +++ b/cmd/storage-errors.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2015, 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2015-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,6 +16,8 @@ package cmd +import "os" + // errUnexpected - unexpected error, requires manual intervention. var errUnexpected = StorageErr("Unexpected error, please report this issue at https://github.com/minio/minio/issues") @@ -31,6 +33,9 @@ var errUnsupportedDisk = StorageErr("disk does not support O_DIRECT") // errDiskFull - cannot create volume or files when disk is full. var errDiskFull = StorageErr("disk path full") +// errDiskNotDir - cannot use storage disk if it's not a directory +var errDiskNotDir = StorageErr("disk is not directory or mountpoint") + // errDiskNotFound - cannot find the underlying configured disk anymore. var errDiskNotFound = StorageErr("disk not found") @@ -46,6 +51,9 @@ var errDiskAccessDenied = StorageErr("disk access denied") // errFileNotFound - cannot find the file. var errFileNotFound = StorageErr("file not found") +// errFileVersionNotFound - cannot find requested file version. +var errFileVersionNotFound = StorageErr("file version not found") + // errTooManyOpenFiles - too many open files.
var errTooManyOpenFiles = StorageErr("too many open files") @@ -92,7 +100,7 @@ var errLessData = StorageErr("less data available than what was requested") // errMoreData = returned when more data was sent by the caller than what it was supposed to. var errMoreData = StorageErr("more data was sent than what was advertised") -// StorageErr represents error generated by posix call. +// StorageErr represents error generated by xlStorage call. type StorageErr string func (h StorageErr) Error() string { @@ -107,3 +115,32 @@ var baseErrs = []error{ } var baseIgnoredErrs = baseErrs + +// osErrToFileErr converts an os.PathError into a form the +// object layer can interpret, mapping known system errors +// to their typed equivalents for top level interpretation. +func osErrToFileErr(err error) error { + if err == nil { + return nil + } + if os.IsNotExist(err) { + return errFileNotFound + } + if os.IsPermission(err) { + return errFileAccessDenied + } + if isSysErrNotDir(err) { + return errFileNotFound + } + if isSysErrPathNotFound(err) { + return errFileNotFound + } + if isSysErrTooManyFiles(err) { + return errTooManyOpenFiles + } + if isSysErrHandleInvalid(err) { + return errFileNotFound + } + return err +} diff --git a/cmd/storage-interface.go b/cmd/storage-interface.go index 7cfe37f8d..104256cd2 100644 --- a/cmd/storage-interface.go +++ b/cmd/storage-interface.go @@ -44,24 +44,31 @@ type StorageAPI interface { StatVol(volume string) (vol VolInfo, err error) DeleteVol(volume string, forceDelete bool) (err error) + // WalkVersions in sorted order directly on disk. + WalkVersions(volume, dirPath string, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfoVersions, error) // Walk in sorted order directly on disk. - Walk(volume, dirPath string, marker string, recursive bool, leafFile string, - readMetadataFn readMetadataFunc, endWalkCh <-chan struct{}) (chan FileInfo, error) + Walk(volume, dirPath string, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfo, error) // Walk in sorted order directly on disk. WalkSplunk(volume, dirPath string, marker string, endWalkCh <-chan struct{}) (chan FileInfo, error) + // Metadata operations + DeleteVersion(volume, path string, fi FileInfo) error + DeleteVersions(volume string, versions []FileInfo) []error + WriteMetadata(volume, path string, fi FileInfo) error + ReadVersion(volume, path, versionID string) (FileInfo, error) + RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath string) error + // File operations.
- ListDir(volume, dirPath string, count int, leafFile string) ([]string, error) + ListDir(volume, dirPath string, count int) ([]string, error) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) AppendFile(volume string, path string, buf []byte) (err error) CreateFile(volume, path string, size int64, reader io.Reader) error ReadFileStream(volume, path string, offset, length int64) (io.ReadCloser, error) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error - StatFile(volume string, path string) (file FileInfo, err error) + CheckParts(volume string, path string, fi FileInfo) error + CheckFile(volume string, path string) (err error) DeleteFile(volume string, path string) (err error) - DeleteFileBulk(volume string, paths []string) (errs []error, err error) - DeletePrefixes(volume string, paths []string) (errs []error, err error) - VerifyFile(volume, path string, size int64, algo BitrotAlgorithm, sum []byte, shardSize int64) error + VerifyFile(volume, path string, fi FileInfo) error // Write all data, syncs the data to disk. WriteAll(volume string, path string, reader io.Reader) (err error) diff --git a/cmd/storage-rest-client.go b/cmd/storage-rest-client.go index d21934f0c..c683d35bf 100644 --- a/cmd/storage-rest-client.go +++ b/cmd/storage-rest-client.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc. + * MinIO Cloud Storage, (C) 2018-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -270,6 +270,33 @@ func (client *storageRESTClient) CreateFile(volume, path string, length int64, r return err } +func (client *storageRESTClient) WriteMetadata(volume, path string, fi FileInfo) error { + values := make(url.Values) + values.Set(storageRESTVolume, volume) + values.Set(storageRESTFilePath, path) + + var reader bytes.Buffer + if err := gob.NewEncoder(&reader).Encode(fi); err != nil { + return err + } + + respBody, err := client.call(storageRESTMethodWriteMetadata, values, &reader, -1) + defer http.DrainBody(respBody) + return err +} + +func (client *storageRESTClient) DeleteVersion(volume, path string, fi FileInfo) error { + values := make(url.Values) + values.Set(storageRESTVolume, volume) + values.Set(storageRESTFilePath, path) + values.Set(storageRESTVersionID, fi.VersionID) + values.Set(storageRESTDeleteMarker, strconv.FormatBool(fi.Deleted)) + + respBody, err := client.call(storageRESTMethodDeleteVersion, values, nil, -1) + defer http.DrainBody(respBody) + return err +} + // WriteAll - write all data to a file. func (client *storageRESTClient) WriteAll(volume, path string, reader io.Reader) error { values := make(url.Values) @@ -280,18 +307,60 @@ func (client *storageRESTClient) WriteAll(volume, path string, reader io.Reader) return err } -// StatFile - stat a file. -func (client *storageRESTClient) StatFile(volume, path string) (info FileInfo, err error) { +// CheckFile - stat a file metadata. +func (client *storageRESTClient) CheckFile(volume, path string) error { values := make(url.Values) values.Set(storageRESTVolume, volume) values.Set(storageRESTFilePath, path) - respBody, err := client.call(storageRESTMethodStatFile, values, nil, -1) + respBody, err := client.call(storageRESTMethodCheckFile, values, nil, -1) + defer http.DrainBody(respBody) + return err +} + +// CheckParts - stat all file parts. 
+func (client *storageRESTClient) CheckParts(volume, path string, fi FileInfo) error { + values := make(url.Values) + values.Set(storageRESTVolume, volume) + values.Set(storageRESTFilePath, path) + + var reader bytes.Buffer + if err := gob.NewEncoder(&reader).Encode(fi); err != nil { + return err + } + + respBody, err := client.call(storageRESTMethodCheckParts, values, &reader, -1) + defer http.DrainBody(respBody) + return err +} + +// RenameData - rename source path to destination path atomically, metadata and data file. +func (client *storageRESTClient) RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath string) (err error) { + values := make(url.Values) + values.Set(storageRESTSrcVolume, srcVolume) + values.Set(storageRESTSrcPath, srcPath) + values.Set(storageRESTDataDir, dataDir) + values.Set(storageRESTDstVolume, dstVolume) + values.Set(storageRESTDstPath, dstPath) + respBody, err := client.call(storageRESTMethodRenameData, values, nil, -1) + defer http.DrainBody(respBody) + + return err +} + +func (client *storageRESTClient) ReadVersion(volume, path, versionID string) (fi FileInfo, err error) { + values := make(url.Values) + values.Set(storageRESTVolume, volume) + values.Set(storageRESTFilePath, path) + values.Set(storageRESTVersionID, versionID) + + respBody, err := client.call(storageRESTMethodReadVersion, values, nil, -1) if err != nil { - return info, err + return fi, err } defer http.DrainBody(respBody) - err = gob.NewDecoder(respBody).Decode(&info) - return info, err + + err = gob.NewDecoder(respBody).Decode(&fi) + return fi, err } // ReadAll - reads all contents of a file. @@ -378,14 +447,47 @@ func (client *storageRESTClient) WalkSplunk(volume, dirPath, marker string, endW return ch, nil } -func (client *storageRESTClient) Walk(volume, dirPath, marker string, recursive bool, leafFile string, - readMetadataFn readMetadataFunc, endWalkCh <-chan struct{}) (chan FileInfo, error) { +func (client *storageRESTClient) WalkVersions(volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfoVersions, error) { + values := make(url.Values) + values.Set(storageRESTVolume, volume) + values.Set(storageRESTDirPath, dirPath) + values.Set(storageRESTMarkerPath, marker) + values.Set(storageRESTRecursive, strconv.FormatBool(recursive)) + respBody, err := client.call(storageRESTMethodWalkVersions, values, nil, -1) + if err != nil { + return nil, err + } + + ch := make(chan FileInfoVersions) + go func() { + defer close(ch) + defer http.DrainBody(respBody) + + decoder := gob.NewDecoder(respBody) + for { + var fi FileInfoVersions + if gerr := decoder.Decode(&fi); gerr != nil { + // Upon error return + return + } + select { + case ch <- fi: + case <-endWalkCh: + return + } + + } + }() + + return ch, nil +} + +func (client *storageRESTClient) Walk(volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfo, error) { values := make(url.Values) values.Set(storageRESTVolume, volume) values.Set(storageRESTDirPath, dirPath) values.Set(storageRESTMarkerPath, marker) values.Set(storageRESTRecursive, strconv.FormatBool(recursive)) - values.Set(storageRESTLeafFile, leafFile) respBody, err := client.call(storageRESTMethodWalk, values, nil, -1) if err != nil { return nil, err @@ -416,12 +518,11 @@ func (client *storageRESTClient) Walk(volume, dirPath, marker string, recursive } // ListDir - lists a directory.
-func (client *storageRESTClient) ListDir(volume, dirPath string, count int, leafFile string) (entries []string, err error) { +func (client *storageRESTClient) ListDir(volume, dirPath string, count int) (entries []string, err error) { values := make(url.Values) values.Set(storageRESTVolume, volume) values.Set(storageRESTDirPath, dirPath) values.Set(storageRESTCount, strconv.Itoa(count)) - values.Set(storageRESTLeafFile, leafFile) respBody, err := client.call(storageRESTMethodListDir, values, nil, -1) if err != nil { return nil, err @@ -441,78 +542,54 @@ func (client *storageRESTClient) DeleteFile(volume, path string) error { return err } -// DeleteFileBulk - deletes files in bulk. -func (client *storageRESTClient) DeleteFileBulk(volume string, paths []string) (errs []error, err error) { - if len(paths) == 0 { - return errs, err +// DeleteVersions - deletes list of specified versions if present +func (client *storageRESTClient) DeleteVersions(volume string, versions []FileInfo) (errs []error) { + if len(versions) == 0 { + return errs } + values := make(url.Values) values.Set(storageRESTVolume, volume) + values.Set(storageRESTTotalVersions, strconv.Itoa(len(versions))) var buffer bytes.Buffer - for _, path := range paths { - buffer.WriteString(path) - buffer.WriteString("\n") + encoder := gob.NewEncoder(&buffer) + for _, version := range versions { + encoder.Encode(&version) } - respBody, err := client.call(storageRESTMethodDeleteFileBulk, values, &buffer, -1) + errs = make([]error, len(versions)) + + respBody, err := client.call(storageRESTMethodDeleteVersions, values, &buffer, -1) defer http.DrainBody(respBody) if err != nil { - return nil, err + for i := range errs { + errs[i] = err + } + return errs } reader, err := waitForHTTPResponse(respBody) if err != nil { - return nil, err + for i := range errs { + errs[i] = err + } + return errs } - dErrResp := &DeleteFileBulkErrsResp{} + dErrResp := &DeleteVersionsErrsResp{} if err = gob.NewDecoder(reader).Decode(dErrResp); err != nil { - return nil, err + for i := range errs { + errs[i] = err + } + return errs } - for _, dErr := range dErrResp.Errs { - errs = append(errs, toStorageErr(dErr)) + for i, dErr := range dErrResp.Errs { + errs[i] = toStorageErr(dErr) } - return errs, nil -} - -// DeletePrefixes - deletes prefixes in bulk. -func (client *storageRESTClient) DeletePrefixes(volume string, paths []string) (errs []error, err error) { - if len(paths) == 0 { - return errs, err - } - values := make(url.Values) - values.Set(storageRESTVolume, volume) - - var buffer bytes.Buffer - for _, path := range paths { - buffer.WriteString(path) - buffer.WriteString("\n") - } - - respBody, err := client.call(storageRESTMethodDeletePrefixes, values, &buffer, -1) - defer http.DrainBody(respBody) - if err != nil { - return nil, err - } - - reader, err := waitForHTTPResponse(respBody) - if err != nil { - return nil, err - } - - dErrResp := &DeletePrefixesErrsResp{} - if err = gob.NewDecoder(reader).Decode(dErrResp); err != nil { - return nil, err - } - - for _, dErr := range dErrResp.Errs { - errs = append(errs, toStorageErr(dErr)) - } - - return errs, nil + return errs } // RenameFile - renames a file. 
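The next hunk reworks bitrot verification along the same lines: instead of shipping size, algorithm, shard size, and checksum as individual query parameters, the caller passes a FileInfo whose Erasure section already carries that metadata. A hedged sketch of the resulting call shape on a StorageAPI disk (bucket, object, and versionID are placeholders):

    fi, err := disk.ReadVersion(bucket, object, versionID)
    if err != nil {
        return err
    }
    // fi.Erasure now carries the checksums and shard layout needed
    // to verify every part of the object.
    if err := disk.VerifyFile(bucket, object, fi); err != nil {
        return err // bitrot detected, or parts are missing
    }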
@@ -527,28 +604,32 @@ func (client *storageRESTClient) RenameFile(srcVolume, srcPath, dstVolume, dstPa return err } -func (client *storageRESTClient) VerifyFile(volume, path string, size int64, algo BitrotAlgorithm, sum []byte, shardSize int64) error { +func (client *storageRESTClient) VerifyFile(volume, path string, fi FileInfo) error { values := make(url.Values) values.Set(storageRESTVolume, volume) values.Set(storageRESTFilePath, path) - values.Set(storageRESTBitrotAlgo, algo.String()) - values.Set(storageRESTLength, strconv.FormatInt(size, 10)) - values.Set(storageRESTShardSize, strconv.Itoa(int(shardSize))) - values.Set(storageRESTBitrotHash, hex.EncodeToString(sum)) - respBody, err := client.call(storageRESTMethodVerifyFile, values, nil, -1) + var reader bytes.Buffer + if err := gob.NewEncoder(&reader).Encode(fi); err != nil { + return err + } + + respBody, err := client.call(storageRESTMethodVerifyFile, values, &reader, -1) defer http.DrainBody(respBody) if err != nil { return err } - reader, err := waitForHTTPResponse(respBody) + + respReader, err := waitForHTTPResponse(respBody) if err != nil { return err } + verifyResp := &VerifyFileResp{} - if err = gob.NewDecoder(reader).Decode(verifyResp); err != nil { + if err = gob.NewDecoder(respReader).Decode(verifyResp); err != nil { return err } + return toStorageErr(verifyResp.Err) } diff --git a/cmd/storage-rest-common.go b/cmd/storage-rest-common.go index e9d8ed5c7..37e02c337 100644 --- a/cmd/storage-rest-common.go +++ b/cmd/storage-rest-common.go @@ -17,7 +17,7 @@ package cmd const ( - storageRESTVersion = "v17" // RemoveBucket API change + storageRESTVersion = "v20" // Re-implementation of storage layer storageRESTVersionPrefix = SlashSeparator + storageRESTVersion storageRESTPrefix = minioReservedBucketPath + "/storage" ) @@ -34,38 +34,45 @@ const ( storageRESTMethodAppendFile = "/appendfile" storageRESTMethodCreateFile = "/createfile" storageRESTMethodWriteAll = "/writeall" - storageRESTMethodStatFile = "/statfile" + storageRESTMethodWriteMetadata = "/writemetadata" + storageRESTMethodDeleteVersion = "/deleteversion" + storageRESTMethodReadVersion = "/readversion" + storageRESTMethodRenameData = "/renamedata" + storageRESTMethodCheckParts = "/checkparts" + storageRESTMethodCheckFile = "/checkfile" storageRESTMethodReadAll = "/readall" storageRESTMethodReadFile = "/readfile" storageRESTMethodReadFileStream = "/readfilestream" storageRESTMethodListDir = "/listdir" storageRESTMethodWalk = "/walk" + storageRESTMethodWalkVersions = "/walkversions" storageRESTMethodWalkSplunk = "/walksplunk" storageRESTMethodDeleteFile = "/deletefile" - storageRESTMethodDeleteFileBulk = "/deletefilebulk" - storageRESTMethodDeletePrefixes = "/deleteprefixes" + storageRESTMethodDeleteVersions = "/deleteversions" storageRESTMethodRenameFile = "/renamefile" storageRESTMethodVerifyFile = "/verifyfile" ) const ( - storageRESTVolume = "volume" - storageRESTVolumes = "volumes" - storageRESTDirPath = "dir-path" - storageRESTFilePath = "file-path" - storageRESTSrcVolume = "source-volume" - storageRESTSrcPath = "source-path" - storageRESTDstVolume = "destination-volume" - storageRESTDstPath = "destination-path" - storageRESTOffset = "offset" - storageRESTLength = "length" - storageRESTShardSize = "shard-size" - storageRESTCount = "count" - storageRESTMarkerPath = "marker" - storageRESTLeafFile = "leaf-file" - storageRESTRecursive = "recursive" - storageRESTBitrotAlgo = "bitrot-algo" - storageRESTBitrotHash = "bitrot-hash" - storageRESTDiskID = "disk-id" - storageRESTForceDelete = "force-delete" + storageRESTVolume = "volume" + storageRESTVolumes = "volumes" + storageRESTDirPath = "dir-path" + storageRESTFilePath = "file-path" + storageRESTVersionID = "version-id" + storageRESTTotalVersions = "total-versions" + storageRESTDeleteMarker = "delete-marker" + storageRESTSrcVolume = "source-volume" + storageRESTSrcPath = "source-path" + storageRESTDataDir = "data-dir" + storageRESTDstVolume = "destination-volume" + storageRESTDstPath = "destination-path" + storageRESTOffset = "offset" + storageRESTLength = "length" + storageRESTCount = "count" + storageRESTMarkerPath = "marker" + storageRESTRecursive = "recursive" + storageRESTBitrotAlgo = "bitrot-algo" + storageRESTBitrotHash = "bitrot-hash" + storageRESTDiskID = "disk-id" + storageRESTForceDelete = "force-delete" ) diff --git a/cmd/storage-rest-server.go b/cmd/storage-rest-server.go index 9411c96c5..d4404c7b7 100644 --- a/cmd/storage-rest-server.go +++ b/cmd/storage-rest-server.go @@ -43,7 +43,7 @@ var errDiskStale = errors.New("disk stale") // To abstract a disk over network. type storageRESTServer struct { - storage *posix + storage *xlStorage } func (s *storageRESTServer) writeErrorResponse(w http.ResponseWriter, err error) { @@ -269,6 +269,70 @@ func (s *storageRESTServer) CreateFileHandler(w http.ResponseWriter, r *http.Req } } +// DeleteVersionHandler - delete a specified object version. +func (s *storageRESTServer) DeleteVersionHandler(w http.ResponseWriter, r *http.Request) { + if !s.IsValid(w, r) { + return + } + vars := mux.Vars(r) + volume := vars[storageRESTVolume] + filePath := vars[storageRESTFilePath] + versionID := vars[storageRESTVersionID] + deleteMarker := vars[storageRESTDeleteMarker] == "true" + + err := s.storage.DeleteVersion(volume, filePath, FileInfo{VersionID: versionID, Deleted: deleteMarker}) + if err != nil { + s.writeErrorResponse(w, err) + } +} + +// ReadVersionHandler - read metadata of a specified object version. +func (s *storageRESTServer) ReadVersionHandler(w http.ResponseWriter, r *http.Request) { + if !s.IsValid(w, r) { + return + } + vars := mux.Vars(r) + volume := vars[storageRESTVolume] + filePath := vars[storageRESTFilePath] + versionID := vars[storageRESTVersionID] + + fi, err := s.storage.ReadVersion(volume, filePath, versionID) + if err != nil { + s.writeErrorResponse(w, err) + return + } + + gob.NewEncoder(w).Encode(fi) + w.(http.Flusher).Flush() +} + +// WriteMetadataHandler - write updated metadata for an object. +func (s *storageRESTServer) WriteMetadataHandler(w http.ResponseWriter, r *http.Request) { + if !s.IsValid(w, r) { + return + } + vars := mux.Vars(r) + volume := vars[storageRESTVolume] + filePath := vars[storageRESTFilePath] + + if r.ContentLength < 0 { + s.writeErrorResponse(w, errInvalidArgument) + return + } + + var fi FileInfo + err := gob.NewDecoder(r.Body).Decode(&fi) + if err != nil { + s.writeErrorResponse(w, err) + return + } + + err = s.storage.WriteMetadata(volume, filePath, fi) + if err != nil { + s.writeErrorResponse(w, err) + } +} + // WriteAllHandler - write to file all content. func (s *storageRESTServer) WriteAllHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { return } @@ -289,8 +353,8 @@ func (s *storageRESTServer) WriteAllHandler(w http.ResponseWriter, r *http.Reque } } -// StatFileHandler - stat a file. -func (s *storageRESTServer) StatFileHandler(w http.ResponseWriter, r *http.Request) { +// CheckPartsHandler - check if specified file parts are present.
+func (s *storageRESTServer) CheckPartsHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { return } @@ -298,13 +362,34 @@ func (s *storageRESTServer) StatFileHandler(w http.ResponseWriter, r *http.Reque volume := vars[storageRESTVolume] filePath := vars[storageRESTFilePath] - info, err := s.storage.StatFile(volume, filePath) - if err != nil { + if r.ContentLength < 0 { + s.writeErrorResponse(w, errInvalidArgument) + return + } + + var fi FileInfo + if err := gob.NewDecoder(r.Body).Decode(&fi); err != nil { s.writeErrorResponse(w, err) return } - gob.NewEncoder(w).Encode(info) - w.(http.Flusher).Flush() + + if err := s.storage.CheckParts(volume, filePath, fi); err != nil { + s.writeErrorResponse(w, err) + } +} + +// CheckFileHandler - check if a file metadata exists. +func (s *storageRESTServer) CheckFileHandler(w http.ResponseWriter, r *http.Request) { + if !s.IsValid(w, r) { + return + } + vars := mux.Vars(r) + volume := vars[storageRESTVolume] + filePath := vars[storageRESTFilePath] + + if err := s.storage.CheckFile(volume, filePath); err != nil { + s.writeErrorResponse(w, err) + } } // ReadAllHandler - read all the contents of a file. @@ -400,26 +485,6 @@ func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http io.Copy(w, rc) w.(http.Flusher).Flush() - -} - -// readMetadata func provides the function types for reading leaf metadata. -type readMetadataFunc func(buf []byte, volume, entry string) FileInfo - -func readMetadata(buf []byte, volume, entry string) FileInfo { - m, err := xlMetaV1UnmarshalJSON(GlobalContext, buf) - if err != nil { - return FileInfo{} - } - return FileInfo{ - Volume: volume, - Name: entry, - ModTime: m.Stat.ModTime, - Size: m.Stat.Size, - Metadata: m.Meta, - Parts: m.Parts, - Quorum: m.Erasure.DataBlocks, - } } // WalkHandler - remote caller to start walking at a requested directory path. @@ -446,6 +511,35 @@ func (s *storageRESTServer) WalkSplunkHandler(w http.ResponseWriter, r *http.Req w.(http.Flusher).Flush() } +// WalkVersionsHandler - remote caller to start walking at a requested directory path. +func (s *storageRESTServer) WalkVersionsHandler(w http.ResponseWriter, r *http.Request) { + if !s.IsValid(w, r) { + return + } + vars := mux.Vars(r) + volume := vars[storageRESTVolume] + dirPath := vars[storageRESTDirPath] + markerPath := vars[storageRESTMarkerPath] + recursive, err := strconv.ParseBool(vars[storageRESTRecursive]) + if err != nil { + s.writeErrorResponse(w, err) + return + } + + w.Header().Set(xhttp.ContentType, "text/event-stream") + encoder := gob.NewEncoder(w) + + fch, err := s.storage.WalkVersions(volume, dirPath, markerPath, recursive, r.Context().Done()) + if err != nil { + s.writeErrorResponse(w, err) + return + } + for fi := range fch { + encoder.Encode(&fi) + } + w.(http.Flusher).Flush() +} + // WalkHandler - remote caller to start walking at a requested directory path. 
func (s *storageRESTServer) WalkHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { @@ -460,12 +554,11 @@ func (s *storageRESTServer) WalkHandler(w http.ResponseWriter, r *http.Request) s.writeErrorResponse(w, err) return } - leafFile := vars[storageRESTLeafFile] w.Header().Set(xhttp.ContentType, "text/event-stream") encoder := gob.NewEncoder(w) - fch, err := s.storage.Walk(volume, dirPath, markerPath, recursive, leafFile, readMetadata, r.Context().Done()) + fch, err := s.storage.Walk(volume, dirPath, markerPath, recursive, r.Context().Done()) if err != nil { s.writeErrorResponse(w, err) return @@ -484,14 +577,13 @@ func (s *storageRESTServer) ListDirHandler(w http.ResponseWriter, r *http.Reques vars := mux.Vars(r) volume := vars[storageRESTVolume] dirPath := vars[storageRESTDirPath] - leafFile := vars[storageRESTLeafFile] count, err := strconv.Atoi(vars[storageRESTCount]) if err != nil { s.writeErrorResponse(w, err) return } - entries, err := s.storage.ListDir(volume, dirPath, count, leafFile) + entries, err := s.storage.ListDir(volume, dirPath, count) if err != nil { s.writeErrorResponse(w, err) return @@ -515,96 +607,67 @@ func (s *storageRESTServer) DeleteFileHandler(w http.ResponseWriter, r *http.Req } } -// DeleteFileBulkErrsResp - collection of deleteFile errors -// for bulk deletes -type DeleteFileBulkErrsResp struct { +// DeleteVersionsErrsResp - collection of delete errors +// for bulk version deletes +type DeleteVersionsErrsResp struct { Errs []error } -// DeleteFileBulkHandler - delete a file. -func (s *storageRESTServer) DeleteFileBulkHandler(w http.ResponseWriter, r *http.Request) { +// DeleteVersionsHandler - delete a set of a versions. +func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { return } + vars := r.URL.Query() volume := vars.Get(storageRESTVolume) - bio := bufio.NewScanner(r.Body) - var filePaths []string - for bio.Scan() { - filePaths = append(filePaths, bio.Text()) - } - - if err := bio.Err(); err != nil { + totalVersions, err := strconv.Atoi(vars.Get(storageRESTTotalVersions)) + if err != nil { s.writeErrorResponse(w, err) return } - dErrsResp := &DeleteFileBulkErrsResp{Errs: make([]error, len(filePaths))} + versions := make([]FileInfo, totalVersions) + decoder := gob.NewDecoder(r.Body) + for i := 0; i < totalVersions; i++ { + if err := decoder.Decode(&versions[i]); err != nil { + s.writeErrorResponse(w, err) + return + } + } + + dErrsResp := &DeleteVersionsErrsResp{Errs: make([]error, totalVersions)} w.Header().Set(xhttp.ContentType, "text/event-stream") encoder := gob.NewEncoder(w) done := keepHTTPResponseAlive(w) - errs, err := s.storage.DeleteFileBulk(volume, filePaths) + errs := s.storage.DeleteVersions(volume, versions) done(nil) - - for idx := range filePaths { - if err != nil { - dErrsResp.Errs[idx] = StorageErr(err.Error()) - } else { - if errs[idx] != nil { - dErrsResp.Errs[idx] = StorageErr(errs[idx].Error()) - } + for idx := range versions { + if errs[idx] != nil { + dErrsResp.Errs[idx] = StorageErr(errs[idx].Error()) } } - encoder.Encode(dErrsResp) w.(http.Flusher).Flush() } -// DeletePrefixesErrsResp - collection of delete errors -// for bulk prefixes deletes -type DeletePrefixesErrsResp struct { - Errs []error -} - -// DeletePrefixesHandler - delete a set of a prefixes. -func (s *storageRESTServer) DeletePrefixesHandler(w http.ResponseWriter, r *http.Request) { +// RenameDataHandler - renames a meta object and data dir to destination. 
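RenameData is the commit step of a versioned write: the object's data directory and its metadata move from the temporary volume to the final object path in one storage call, so a reader never observes metadata without its data. The client-visible contract, roughly (the volume and path names here are illustrative, not taken from this diff):

    // Promote an upload: tmp data dir + metadata -> final object path.
    err := disk.RenameData(minioMetaTmpBucket, tmpObjPath, fi.DataDir, bucket, object)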
+func (s *storageRESTServer) RenameDataHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { return } - vars := r.URL.Query() - volume := vars.Get(storageRESTVolume) - - bio := bufio.NewScanner(r.Body) - var prefixes []string - for bio.Scan() { - prefixes = append(prefixes, bio.Text()) - } - - if err := bio.Err(); err != nil { + vars := mux.Vars(r) + srcVolume := vars[storageRESTSrcVolume] + srcFilePath := vars[storageRESTSrcPath] + dataDir := vars[storageRESTDataDir] + dstVolume := vars[storageRESTDstVolume] + dstFilePath := vars[storageRESTDstPath] + err := s.storage.RenameData(srcVolume, srcFilePath, dataDir, dstVolume, dstFilePath) + if err != nil { s.writeErrorResponse(w, err) - return } - - dErrsResp := &DeletePrefixesErrsResp{Errs: make([]error, len(prefixes))} - - w.Header().Set(xhttp.ContentType, "text/event-stream") - encoder := gob.NewEncoder(w) - done := keepHTTPResponseAlive(w) - errs, err := s.storage.DeletePrefixes(volume, prefixes) - done(nil) - for idx := range prefixes { - if err != nil { - dErrsResp.Errs[idx] = StorageErr(err.Error()) - } else { - if errs[idx] != nil { - dErrsResp.Errs[idx] = StorageErr(errs[idx].Error()) - } - } - } - encoder.Encode(dErrsResp) - w.(http.Flusher).Flush() } // RenameFileHandler - rename a file. @@ -701,42 +764,31 @@ type VerifyFileResp struct { Err error } -// VerifyFile - Verify the file for bitrot errors. -func (s *storageRESTServer) VerifyFile(w http.ResponseWriter, r *http.Request) { +// VerifyFileHandler - verify all parts of a file for bitrot errors. +func (s *storageRESTServer) VerifyFileHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { return } vars := mux.Vars(r) volume := vars[storageRESTVolume] filePath := vars[storageRESTFilePath] - size, err := strconv.ParseInt(vars[storageRESTLength], 10, 0) - if err != nil { - s.writeErrorResponse(w, err) - return - } - shardSize, err := strconv.Atoi(vars[storageRESTShardSize]) - if err != nil { - s.writeErrorResponse(w, err) - return - } - hashStr := vars[storageRESTBitrotHash] - var hash []byte - if hashStr != "" { - hash, err = hex.DecodeString(hashStr) - if err != nil { - s.writeErrorResponse(w, err) - return - } - } - algoStr := vars[storageRESTBitrotAlgo] - if algoStr == "" { + + if r.ContentLength < 0 { s.writeErrorResponse(w, errInvalidArgument) return } + + var fi FileInfo + err := gob.NewDecoder(r.Body).Decode(&fi) + if err != nil { + s.writeErrorResponse(w, err) + return + } + w.Header().Set(xhttp.ContentType, "text/event-stream") encoder := gob.NewEncoder(w) done := keepHTTPResponseAlive(w) - err = s.storage.VerifyFile(volume, filePath, size, BitrotAlgorithmFromString(algoStr), hash, int64(shardSize)) + err = s.storage.VerifyFile(volume, filePath, fi) done(nil) vresp := &VerifyFileResp{} if err != nil { @@ -753,7 +805,7 @@ func registerStorageRESTHandlers(router *mux.Router, endpointZones EndpointZones if !endpoint.IsLocal { continue } - storage, err := newPosix(endpoint.Path, endpoint.Host) + storage, err := newXLStorage(endpoint.Path, endpoint.Host) if err != nil { if err == errMinDiskSize { logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(err.Error()), "Unable to initialize backend") @@ -768,10 +820,8 @@ func registerStorageRESTHandlers(router *mux.Router, endpointZones EndpointZones } else { username = "" } - hint := fmt.Sprintf("Run the following command to add the convenient permissions: `sudo chown %s %s && sudo chmod u+rxw %s`", - username, endpoint.Path, endpoint.Path) -
logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(hint), - "Unable to initialize posix backend") + hint := fmt.Sprintf("Run the following command to add the convenient permissions: `sudo chown %s %s && sudo chmod u+rxw %s`", username, endpoint.Path, endpoint.Path) + logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(hint), "Unable to initialize posix backend") } server := &storageRESTServer{storage: storage} @@ -790,11 +840,23 @@ func registerStorageRESTHandlers(router *mux.Router, endpointZones EndpointZones Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWriteAll).HandlerFunc(httpTraceHdrs(server.WriteAllHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWriteMetadata).HandlerFunc(httpTraceHdrs(server.WriteMetadataHandler)). + Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteVersion).HandlerFunc(httpTraceHdrs(server.DeleteVersionHandler)). + Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTVersionID, storageRESTDeleteMarker)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadVersion).HandlerFunc(httpTraceHdrs(server.ReadVersionHandler)). + Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTVersionID)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodRenameData).HandlerFunc(httpTraceHdrs(server.RenameDataHandler)). + Queries(restQueries(storageRESTSrcVolume, storageRESTSrcPath, storageRESTDataDir, + storageRESTDstVolume, storageRESTDstPath)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCreateFile).HandlerFunc(httpTraceHdrs(server.CreateFileHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTLength)...) - subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodStatFile).HandlerFunc(httpTraceHdrs(server.StatFileHandler)). + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCheckFile).HandlerFunc(httpTraceHdrs(server.CheckFileHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCheckParts).HandlerFunc(httpTraceHdrs(server.CheckPartsHandler)). + Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadAll).HandlerFunc(httpTraceHdrs(server.ReadAllHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadFile).HandlerFunc(httpTraceHdrs(server.ReadFileHandler)). @@ -802,22 +864,23 @@ func registerStorageRESTHandlers(router *mux.Router, endpointZones EndpointZones subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadFileStream).HandlerFunc(httpTraceHdrs(server.ReadFileStreamHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTOffset, storageRESTLength)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodListDir).HandlerFunc(httpTraceHdrs(server.ListDirHandler)). 
- Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTCount, storageRESTLeafFile)...) + Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTCount)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWalk).HandlerFunc(httpTraceHdrs(server.WalkHandler)). - Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTMarkerPath, storageRESTRecursive, storageRESTLeafFile)...) + Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTMarkerPath, storageRESTRecursive)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWalkSplunk).HandlerFunc(httpTraceHdrs(server.WalkSplunkHandler)). Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTMarkerPath)...) - subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeletePrefixes).HandlerFunc(httpTraceHdrs(server.DeletePrefixesHandler)). - Queries(restQueries(storageRESTVolume)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWalkVersions).HandlerFunc(httpTraceHdrs(server.WalkVersionsHandler)). + Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTMarkerPath, storageRESTRecursive)...) + + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteVersions).HandlerFunc(httpTraceHdrs(server.DeleteVersionsHandler)). + Queries(restQueries(storageRESTVolume, storageRESTTotalVersions)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteFile).HandlerFunc(httpTraceHdrs(server.DeleteFileHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) - subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteFileBulk).HandlerFunc(httpTraceHdrs(server.DeleteFileBulkHandler)). - Queries(restQueries(storageRESTVolume)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodRenameFile).HandlerFunc(httpTraceHdrs(server.RenameFileHandler)). Queries(restQueries(storageRESTSrcVolume, storageRESTSrcPath, storageRESTDstVolume, storageRESTDstPath)...) - subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodVerifyFile).HandlerFunc(httpTraceHdrs(server.VerifyFile)). - Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTBitrotAlgo, storageRESTBitrotHash, storageRESTLength, storageRESTShardSize)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodVerifyFile).HandlerFunc(httpTraceHdrs(server.VerifyFileHandler)). + Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
} } } diff --git a/cmd/storage-rest_test.go b/cmd/storage-rest_test.go index 525e50a5c..6c5b9d7cc 100644 --- a/cmd/storage-rest_test.go +++ b/cmd/storage-rest_test.go @@ -191,7 +191,7 @@ func testStorageAPIDeleteVol(t *testing.T, storage StorageAPI) { } } -func testStorageAPIStatFile(t *testing.T, storage StorageAPI) { +func testStorageAPICheckFile(t *testing.T, storage StorageAPI) { tmpGlobalServerConfig := globalServerConfig defer func() { globalServerConfig = tmpGlobalServerConfig @@ -202,7 +202,7 @@ func testStorageAPIStatFile(t *testing.T, storage StorageAPI) { if err != nil { t.Fatalf("unexpected error %v", err) } - err = storage.AppendFile("foo", "myobject", []byte("foo")) + err = storage.AppendFile("foo", pathJoin("myobject", xlStorageFormatFile), []byte("foo")) if err != nil { t.Fatalf("unexpected error %v", err) } @@ -218,18 +218,12 @@ func testStorageAPIStatFile(t *testing.T, storage StorageAPI) { } for i, testCase := range testCases { - result, err := storage.StatFile(testCase.volumeName, testCase.objectName) + err := storage.CheckFile(testCase.volumeName, testCase.objectName) expectErr := (err != nil) if expectErr != testCase.expectErr { t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) } - - if !testCase.expectErr { - if result.Name != testCase.objectName { - t.Fatalf("case %v: result: expected: %+v, got: %+v", i+1, testCase.objectName, result.Name) - } - } } } @@ -261,7 +255,7 @@ func testStorageAPIListDir(t *testing.T, storage StorageAPI) { } for i, testCase := range testCases { - result, err := storage.ListDir(testCase.volumeName, testCase.prefix, -1, "") + result, err := storage.ListDir(testCase.volumeName, testCase.prefix, -1) expectErr := (err != nil) if expectErr != testCase.expectErr { @@ -586,7 +580,7 @@ func TestStorageRESTClientDeleteVol(t *testing.T) { testStorageAPIDeleteVol(t, restClient) } -func TestStorageRESTClientStatFile(t *testing.T) { +func TestStorageRESTClientCheckFile(t *testing.T) { httpServer, restClient, prevGlobalServerConfig, endpointPath := newStorageRESTHTTPServerClient(t) defer httpServer.Close() defer func() { @@ -594,7 +588,7 @@ func TestStorageRESTClientStatFile(t *testing.T) { }() defer os.RemoveAll(endpointPath) - testStorageAPIStatFile(t, restClient) + testStorageAPICheckFile(t, restClient) } func TestStorageRESTClientListDir(t *testing.T) { diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 55213ab2e..2da3b7f92 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -79,7 +79,7 @@ func init() { } // Set as non-distributed. - globalIsDistXL = false + globalIsDistErasure = false // Disable printing console messages during tests. color.Output = ioutil.Discard @@ -93,6 +93,8 @@ func init() { logger.Disable = true initHelp() + + resetTestGlobals() // Uncomment the following line to see trace logs during unit tests. 
// logger.AddTarget(console.New()) } @@ -173,11 +175,11 @@ func prepareFS() (ObjectLayer, string, error) { return obj, fsDirs[0], nil } -func prepareXLSets32(ctx context.Context) (ObjectLayer, []string, error) { - return prepareXL(ctx, 32) +func prepareErasureSets32(ctx context.Context) (ObjectLayer, []string, error) { + return prepareErasure(ctx, 32) } -func prepareXL(ctx context.Context, nDisks int) (ObjectLayer, []string, error) { +func prepareErasure(ctx context.Context, nDisks int) (ObjectLayer, []string, error) { fsDirs, err := getRandomDisks(nDisks) if err != nil { return nil, nil, err @@ -190,8 +192,8 @@ func prepareXL(ctx context.Context, nDisks int) (ObjectLayer, []string, error) { return obj, fsDirs, nil } -func prepareXL16(ctx context.Context) (ObjectLayer, []string, error) { - return prepareXL(ctx, 16) +func prepareErasure16(ctx context.Context) (ObjectLayer, []string, error) { + return prepareErasure(ctx, 16) } // Initialize FS objects. @@ -205,9 +207,10 @@ func initFSObjects(disk string, t *testing.T) (obj ObjectLayer) { return obj } -// TestErrHandler - Golang Testing.T and Testing.B, and gocheck.C satisfy this interface. +// TestErrHandler - Go testing.T satisfy this interface. // This makes it easy to run the TestServer from any of the tests. -// Using this interface, functionalities to be used in tests can be made generalized, and can be integrated in benchmarks/unit tests/go check suite tests. +// Using this interface, functionalities to be used in tests can be +// made generalized, and can be integrated in benchmarks/unit tests/go check suite tests. type TestErrHandler interface { Log(args ...interface{}) Logf(format string, args ...interface{}) @@ -222,11 +225,11 @@ const ( // FSTestStr is the string which is used as notation for Single node ObjectLayer in the unit tests. FSTestStr string = "FS" - // XLTestStr is the string which is used as notation for XL ObjectLayer in the unit tests. - XLTestStr string = "XL" + // ErasureTestStr is the string which is used as notation for Erasure ObjectLayer in the unit tests. + ErasureTestStr string = "Erasure" - // XLSetsTestStr is the string which is used as notation for XL sets object layer in the unit tests. - XLSetsTestStr string = "XLSet" + // ErasureSetsTestStr is the string which is used as notation for Erasure sets object layer in the unit tests. + ErasureSetsTestStr string = "ErasureSet" ) const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" @@ -272,7 +275,7 @@ func isSameType(obj1, obj2 interface{}) bool { // TestServer encapsulates an instantiation of a MinIO instance with a temporary backend. // Example usage: -// s := StartTestServer(t,"XL") +// s := StartTestServer(t,"Erasure") // defer s.Stop() type TestServer struct { Root string @@ -284,14 +287,14 @@ type TestServer struct { cancel context.CancelFunc } -// UnstartedTestServer - Configures a temp FS/XL backend, +// UnstartedTestServer - Configures a temp FS/Erasure backend, // initializes the endpoints and configures the test server. // The server should be started using the Start() method. func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer { ctx, cancel := context.WithCancel(context.Background()) // create an instance of TestServer. testServer := TestServer{cancel: cancel} - // return FS/XL object layer and temp backend. + // return FS/Erasure object layer and temp backend. 
objLayer, disks, err := prepareTestBackend(ctx, instanceType) if err != nil { t.Fatal(err) @@ -396,8 +399,8 @@ func resetGlobalEndpoints() { globalEndpoints = EndpointZones{} } -func resetGlobalIsXL() { - globalIsXL = false +func resetGlobalIsErasure() { + globalIsErasure = false } // reset global heal state @@ -445,8 +448,8 @@ func resetTestGlobals() { resetGlobalConfig() // Reset global endpoints. resetGlobalEndpoints() - // Reset global isXL flag. - resetGlobalIsXL() + // Reset global isErasure flag. + resetGlobalIsErasure() // Reset global heal state resetGlobalHealState() // Reset globalIAMSys to `nil` @@ -1549,7 +1552,7 @@ func newTestObjectLayer(ctx context.Context, endpointZones EndpointZones) (newOb return NewFSObjectLayer(endpointZones[0].Endpoints[0].Path) } - z, err := newXLZones(ctx, endpointZones) + z, err := newErasureZones(ctx, endpointZones) if err != nil { return nil, err } @@ -1570,7 +1573,7 @@ func initObjectLayer(ctx context.Context, endpointZones EndpointZones) (ObjectLa var formattedDisks []StorageAPI // Should use the object layer tests for validating cache. - if z, ok := objLayer.(*xlZones); ok { + if z, ok := objLayer.(*erasureZones); ok { formattedDisks = z.zones[0].GetDisks(0)() } @@ -1608,12 +1611,12 @@ func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (string, http.Handl bucketName := getRandomBucketName() // Create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, return err. return "", nil, err } - // Register the API end points with XL object layer. + // Register the API end points with Erasure object layer. // Registering only the GetObject handler. apiRouter := initTestAPIEndPoints(obj, endpoints) f := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -1624,16 +1627,16 @@ func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (string, http.Handl } // prepare test backend. -// create FS/XL/XLSet backend. +// create FS/Erasure/ErasureSet backend. // return object layer, backend disks. func prepareTestBackend(ctx context.Context, instanceType string) (ObjectLayer, []string, error) { switch instanceType { - // Total number of disks for XL sets backend is set to 32. - case XLSetsTestStr: - return prepareXLSets32(ctx) - // Total number of disks for XL backend is set to 16. - case XLTestStr: - return prepareXL16(ctx) + // Total number of disks for Erasure sets backend is set to 32. + case ErasureSetsTestStr: + return prepareErasureSets32(ctx) + // Total number of disks for Erasure backend is set to 16. + case ErasureTestStr: + return prepareErasure16(ctx) default: // return FS backend by default. obj, disk, err := prepareFS() @@ -1801,7 +1804,7 @@ func ExecObjectLayerAPINilTest(t TestErrHandler, bucketName, objectName, instanc } // ExecObjectLayerAPITest - executes object layer API tests. -// Creates single node and XL ObjectLayer instance, registers the specified API end points and runs test for both the layers. +// Creates single node and Erasure ObjectLayer instance, registers the specified API end points and runs test for both the layers. 
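The Exec* helpers in this region all follow the run-twice pattern the comment above describes: the same test body executes once against a single-node FS layer and once against a 16-disk Erasure layer. Condensed into one sketch using the helpers visible in this diff (error handling and config setup trimmed, and globalActiveCred standing in for the credentials the real harness derives):

// Condensed sketch of the flow ExecObjectLayerAPITest implements below;
// not a drop-in replacement.
func execAPITestSketch(t *testing.T, objAPITest objAPITestType, endpoints []string) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fsObj, fsDir, _ := prepareFS()                                // single-node FS layer
	fsBucket, fsRouter, _ := initAPIHandlerTest(fsObj, endpoints) // bucket + routed handler
	objAPITest(fsObj, FSTestStr, fsBucket, fsRouter, globalActiveCred, t)

	erObj, erDirs, _ := prepareErasure16(ctx) // 16-disk Erasure layer
	erBucket, erRouter, _ := initAPIHandlerTest(erObj, endpoints)
	objAPITest(erObj, ErasureTestStr, erBucket, erRouter, globalActiveCred, t)

	removeRoots(append(erDirs, fsDir)) // drop the temporary disks
}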
func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints []string) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1831,19 +1834,20 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints [ // Executing the object layer tests for single node setup. objAPITest(objLayer, FSTestStr, bucketFS, fsAPIRouter, credentials, t) - objLayer, xlDisks, err := prepareXL16(ctx) + objLayer, erasureDisks, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } - bucketXL, xlAPIRouter, err := initAPIHandlerTest(objLayer, endpoints) + bucketErasure, erAPIRouter, err := initAPIHandlerTest(objLayer, endpoints) if err != nil { t.Fatalf("Initialization of API handler tests failed: %s", err) } - // Executing the object layer tests for XL. - objAPITest(objLayer, XLTestStr, bucketXL, xlAPIRouter, credentials, t) + // Executing the object layer tests for Erasure. + objAPITest(objLayer, ErasureTestStr, bucketErasure, erAPIRouter, credentials, t) + // clean up the temporary test backend. - removeRoots(append(xlDisks, fsDir)) + removeRoots(append(erasureDisks, fsDir)) } // function to be passed to ExecObjectLayerAPITest, for executing object layer API handler tests. @@ -1860,7 +1864,7 @@ type objTestTypeWithDirs func(obj ObjectLayer, instanceType string, dirs []strin type objTestDiskNotFoundType func(obj ObjectLayer, instanceType string, dirs []string, t *testing.T) // ExecObjectLayerTest - executes object layer tests. -// Creates single node and XL ObjectLayer instance and runs test for both the layers. +// Creates single node and Erasure ObjectLayer instance and runs test for both the layers. func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1885,27 +1889,27 @@ func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) { newAllSubsystems() - objLayer, fsDirs, err := prepareXLSets32(ctx) + objLayer, fsDirs, err := prepareErasureSets32(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } initAllSubsystems(ctx, objLayer) defer removeRoots(append(fsDirs, fsDir)) - // Executing the object layer tests for XL. - objTest(objLayer, XLTestStr, t) + // Executing the object layer tests for Erasure. + objTest(objLayer, ErasureTestStr, t) } // ExecObjectLayerTestWithDirs - executes object layer tests. -// Creates single node and XL ObjectLayer instance and runs test for both the layers. +// Creates single node and Erasure ObjectLayer instance and runs test for both the layers. func ExecObjectLayerTestWithDirs(t TestErrHandler, objTest objTestTypeWithDirs) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - objLayer, fsDirs, err := prepareXL16(ctx) + objLayer, fsDirs, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } // initialize the server and obtain the credentials and root. @@ -1914,28 +1918,28 @@ func ExecObjectLayerTestWithDirs(t TestErrHandler, objTest objTestTypeWithDirs) t.Fatal("Unexpected error", err) } - // Executing the object layer tests for XL.
- objTest(objLayer, XLTestStr, fsDirs, t) + // Executing the object layer tests for Erasure. + objTest(objLayer, ErasureTestStr, fsDirs, t) defer removeRoots(fsDirs) } // ExecObjectLayerDiskAlteredTest - executes object layer tests while altering -// disks in between tests. Creates XL ObjectLayer instance and runs test for XL layer. +// disks in between tests. Creates Erasure ObjectLayer instance and runs test for Erasure layer. func ExecObjectLayerDiskAlteredTest(t *testing.T, objTest objTestDiskNotFoundType) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - objLayer, fsDirs, err := prepareXL16(ctx) + objLayer, fsDirs, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil { t.Fatal("Failed to create config directory", err) } - // Executing the object layer tests for XL. - objTest(objLayer, XLTestStr, fsDirs, t) + // Executing the object layer tests for Erasure. + objTest(objLayer, ErasureTestStr, fsDirs, t) defer removeRoots(fsDirs) } @@ -1943,7 +1947,7 @@ func ExecObjectLayerDiskAlteredTest(t *testing.T, objTest objTestDiskNotFoundTyp type objTestStaleFilesType func(obj ObjectLayer, instanceType string, dirs []string, t *testing.T) // ExecObjectLayerStaleFilesTest - executes object layer tests those leaves stale -// files/directories under .minio/tmp. Creates XL ObjectLayer instance and runs test for XL layer. +// files/directories under .minio/tmp. Creates Erasure ObjectLayer instance and runs test for Erasure layer. func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1951,18 +1955,18 @@ func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) nDisks := 16 erasureDisks, err := getRandomDisks(nDisks) if err != nil { - t.Fatalf("Initialization of disks for XL setup: %s", err) + t.Fatalf("Initialization of disks for Erasure setup: %s", err) } objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(erasureDisks...)) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil { t.Fatal("Failed to create config directory", err) } - // Executing the object layer tests for XL. - objTest(objLayer, XLTestStr, erasureDisks, t) + // Executing the object layer tests for Erasure. + objTest(objLayer, ErasureTestStr, erasureDisks, t) defer removeRoots(erasureDisks) } @@ -2088,7 +2092,7 @@ func registerAPIFunctions(muxRouter *mux.Router, objLayer ObjectLayer, apiFuncti registerBucketLevelFunc(bucketRouter, api, apiFunctions...) } -// Takes in XL object layer, and the list of API end points to be tested/required, registers the API end points and returns the HTTP handler. +// Takes in Erasure object layer, and the list of API end points to be tested/required, registers the API end points and returns the HTTP handler. // Need isolated registration of API end points while writing unit tests for end points. // All the API end points are registered only for the default case. 
func initTestAPIEndPoints(objLayer ObjectLayer, apiFunctions []string) http.Handler { diff --git a/cmd/tree-walk_test.go b/cmd/tree-walk_test.go index 0ea5e02d1..3dd5be28f 100644 --- a/cmd/tree-walk_test.go +++ b/cmd/tree-walk_test.go @@ -27,24 +27,6 @@ import ( "time" ) -// Returns function "listDir" of the type listDirFunc. -// disks - used for doing disk.ListDir() -func listDirFactory(ctx context.Context, disk StorageAPI) ListDirFunc { - // Returns sorted merged entries from all the disks. - listDir := func(volume, dirPath, dirEntry string) (bool, []string) { - entries, err := disk.ListDir(volume, dirPath, -1, xlMetaJSONFile) - if err != nil { - return false, nil - } - if len(entries) == 0 { - return true, nil - } - sort.Strings(entries) - return false, filterMatchingPrefix(entries, dirEntry) - } - return listDir -} - // Fixed volume name that could be used across tests const volume = "testvolume" @@ -101,6 +83,22 @@ func createNamespace(disk StorageAPI, volume string, files []string) error { return err } +// Returns function "listDir" of the type listDirFunc. +// disks - used for doing disk.ListDir() +func listDirFactory(ctx context.Context, disk StorageAPI) ListDirFunc { + return func(volume, dirPath, dirEntry string) (emptyDir bool, entries []string) { + entries, err := disk.ListDir(volume, dirPath, -1) + if err != nil { + return false, nil + } + if len(entries) == 0 { + return true, nil + } + sort.Strings(entries) + return false, filterMatchingPrefix(entries, dirEntry) + } +} + // Test if tree walker returns entries matching prefix alone are received // when a non empty prefix is supplied. func testTreeWalkPrefix(t *testing.T, listDir ListDirFunc) { @@ -237,66 +235,6 @@ func TestTreeWalkTimeout(t *testing.T) { } } -// Test ListDir - listDir is expected to only list one disk. -func TestListDir(t *testing.T) { - file1 := "file1" - file2 := "file2" - // Create two backend directories fsDir1 and fsDir2. - fsDir1, err := ioutil.TempDir(globalTestTmpDir, "minio-") - if err != nil { - t.Errorf("Unable to create tmp directory: %s", err) - } - fsDir2, err := ioutil.TempDir(globalTestTmpDir, "minio-") - if err != nil { - t.Errorf("Unable to create tmp directory: %s", err) - } - - // Create two StorageAPIs disk1 and disk2. - endpoints := mustGetNewEndpoints(fsDir1) - disk1, err := newStorageAPI(endpoints[0]) - if err != nil { - t.Errorf("Unable to create StorageAPI: %s", err) - } - - endpoints = mustGetNewEndpoints(fsDir2) - disk2, err := newStorageAPI(endpoints[0]) - if err != nil { - t.Errorf("Unable to create StorageAPI: %s", err) - } - - // create listDir function. - listDir1 := listDirFactory(context.Background(), disk1) - listDir2 := listDirFactory(context.Background(), disk2) - - // Create file1 in fsDir1 and file2 in fsDir2. - disks := []StorageAPI{disk1, disk2} - for i, disk := range disks { - err = createNamespace(disk, volume, []string{fmt.Sprintf("file%d", i+1)}) - if err != nil { - t.Fatal(err) - } - } - - // Should list "file1" from fsDir1. - _, entries := listDir1(volume, "", "") - if len(entries) != 1 { - t.Fatal("Expected the number of entries to be 1") - } - - if entries[0] != file1 { - t.Fatal("Expected the entry to be file1") - } - - _, entries = listDir2(volume, "", "") - if len(entries) != 1 { - t.Fatal("Expected the number of entries to be 1") - } - - if entries[0] != file2 { - t.Fatal("Expected the entry to be file2") - } -} - // TestRecursiveWalk - tests if treeWalk returns entries correctly with and // without recursively traversing prefixes. 
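One behavioral detail worth noting in the relocated listDirFactory above: ListDir has lost its leaf-file argument, and the closure now reports (emptyDir, entries), sorting the raw entries before narrowing them with filterMatchingPrefix. A stand-alone equivalent of that prefix filter, assuming it simply keeps the names that begin with the requested prefix (the real helper may exploit the sorted order instead):

package main

import (
	"fmt"
	"strings"
)

// filterPrefix keeps the entries that begin with prefixEntry, the
// behavior filterMatchingPrefix is used for above (assumed; the real
// implementation may binary-search the sorted slice).
func filterPrefix(entries []string, prefixEntry string) []string {
	var matched []string
	for _, e := range entries {
		if strings.HasPrefix(e, prefixEntry) {
			matched = append(matched, e)
		}
	}
	return matched
}

func main() {
	fmt.Println(filterPrefix([]string{"obj1", "obj2", "photos/"}, "obj"))
	// Output: [obj1 obj2]
}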
func TestRecursiveTreeWalk(t *testing.T) { diff --git a/cmd/update_test.go b/cmd/update_test.go index 5a4b73afd..d1bd75e2d 100644 --- a/cmd/update_test.go +++ b/cmd/update_test.go @@ -136,14 +136,14 @@ func TestUserAgent(t *testing.T) { { envName: "MESOS_CONTAINER_NAME", envValue: "mesos-11111", - mode: globalMinioModeXL, - expectedStr: fmt.Sprintf("MinIO (%s; %s; %s; %s; source) MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET MinIO/universe-%s", runtime.GOOS, runtime.GOARCH, globalMinioModeXL, "dcos", "mesos-1111"), + mode: globalMinioModeErasure, + expectedStr: fmt.Sprintf("MinIO (%s; %s; %s; %s; source) MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET MinIO/universe-%s", runtime.GOOS, runtime.GOARCH, globalMinioModeErasure, "dcos", "mesos-1111"), }, { envName: "KUBERNETES_SERVICE_HOST", envValue: "10.11.148.5", - mode: globalMinioModeXL, - expectedStr: fmt.Sprintf("MinIO (%s; %s; %s; %s; source) MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET", runtime.GOOS, runtime.GOARCH, globalMinioModeXL, "kubernetes"), + mode: globalMinioModeErasure, + expectedStr: fmt.Sprintf("MinIO (%s; %s; %s; %s; source) MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET", runtime.GOOS, runtime.GOARCH, globalMinioModeErasure, "kubernetes"), }, } diff --git a/cmd/utils.go b/cmd/utils.go index 1b28ba915..0a32e4b88 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -614,10 +614,10 @@ func lcp(l []string) string { // Returns the mode in which MinIO is running func getMinioMode() string { mode := globalMinioModeFS - if globalIsDistXL { - mode = globalMinioModeDistXL - } else if globalIsXL { - mode = globalMinioModeXL + if globalIsDistErasure { + mode = globalMinioModeDistErasure + } else if globalIsErasure { + mode = globalMinioModeErasure } else if globalIsGateway { mode = globalMinioModeGatewayPrefix + globalGatewayName } diff --git a/cmd/utils_test.go b/cmd/utils_test.go index 39ed623d7..087f9842c 100644 --- a/cmd/utils_test.go +++ b/cmd/utils_test.go @@ -366,7 +366,7 @@ func TestJSONSave(t *testing.T) { t.Fatal(err) } if fi1.Size() != fi2.Size() { - t.Fatal("Size should not differ after jsonSave()", fi1.Size(), fi2.Size(), f.Name()) + t.Fatal("Size should not differ after jsonSave()", fi1.Size(), fi2.Size(), f.Name()) } } @@ -473,14 +473,14 @@ func TestGetMinioMode(t *testing.T) { t.Fatalf("Expected %s got %s", expected, mode) } } - globalIsDistXL = true - testMinioMode(globalMinioModeDistXL) + globalIsDistErasure = true + testMinioMode(globalMinioModeDistErasure) - globalIsDistXL = false - globalIsXL = true - testMinioMode(globalMinioModeXL) + globalIsDistErasure = false + globalIsErasure = true + testMinioMode(globalMinioModeErasure) - globalIsDistXL, globalIsXL = false, false + globalIsDistErasure, globalIsErasure = false, false testMinioMode(globalMinioModeFS) globalIsGateway, globalGatewayName = true, "azure" diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index 8f6f0c968..95528a0fa 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -169,11 +169,16 @@ func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, rep return toJSONError(ctx, errInvalidBucketName) } + opts := BucketOptions{ + Location: globalServerRegion, + LockEnabled: false, + } + if globalDNSConfig != nil { if _, err := globalDNSConfig.Get(args.BucketName); err != nil { if err == dns.ErrNoEntriesFound { // Proceed to creating a bucket.
- if err = objectAPI.MakeBucketWithLocation(ctx, args.BucketName, globalServerRegion, false); err != nil { + if err = objectAPI.MakeBucketWithLocation(ctx, args.BucketName, opts); err != nil { return toJSONError(ctx, err) } if err = globalDNSConfig.Put(args.BucketName); err != nil { @@ -189,7 +194,7 @@ func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, rep return toJSONError(ctx, errBucketAlreadyExists) } - if err := objectAPI.MakeBucketWithLocation(ctx, args.BucketName, globalServerRegion, false); err != nil { + if err := objectAPI.MakeBucketWithLocation(ctx, args.BucketName, opts); err != nil { return toJSONError(ctx, err, args.BucketName) } @@ -259,16 +264,15 @@ func (web *webAPIHandlers) DeleteBucket(r *http.Request, args *RemoveBucketArgs, return toJSONError(ctx, err, args.BucketName) } + globalNotificationSys.DeleteBucketMetadata(ctx, args.BucketName) + if globalDNSConfig != nil { if err := globalDNSConfig.Delete(args.BucketName); err != nil { - // Deleting DNS entry failed, attempt to create the bucket again. - objectAPI.MakeBucketWithLocation(ctx, args.BucketName, "", false) + logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually using etcdctl", err)) return toJSONError(ctx, err) } } - globalNotificationSys.DeleteBucketMetadata(ctx, args.BucketName) - return nil } @@ -583,11 +587,6 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs, return toJSONError(ctx, errServerNotInitialized) } - getObjectInfo := objectAPI.GetObjectInfo - if web.CacheAPI() != nil { - getObjectInfo = web.CacheAPI().GetObjectInfo - } - deleteObjects := objectAPI.DeleteObjects if web.CacheAPI() != nil { deleteObjects = web.CacheAPI().DeleteObjects @@ -656,13 +655,14 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs, return nil } + versioned := globalBucketVersioningSys.Enabled(args.BucketName) + var err error next: for _, objectName := range args.Objects { // If not a directory, remove the object. if !HasSuffix(objectName, SlashSeparator) && objectName != "" { // Check permissions for non-anonymous user. 
- govBypassPerms := false if authErr != errNoAuthToken { if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, @@ -675,17 +675,6 @@ next: }) { return toJSONError(ctx, errAccessDenied) } - if globalIAMSys.IsAllowed(iampolicy.Args{ - AccountName: claims.AccessKey, - Action: iampolicy.BypassGovernanceRetentionAction, - BucketName: args.BucketName, - ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), - IsOwner: owner, - ObjectName: objectName, - Claims: claims.Map(), - }) { - govBypassPerms = true - } } if authErr == errNoAuthToken { @@ -699,32 +688,10 @@ next: }) { return toJSONError(ctx, errAccessDenied) } - - // Check if object is allowed to be deleted anonymously - if globalPolicySys.IsAllowed(policy.Args{ - Action: policy.BypassGovernanceRetentionAction, - BucketName: args.BucketName, - ConditionValues: getConditionValues(r, "", "", nil), - IsOwner: false, - ObjectName: objectName, - }) { - govBypassPerms = true - } } - apiErr := enforceRetentionBypassForDeleteWeb(ctx, r, args.BucketName, objectName, getObjectInfo, govBypassPerms) - if apiErr == ErrObjectLocked { - return toJSONError(ctx, errLockedObject) - } - if apiErr != ErrNone && apiErr != ErrNoSuchKey { - return toJSONError(ctx, errAccessDenied) - } - if apiErr == ErrNone { - if err = deleteObject(ctx, objectAPI, web.CacheAPI(), args.BucketName, objectName, r); err != nil { - break next - } - } - continue + _, err = deleteObject(ctx, objectAPI, web.CacheAPI(), args.BucketName, objectName, r, ObjectOptions{}) + logger.LogIf(ctx, err) } if authErr == errNoAuthToken { @@ -761,13 +728,16 @@ next: } for { - var objects []string + var objects []ObjectToDelete for obj := range objInfoCh { if len(objects) == maxDeleteList { // Reached maximum delete requests, attempt a delete for now. break } - objects = append(objects, obj.Name) + objects = append(objects, ObjectToDelete{ + ObjectName: obj.Name, + VersionID: obj.VersionID, + }) } // Nothing to do. @@ -776,10 +746,12 @@ next: } // Deletes a list of objects. - _, err = deleteObjects(ctx, args.BucketName, objects) - if err != nil { - logger.LogIf(ctx, err) - break next + _, errs := deleteObjects(ctx, args.BucketName, objects, ObjectOptions{Versioned: versioned}) + for _, err := range errs { + if err != nil { + logger.LogIf(ctx, err) + break next + } } } } @@ -1084,6 +1056,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) { // get gateway encryption options var opts ObjectOptions opts, err = putOpts(ctx, r, bucket, object, metadata) + if err != nil { writeErrorResponseHeadersOnly(w, toAPIError(ctx, err)) return diff --git a/cmd/web-handlers_test.go b/cmd/web-handlers_test.go index 676410195..e1e10e250 100644 --- a/cmd/web-handlers_test.go +++ b/cmd/web-handlers_test.go @@ -136,7 +136,7 @@ func TestWebHandlerLogin(t *testing.T) { // testLoginWebHandler - Test login web handler func testLoginWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -176,7 +176,7 @@ func TestWebHandlerStorageInfo(t *testing.T) { // testStorageInfoWebHandler - Test StorageInfo web handler func testStorageInfoWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { // get random bucket name. - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. 
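The RemoveObject rewrite above replaces the per-object governance checks with a simpler loop: object names drain from a channel into batches of at most maxDeleteList entries, each batch goes out as a single bulk DeleteObjects call, and the bucket's versioning state rides along in ObjectOptions. A self-contained sketch of that drain-and-batch loop, with generic names in place of the real types:

package main

import "fmt"

// deleteInBatches drains names from a channel in groups of at most
// batchSize and issues one bulk delete per group, mirroring the
// maxDeleteList loop above (simplified: no versioning information).
func deleteInBatches(names <-chan string, batchSize int, bulkDelete func([]string) []error) error {
	for {
		batch := make([]string, 0, batchSize)
		for name := range names {
			batch = append(batch, name)
			if len(batch) == batchSize {
				break // batch full, flush it before reading more
			}
		}
		if len(batch) == 0 {
			return nil // channel closed and fully drained
		}
		for _, err := range bulkDelete(batch) {
			if err != nil {
				return err // surface the first per-object failure
			}
		}
	}
}

func main() {
	ch := make(chan string, 5)
	for i := 0; i < 5; i++ {
		ch <- fmt.Sprintf("obj-%d", i)
	}
	close(ch)
	deleteInBatches(ch, 2, func(batch []string) []error {
		fmt.Println("bulk delete:", batch)
		return make([]error, len(batch)) // all nil: every delete succeeded
	})
}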
apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -209,7 +209,7 @@ func TestWebHandlerServerInfo(t *testing.T) { // testServerInfoWebHandler - Test ServerInfo web handler func testServerInfoWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -251,7 +251,7 @@ func TestWebHandlerMakeBucket(t *testing.T) { // testMakeBucketWebHandler - Test MakeBucket web handler func testMakeBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -315,7 +315,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH bucketName := getRandomBucketName() var opts ObjectOptions - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { t.Fatalf("failed to create bucket: %s (%s)", err.Error(), instanceType) } @@ -381,7 +381,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH // If we created the bucket with an object, now delete the object to cleanup. if test.initWithObject { - err = obj.DeleteObject(context.Background(), test.bucketName, "object") + _, err = obj.DeleteObject(context.Background(), test.bucketName, "object", ObjectOptions{}) if err != nil { t.Fatalf("could not delete object, %s", err.Error()) } @@ -393,7 +393,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH continue } - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create new bucket, abort. t.Fatalf("failed to create new bucket (%s): %s", instanceType, err.Error()) @@ -408,7 +408,7 @@ func TestWebHandlerListBuckets(t *testing.T) { // testListBucketsHandler - Test ListBuckets web handler func testListBucketsWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -421,7 +421,7 @@ func testListBucketsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa bucketName := getRandomBucketName() // Create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -456,7 +456,7 @@ func TestWebHandlerListObjects(t *testing.T) { // testListObjectsHandler - Test ListObjects web handler func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -472,7 +472,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa objectSize := 1 * humanize.KiByte // Create bucket. 
- err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -537,7 +537,7 @@ func TestWebHandlerRemoveObject(t *testing.T) { // testRemoveObjectWebHandler - Test RemoveObjectObject web handler func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -552,7 +552,7 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH objectSize := 1 * humanize.KiByte // Create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -628,7 +628,7 @@ func TestWebHandlerGenerateAuth(t *testing.T) { // testGenerateAuthWebHandler - Test GenerateAuth web handler func testGenerateAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -727,7 +727,7 @@ func TestWebHandlerUpload(t *testing.T) { // testUploadWebHandler - Test Upload web handler func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -766,7 +766,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler return rec.Code } // Create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -809,7 +809,7 @@ func TestWebHandlerDownload(t *testing.T) { // testDownloadWebHandler - Test Download web handler func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -841,7 +841,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl } // Create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -919,7 +919,7 @@ func testWebHandlerDownloadZip(obj ObjectLayer, instanceType string, t TestErrHa fileThree := "cccccccccccccc" // Create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // failed to create newbucket, abort. 
t.Fatalf("%s : %s", instanceType, err) @@ -990,7 +990,7 @@ func TestWebHandlerPresignedGetHandler(t *testing.T) { } func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -1006,7 +1006,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH objectSize := 1 * humanize.KiByte // Create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -1039,7 +1039,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH t.Fatalf("Failed, %v", err) } - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter = initTestAPIEndPoints(obj, []string{"GetObject"}) // Initialize a new api recorder. @@ -1062,7 +1062,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH t.Fatal("Read data is not equal was what was expected") } - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter = initTestWebRPCEndPoint(obj) presignGetReq = PresignedGetArgs{ @@ -1093,15 +1093,15 @@ func TestWebCheckAuthorization(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // Prepare XL backend - obj, fsDirs, err := prepareXL16(ctx) + // Prepare Erasure backend + obj, fsDirs, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } - // Executing the object layer tests for XL. + // Executing the object layer tests for Erasure. defer removeRoots(fsDirs) - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. @@ -1185,12 +1185,12 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // Prepare XL backend - obj, fsDirs, err := prepareXL16(ctx) + // Prepare Erasure backend + obj, fsDirs, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } - // Executing the object layer tests for XL. + // Executing the object layer tests for Erasure. defer removeRoots(fsDirs) // initialize the server and obtain the credentials and root. 
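A mechanical change repeated across all of these tests: MakeBucketWithLocation no longer takes a positional (location string, lockEnabled bool) pair but a single BucketOptions value, so the zero value BucketOptions{} expresses defaults at every call site. A fragment showing the before and after, with the field names used in web-handlers.go earlier in this diff:

// Before: obj.MakeBucketWithLocation(ctx, bucketName, "", false)
// After, with defaults:
err := obj.MakeBucketWithLocation(ctx, bucketName, BucketOptions{})
// After, with explicit options:
err = obj.MakeBucketWithLocation(ctx, bucketName, BucketOptions{
	Location:    globalServerRegion, // bucket region; "" means server default
	LockEnabled: false,              // create with object locking enabled when true
})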
@@ -1201,23 +1201,23 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) { } bucketName := "mybucket" - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { t.Fatal("Cannot make bucket:", err) } - // Set faulty disks to XL backend - z := obj.(*xlZones) + // Set faulty disks to Erasure backend + z := obj.(*erasureZones) xl := z.zones[0].sets[0] - xlDisks := xl.getDisks() - z.zones[0].xlDisksMu.Lock() + erasureDisks := xl.getDisks() + z.zones[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { - for i, d := range xlDisks { - xlDisks[i] = newNaughtyDisk(d, nil, errFaultyDisk) + for i, d := range erasureDisks { + erasureDisks[i] = newNaughtyDisk(d, nil, errFaultyDisk) } - return xlDisks + return erasureDisks } - z.zones[0].xlDisksMu.Unlock() + z.zones[0].erasureDisksMu.Unlock() // Initialize web rpc endpoint. apiRouter := initTestWebRPCEndPoint(obj) diff --git a/cmd/xl-sets_test.go b/cmd/xl-sets_test.go deleted file mode 100644 index 91d2f9522..000000000 --- a/cmd/xl-sets_test.go +++ /dev/null @@ -1,153 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "context" - "os" - "path/filepath" - "testing" -) - -// TestCrcHashMod - test crc hash. -func TestCrcHashMod(t *testing.T) { - testCases := []struct { - objectName string - crcHash int - }{ - // cases which should pass the test. - // passing in valid object name. - {"object", 12}, - {"The Shining Script .pdf", 14}, - {"Cost Benefit Analysis (2009-2010).pptx", 13}, - {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", 1}, - {"SHØRT", 9}, - {"There are far too many object names, and far too few bucket names!", 13}, - {"a/b/c/", 1}, - {"/a/b/c", 4}, - {string([]byte{0xff, 0xfe, 0xfd}), 13}, - } - - // Tests hashing order to be consistent. - for i, testCase := range testCases { - if crcHashElement := hashKey("CRCMOD", testCase.objectName, 16); crcHashElement != testCase.crcHash { - t.Errorf("Test case %d: Expected \"%v\" but failed \"%v\"", i+1, testCase.crcHash, crcHashElement) - } - } - - if crcHashElement := hashKey("CRCMOD", "This will fail", -1); crcHashElement != -1 { - t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) - } - - if crcHashElement := hashKey("CRCMOD", "This will fail", 0); crcHashElement != -1 { - t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) - } - - if crcHashElement := hashKey("UNKNOWN", "This will fail", 0); crcHashElement != -1 { - t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) - } -} - -// TestNewXL - tests initialization of all input disks -// and constructs a valid `XL` object -func TestNewXLSets(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var nDisks = 16 // Maximum disks. 
- var erasureDisks []string - for i := 0; i < nDisks; i++ { - // Do not attempt to create this path, the test validates - // so that newXLSets initializes non existing paths - // and successfully returns initialized object layer. - disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - erasureDisks = append(erasureDisks, disk) - defer os.RemoveAll(disk) - } - - endpoints := mustGetNewEndpoints(erasureDisks...) - _, _, err := waitForFormatXL(true, endpoints, 1, 0, 16, "") - if err != errInvalidArgument { - t.Fatalf("Expecting error, got %s", err) - } - - _, _, err = waitForFormatXL(true, nil, 1, 1, 16, "") - if err != errInvalidArgument { - t.Fatalf("Expecting error, got %s", err) - } - - // Initializes all erasure disks - storageDisks, format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "") - if err != nil { - t.Fatalf("Unable to format disks for erasure, %s", err) - } - - if _, err := newXLSets(ctx, endpoints, storageDisks, format); err != nil { - t.Fatalf("Unable to initialize erasure") - } -} - -// TestHashedLayer - tests the hashed layer which will be returned -// consistently for a given object name. -func TestHashedLayer(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var objs []*xlObjects - - for i := 0; i < 16; i++ { - obj, fsDirs, err := prepareXL16(ctx) - if err != nil { - t.Fatal("Unable to initialize 'XL' object layer.", err) - } - - // Remove all dirs. - for _, dir := range fsDirs { - defer os.RemoveAll(dir) - } - - z := obj.(*xlZones) - objs = append(objs, z.zones[0].sets[0]) - } - - sets := &xlSets{sets: objs, distributionAlgo: "CRCMOD"} - - testCases := []struct { - objectName string - expectedObj *xlObjects - }{ - // cases which should pass the test. - // passing in valid object name. - {"object", objs[12]}, - {"The Shining Script .pdf", objs[14]}, - {"Cost Benefit Analysis (2009-2010).pptx", objs[13]}, - {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", objs[1]}, - {"SHØRT", objs[9]}, - {"There are far too many object names, and far too few bucket names!", objs[13]}, - {"a/b/c/", objs[1]}, - {"/a/b/c", objs[4]}, - {string([]byte{0xff, 0xfe, 0xfd}), objs[13]}, - } - - // Tests hashing order to be consistent. - for i, testCase := range testCases { - gotObj := sets.getHashedSet(testCase.objectName) - if gotObj != testCase.expectedObj { - t.Errorf("Test case %d: Expected \"%#v\" but failed \"%#v\"", i+1, testCase.expectedObj, gotObj) - } - } -} diff --git a/cmd/xl-storage-disk-id-check.go b/cmd/xl-storage-disk-id-check.go new file mode 100644 index 000000000..b3a2894e8 --- /dev/null +++ b/cmd/xl-storage-disk-id-check.go @@ -0,0 +1,266 @@ +/* + * MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + "io" +) + +// Detects change in underlying disk. 
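The new file introduced here wraps every xlStorage call behind a disk-ID check: each method of the wrapper declared next first asks whether the disk underneath still carries the ID it was formatted with, and fails fast with errDiskNotFound when it does not (an empty ID is tolerated so a booting server can still read or create format.json). A self-contained illustration of that guard, with simplified stand-in types:

package main

import (
	"errors"
	"fmt"
)

var errDiskGone = errors.New("disk not found") // stands in for errDiskNotFound

// disk is a stand-in for the real StorageAPI backend.
type disk interface{ GetDiskID() (string, error) }

// idCheckedDisk shows the guard applied before every delegated call:
// if the persisted format ID no longer matches, the drive was swapped
// or reformatted and the call must not proceed.
type idCheckedDisk struct {
	backend disk
	wantID  string // "" while the server boots and format.json is still being read
}

func (d idCheckedDisk) stale() bool {
	if d.wantID == "" {
		return false // allow boot-time reads of format.json
	}
	got, err := d.backend.GetDiskID()
	return err != nil || got != d.wantID
}

type fixedDisk string

func (f fixedDisk) GetDiskID() (string, error) { return string(f), nil }

func main() {
	d := idCheckedDisk{backend: fixedDisk("disk-b"), wantID: "disk-a"}
	if d.stale() {
		fmt.Println(errDiskGone) // every wrapped method would return this
	}
}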
+type xlStorageDiskIDCheck struct { + storage *xlStorage + diskID string +} + +func (p *xlStorageDiskIDCheck) String() string { + return p.storage.String() +} + +func (p *xlStorageDiskIDCheck) IsOnline() bool { + storedDiskID, err := p.storage.GetDiskID() + if err != nil { + return false + } + return storedDiskID == p.diskID +} + +func (p *xlStorageDiskIDCheck) IsLocal() bool { + return p.storage.IsLocal() +} + +func (p *xlStorageDiskIDCheck) Hostname() string { + return p.storage.Hostname() +} + +func (p *xlStorageDiskIDCheck) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) (dataUsageCache, error) { + if p.isDiskStale() { + return dataUsageCache{}, errDiskNotFound + } + return p.storage.CrawlAndGetDataUsage(ctx, cache) +} + +func (p *xlStorageDiskIDCheck) Close() error { + return p.storage.Close() +} + +func (p *xlStorageDiskIDCheck) GetDiskID() (string, error) { + return p.storage.GetDiskID() +} + +func (p *xlStorageDiskIDCheck) SetDiskID(id string) { + p.diskID = id +} + +func (p *xlStorageDiskIDCheck) isDiskStale() bool { + if p.diskID == "" { + // For empty disk-id we allow the call as the server might be coming up and trying to read format.json + // or create format.json + return false + } + storedDiskID, err := p.storage.GetDiskID() + if err == nil && p.diskID == storedDiskID { + return false + } + return true +} + +func (p *xlStorageDiskIDCheck) DiskInfo() (info DiskInfo, err error) { + if p.isDiskStale() { + return info, errDiskNotFound + } + return p.storage.DiskInfo() +} + +func (p *xlStorageDiskIDCheck) MakeVolBulk(volumes ...string) (err error) { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.MakeVolBulk(volumes...) +} + +func (p *xlStorageDiskIDCheck) MakeVol(volume string) (err error) { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.MakeVol(volume) +} + +func (p *xlStorageDiskIDCheck) ListVols() ([]VolInfo, error) { + if p.isDiskStale() { + return nil, errDiskNotFound + } + return p.storage.ListVols() +} + +func (p *xlStorageDiskIDCheck) StatVol(volume string) (vol VolInfo, err error) { + if p.isDiskStale() { + return vol, errDiskNotFound + } + return p.storage.StatVol(volume) +} + +func (p *xlStorageDiskIDCheck) DeleteVol(volume string, forceDelete bool) (err error) { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.DeleteVol(volume, forceDelete) +} + +func (p *xlStorageDiskIDCheck) WalkVersions(volume, dirPath string, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfoVersions, error) { + if p.isDiskStale() { + return nil, errDiskNotFound + } + return p.storage.WalkVersions(volume, dirPath, marker, recursive, endWalkCh) +} + +func (p *xlStorageDiskIDCheck) Walk(volume, dirPath string, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfo, error) { + if p.isDiskStale() { + return nil, errDiskNotFound + } + return p.storage.Walk(volume, dirPath, marker, recursive, endWalkCh) +} + +func (p *xlStorageDiskIDCheck) WalkSplunk(volume, dirPath string, marker string, endWalkCh <-chan struct{}) (chan FileInfo, error) { + if p.isDiskStale() { + return nil, errDiskNotFound + } + return p.storage.WalkSplunk(volume, dirPath, marker, endWalkCh) +} + +func (p *xlStorageDiskIDCheck) ListDir(volume, dirPath string, count int) ([]string, error) { + if p.isDiskStale() { + return nil, errDiskNotFound + } + return p.storage.ListDir(volume, dirPath, count) +} + +func (p *xlStorageDiskIDCheck) ReadFile(volume string, path string, offset int64, buf []byte, verifier 
*BitrotVerifier) (n int64, err error) { + if p.isDiskStale() { + return 0, errDiskNotFound + } + return p.storage.ReadFile(volume, path, offset, buf, verifier) +} + +func (p *xlStorageDiskIDCheck) AppendFile(volume string, path string, buf []byte) (err error) { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.AppendFile(volume, path, buf) +} + +func (p *xlStorageDiskIDCheck) CreateFile(volume, path string, size int64, reader io.Reader) error { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.CreateFile(volume, path, size, reader) +} + +func (p *xlStorageDiskIDCheck) ReadFileStream(volume, path string, offset, length int64) (io.ReadCloser, error) { + if p.isDiskStale() { + return nil, errDiskNotFound + } + return p.storage.ReadFileStream(volume, path, offset, length) +} + +func (p *xlStorageDiskIDCheck) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.RenameFile(srcVolume, srcPath, dstVolume, dstPath) +} + +func (p *xlStorageDiskIDCheck) RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath string) error { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath) +} + +func (p *xlStorageDiskIDCheck) CheckParts(volume string, path string, fi FileInfo) (err error) { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.CheckParts(volume, path, fi) +} + +func (p *xlStorageDiskIDCheck) CheckFile(volume string, path string) (err error) { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.CheckFile(volume, path) +} + +func (p *xlStorageDiskIDCheck) DeleteFile(volume string, path string) (err error) { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.DeleteFile(volume, path) +} + +func (p *xlStorageDiskIDCheck) DeleteVersions(volume string, versions []FileInfo) (errs []error) { + if p.isDiskStale() { + errs = make([]error, len(versions)) + for i := range errs { + errs[i] = errDiskNotFound + } + return errs + } + return p.storage.DeleteVersions(volume, versions) +} + +func (p *xlStorageDiskIDCheck) VerifyFile(volume, path string, fi FileInfo) error { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.VerifyFile(volume, path, fi) +} + +func (p *xlStorageDiskIDCheck) WriteAll(volume string, path string, reader io.Reader) (err error) { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.WriteAll(volume, path, reader) +} + +func (p *xlStorageDiskIDCheck) DeleteVersion(volume, path string, fi FileInfo) (err error) { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.DeleteVersion(volume, path, fi) +} + +func (p *xlStorageDiskIDCheck) WriteMetadata(volume, path string, fi FileInfo) (err error) { + if p.isDiskStale() { + return errDiskNotFound + } + return p.storage.WriteMetadata(volume, path, fi) +} + +func (p *xlStorageDiskIDCheck) ReadVersion(volume, path, versionID string) (fi FileInfo, err error) { + if p.isDiskStale() { + return fi, errDiskNotFound + } + return p.storage.ReadVersion(volume, path, versionID) +} + +func (p *xlStorageDiskIDCheck) ReadAll(volume string, path string) (buf []byte, err error) { + if p.isDiskStale() { + return nil, errDiskNotFound + } + return p.storage.ReadAll(volume, path) +} diff --git a/cmd/posix-errors.go b/cmd/xl-storage-errors.go similarity index 98% rename from cmd/posix-errors.go rename to cmd/xl-storage-errors.go index 652333260..5819c241d 
100644 --- a/cmd/posix-errors.go +++ b/cmd/xl-storage-errors.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/posix-errors_test.go b/cmd/xl-storage-errors_test.go similarity index 96% rename from cmd/posix-errors_test.go rename to cmd/xl-storage-errors_test.go index da4624be3..0e0db6cd3 100644 --- a/cmd/posix-errors_test.go +++ b/cmd/xl-storage-errors_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/xl-storage-format-utils.go b/cmd/xl-storage-format-utils.go new file mode 100644 index 000000000..90a848bc2 --- /dev/null +++ b/cmd/xl-storage-format-utils.go @@ -0,0 +1,81 @@ +/* + * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + jsoniter "github.com/json-iterator/go" +) + +func getFileInfoVersions(xlMetaBuf []byte, volume, path string) (FileInfoVersions, error) { + if isXL2V1Format(xlMetaBuf) { + var xlMeta xlMetaV2 + if err := xlMeta.Load(xlMetaBuf); err != nil { + return FileInfoVersions{}, err + } + versions, deletedVersions, latestModTime, err := xlMeta.ListVersions(volume, path) + if err != nil { + return FileInfoVersions{}, err + } + return FileInfoVersions{ + Volume: volume, + Name: path, + Versions: versions, + Deleted: deletedVersions, + LatestModTime: latestModTime, + }, nil + } + + xlMeta := &xlMetaV1Object{} + var json = jsoniter.ConfigCompatibleWithStandardLibrary + if err := json.Unmarshal(xlMetaBuf, xlMeta); err != nil { + return FileInfoVersions{}, errFileCorrupt + } + + fi, err := xlMeta.ToFileInfo(volume, path) + if err != nil { + return FileInfoVersions{}, err + } + + fi.IsLatest = true // No versions so current version is latest. 
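Both helpers in this new file branch the same way: probe the raw buffer with isXL2V1Format, decode the newer msgp-encoded xl.meta when it matches, and otherwise fall back to unmarshalling the legacy JSON xlMetaV1Object, which is the branch the return statement above sits in. A self-contained sniff-and-dispatch sketch; the "XL2 " magic below is an assumed stand-in, since the actual check isXL2V1Format performs is not shown in this diff:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// decodeMetaKind dispatches on the serialized metadata format the way
// getFileInfo/getFileInfoVersions do: newer binary blobs first, then
// legacy JSON.
func decodeMetaKind(buf []byte) (string, error) {
	if len(buf) > 4 && bytes.Equal(buf[:4], []byte("XL2 ")) { // assumed magic bytes
		return "xl.meta (v2, msgp)", nil
	}
	var legacy map[string]interface{}
	if err := json.Unmarshal(buf, &legacy); err != nil {
		return "", fmt.Errorf("corrupt metadata: %w", err)
	}
	return "xl.json (legacy, JSON)", nil
}

func main() {
	kind, _ := decodeMetaKind([]byte(`{"version":"1.0.1","format":"xl"}`))
	fmt.Println(kind) // xl.json (legacy, JSON)
}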
+ return FileInfoVersions{ + Volume: volume, + Name: path, + Versions: []FileInfo{fi}, + LatestModTime: fi.ModTime, + }, nil +} + +func getFileInfo(xlMetaBuf []byte, volume, path, versionID string) (FileInfo, error) { + if isXL2V1Format(xlMetaBuf) { + var xlMeta xlMetaV2 + if err := xlMeta.Load(xlMetaBuf); err != nil { + return FileInfo{}, err + } + return xlMeta.ToFileInfo(volume, path, versionID) + } + + xlMeta := &xlMetaV1Object{} + var json = jsoniter.ConfigCompatibleWithStandardLibrary + if err := json.Unmarshal(xlMetaBuf, xlMeta); err != nil { + return FileInfo{}, errFileCorrupt + } + fi, err := xlMeta.ToFileInfo(volume, path) + if err == errFileNotFound && versionID != "" { + return fi, errFileVersionNotFound + } + return fi, err +} diff --git a/cmd/xl-storage-format-v1.go b/cmd/xl-storage-format-v1.go new file mode 100644 index 000000000..af5ca731b --- /dev/null +++ b/cmd/xl-storage-format-v1.go @@ -0,0 +1,208 @@ +/* + * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "time" + + jsoniter "github.com/json-iterator/go" + "github.com/minio/minio/cmd/logger" +) + +// XL constants. +const ( + // XL metadata file carries per object metadata. + xlStorageFormatFileV1 = "xl.json" +) + +// Valid - tells us if the format is sane by validating +// format version and erasure coding information. +func (m *xlMetaV1Object) valid() bool { + return isXLMetaFormatValid(m.Version, m.Format) && + isXLMetaErasureInfoValid(m.Erasure.DataBlocks, m.Erasure.ParityBlocks) +} + +// Verifies if the backend format metadata is sane by validating +// the version string and format style. +func isXLMetaFormatValid(version, format string) bool { + return ((version == xlMetaVersion101 || + version == xlMetaVersion100) && + format == xlMetaFormat) +} + +// Verifies if the backend format metadata is sane by validating +// the ErasureInfo, i.e. data and parity blocks. +func isXLMetaErasureInfoValid(data, parity int) bool { + return ((data >= parity) && (data != 0) && (parity != 0)) +} + +//go:generate msgp -file=$GOFILE -unexported + +// A xlMetaV1Object represents `xl.meta` metadata header. +type xlMetaV1Object struct { + Version string `json:"version"` // Version of the current `xl.meta`. + Format string `json:"format"` // Format of the current `xl.meta`. + Stat StatInfo `json:"stat"` // Stat of the current object `xl.meta`. + // Erasure coded info for the current object `xl.meta`. + Erasure ErasureInfo `json:"erasure"` + // MinIO release tag for current object `xl.meta`. + Minio struct { + Release string `json:"release"` + } `json:"minio"` + // Metadata map for current object `xl.meta`. + Meta map[string]string `json:"meta,omitempty"` + // Captures all the individual object `xl.meta`. + Parts []ObjectPartInfo `json:"parts,omitempty"` + + // Dummy values used for legacy use cases. 
+ VersionID string `json:"versionId,omitempty"` + DataDir string `json:"dataDir,omitempty"` // always points to "legacy" +} + +// StatInfo - carries stat information of the object. +type StatInfo struct { + Size int64 `json:"size"` // Size of the object `xl.meta`. + ModTime time.Time `json:"modTime"` // ModTime of the object `xl.meta`. +} + +// ErasureInfo holds erasure coding and bitrot related information. +type ErasureInfo struct { + // Algorithm is the string representation of erasure-coding-algorithm + Algorithm string `json:"algorithm"` + // DataBlocks is the number of data blocks for erasure-coding + DataBlocks int `json:"data"` + // ParityBlocks is the number of parity blocks for erasure-coding + ParityBlocks int `json:"parity"` + // BlockSize is the size of one erasure-coded block + BlockSize int64 `json:"blockSize"` + // Index is the index of the current disk + Index int `json:"index"` + // Distribution is the distribution of the data and parity blocks + Distribution []int `json:"distribution"` + // Checksums holds all bitrot checksums of all erasure encoded blocks + Checksums []ChecksumInfo `json:"checksum,omitempty"` +} + +// BitrotAlgorithm specifies a algorithm used for bitrot protection. +type BitrotAlgorithm uint + +const ( + // SHA256 represents the SHA-256 hash function + SHA256 BitrotAlgorithm = 1 + iota + // HighwayHash256 represents the HighwayHash-256 hash function + HighwayHash256 + // HighwayHash256S represents the Streaming HighwayHash-256 hash function + HighwayHash256S + // BLAKE2b512 represents the BLAKE2b-512 hash function + BLAKE2b512 +) + +// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection. +const ( + DefaultBitrotAlgorithm = HighwayHash256S +) + +// ObjectPartInfo Info of each part kept in the multipart metadata +// file after CompleteMultipartUpload() is called. +type ObjectPartInfo struct { + ETag string `json:"etag,omitempty"` + Number int `json:"number"` + Size int64 `json:"size"` + ActualSize int64 `json:"actualSize"` +} + +// ChecksumInfo - carries checksums of individual scattered parts per disk. +type ChecksumInfo struct { + PartNumber int + Algorithm BitrotAlgorithm + Hash []byte +} + +type checksumInfoJSON struct { + Name string `json:"name"` + Algorithm string `json:"algorithm"` + Hash string `json:"hash,omitempty"` +} + +// MarshalJSON marshals the ChecksumInfo struct +func (c ChecksumInfo) MarshalJSON() ([]byte, error) { + info := checksumInfoJSON{ + Name: fmt.Sprintf("part.%d", c.PartNumber), + Algorithm: c.Algorithm.String(), + Hash: hex.EncodeToString(c.Hash), + } + return json.Marshal(info) +} + +// UnmarshalJSON - custom checksum info unmarshaller +func (c *ChecksumInfo) UnmarshalJSON(data []byte) error { + var info checksumInfoJSON + var json = jsoniter.ConfigCompatibleWithStandardLibrary + if err := json.Unmarshal(data, &info); err != nil { + return err + } + sum, err := hex.DecodeString(info.Hash) + if err != nil { + return err + } + c.Algorithm = BitrotAlgorithmFromString(info.Algorithm) + c.Hash = sum + if _, err = fmt.Sscanf(info.Name, "part.%d", &c.PartNumber); err != nil { + return err + } + + if !c.Algorithm.Available() { + logger.LogIf(GlobalContext, errBitrotHashAlgoInvalid) + return errBitrotHashAlgoInvalid + } + return nil +} + +// constant and shouldn't be changed. 
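The custom marshalers above serialize each checksum under a synthetic "part.N" name with a hex-encoded hash, and UnmarshalJSON recovers the part number via Sscanf on the way back in. A small round-trip sketch of that pair, written against the types in this file (the exact algorithm string produced by Algorithm.String() is not shown in this diff, so the commented payload is indicative only):

// Round-trip through the MarshalJSON/UnmarshalJSON pair defined above.
func checksumRoundTrip() (ChecksumInfo, error) {
	in := ChecksumInfo{
		PartNumber: 3,
		Algorithm:  HighwayHash256S, // the default bitrot algorithm
		Hash:       []byte{0xde, 0xad, 0xbe, 0xef},
	}
	data, err := json.Marshal(in)
	if err != nil {
		return ChecksumInfo{}, err
	}
	// data is roughly {"name":"part.3","algorithm":"...","hash":"deadbeef"}
	var out ChecksumInfo
	err = json.Unmarshal(data, &out) // restores PartNumber, Algorithm, Hash
	return out, err
}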
+const legacyDataDir = "legacy" + +func (m *xlMetaV1Object) ToFileInfo(volume, path string) (FileInfo, error) { + if !m.valid() { + return FileInfo{}, errFileCorrupt + } + return FileInfo{ + Volume: volume, + Name: path, + ModTime: m.Stat.ModTime, + Size: m.Stat.Size, + Metadata: m.Meta, + Parts: m.Parts, + Erasure: m.Erasure, + VersionID: m.VersionID, + DataDir: m.DataDir, + }, nil +} + +// XL metadata constants. +const ( + // XL meta version. + xlMetaVersion101 = "1.0.1" + + // XL meta version. + xlMetaVersion100 = "1.0.0" + + // XL meta format string. + xlMetaFormat = "xl" +) diff --git a/cmd/xl-storage-format-v1_gen.go b/cmd/xl-storage-format-v1_gen.go new file mode 100644 index 000000000..6c88e0daf --- /dev/null +++ b/cmd/xl-storage-format-v1_gen.go @@ -0,0 +1,1568 @@ +package cmd + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *BitrotAlgorithm) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 uint + zb0001, err = dc.ReadUint() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = BitrotAlgorithm(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z BitrotAlgorithm) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteUint(uint(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z BitrotAlgorithm) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendUint(o, uint(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BitrotAlgorithm) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 uint + zb0001, bts, err = msgp.ReadUintBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = BitrotAlgorithm(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z BitrotAlgorithm) Msgsize() (s int) { + s = msgp.UintSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *ChecksumInfo) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "PartNumber": + z.PartNumber, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "PartNumber") + return + } + case "Algorithm": + { + var zb0002 uint + zb0002, err = dc.ReadUint() + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + z.Algorithm = BitrotAlgorithm(zb0002) + } + case "Hash": + z.Hash, err = dc.ReadBytes(z.Hash) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *ChecksumInfo) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 3 + // write "PartNumber" + err = en.Append(0x83, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) + if err != nil { + return + } + err = en.WriteInt(z.PartNumber) + if err != nil { + err = msgp.WrapError(err, "PartNumber") + return + } + // write "Algorithm" + err = en.Append(0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + if err != nil { + return + } + err = 
en.WriteUint(uint(z.Algorithm)) + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + // write "Hash" + err = en.Append(0xa4, 0x48, 0x61, 0x73, 0x68) + if err != nil { + return + } + err = en.WriteBytes(z.Hash) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ChecksumInfo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "PartNumber" + o = append(o, 0x83, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) + o = msgp.AppendInt(o, z.PartNumber) + // string "Algorithm" + o = append(o, 0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + o = msgp.AppendUint(o, uint(z.Algorithm)) + // string "Hash" + o = append(o, 0xa4, 0x48, 0x61, 0x73, 0x68) + o = msgp.AppendBytes(o, z.Hash) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ChecksumInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "PartNumber": + z.PartNumber, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartNumber") + return + } + case "Algorithm": + { + var zb0002 uint + zb0002, bts, err = msgp.ReadUintBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + z.Algorithm = BitrotAlgorithm(zb0002) + } + case "Hash": + z.Hash, bts, err = msgp.ReadBytesBytes(bts, z.Hash) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ChecksumInfo) Msgsize() (s int) { + s = 1 + 11 + msgp.IntSize + 10 + msgp.UintSize + 5 + msgp.BytesPrefixSize + len(z.Hash) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *ErasureInfo) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Algorithm": + z.Algorithm, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + case "DataBlocks": + z.DataBlocks, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "DataBlocks") + return + } + case "ParityBlocks": + z.ParityBlocks, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "ParityBlocks") + return + } + case "BlockSize": + z.BlockSize, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "BlockSize") + return + } + case "Index": + z.Index, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "Index") + return + } + case "Distribution": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Distribution") + return + } + if cap(z.Distribution) >= int(zb0002) { + z.Distribution = (z.Distribution)[:zb0002] + } else { + z.Distribution = make([]int, zb0002) + } + for za0001 := 
range z.Distribution { + z.Distribution[za0001], err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "Distribution", za0001) + return + } + } + case "Checksums": + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Checksums") + return + } + if cap(z.Checksums) >= int(zb0003) { + z.Checksums = (z.Checksums)[:zb0003] + } else { + z.Checksums = make([]ChecksumInfo, zb0003) + } + for za0002 := range z.Checksums { + err = z.Checksums[za0002].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Checksums", za0002) + return + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *ErasureInfo) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 7 + // write "Algorithm" + err = en.Append(0x87, 0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + if err != nil { + return + } + err = en.WriteString(z.Algorithm) + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + // write "DataBlocks" + err = en.Append(0xaa, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73) + if err != nil { + return + } + err = en.WriteInt(z.DataBlocks) + if err != nil { + err = msgp.WrapError(err, "DataBlocks") + return + } + // write "ParityBlocks" + err = en.Append(0xac, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73) + if err != nil { + return + } + err = en.WriteInt(z.ParityBlocks) + if err != nil { + err = msgp.WrapError(err, "ParityBlocks") + return + } + // write "BlockSize" + err = en.Append(0xa9, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.BlockSize) + if err != nil { + err = msgp.WrapError(err, "BlockSize") + return + } + // write "Index" + err = en.Append(0xa5, 0x49, 0x6e, 0x64, 0x65, 0x78) + if err != nil { + return + } + err = en.WriteInt(z.Index) + if err != nil { + err = msgp.WrapError(err, "Index") + return + } + // write "Distribution" + err = en.Append(0xac, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Distribution))) + if err != nil { + err = msgp.WrapError(err, "Distribution") + return + } + for za0001 := range z.Distribution { + err = en.WriteInt(z.Distribution[za0001]) + if err != nil { + err = msgp.WrapError(err, "Distribution", za0001) + return + } + } + // write "Checksums" + err = en.Append(0xa9, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Checksums))) + if err != nil { + err = msgp.WrapError(err, "Checksums") + return + } + for za0002 := range z.Checksums { + err = z.Checksums[za0002].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Checksums", za0002) + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ErasureInfo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 7 + // string "Algorithm" + o = append(o, 0x87, 0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + o = msgp.AppendString(o, z.Algorithm) + // string "DataBlocks" + o = append(o, 0xaa, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73) + o = msgp.AppendInt(o, z.DataBlocks) + // string "ParityBlocks" + o = append(o, 0xac, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73) + o = msgp.AppendInt(o, z.ParityBlocks) + // 
string "BlockSize" + o = append(o, 0xa9, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.BlockSize) + // string "Index" + o = append(o, 0xa5, 0x49, 0x6e, 0x64, 0x65, 0x78) + o = msgp.AppendInt(o, z.Index) + // string "Distribution" + o = append(o, 0xac, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e) + o = msgp.AppendArrayHeader(o, uint32(len(z.Distribution))) + for za0001 := range z.Distribution { + o = msgp.AppendInt(o, z.Distribution[za0001]) + } + // string "Checksums" + o = append(o, 0xa9, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Checksums))) + for za0002 := range z.Checksums { + o, err = z.Checksums[za0002].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Checksums", za0002) + return + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ErasureInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Algorithm": + z.Algorithm, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + case "DataBlocks": + z.DataBlocks, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DataBlocks") + return + } + case "ParityBlocks": + z.ParityBlocks, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ParityBlocks") + return + } + case "BlockSize": + z.BlockSize, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "BlockSize") + return + } + case "Index": + z.Index, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Index") + return + } + case "Distribution": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Distribution") + return + } + if cap(z.Distribution) >= int(zb0002) { + z.Distribution = (z.Distribution)[:zb0002] + } else { + z.Distribution = make([]int, zb0002) + } + for za0001 := range z.Distribution { + z.Distribution[za0001], bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Distribution", za0001) + return + } + } + case "Checksums": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Checksums") + return + } + if cap(z.Checksums) >= int(zb0003) { + z.Checksums = (z.Checksums)[:zb0003] + } else { + z.Checksums = make([]ChecksumInfo, zb0003) + } + for za0002 := range z.Checksums { + bts, err = z.Checksums[za0002].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Checksums", za0002) + return + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ErasureInfo) Msgsize() (s int) { + s = 1 + 10 + msgp.StringPrefixSize + len(z.Algorithm) + 11 + msgp.IntSize + 13 + msgp.IntSize + 10 + msgp.Int64Size + 6 + msgp.IntSize + 13 + msgp.ArrayHeaderSize + (len(z.Distribution) * (msgp.IntSize)) + 10 + msgp.ArrayHeaderSize + for za0002 := range z.Checksums { + s += 
z.Checksums[za0002].Msgsize() + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ETag": + z.ETag, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ETag") + return + } + case "Number": + z.Number, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "Number") + return + } + case "Size": + z.Size, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + case "ActualSize": + z.ActualSize, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "ActualSize") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 4 + // write "ETag" + err = en.Append(0x84, 0xa4, 0x45, 0x54, 0x61, 0x67) + if err != nil { + return + } + err = en.WriteString(z.ETag) + if err != nil { + err = msgp.WrapError(err, "ETag") + return + } + // write "Number" + err = en.Append(0xa6, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) + if err != nil { + return + } + err = en.WriteInt(z.Number) + if err != nil { + err = msgp.WrapError(err, "Number") + return + } + // write "Size" + err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.Size) + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + // write "ActualSize" + err = en.Append(0xaa, 0x41, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.ActualSize) + if err != nil { + err = msgp.WrapError(err, "ActualSize") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ObjectPartInfo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 4 + // string "ETag" + o = append(o, 0x84, 0xa4, 0x45, 0x54, 0x61, 0x67) + o = msgp.AppendString(o, z.ETag) + // string "Number" + o = append(o, 0xa6, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) + o = msgp.AppendInt(o, z.Number) + // string "Size" + o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.Size) + // string "ActualSize" + o = append(o, 0xaa, 0x41, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.ActualSize) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ETag": + z.ETag, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ETag") + return + } + case "Number": + z.Number, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Number") + return + } + case "Size": + z.Size, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + case "ActualSize": + 
z.ActualSize, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ActualSize") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ObjectPartInfo) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.ETag) + 7 + msgp.IntSize + 5 + msgp.Int64Size + 11 + msgp.Int64Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *StatInfo) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Size": + z.Size, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + case "ModTime": + z.ModTime, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z StatInfo) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 2 + // write "Size" + err = en.Append(0x82, 0xa4, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.Size) + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + // write "ModTime" + err = en.Append(0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteTime(z.ModTime) + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z StatInfo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Size" + o = append(o, 0x82, 0xa4, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.Size) + // string "ModTime" + o = append(o, 0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65) + o = msgp.AppendTime(o, z.ModTime) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *StatInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Size": + z.Size, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + case "ModTime": + z.ModTime, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z StatInfo) Msgsize() (s int) { + s = 1 + 5 + msgp.Int64Size + 8 + msgp.TimeSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *checksumInfoJSON) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + 
zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Name": + z.Name, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "Algorithm": + z.Algorithm, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + case "Hash": + z.Hash, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z checksumInfoJSON) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 3 + // write "Name" + err = en.Append(0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Name) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + // write "Algorithm" + err = en.Append(0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + if err != nil { + return + } + err = en.WriteString(z.Algorithm) + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + // write "Hash" + err = en.Append(0xa4, 0x48, 0x61, 0x73, 0x68) + if err != nil { + return + } + err = en.WriteString(z.Hash) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z checksumInfoJSON) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "Name" + o = append(o, 0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Name) + // string "Algorithm" + o = append(o, 0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + o = msgp.AppendString(o, z.Algorithm) + // string "Hash" + o = append(o, 0xa4, 0x48, 0x61, 0x73, 0x68) + o = msgp.AppendString(o, z.Hash) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *checksumInfoJSON) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Name": + z.Name, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "Algorithm": + z.Algorithm, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + case "Hash": + z.Hash, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z checksumInfoJSON) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 10 + msgp.StringPrefixSize + len(z.Algorithm) + 5 + msgp.StringPrefixSize + len(z.Hash) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *xlMetaV1Object) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = 
msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.Version, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "Format": + z.Format, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Format") + return + } + case "Stat": + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Stat") + return + } + for zb0002 > 0 { + zb0002-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "Stat") + return + } + switch msgp.UnsafeString(field) { + case "Size": + z.Stat.Size, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "Stat", "Size") + return + } + case "ModTime": + z.Stat.ModTime, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "Stat", "ModTime") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "Stat") + return + } + } + } + case "Erasure": + err = z.Erasure.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Erasure") + return + } + case "Minio": + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Minio") + return + } + for zb0003 > 0 { + zb0003-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "Minio") + return + } + switch msgp.UnsafeString(field) { + case "Release": + z.Minio.Release, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Minio", "Release") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "Minio") + return + } + } + } + case "Meta": + var zb0004 uint32 + zb0004, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + if z.Meta == nil { + z.Meta = make(map[string]string, zb0004) + } else if len(z.Meta) > 0 { + for key := range z.Meta { + delete(z.Meta, key) + } + } + for zb0004 > 0 { + zb0004-- + var za0001 string + var za0002 string + za0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + za0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Meta", za0001) + return + } + z.Meta[za0001] = za0002 + } + case "Parts": + var zb0005 uint32 + zb0005, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Parts") + return + } + if cap(z.Parts) >= int(zb0005) { + z.Parts = (z.Parts)[:zb0005] + } else { + z.Parts = make([]ObjectPartInfo, zb0005) + } + for za0003 := range z.Parts { + err = z.Parts[za0003].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Parts", za0003) + return + } + } + case "VersionID": + z.VersionID, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "DataDir": + z.DataDir, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "DataDir") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *xlMetaV1Object) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 9 + // write "Version" + err = en.Append(0x89, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.Version) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + // write "Format" + err = en.Append(0xa6, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74) + if err != nil { + return + } + err = en.WriteString(z.Format) + if err != nil { + err = 
msgp.WrapError(err, "Format") + return + } + // write "Stat" + err = en.Append(0xa4, 0x53, 0x74, 0x61, 0x74) + if err != nil { + return + } + // map header, size 2 + // write "Size" + err = en.Append(0x82, 0xa4, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.Stat.Size) + if err != nil { + err = msgp.WrapError(err, "Stat", "Size") + return + } + // write "ModTime" + err = en.Append(0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteTime(z.Stat.ModTime) + if err != nil { + err = msgp.WrapError(err, "Stat", "ModTime") + return + } + // write "Erasure" + err = en.Append(0xa7, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65) + if err != nil { + return + } + err = z.Erasure.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Erasure") + return + } + // write "Minio" + err = en.Append(0xa5, 0x4d, 0x69, 0x6e, 0x69, 0x6f) + if err != nil { + return + } + // map header, size 1 + // write "Release" + err = en.Append(0x81, 0xa7, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Minio.Release) + if err != nil { + err = msgp.WrapError(err, "Minio", "Release") + return + } + // write "Meta" + err = en.Append(0xa4, 0x4d, 0x65, 0x74, 0x61) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.Meta))) + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + for za0001, za0002 := range z.Meta { + err = en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "Meta", za0001) + return + } + } + // write "Parts" + err = en.Append(0xa5, 0x50, 0x61, 0x72, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Parts))) + if err != nil { + err = msgp.WrapError(err, "Parts") + return + } + for za0003 := range z.Parts { + err = z.Parts[za0003].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Parts", za0003) + return + } + } + // write "VersionID" + err = en.Append(0xa9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteString(z.VersionID) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + // write "DataDir" + err = en.Append(0xa7, 0x44, 0x61, 0x74, 0x61, 0x44, 0x69, 0x72) + if err != nil { + return + } + err = en.WriteString(z.DataDir) + if err != nil { + err = msgp.WrapError(err, "DataDir") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *xlMetaV1Object) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 9 + // string "Version" + o = append(o, 0x89, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.Version) + // string "Format" + o = append(o, 0xa6, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74) + o = msgp.AppendString(o, z.Format) + // string "Stat" + o = append(o, 0xa4, 0x53, 0x74, 0x61, 0x74) + // map header, size 2 + // string "Size" + o = append(o, 0x82, 0xa4, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.Stat.Size) + // string "ModTime" + o = append(o, 0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65) + o = msgp.AppendTime(o, z.Stat.ModTime) + // string "Erasure" + o = append(o, 0xa7, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65) + o, err = z.Erasure.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Erasure") + return + } + // string "Minio" + o = append(o, 0xa5, 0x4d, 0x69, 0x6e, 0x69, 0x6f) + // map header, size 1 + // string 
"Release" + o = append(o, 0x81, 0xa7, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65) + o = msgp.AppendString(o, z.Minio.Release) + // string "Meta" + o = append(o, 0xa4, 0x4d, 0x65, 0x74, 0x61) + o = msgp.AppendMapHeader(o, uint32(len(z.Meta))) + for za0001, za0002 := range z.Meta { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + // string "Parts" + o = append(o, 0xa5, 0x50, 0x61, 0x72, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Parts))) + for za0003 := range z.Parts { + o, err = z.Parts[za0003].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Parts", za0003) + return + } + } + // string "VersionID" + o = append(o, 0xa9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44) + o = msgp.AppendString(o, z.VersionID) + // string "DataDir" + o = append(o, 0xa7, 0x44, 0x61, 0x74, 0x61, 0x44, 0x69, 0x72) + o = msgp.AppendString(o, z.DataDir) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *xlMetaV1Object) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.Version, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "Format": + z.Format, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Format") + return + } + case "Stat": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Stat") + return + } + for zb0002 > 0 { + zb0002-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Stat") + return + } + switch msgp.UnsafeString(field) { + case "Size": + z.Stat.Size, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Stat", "Size") + return + } + case "ModTime": + z.Stat.ModTime, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Stat", "ModTime") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Stat") + return + } + } + } + case "Erasure": + bts, err = z.Erasure.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Erasure") + return + } + case "Minio": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Minio") + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Minio") + return + } + switch msgp.UnsafeString(field) { + case "Release": + z.Minio.Release, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Minio", "Release") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Minio") + return + } + } + } + case "Meta": + var zb0004 uint32 + zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + if z.Meta == nil { + z.Meta = make(map[string]string, zb0004) + } else if len(z.Meta) > 0 { + for key := range z.Meta { + delete(z.Meta, key) + } + } + for zb0004 > 0 { + var za0001 string + var za0002 string + zb0004-- + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = 
msgp.WrapError(err, "Meta") + return + } + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Meta", za0001) + return + } + z.Meta[za0001] = za0002 + } + case "Parts": + var zb0005 uint32 + zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Parts") + return + } + if cap(z.Parts) >= int(zb0005) { + z.Parts = (z.Parts)[:zb0005] + } else { + z.Parts = make([]ObjectPartInfo, zb0005) + } + for za0003 := range z.Parts { + bts, err = z.Parts[za0003].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Parts", za0003) + return + } + } + case "VersionID": + z.VersionID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "DataDir": + z.DataDir, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DataDir") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *xlMetaV1Object) Msgsize() (s int) { + s = 1 + 8 + msgp.StringPrefixSize + len(z.Version) + 7 + msgp.StringPrefixSize + len(z.Format) + 5 + 1 + 5 + msgp.Int64Size + 8 + msgp.TimeSize + 8 + z.Erasure.Msgsize() + 6 + 1 + 8 + msgp.StringPrefixSize + len(z.Minio.Release) + 5 + msgp.MapHeaderSize + if z.Meta != nil { + for za0001, za0002 := range z.Meta { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += 6 + msgp.ArrayHeaderSize + for za0003 := range z.Parts { + s += z.Parts[za0003].Msgsize() + } + s += 10 + msgp.StringPrefixSize + len(z.VersionID) + 8 + msgp.StringPrefixSize + len(z.DataDir) + return +} diff --git a/cmd/xl-storage-format-v1_gen_test.go b/cmd/xl-storage-format-v1_gen_test.go new file mode 100644 index 000000000..0b66c8938 --- /dev/null +++ b/cmd/xl-storage-format-v1_gen_test.go @@ -0,0 +1,688 @@ +package cmd + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "bytes" + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalChecksumInfo(t *testing.T) { + v := ChecksumInfo{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgChecksumInfo(b *testing.B) { + v := ChecksumInfo{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgChecksumInfo(b *testing.B) { + v := ChecksumInfo{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalChecksumInfo(b *testing.B) { + v := ChecksumInfo{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeChecksumInfo(t *testing.T) { + v := ChecksumInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeChecksumInfo Msgsize() is inaccurate") + } + + vn := ChecksumInfo{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeChecksumInfo(b *testing.B) { + v := ChecksumInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeChecksumInfo(b *testing.B) { + v := ChecksumInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalErasureInfo(t *testing.T) { + v := ErasureInfo{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgErasureInfo(b *testing.B) { + v := ErasureInfo{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgErasureInfo(b *testing.B) { + v := ErasureInfo{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalErasureInfo(b *testing.B) { + v := ErasureInfo{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) 
+ if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeErasureInfo(t *testing.T) { + v := ErasureInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeErasureInfo Msgsize() is inaccurate") + } + + vn := ErasureInfo{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeErasureInfo(b *testing.B) { + v := ErasureInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeErasureInfo(b *testing.B) { + v := ErasureInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalObjectPartInfo(t *testing.T) { + v := ObjectPartInfo{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgObjectPartInfo(b *testing.B) { + v := ObjectPartInfo{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgObjectPartInfo(b *testing.B) { + v := ObjectPartInfo{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalObjectPartInfo(b *testing.B) { + v := ObjectPartInfo{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeObjectPartInfo(t *testing.T) { + v := ObjectPartInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeObjectPartInfo Msgsize() is inaccurate") + } + + vn := ObjectPartInfo{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeObjectPartInfo(b *testing.B) { + v := ObjectPartInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeObjectPartInfo(b *testing.B) { + v := ObjectPartInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalStatInfo(t *testing.T) { + v := 
StatInfo{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgStatInfo(b *testing.B) { + v := StatInfo{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgStatInfo(b *testing.B) { + v := StatInfo{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalStatInfo(b *testing.B) { + v := StatInfo{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeStatInfo(t *testing.T) { + v := StatInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeStatInfo Msgsize() is inaccurate") + } + + vn := StatInfo{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeStatInfo(b *testing.B) { + v := StatInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeStatInfo(b *testing.B) { + v := StatInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalchecksumInfoJSON(t *testing.T) { + v := checksumInfoJSON{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgchecksumInfoJSON(b *testing.B) { + v := checksumInfoJSON{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgchecksumInfoJSON(b *testing.B) { + v := checksumInfoJSON{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalchecksumInfoJSON(b *testing.B) { + v := checksumInfoJSON{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodechecksumInfoJSON(t *testing.T) { + v := checksumInfoJSON{} + var buf bytes.Buffer + 
msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodechecksumInfoJSON Msgsize() is inaccurate") + } + + vn := checksumInfoJSON{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodechecksumInfoJSON(b *testing.B) { + v := checksumInfoJSON{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodechecksumInfoJSON(b *testing.B) { + v := checksumInfoJSON{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalxlMetaV1Object(t *testing.T) { + v := xlMetaV1Object{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgxlMetaV1Object(b *testing.B) { + v := xlMetaV1Object{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgxlMetaV1Object(b *testing.B) { + v := xlMetaV1Object{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalxlMetaV1Object(b *testing.B) { + v := xlMetaV1Object{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodexlMetaV1Object(t *testing.T) { + v := xlMetaV1Object{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodexlMetaV1Object Msgsize() is inaccurate") + } + + vn := xlMetaV1Object{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodexlMetaV1Object(b *testing.B) { + v := xlMetaV1Object{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodexlMetaV1Object(b *testing.B) { + v := xlMetaV1Object{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/cmd/xl-storage-format-v2.go b/cmd/xl-storage-format-v2.go new file mode 100644 index 000000000..f8c09eec1 --- /dev/null +++ b/cmd/xl-storage-format-v2.go @@ -0,0 
+1,601 @@
+/*
+ * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/minio/minio/cmd/logger"
+)
+
+var (
+	// XL header specifies the format
+	xlHeader = [4]byte{'X', 'L', '2', ' '}
+
+	// XLv2 version 1
+	xlVersionV1 = [4]byte{'1', ' ', ' ', ' '}
+)
+
+func checkXL2V1(buf []byte) error {
+	if len(buf) <= 8 {
+		return fmt.Errorf("xlMeta: no data")
+	}
+
+	if !bytes.Equal(buf[:4], xlHeader[:]) {
+		return fmt.Errorf("xlMeta: unknown XLv2 header %s", buf[:4])
+	}
+
+	if !bytes.Equal(buf[4:8], xlVersionV1[:]) {
+		return fmt.Errorf("xlMeta: unknown XLv2 version %s", buf[4:8])
+	}
+
+	return nil
+}
+
+func isXL2V1Format(buf []byte) bool {
+	return checkXL2V1(buf) == nil
+}
+
+// The journal array contains all the different versions of the object.
+//
+// This array can have 3 kinds of objects:
+//
+// ``object``: If the object is uploaded the usual way: putobject, multipart-put, copyobject
+//
+// ``delete``: This is the delete-marker
+//
+// ``legacyObject``: This is the legacy object in xlV1 format, preserved until it is overwritten
+//
+// The most recently updated element in the array is considered the latest version.

+// Backend directory tree structure:
+// disk1/
+// └── bucket
+//     └── object
+//         ├── a192c1d5-9bd5-41fd-9a90-ab10e165398d
+//         │   └── part.1
+//         ├── c06e0436-f813-447e-ae5e-f2564df9dfd4
+//         │   └── part.1
+//         ├── df433928-2dcf-47b1-a786-43efa0f6b424
+//         │   └── part.1
+//         ├── legacy
+//         │   └── part.1
+//         └── xl.meta
+
+//go:generate msgp -file=$GOFILE -unexported
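An `xl.meta` file is therefore an 8-byte preamble (the `XL2 ` magic plus the `1   ` version bytes) followed by the msgpack-encoded journal. A minimal sketch of framing and re-validating such a buffer, assuming the msgp-generated `MarshalMsg` that the `//go:generate` directive above produces (not shown in this diff):

// Illustrative sketch only: frame a serialized xlMetaV2 journal with the
// XLv2 preamble and verify it round-trips through Load.
var meta xlMetaV2
// ... meta.Versions populated via AddVersion/AddLegacy ...
body, err := meta.MarshalMsg(nil) // msgp-generated method, assumed here
if err != nil {
	return err
}
buf := append(append(xlHeader[:], xlVersionV1[:]...), body...)

var decoded xlMetaV2
if err := decoded.Load(buf); err != nil { // checks the preamble, then unmarshals
	return err
}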
+
+// VersionType defines the journal type of the current entry.
+type VersionType uint8
+
+// List of the supported journal types
+const (
+	invalidVersionType VersionType = 0
+	ObjectType         VersionType = 1
+	DeleteType         VersionType = 2
+	LegacyType         VersionType = 3
+	lastVersionType    VersionType = 4
+)
+
+func (e VersionType) valid() bool {
+	return e > invalidVersionType && e < lastVersionType
+}
+
+// ErasureAlgo defines the common type for the different erasure algorithms
+type ErasureAlgo uint8
+
+// List of currently supported erasure coding algorithms
+const (
+	invalidErasureAlgo ErasureAlgo = 0
+	ReedSolomon        ErasureAlgo = 1
+	lastErasureAlgo    ErasureAlgo = 2
+)
+
+func (e ErasureAlgo) valid() bool {
+	return e > invalidErasureAlgo && e < lastErasureAlgo
+}
+
+func (e ErasureAlgo) String() string {
+	switch e {
+	case ReedSolomon:
+		return "reedsolomon"
+	}
+	return ""
+}
+
+// ChecksumAlgo defines the common type for the different checksum algorithms
+type ChecksumAlgo uint8
+
+// List of currently supported checksum algorithms
+const (
+	invalidChecksumAlgo ChecksumAlgo = 0
+	HighwayHash         ChecksumAlgo = 1
+	lastChecksumAlgo    ChecksumAlgo = 2
+)
+
+func (e ChecksumAlgo) valid() bool {
+	return e > invalidChecksumAlgo && e < lastChecksumAlgo
+}
+
+// xlMetaV2DeleteMarker defines the data struct for the delete marker journal type
+type xlMetaV2DeleteMarker struct {
+	VersionID [16]byte `json:"ID" msg:"ID"`       // Version ID for delete marker
+	ModTime   int64    `json:"MTime" msg:"MTime"` // Object delete marker modified time
+}
+
+// xlMetaV2Object defines the data struct for the object journal type
+type xlMetaV2Object struct {
+	VersionID          [16]byte          `json:"ID" msg:"ID"`                                     // Version ID
+	DataDir            [16]byte          `json:"DDir" msg:"DDir"`                                 // Data dir ID
+	ErasureAlgorithm   ErasureAlgo       `json:"EcAlgo" msg:"EcAlgo"`                             // Erasure coding algorithm
+	ErasureM           int               `json:"EcM" msg:"EcM"`                                   // Erasure data blocks
+	ErasureN           int               `json:"EcN" msg:"EcN"`                                   // Erasure parity blocks
+	ErasureBlockSize   int64             `json:"EcBSize" msg:"EcBSize"`                           // Erasure block size
+	ErasureIndex       int               `json:"EcIndex" msg:"EcIndex"`                           // Erasure disk index
+	ErasureDist        []uint8           `json:"EcDist" msg:"EcDist"`                             // Erasure distribution
+	BitrotChecksumAlgo ChecksumAlgo      `json:"CSumAlgo" msg:"CSumAlgo"`                         // Bitrot checksum algo
+	PartNumbers        []int             `json:"PartNums" msg:"PartNums"`                         // Part Numbers
+	PartETags          []string          `json:"PartETags" msg:"PartETags"`                       // Part ETags
+	PartSizes          []int64           `json:"PartSizes" msg:"PartSizes"`                       // Part Sizes
+	PartActualSizes    []int64           `json:"PartASizes,omitempty" msg:"PartASizes,omitempty"` // Part ActualSizes (compression)
+	StatSize           int64             `json:"Size" msg:"Size"`                                 // Object version size
+	StatModTime        int64             `json:"MTime" msg:"MTime"`                               // Object version modified time
+	MetaSys            map[string][]byte `json:"MetaSys,omitempty" msg:"MetaSys,omitempty"`       // Object version internal metadata
+	MetaUser           map[string]string `json:"MetaUsr,omitempty" msg:"MetaUsr,omitempty"`       // Object version metadata set by user
+}
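Each of these payload structs is wrapped in the xlMetaV2Version journal entry defined next. As a small illustration (not part of the diff), a delete-marker entry needs only a version ID and a Unix timestamp, and passes the Valid() check defined below:

// Illustrative sketch only: construct a delete-marker journal entry.
dm := xlMetaV2Version{
	Type: DeleteType,
	DeleteMarker: &xlMetaV2DeleteMarker{
		VersionID: uuid.New(),        // uuid.UUID assigns directly to the [16]byte field
		ModTime:   time.Now().Unix(), // stored as Unix seconds
	},
}
fmt.Println(dm.Valid()) // true: delete markers only require a positive ModTime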
+type xlMetaV2Version struct {
+	Type         VersionType           `json:"Type" msg:"Type"`
+	ObjectV1     *xlMetaV1Object       `json:"V1Obj,omitempty" msg:"V1Obj,omitempty"`
+	ObjectV2     *xlMetaV2Object       `json:"V2Obj,omitempty" msg:"V2Obj,omitempty"`
+	DeleteMarker *xlMetaV2DeleteMarker `json:"DelObj,omitempty" msg:"DelObj,omitempty"`
+}
+
+// Valid returns true if the xlMetaV2Version journal entry is valid.
+func (j xlMetaV2Version) Valid() bool {
+	switch j.Type {
+	case LegacyType:
+		return j.ObjectV1 != nil && j.ObjectV1.valid()
+	case ObjectType:
+		return j.ObjectV2 != nil &&
+			j.ObjectV2.ErasureAlgorithm.valid() &&
+			j.ObjectV2.BitrotChecksumAlgo.valid() &&
+			isXLMetaErasureInfoValid(j.ObjectV2.ErasureM, j.ObjectV2.ErasureN)
+	case DeleteType:
+		return j.DeleteMarker != nil && j.DeleteMarker.ModTime > 0
+	}
+	return false
+}
+
+// xlMetaV2 - object meta structure defines the format and list of
+// the journals for the object.
+type xlMetaV2 struct {
+	Versions []xlMetaV2Version `json:"Versions" msg:"Versions"`
+}
+
+// AddLegacy adds a legacy version; it is only called when no prior
+// versions exist, so it is safe to use from only one function in
+// xl-storage (RenameData).
+func (z *xlMetaV2) AddLegacy(m *xlMetaV1Object) error {
+	if !m.valid() {
+		return errFileCorrupt
+	}
+	m.VersionID = nullVersionID
+	m.DataDir = legacyDataDir
+	z.Versions = []xlMetaV2Version{
+		{
+			Type:     LegacyType,
+			ObjectV1: m,
+		},
+	}
+	return nil
+}
+
+// Load unmarshals and loads the entire MessagePack payload.
+func (z *xlMetaV2) Load(buf []byte) error {
+	if err := checkXL2V1(buf); err != nil {
+		return err
+	}
+	_, err := z.UnmarshalMsg(buf[8:])
+	return err
+}
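Load strips the 8-byte preamble before unmarshaling, so any writer has to prepend it. A hedged sketch of the inverse operation follows; appendXLMetaV2 is hypothetical, and the real serialization path lives outside this file:

// Editorial sketch: the write-side counterpart of Load. MarshalMsg
// appends the msgp encoding to dst, after the magic and version bytes.
func appendXLMetaV2(dst []byte, z *xlMetaV2) ([]byte, error) {
	dst = append(dst, xlHeader[:]...)
	dst = append(dst, xlVersionV1[:]...)
	return z.MarshalMsg(dst)
}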
+// AddVersion adds a new version to the journal.
+func (z *xlMetaV2) AddVersion(fi FileInfo) error {
+	if fi.Deleted {
+		uv, err := uuid.Parse(fi.VersionID)
+		if err != nil {
+			return err
+		}
+		z.Versions = append(z.Versions, xlMetaV2Version{
+			Type: DeleteType,
+			DeleteMarker: &xlMetaV2DeleteMarker{
+				VersionID: uv,
+				ModTime:   fi.ModTime.Unix(),
+			},
+		})
+		return nil
+	}
+
+	var uv uuid.UUID
+	var err error
+	// A "null" version ID is treated as an empty version ID.
+	if fi.VersionID == nullVersionID {
+		fi.VersionID = ""
+	}
+
+	if fi.VersionID != "" {
+		uv, err = uuid.Parse(fi.VersionID)
+		if err != nil {
+			return err
+		}
+	}
+
+	dd, err := uuid.Parse(fi.DataDir)
+	if err != nil {
+		return err
+	}
+
+	ventry := xlMetaV2Version{
+		Type: ObjectType,
+		ObjectV2: &xlMetaV2Object{
+			VersionID:          uv,
+			DataDir:            dd,
+			StatSize:           fi.Size,
+			StatModTime:        fi.ModTime.Unix(),
+			ErasureAlgorithm:   ReedSolomon,
+			ErasureM:           fi.Erasure.DataBlocks,
+			ErasureN:           fi.Erasure.ParityBlocks,
+			ErasureBlockSize:   fi.Erasure.BlockSize,
+			ErasureIndex:       fi.Erasure.Index,
+			BitrotChecksumAlgo: HighwayHash,
+			ErasureDist:        make([]uint8, len(fi.Erasure.Distribution)),
+			PartNumbers:        make([]int, len(fi.Parts)),
+			PartETags:          make([]string, len(fi.Parts)),
+			PartSizes:          make([]int64, len(fi.Parts)),
+			PartActualSizes:    make([]int64, len(fi.Parts)),
+			MetaSys:            make(map[string][]byte),
+			MetaUser:           make(map[string]string),
+		},
+	}
+
+	for i := range fi.Erasure.Distribution {
+		ventry.ObjectV2.ErasureDist[i] = uint8(fi.Erasure.Distribution[i])
+	}
+
+	for i := range fi.Parts {
+		ventry.ObjectV2.PartSizes[i] = fi.Parts[i].Size
+		if fi.Parts[i].ETag != "" {
+			ventry.ObjectV2.PartETags[i] = fi.Parts[i].ETag
+		}
+		ventry.ObjectV2.PartNumbers[i] = fi.Parts[i].Number
+		ventry.ObjectV2.PartActualSizes[i] = fi.Parts[i].ActualSize
+	}
+
+	for k, v := range fi.Metadata {
+		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+			ventry.ObjectV2.MetaSys[k] = []byte(v)
+		} else {
+			ventry.ObjectV2.MetaUser[k] = v
+		}
+	}
+
+	for i, version := range z.Versions {
+		if !version.Valid() {
+			return errFileCorrupt
+		}
+		switch version.Type {
+		case LegacyType:
+			// This converts the legacy type into the new ObjectType,
+			// which means that we are effectively purging the `null`
+			// version of the object.
+			if version.ObjectV1.VersionID == fi.VersionID {
+				z.Versions[i] = ventry
+				return nil
+			}
+		case ObjectType:
+			if bytes.Equal(version.ObjectV2.VersionID[:], uv[:]) {
+				z.Versions[i] = ventry
+				return nil
+			}
+		case DeleteType:
+			// Allow a delete marker to be replaced with a proper
+			// object data type as well; this is not S3-compliant
+			// behavior, but it is kept here for future flexibility.
+			if bytes.Equal(version.DeleteMarker.VersionID[:], uv[:]) {
+				z.Versions[i] = ventry
+				return nil
+			}
+		}
+	}
+
+	z.Versions = append(z.Versions, ventry)
+	return nil
+}
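To make the replace-or-append semantics above concrete, here is a small usage sketch. It sets only the FileInfo fields AddVersion actually reads for a delete marker, and the version ID is an arbitrary example UUID:

// Editorial sketch: delete markers are always appended as the newest
// journal entry, while object entries with a matching VersionID
// replace the existing entry in place.
func exampleAddDeleteMarker() error {
	var meta xlMetaV2
	return meta.AddVersion(FileInfo{
		Deleted:   true,
		VersionID: "8fa27258-fed9-4b4c-b69e-a67b6f5b1e29", // must parse as a UUID
		ModTime:   time.Now().UTC(),
	})
}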
+
+func newXLMetaV2(fi FileInfo) (xlMetaV2, error) {
+	xlMeta := xlMetaV2{}
+	return xlMeta, xlMeta.AddVersion(fi)
+}
+
+func (j xlMetaV2DeleteMarker) ToFileInfo(volume, path string) (FileInfo, error) {
+	fi := FileInfo{
+		Volume:    volume,
+		Name:      path,
+		ModTime:   time.Unix(j.ModTime, 0).UTC(),
+		VersionID: uuid.UUID(j.VersionID).String(),
+		Deleted:   true,
+	}
+	return fi, nil
+}
+
+func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
+	versionID := ""
+	var uv uuid.UUID
+	// Check if the version is not "null" (an all-zero version ID).
+	if !bytes.Equal(j.VersionID[:], uv[:]) {
+		versionID = uuid.UUID(j.VersionID).String()
+	}
+	fi := FileInfo{
+		Volume:    volume,
+		Name:      path,
+		Size:      j.StatSize,
+		ModTime:   time.Unix(j.StatModTime, 0).UTC(),
+		VersionID: versionID,
+	}
+	fi.Parts = make([]ObjectPartInfo, len(j.PartNumbers))
+	for i := range fi.Parts {
+		fi.Parts[i].Number = j.PartNumbers[i]
+		fi.Parts[i].Size = j.PartSizes[i]
+		fi.Parts[i].ETag = j.PartETags[i]
+		fi.Parts[i].ActualSize = j.PartActualSizes[i]
+	}
+	fi.Erasure.Checksums = make([]ChecksumInfo, len(j.PartSizes))
+	for i := range fi.Parts {
+		fi.Erasure.Checksums[i].PartNumber = fi.Parts[i].Number
+		switch j.BitrotChecksumAlgo {
+		case HighwayHash:
+			fi.Erasure.Checksums[i].Algorithm = HighwayHash256S
+			fi.Erasure.Checksums[i].Hash = []byte{}
+		default:
+			return FileInfo{}, fmt.Errorf("unknown BitrotChecksumAlgo: %v", j.BitrotChecksumAlgo)
+		}
+	}
+	fi.Metadata = make(map[string]string, len(j.MetaUser)+len(j.MetaSys))
+	for k, v := range j.MetaUser {
+		fi.Metadata[k] = v
+	}
+	for k, v := range j.MetaSys {
+		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+			fi.Metadata[k] = string(v)
+		}
+	}
+	fi.Erasure.Algorithm = j.ErasureAlgorithm.String()
+	fi.Erasure.Index = j.ErasureIndex
+	fi.Erasure.BlockSize = j.ErasureBlockSize
+	fi.Erasure.DataBlocks = j.ErasureM
+	fi.Erasure.ParityBlocks = j.ErasureN
+	fi.Erasure.Distribution = make([]int, len(j.ErasureDist))
+	for i := range j.ErasureDist {
+		fi.Erasure.Distribution[i] = int(j.ErasureDist[i])
+	}
+	fi.DataDir = uuid.UUID(j.DataDir).String()
+	return fi, nil
+}
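One detail worth calling out before DeleteVersion: an all-zero VersionID in the journal surfaces from ToFileInfo as an empty FileInfo.VersionID, which upper layers render as the S3 "null" version. A sketch of that mapping; exampleIsNullVersion is hypothetical:

// Editorial sketch: detect whether an object journal entry is the
// "null" (unversioned) version after converting it to FileInfo.
func exampleIsNullVersion(obj xlMetaV2Object) (bool, error) {
	fi, err := obj.ToFileInfo("bucket", "object")
	if err != nil {
		return false, err
	}
	return fi.VersionID == "", nil // true when the stored ID was all zeroes
}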
+
+// DeleteVersion deletes the version specified by version ID.
+// It returns the dataDir the caller should delete and indicates
+// whether this was the last version.
+func (z *xlMetaV2) DeleteVersion(fi FileInfo) (string, bool, error) {
+	// This handles the situation where the versionID is explicitly
+	// specified as "null"; since we never store the "null" string,
+	// it is treated as empty. An empty versionID also means the
+	// matching version will be purged.
+	if fi.VersionID == nullVersionID {
+		fi.VersionID = ""
+	}
+	var uv uuid.UUID
+	if fi.VersionID != "" {
+		uv, _ = uuid.Parse(fi.VersionID)
+	}
+	for i, version := range z.Versions {
+		if !version.Valid() {
+			return "", false, errFileCorrupt
+		}
+		switch version.Type {
+		case LegacyType:
+			if version.ObjectV1.VersionID == fi.VersionID {
+				z.Versions = append(z.Versions[:i], z.Versions[i+1:]...)
+				return version.ObjectV1.DataDir, len(z.Versions) == 0, nil
+			}
+		case ObjectType:
+			if bytes.Equal(version.ObjectV2.VersionID[:], uv[:]) {
+				z.Versions = append(z.Versions[:i], z.Versions[i+1:]...)
+				return uuid.UUID(version.ObjectV2.DataDir).String(), len(z.Versions) == 0, nil
+			}
+		case DeleteType:
+			if bytes.Equal(version.DeleteMarker.VersionID[:], uv[:]) {
+				z.Versions = append(z.Versions[:i], z.Versions[i+1:]...)
+				return "", len(z.Versions) == 0, nil
+			}
+		}
+	}
+	return "", false, errFileVersionNotFound
+}
+
+// TotalSize returns the total size of all versions.
+func (z xlMetaV2) TotalSize() int64 {
+	var total int64
+	for i := range z.Versions {
+		switch z.Versions[i].Type {
+		case ObjectType:
+			total += z.Versions[i].ObjectV2.StatSize
+		case LegacyType:
+			total += z.Versions[i].ObjectV1.Stat.Size
+		}
+	}
+	return total
+}
+
+// ListVersions lists current object versions and current delete
+// markers; it returns an error for unexpected entries.
+func (z xlMetaV2) ListVersions(volume, path string) (versions []FileInfo, deleted []FileInfo, modTime time.Time, err error) {
+	var latestModTime time.Time
+	var latestVersionID string
+	for _, version := range z.Versions {
+		if !version.Valid() {
+			return nil, nil, latestModTime, errFileCorrupt
+		}
+		var fi FileInfo
+		switch version.Type {
+		case ObjectType:
+			fi, err = version.ObjectV2.ToFileInfo(volume, path)
+		case DeleteType:
+			fi, err = version.DeleteMarker.ToFileInfo(volume, path)
+		case LegacyType:
+			fi, err = version.ObjectV1.ToFileInfo(volume, path)
+		default:
+			continue
+		}
+		if err != nil {
+			return nil, nil, latestModTime, err
+		}
+		if fi.ModTime.After(latestModTime) {
+			latestModTime = fi.ModTime
+			latestVersionID = fi.VersionID
+		}
+		switch version.Type {
+		case LegacyType:
+			fallthrough
+		case ObjectType:
+			versions = append(versions, fi)
+		case DeleteType:
+			deleted = append(deleted, fi)
+		}
+	}
+
+	// Since we can never have duplicate versions, if the latest
+	// versionID matches one of the delete markers we can be sure
+	// that no actual version is the latest, so we can return
+	// early once the version is found among the delete markers.
+	for i := range deleted {
+		if deleted[i].VersionID == latestVersionID {
+			deleted[i].IsLatest = true
+			return versions, deleted, latestModTime, nil
+		}
+	}
+	// We did not find the version among the delete markers, so the
+	// latest version is indeed one of the actual versions of the
+	// object with data.
+	for i := range versions {
+		if versions[i].VersionID != latestVersionID {
+			continue
+		}
+		versions[i].IsLatest = true
+		break
+	}
+	return versions, deleted, latestModTime, nil
+}
+
+// ToFileInfo converts xlMetaV2 into a common FileInfo data structure
+// for consumption across callers.
+func (z xlMetaV2) ToFileInfo(volume, path, versionID string) (FileInfo, error) { + var uv uuid.UUID + if versionID != "" { + uv, _ = uuid.Parse(versionID) + } + + if versionID == "" { + var latestModTime time.Time + var latestIndex int + for i, version := range z.Versions { + if !version.Valid() { + logger.LogIf(GlobalContext, fmt.Errorf("invalid version detected %#v", version)) + return FileInfo{}, errFileNotFound + } + var modTime time.Time + switch version.Type { + case ObjectType: + modTime = time.Unix(version.ObjectV2.StatModTime, 0) + case DeleteType: + modTime = time.Unix(version.DeleteMarker.ModTime, 0) + case LegacyType: + modTime = version.ObjectV1.Stat.ModTime + default: + continue + } + if modTime.After(latestModTime) { + latestModTime = modTime + latestIndex = i + } + } + if len(z.Versions) >= 1 { + switch z.Versions[latestIndex].Type { + case ObjectType: + return z.Versions[latestIndex].ObjectV2.ToFileInfo(volume, path) + case DeleteType: + return z.Versions[latestIndex].DeleteMarker.ToFileInfo(volume, path) + case LegacyType: + return z.Versions[latestIndex].ObjectV1.ToFileInfo(volume, path) + } + } + return FileInfo{}, errFileNotFound + } + + for _, version := range z.Versions { + if !version.Valid() { + logger.LogIf(GlobalContext, fmt.Errorf("invalid version detected %#v", version)) + if versionID == "" { + return FileInfo{}, errFileNotFound + } + return FileInfo{}, errFileVersionNotFound + } + switch version.Type { + case ObjectType: + if bytes.Equal(version.ObjectV2.VersionID[:], uv[:]) { + return version.ObjectV2.ToFileInfo(volume, path) + } + case LegacyType: + if version.ObjectV1.VersionID == versionID { + return version.ObjectV1.ToFileInfo(volume, path) + } + case DeleteType: + if bytes.Equal(version.DeleteMarker.VersionID[:], uv[:]) { + return version.DeleteMarker.ToFileInfo(volume, path) + } + default: + logger.LogIf(GlobalContext, fmt.Errorf("unknown version type: %v", version.Type)) + if versionID == "" { + return FileInfo{}, errFileNotFound + } + + return FileInfo{}, errFileVersionNotFound + } + } + + if versionID == "" { + return FileInfo{}, errFileNotFound + } + + return FileInfo{}, errFileVersionNotFound +} diff --git a/cmd/xl-storage-format-v2_gen.go b/cmd/xl-storage-format-v2_gen.go new file mode 100644 index 000000000..da111d395 --- /dev/null +++ b/cmd/xl-storage-format-v2_gen.go @@ -0,0 +1,1780 @@ +package cmd + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *ChecksumAlgo) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 uint8 + zb0001, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = ChecksumAlgo(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z ChecksumAlgo) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteUint8(uint8(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z ChecksumAlgo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendUint8(o, uint8(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ChecksumAlgo) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 uint8 + zb0001, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = ChecksumAlgo(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z ChecksumAlgo) Msgsize() (s int) { + s = msgp.Uint8Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *ErasureAlgo) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 uint8 + zb0001, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = ErasureAlgo(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z ErasureAlgo) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteUint8(uint8(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z ErasureAlgo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendUint8(o, uint8(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ErasureAlgo) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 uint8 + zb0001, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = ErasureAlgo(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z ErasureAlgo) Msgsize() (s int) { + s = msgp.Uint8Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *VersionType) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 uint8 + zb0001, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = VersionType(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z VersionType) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteUint8(uint8(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z VersionType) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendUint8(o, uint8(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *VersionType) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 uint8 + zb0001, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = VersionType(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z VersionType) Msgsize() (s int) { + s = msgp.Uint8Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *xlMetaV2) 
DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Versions": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Versions") + return + } + if cap(z.Versions) >= int(zb0002) { + z.Versions = (z.Versions)[:zb0002] + } else { + z.Versions = make([]xlMetaV2Version, zb0002) + } + for za0001 := range z.Versions { + err = z.Versions[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Versions", za0001) + return + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *xlMetaV2) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 1 + // write "Versions" + err = en.Append(0x81, 0xa8, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Versions))) + if err != nil { + err = msgp.WrapError(err, "Versions") + return + } + for za0001 := range z.Versions { + err = z.Versions[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Versions", za0001) + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *xlMetaV2) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 1 + // string "Versions" + o = append(o, 0x81, 0xa8, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Versions))) + for za0001 := range z.Versions { + o, err = z.Versions[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Versions", za0001) + return + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *xlMetaV2) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Versions": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Versions") + return + } + if cap(z.Versions) >= int(zb0002) { + z.Versions = (z.Versions)[:zb0002] + } else { + z.Versions = make([]xlMetaV2Version, zb0002) + } + for za0001 := range z.Versions { + bts, err = z.Versions[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Versions", za0001) + return + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *xlMetaV2) Msgsize() (s int) { + s = 1 + 9 + msgp.ArrayHeaderSize + for za0001 := range z.Versions { + s += z.Versions[za0001].Msgsize() + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *xlMetaV2DeleteMarker) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err 
= dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ID": + err = dc.ReadExactBytes((z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "MTime": + z.ModTime, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *xlMetaV2DeleteMarker) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 2 + // write "ID" + err = en.Append(0x82, 0xa2, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteBytes((z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + // write "MTime" + err = en.Append(0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.ModTime) + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *xlMetaV2DeleteMarker) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "ID" + o = append(o, 0x82, 0xa2, 0x49, 0x44) + o = msgp.AppendBytes(o, (z.VersionID)[:]) + // string "MTime" + o = append(o, 0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + o = msgp.AppendInt64(o, z.ModTime) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *xlMetaV2DeleteMarker) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ID": + bts, err = msgp.ReadExactBytes(bts, (z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "MTime": + z.ModTime, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *xlMetaV2DeleteMarker) Msgsize() (s int) { + s = 1 + 3 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize)) + 6 + msgp.Int64Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ID": + err = dc.ReadExactBytes((z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "DDir": + err = dc.ReadExactBytes((z.DataDir)[:]) + if err != nil { + err = msgp.WrapError(err, "DataDir") + return + } + case "EcAlgo": + { + var zb0002 uint8 + zb0002, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err, "ErasureAlgorithm") + return + } + z.ErasureAlgorithm = ErasureAlgo(zb0002) + } + case "EcM": + z.ErasureM, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "ErasureM") + return + } + case "EcN": + z.ErasureN, 
err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "ErasureN") + return + } + case "EcBSize": + z.ErasureBlockSize, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "ErasureBlockSize") + return + } + case "EcIndex": + z.ErasureIndex, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "ErasureIndex") + return + } + case "EcDist": + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "ErasureDist") + return + } + if cap(z.ErasureDist) >= int(zb0003) { + z.ErasureDist = (z.ErasureDist)[:zb0003] + } else { + z.ErasureDist = make([]uint8, zb0003) + } + for za0003 := range z.ErasureDist { + z.ErasureDist[za0003], err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err, "ErasureDist", za0003) + return + } + } + case "CSumAlgo": + { + var zb0004 uint8 + zb0004, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err, "BitrotChecksumAlgo") + return + } + z.BitrotChecksumAlgo = ChecksumAlgo(zb0004) + } + case "PartNums": + var zb0005 uint32 + zb0005, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PartNumbers") + return + } + if cap(z.PartNumbers) >= int(zb0005) { + z.PartNumbers = (z.PartNumbers)[:zb0005] + } else { + z.PartNumbers = make([]int, zb0005) + } + for za0004 := range z.PartNumbers { + z.PartNumbers[za0004], err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "PartNumbers", za0004) + return + } + } + case "PartETags": + var zb0006 uint32 + zb0006, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PartETags") + return + } + if cap(z.PartETags) >= int(zb0006) { + z.PartETags = (z.PartETags)[:zb0006] + } else { + z.PartETags = make([]string, zb0006) + } + for za0005 := range z.PartETags { + z.PartETags[za0005], err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "PartETags", za0005) + return + } + } + case "PartSizes": + var zb0007 uint32 + zb0007, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PartSizes") + return + } + if cap(z.PartSizes) >= int(zb0007) { + z.PartSizes = (z.PartSizes)[:zb0007] + } else { + z.PartSizes = make([]int64, zb0007) + } + for za0006 := range z.PartSizes { + z.PartSizes[za0006], err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "PartSizes", za0006) + return + } + } + case "PartASizes": + var zb0008 uint32 + zb0008, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PartActualSizes") + return + } + if cap(z.PartActualSizes) >= int(zb0008) { + z.PartActualSizes = (z.PartActualSizes)[:zb0008] + } else { + z.PartActualSizes = make([]int64, zb0008) + } + for za0007 := range z.PartActualSizes { + z.PartActualSizes[za0007], err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "PartActualSizes", za0007) + return + } + } + case "Size": + z.StatSize, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "StatSize") + return + } + case "MTime": + z.StatModTime, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "StatModTime") + return + } + case "MetaSys": + var zb0009 uint32 + zb0009, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "MetaSys") + return + } + if z.MetaSys == nil { + z.MetaSys = make(map[string][]byte, zb0009) + } else if len(z.MetaSys) > 0 { + for key := range z.MetaSys { + delete(z.MetaSys, key) + } + } + for zb0009 > 0 { + zb0009-- + var za0008 string + var za0009 []byte + za0008, err = dc.ReadString() + if err != nil { + err = 
msgp.WrapError(err, "MetaSys") + return + } + za0009, err = dc.ReadBytes(za0009) + if err != nil { + err = msgp.WrapError(err, "MetaSys", za0008) + return + } + z.MetaSys[za0008] = za0009 + } + case "MetaUsr": + var zb0010 uint32 + zb0010, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + if z.MetaUser == nil { + z.MetaUser = make(map[string]string, zb0010) + } else if len(z.MetaUser) > 0 { + for key := range z.MetaUser { + delete(z.MetaUser, key) + } + } + for zb0010 > 0 { + zb0010-- + var za0010 string + var za0011 string + za0010, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + za0011, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "MetaUser", za0010) + return + } + z.MetaUser[za0010] = za0011 + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) { + // omitempty: check for empty values + zb0001Len := uint32(17) + var zb0001Mask uint32 /* 17 bits */ + if z.PartActualSizes == nil { + zb0001Len-- + zb0001Mask |= 0x1000 + } + if z.MetaSys == nil { + zb0001Len-- + zb0001Mask |= 0x8000 + } + if z.MetaUser == nil { + zb0001Len-- + zb0001Mask |= 0x10000 + } + // variable map header, size zb0001Len + err = en.WriteMapHeader(zb0001Len) + if err != nil { + return + } + if zb0001Len == 0 { + return + } + // write "ID" + err = en.Append(0xa2, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteBytes((z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + // write "DDir" + err = en.Append(0xa4, 0x44, 0x44, 0x69, 0x72) + if err != nil { + return + } + err = en.WriteBytes((z.DataDir)[:]) + if err != nil { + err = msgp.WrapError(err, "DataDir") + return + } + // write "EcAlgo" + err = en.Append(0xa6, 0x45, 0x63, 0x41, 0x6c, 0x67, 0x6f) + if err != nil { + return + } + err = en.WriteUint8(uint8(z.ErasureAlgorithm)) + if err != nil { + err = msgp.WrapError(err, "ErasureAlgorithm") + return + } + // write "EcM" + err = en.Append(0xa3, 0x45, 0x63, 0x4d) + if err != nil { + return + } + err = en.WriteInt(z.ErasureM) + if err != nil { + err = msgp.WrapError(err, "ErasureM") + return + } + // write "EcN" + err = en.Append(0xa3, 0x45, 0x63, 0x4e) + if err != nil { + return + } + err = en.WriteInt(z.ErasureN) + if err != nil { + err = msgp.WrapError(err, "ErasureN") + return + } + // write "EcBSize" + err = en.Append(0xa7, 0x45, 0x63, 0x42, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.ErasureBlockSize) + if err != nil { + err = msgp.WrapError(err, "ErasureBlockSize") + return + } + // write "EcIndex" + err = en.Append(0xa7, 0x45, 0x63, 0x49, 0x6e, 0x64, 0x65, 0x78) + if err != nil { + return + } + err = en.WriteInt(z.ErasureIndex) + if err != nil { + err = msgp.WrapError(err, "ErasureIndex") + return + } + // write "EcDist" + err = en.Append(0xa6, 0x45, 0x63, 0x44, 0x69, 0x73, 0x74) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.ErasureDist))) + if err != nil { + err = msgp.WrapError(err, "ErasureDist") + return + } + for za0003 := range z.ErasureDist { + err = en.WriteUint8(z.ErasureDist[za0003]) + if err != nil { + err = msgp.WrapError(err, "ErasureDist", za0003) + return + } + } + // write "CSumAlgo" + err = en.Append(0xa8, 0x43, 0x53, 0x75, 0x6d, 0x41, 0x6c, 0x67, 0x6f) + if err != nil { + return + } + err = 
en.WriteUint8(uint8(z.BitrotChecksumAlgo)) + if err != nil { + err = msgp.WrapError(err, "BitrotChecksumAlgo") + return + } + // write "PartNums" + err = en.Append(0xa8, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PartNumbers))) + if err != nil { + err = msgp.WrapError(err, "PartNumbers") + return + } + for za0004 := range z.PartNumbers { + err = en.WriteInt(z.PartNumbers[za0004]) + if err != nil { + err = msgp.WrapError(err, "PartNumbers", za0004) + return + } + } + // write "PartETags" + err = en.Append(0xa9, 0x50, 0x61, 0x72, 0x74, 0x45, 0x54, 0x61, 0x67, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PartETags))) + if err != nil { + err = msgp.WrapError(err, "PartETags") + return + } + for za0005 := range z.PartETags { + err = en.WriteString(z.PartETags[za0005]) + if err != nil { + err = msgp.WrapError(err, "PartETags", za0005) + return + } + } + // write "PartSizes" + err = en.Append(0xa9, 0x50, 0x61, 0x72, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PartSizes))) + if err != nil { + err = msgp.WrapError(err, "PartSizes") + return + } + for za0006 := range z.PartSizes { + err = en.WriteInt64(z.PartSizes[za0006]) + if err != nil { + err = msgp.WrapError(err, "PartSizes", za0006) + return + } + } + if (zb0001Mask & 0x1000) == 0 { // if not empty + // write "PartASizes" + err = en.Append(0xaa, 0x50, 0x61, 0x72, 0x74, 0x41, 0x53, 0x69, 0x7a, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PartActualSizes))) + if err != nil { + err = msgp.WrapError(err, "PartActualSizes") + return + } + for za0007 := range z.PartActualSizes { + err = en.WriteInt64(z.PartActualSizes[za0007]) + if err != nil { + err = msgp.WrapError(err, "PartActualSizes", za0007) + return + } + } + } + // write "Size" + err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.StatSize) + if err != nil { + err = msgp.WrapError(err, "StatSize") + return + } + // write "MTime" + err = en.Append(0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.StatModTime) + if err != nil { + err = msgp.WrapError(err, "StatModTime") + return + } + if (zb0001Mask & 0x8000) == 0 { // if not empty + // write "MetaSys" + err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.MetaSys))) + if err != nil { + err = msgp.WrapError(err, "MetaSys") + return + } + for za0008, za0009 := range z.MetaSys { + err = en.WriteString(za0008) + if err != nil { + err = msgp.WrapError(err, "MetaSys") + return + } + err = en.WriteBytes(za0009) + if err != nil { + err = msgp.WrapError(err, "MetaSys", za0008) + return + } + } + } + if (zb0001Mask & 0x10000) == 0 { // if not empty + // write "MetaUsr" + err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x73, 0x72) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.MetaUser))) + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + for za0010, za0011 := range z.MetaUser { + err = en.WriteString(za0010) + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + err = en.WriteString(za0011) + if err != nil { + err = msgp.WrapError(err, "MetaUser", za0010) + return + } + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) { + o = 
msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(17) + var zb0001Mask uint32 /* 17 bits */ + if z.PartActualSizes == nil { + zb0001Len-- + zb0001Mask |= 0x1000 + } + if z.MetaSys == nil { + zb0001Len-- + zb0001Mask |= 0x8000 + } + if z.MetaUser == nil { + zb0001Len-- + zb0001Mask |= 0x10000 + } + // variable map header, size zb0001Len + o = msgp.AppendMapHeader(o, zb0001Len) + if zb0001Len == 0 { + return + } + // string "ID" + o = append(o, 0xa2, 0x49, 0x44) + o = msgp.AppendBytes(o, (z.VersionID)[:]) + // string "DDir" + o = append(o, 0xa4, 0x44, 0x44, 0x69, 0x72) + o = msgp.AppendBytes(o, (z.DataDir)[:]) + // string "EcAlgo" + o = append(o, 0xa6, 0x45, 0x63, 0x41, 0x6c, 0x67, 0x6f) + o = msgp.AppendUint8(o, uint8(z.ErasureAlgorithm)) + // string "EcM" + o = append(o, 0xa3, 0x45, 0x63, 0x4d) + o = msgp.AppendInt(o, z.ErasureM) + // string "EcN" + o = append(o, 0xa3, 0x45, 0x63, 0x4e) + o = msgp.AppendInt(o, z.ErasureN) + // string "EcBSize" + o = append(o, 0xa7, 0x45, 0x63, 0x42, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.ErasureBlockSize) + // string "EcIndex" + o = append(o, 0xa7, 0x45, 0x63, 0x49, 0x6e, 0x64, 0x65, 0x78) + o = msgp.AppendInt(o, z.ErasureIndex) + // string "EcDist" + o = append(o, 0xa6, 0x45, 0x63, 0x44, 0x69, 0x73, 0x74) + o = msgp.AppendArrayHeader(o, uint32(len(z.ErasureDist))) + for za0003 := range z.ErasureDist { + o = msgp.AppendUint8(o, z.ErasureDist[za0003]) + } + // string "CSumAlgo" + o = append(o, 0xa8, 0x43, 0x53, 0x75, 0x6d, 0x41, 0x6c, 0x67, 0x6f) + o = msgp.AppendUint8(o, uint8(z.BitrotChecksumAlgo)) + // string "PartNums" + o = append(o, 0xa8, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PartNumbers))) + for za0004 := range z.PartNumbers { + o = msgp.AppendInt(o, z.PartNumbers[za0004]) + } + // string "PartETags" + o = append(o, 0xa9, 0x50, 0x61, 0x72, 0x74, 0x45, 0x54, 0x61, 0x67, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PartETags))) + for za0005 := range z.PartETags { + o = msgp.AppendString(o, z.PartETags[za0005]) + } + // string "PartSizes" + o = append(o, 0xa9, 0x50, 0x61, 0x72, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PartSizes))) + for za0006 := range z.PartSizes { + o = msgp.AppendInt64(o, z.PartSizes[za0006]) + } + if (zb0001Mask & 0x1000) == 0 { // if not empty + // string "PartASizes" + o = append(o, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x41, 0x53, 0x69, 0x7a, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PartActualSizes))) + for za0007 := range z.PartActualSizes { + o = msgp.AppendInt64(o, z.PartActualSizes[za0007]) + } + } + // string "Size" + o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.StatSize) + // string "MTime" + o = append(o, 0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + o = msgp.AppendInt64(o, z.StatModTime) + if (zb0001Mask & 0x8000) == 0 { // if not empty + // string "MetaSys" + o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.MetaSys))) + for za0008, za0009 := range z.MetaSys { + o = msgp.AppendString(o, za0008) + o = msgp.AppendBytes(o, za0009) + } + } + if (zb0001Mask & 0x10000) == 0 { // if not empty + // string "MetaUsr" + o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x73, 0x72) + o = msgp.AppendMapHeader(o, uint32(len(z.MetaUser))) + for za0010, za0011 := range z.MetaUser { + o = msgp.AppendString(o, za0010) + o = msgp.AppendString(o, za0011) + } + } + return +} + +// UnmarshalMsg implements 
msgp.Unmarshaler +func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ID": + bts, err = msgp.ReadExactBytes(bts, (z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "DDir": + bts, err = msgp.ReadExactBytes(bts, (z.DataDir)[:]) + if err != nil { + err = msgp.WrapError(err, "DataDir") + return + } + case "EcAlgo": + { + var zb0002 uint8 + zb0002, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureAlgorithm") + return + } + z.ErasureAlgorithm = ErasureAlgo(zb0002) + } + case "EcM": + z.ErasureM, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureM") + return + } + case "EcN": + z.ErasureN, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureN") + return + } + case "EcBSize": + z.ErasureBlockSize, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureBlockSize") + return + } + case "EcIndex": + z.ErasureIndex, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureIndex") + return + } + case "EcDist": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureDist") + return + } + if cap(z.ErasureDist) >= int(zb0003) { + z.ErasureDist = (z.ErasureDist)[:zb0003] + } else { + z.ErasureDist = make([]uint8, zb0003) + } + for za0003 := range z.ErasureDist { + z.ErasureDist[za0003], bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureDist", za0003) + return + } + } + case "CSumAlgo": + { + var zb0004 uint8 + zb0004, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "BitrotChecksumAlgo") + return + } + z.BitrotChecksumAlgo = ChecksumAlgo(zb0004) + } + case "PartNums": + var zb0005 uint32 + zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartNumbers") + return + } + if cap(z.PartNumbers) >= int(zb0005) { + z.PartNumbers = (z.PartNumbers)[:zb0005] + } else { + z.PartNumbers = make([]int, zb0005) + } + for za0004 := range z.PartNumbers { + z.PartNumbers[za0004], bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartNumbers", za0004) + return + } + } + case "PartETags": + var zb0006 uint32 + zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartETags") + return + } + if cap(z.PartETags) >= int(zb0006) { + z.PartETags = (z.PartETags)[:zb0006] + } else { + z.PartETags = make([]string, zb0006) + } + for za0005 := range z.PartETags { + z.PartETags[za0005], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartETags", za0005) + return + } + } + case "PartSizes": + var zb0007 uint32 + zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartSizes") + return + } + if cap(z.PartSizes) >= int(zb0007) { + z.PartSizes = (z.PartSizes)[:zb0007] + } else { + z.PartSizes = make([]int64, zb0007) + } + for za0006 := range z.PartSizes { + z.PartSizes[za0006], bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + 
err = msgp.WrapError(err, "PartSizes", za0006) + return + } + } + case "PartASizes": + var zb0008 uint32 + zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartActualSizes") + return + } + if cap(z.PartActualSizes) >= int(zb0008) { + z.PartActualSizes = (z.PartActualSizes)[:zb0008] + } else { + z.PartActualSizes = make([]int64, zb0008) + } + for za0007 := range z.PartActualSizes { + z.PartActualSizes[za0007], bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartActualSizes", za0007) + return + } + } + case "Size": + z.StatSize, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "StatSize") + return + } + case "MTime": + z.StatModTime, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "StatModTime") + return + } + case "MetaSys": + var zb0009 uint32 + zb0009, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MetaSys") + return + } + if z.MetaSys == nil { + z.MetaSys = make(map[string][]byte, zb0009) + } else if len(z.MetaSys) > 0 { + for key := range z.MetaSys { + delete(z.MetaSys, key) + } + } + for zb0009 > 0 { + var za0008 string + var za0009 []byte + zb0009-- + za0008, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MetaSys") + return + } + za0009, bts, err = msgp.ReadBytesBytes(bts, za0009) + if err != nil { + err = msgp.WrapError(err, "MetaSys", za0008) + return + } + z.MetaSys[za0008] = za0009 + } + case "MetaUsr": + var zb0010 uint32 + zb0010, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + if z.MetaUser == nil { + z.MetaUser = make(map[string]string, zb0010) + } else if len(z.MetaUser) > 0 { + for key := range z.MetaUser { + delete(z.MetaUser, key) + } + } + for zb0010 > 0 { + var za0010 string + var za0011 string + zb0010-- + za0010, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + za0011, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MetaUser", za0010) + return + } + z.MetaUser[za0010] = za0011 + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *xlMetaV2Object) Msgsize() (s int) { + s = 3 + 3 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize)) + 5 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize)) + 7 + msgp.Uint8Size + 4 + msgp.IntSize + 4 + msgp.IntSize + 8 + msgp.Int64Size + 8 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.ErasureDist) * (msgp.Uint8Size)) + 9 + msgp.Uint8Size + 9 + msgp.ArrayHeaderSize + (len(z.PartNumbers) * (msgp.IntSize)) + 10 + msgp.ArrayHeaderSize + for za0005 := range z.PartETags { + s += msgp.StringPrefixSize + len(z.PartETags[za0005]) + } + s += 10 + msgp.ArrayHeaderSize + (len(z.PartSizes) * (msgp.Int64Size)) + 11 + msgp.ArrayHeaderSize + (len(z.PartActualSizes) * (msgp.Int64Size)) + 5 + msgp.Int64Size + 6 + msgp.Int64Size + 8 + msgp.MapHeaderSize + if z.MetaSys != nil { + for za0008, za0009 := range z.MetaSys { + _ = za0009 + s += msgp.StringPrefixSize + len(za0008) + msgp.BytesPrefixSize + len(za0009) + } + } + s += 8 + msgp.MapHeaderSize + if z.MetaUser != nil { + for za0010, za0011 := range z.MetaUser { + _ = za0011 + s += msgp.StringPrefixSize + len(za0010) + msgp.StringPrefixSize + 
len(za0011) + } + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *xlMetaV2Version) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Type": + { + var zb0002 uint8 + zb0002, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = VersionType(zb0002) + } + case "V1Obj": + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "ObjectV1") + return + } + z.ObjectV1 = nil + } else { + if z.ObjectV1 == nil { + z.ObjectV1 = new(xlMetaV1Object) + } + err = z.ObjectV1.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "ObjectV1") + return + } + } + case "V2Obj": + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "ObjectV2") + return + } + z.ObjectV2 = nil + } else { + if z.ObjectV2 == nil { + z.ObjectV2 = new(xlMetaV2Object) + } + err = z.ObjectV2.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "ObjectV2") + return + } + } + case "DelObj": + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + z.DeleteMarker = nil + } else { + if z.DeleteMarker == nil { + z.DeleteMarker = new(xlMetaV2DeleteMarker) + } + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + for zb0003 > 0 { + zb0003-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + switch msgp.UnsafeString(field) { + case "ID": + err = dc.ReadExactBytes((z.DeleteMarker.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker", "VersionID") + return + } + case "MTime": + z.DeleteMarker.ModTime, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "DeleteMarker", "ModTime") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + } + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *xlMetaV2Version) EncodeMsg(en *msgp.Writer) (err error) { + // omitempty: check for empty values + zb0001Len := uint32(4) + var zb0001Mask uint8 /* 4 bits */ + if z.ObjectV1 == nil { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.ObjectV2 == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + if z.DeleteMarker == nil { + zb0001Len-- + zb0001Mask |= 0x8 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + if zb0001Len == 0 { + return + } + // write "Type" + err = en.Append(0xa4, 0x54, 0x79, 0x70, 0x65) + if err != nil { + return + } + err = en.WriteUint8(uint8(z.Type)) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + if (zb0001Mask & 0x2) == 0 { // if not empty + // write "V1Obj" + err = en.Append(0xa5, 0x56, 0x31, 0x4f, 0x62, 0x6a) + if err != nil { + return + } + if z.ObjectV1 == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.ObjectV1.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "ObjectV1") + return + } + } + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // write "V2Obj" + err = en.Append(0xa5, 0x56, 
0x32, 0x4f, 0x62, 0x6a) + if err != nil { + return + } + if z.ObjectV2 == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.ObjectV2.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "ObjectV2") + return + } + } + } + if (zb0001Mask & 0x8) == 0 { // if not empty + // write "DelObj" + err = en.Append(0xa6, 0x44, 0x65, 0x6c, 0x4f, 0x62, 0x6a) + if err != nil { + return + } + if z.DeleteMarker == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + // map header, size 2 + // write "ID" + err = en.Append(0x82, 0xa2, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteBytes((z.DeleteMarker.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker", "VersionID") + return + } + // write "MTime" + err = en.Append(0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.DeleteMarker.ModTime) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker", "ModTime") + return + } + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *xlMetaV2Version) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(4) + var zb0001Mask uint8 /* 4 bits */ + if z.ObjectV1 == nil { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.ObjectV2 == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + if z.DeleteMarker == nil { + zb0001Len-- + zb0001Mask |= 0x8 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len == 0 { + return + } + // string "Type" + o = append(o, 0xa4, 0x54, 0x79, 0x70, 0x65) + o = msgp.AppendUint8(o, uint8(z.Type)) + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "V1Obj" + o = append(o, 0xa5, 0x56, 0x31, 0x4f, 0x62, 0x6a) + if z.ObjectV1 == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ObjectV1.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ObjectV1") + return + } + } + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "V2Obj" + o = append(o, 0xa5, 0x56, 0x32, 0x4f, 0x62, 0x6a) + if z.ObjectV2 == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ObjectV2.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ObjectV2") + return + } + } + } + if (zb0001Mask & 0x8) == 0 { // if not empty + // string "DelObj" + o = append(o, 0xa6, 0x44, 0x65, 0x6c, 0x4f, 0x62, 0x6a) + if z.DeleteMarker == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "ID" + o = append(o, 0x82, 0xa2, 0x49, 0x44) + o = msgp.AppendBytes(o, (z.DeleteMarker.VersionID)[:]) + // string "MTime" + o = append(o, 0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + o = msgp.AppendInt64(o, z.DeleteMarker.ModTime) + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *xlMetaV2Version) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Type": + { + var zb0002 uint8 + zb0002, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = VersionType(zb0002) + } + case "V1Obj": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.ObjectV1 = nil + } else { + if z.ObjectV1 == nil { + z.ObjectV1 = 
new(xlMetaV1Object) + } + bts, err = z.ObjectV1.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "ObjectV1") + return + } + } + case "V2Obj": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.ObjectV2 = nil + } else { + if z.ObjectV2 == nil { + z.ObjectV2 = new(xlMetaV2Object) + } + bts, err = z.ObjectV2.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "ObjectV2") + return + } + } + case "DelObj": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.DeleteMarker = nil + } else { + if z.DeleteMarker == nil { + z.DeleteMarker = new(xlMetaV2DeleteMarker) + } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + switch msgp.UnsafeString(field) { + case "ID": + bts, err = msgp.ReadExactBytes(bts, (z.DeleteMarker.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker", "VersionID") + return + } + case "MTime": + z.DeleteMarker.ModTime, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker", "ModTime") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *xlMetaV2Version) Msgsize() (s int) { + s = 1 + 5 + msgp.Uint8Size + 6 + if z.ObjectV1 == nil { + s += msgp.NilSize + } else { + s += z.ObjectV1.Msgsize() + } + s += 6 + if z.ObjectV2 == nil { + s += msgp.NilSize + } else { + s += z.ObjectV2.Msgsize() + } + s += 7 + if z.DeleteMarker == nil { + s += msgp.NilSize + } else { + s += 1 + 3 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize)) + 6 + msgp.Int64Size + } + return +} diff --git a/cmd/xl-storage-format-v2_gen_test.go b/cmd/xl-storage-format-v2_gen_test.go new file mode 100644 index 000000000..39f03c898 --- /dev/null +++ b/cmd/xl-storage-format-v2_gen_test.go @@ -0,0 +1,462 @@ +package cmd + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "bytes" + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalxlMetaV2(t *testing.T) { + v := xlMetaV2{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgxlMetaV2(b *testing.B) { + v := xlMetaV2{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgxlMetaV2(b *testing.B) { + v := xlMetaV2{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalxlMetaV2(b *testing.B) { + v := xlMetaV2{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodexlMetaV2(t *testing.T) { + v := xlMetaV2{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodexlMetaV2 Msgsize() is inaccurate") + } + + vn := xlMetaV2{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodexlMetaV2(b *testing.B) { + v := xlMetaV2{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodexlMetaV2(b *testing.B) { + v := xlMetaV2{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalxlMetaV2DeleteMarker(t *testing.T) { + v := xlMetaV2DeleteMarker{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgxlMetaV2DeleteMarker(b *testing.B) { + v := xlMetaV2DeleteMarker{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgxlMetaV2DeleteMarker(b *testing.B) { + v := xlMetaV2DeleteMarker{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalxlMetaV2DeleteMarker(b *testing.B) { + v := xlMetaV2DeleteMarker{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := 
v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodexlMetaV2DeleteMarker(t *testing.T) { + v := xlMetaV2DeleteMarker{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodexlMetaV2DeleteMarker Msgsize() is inaccurate") + } + + vn := xlMetaV2DeleteMarker{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodexlMetaV2DeleteMarker(b *testing.B) { + v := xlMetaV2DeleteMarker{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodexlMetaV2DeleteMarker(b *testing.B) { + v := xlMetaV2DeleteMarker{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalxlMetaV2Object(t *testing.T) { + v := xlMetaV2Object{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgxlMetaV2Object(b *testing.B) { + v := xlMetaV2Object{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgxlMetaV2Object(b *testing.B) { + v := xlMetaV2Object{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalxlMetaV2Object(b *testing.B) { + v := xlMetaV2Object{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodexlMetaV2Object(t *testing.T) { + v := xlMetaV2Object{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodexlMetaV2Object Msgsize() is inaccurate") + } + + vn := xlMetaV2Object{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodexlMetaV2Object(b *testing.B) { + v := xlMetaV2Object{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodexlMetaV2Object(b *testing.B) { + v := xlMetaV2Object{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { 
+ b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalxlMetaV2Version(t *testing.T) { + v := xlMetaV2Version{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgxlMetaV2Version(b *testing.B) { + v := xlMetaV2Version{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgxlMetaV2Version(b *testing.B) { + v := xlMetaV2Version{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalxlMetaV2Version(b *testing.B) { + v := xlMetaV2Version{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodexlMetaV2Version(t *testing.T) { + v := xlMetaV2Version{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodexlMetaV2Version Msgsize() is inaccurate") + } + + vn := xlMetaV2Version{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodexlMetaV2Version(b *testing.B) { + v := xlMetaV2Version{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodexlMetaV2Version(b *testing.B) { + v := xlMetaV2Version{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/cmd/xl-v1-utils_test.go b/cmd/xl-storage-format_test.go similarity index 59% rename from cmd/xl-v1-utils_test.go rename to cmd/xl-storage-format_test.go index 7ab5e0cd1..aa612b6d4 100644 --- a/cmd/xl-v1-utils_test.go +++ b/cmd/xl-storage-format_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc. + * MinIO Cloud Storage, (C) 2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,130 +18,60 @@ package cmd import ( "bytes" - "context" "encoding/hex" "encoding/json" - "reflect" "testing" - humanize "github.com/dustin/go-humanize" + "github.com/dustin/go-humanize" + jsoniter "github.com/json-iterator/go" ) -// Tests caclculating disk count. 
-func TestDiskCount(t *testing.T) { - testCases := []struct { - disks []StorageAPI - diskCount int +func TestIsXLMetaFormatValid(t *testing.T) { + tests := []struct { + name int + version string + format string + want bool }{ - // Test case - 1 - { - disks: []StorageAPI{&posix{}, &posix{}, &posix{}, &posix{}}, - diskCount: 4, - }, - // Test case - 2 - { - disks: []StorageAPI{nil, &posix{}, &posix{}, &posix{}}, - diskCount: 3, - }, + {1, "123", "fs", false}, + {2, "123", xlMetaFormat, false}, + {3, xlMetaVersion100, "test", false}, + {4, xlMetaVersion101, "hello", false}, + {5, xlMetaVersion100, xlMetaFormat, true}, + {6, xlMetaVersion101, xlMetaFormat, true}, } - for i, testCase := range testCases { - cdiskCount := diskCount(testCase.disks) - if cdiskCount != testCase.diskCount { - t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.diskCount, cdiskCount) + for _, tt := range tests { + if got := isXLMetaFormatValid(tt.version, tt.format); got != tt.want { + t.Errorf("Test %d: Expected %v but received %v", tt.name, tt.want, got) } } } -// Test for reduceErrs, reduceErr reduces collection -// of errors into a single maximal error with in the list. -func TestReduceErrs(t *testing.T) { - // List all of all test cases to validate various cases of reduce errors. - testCases := []struct { - errs []error - ignoredErrs []error - err error +func TestIsXLMetaErasureInfoValid(t *testing.T) { + tests := []struct { + name int + data int + parity int + want bool }{ - // Validate if have reduced properly. - {[]error{ - errDiskNotFound, - errDiskNotFound, - errDiskFull, - }, []error{}, errXLReadQuorum}, - // Validate if have no consensus. - {[]error{ - errDiskFull, - errDiskNotFound, - nil, nil, - }, []error{}, errXLReadQuorum}, - // Validate if have consensus and errors ignored. - {[]error{ - errVolumeNotFound, - errVolumeNotFound, - errVolumeNotFound, - errVolumeNotFound, - errVolumeNotFound, - errDiskNotFound, - errDiskNotFound, - }, []error{errDiskNotFound}, errVolumeNotFound}, - {[]error{}, []error{}, errXLReadQuorum}, - {[]error{errFileNotFound, errFileNotFound, errFileNotFound, - errFileNotFound, errFileNotFound, nil, nil, nil, nil, nil}, - nil, nil}, + {1, 5, 6, false}, + {2, 5, 5, true}, + {3, 0, 5, false}, + {4, 5, 0, false}, + {5, 5, 0, false}, + {6, 5, 4, true}, } - // Validates list of all the testcases for returning valid errors. - for i, testCase := range testCases { - gotErr := reduceReadQuorumErrs(GlobalContext, testCase.errs, testCase.ignoredErrs, 5) - if gotErr != testCase.err { - t.Errorf("Test %d : expected %s, got %s", i+1, testCase.err, gotErr) - } - gotNewErr := reduceWriteQuorumErrs(GlobalContext, testCase.errs, testCase.ignoredErrs, 6) - if gotNewErr != errXLWriteQuorum { - t.Errorf("Test %d : expected %s, got %s", i+1, errXLWriteQuorum, gotErr) + for _, tt := range tests { + if got := isXLMetaErasureInfoValid(tt.data, tt.parity); got != tt.want { + t.Errorf("Test %d: Expected %v but received %v", tt.name, tt.want, got) } } } -// TestHashOrder - test order of ints in array -func TestHashOrder(t *testing.T) { - testCases := []struct { - objectName string - hashedOrder []int - }{ - // cases which should pass the test. - // passing in valid object name.
- {"object", []int{14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}}, - {"The Shining Script .pdf", []int{16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}, - {"Cost Benefit Analysis (2009-2010).pptx", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}}, - {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}}, - {"SHØRT", []int{11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, - {"There are far too many object names, and far too few bucket names!", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}}, - {"a/b/c/", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}}, - {"/a/b/c", []int{6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5}}, - {string([]byte{0xff, 0xfe, 0xfd}), []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}}, - } - - // Tests hashing order to be consistent. - for i, testCase := range testCases { - hashedOrder := hashOrder(testCase.objectName, 16) - if !reflect.DeepEqual(testCase.hashedOrder, hashedOrder) { - t.Errorf("Test case %d: Expected \"%v\" but failed \"%v\"", i+1, testCase.hashedOrder, hashedOrder) - } - } - - // Tests hashing order to fail for when order is '-1'. - if hashedOrder := hashOrder("This will fail", -1); hashedOrder != nil { - t.Errorf("Test: Expect \"nil\" but failed \"%#v\"", hashedOrder) - } - - if hashedOrder := hashOrder("This will fail", 0); hashedOrder != nil { - t.Errorf("Test: Expect \"nil\" but failed \"%#v\"", hashedOrder) - } -} - -// newTestXLMetaV1 - initializes new xlMetaV1, adds version, allocates a fresh erasure info and metadata. -func newTestXLMetaV1() xlMetaV1 { - xlMeta := xlMetaV1{} - xlMeta.Version = xlMetaVersion +// newTestXLMetaV1 - initializes new xlMetaV1Object, adds version, allocates a fresh erasure info and metadata. +func newTestXLMetaV1() xlMetaV1Object { + xlMeta := xlMetaV1Object{} + xlMeta.Version = xlMetaVersion101 xlMeta.Format = xlMetaFormat xlMeta.Minio.Release = "test" xlMeta.Erasure = ErasureInfo{ @@ -152,7 +82,7 @@ func newTestXLMetaV1() xlMetaV1 { Index: 10, Distribution: []int{9, 10, 1, 2, 3, 4, 5, 6, 7, 8}, } - xlMeta.Stat = statInfo{ + xlMeta.Stat = StatInfo{ Size: int64(20), ModTime: UTCNow(), } @@ -163,7 +93,7 @@ func newTestXLMetaV1() xlMetaV1 { return xlMeta } -func (m *xlMetaV1) AddTestObjectCheckSum(partNumber int, algorithm BitrotAlgorithm, hash string) { +func (m *xlMetaV1Object) AddTestObjectCheckSum(partNumber int, algorithm BitrotAlgorithm, hash string) { checksum, err := hex.DecodeString(hash) if err != nil { panic(err) @@ -172,7 +102,7 @@ func (m *xlMetaV1) AddTestObjectCheckSum(partNumber int, algorithm BitrotAlgorit } // AddTestObjectPart - add a new object part in order. -func (m *xlMetaV1) AddTestObjectPart(partNumber int, partSize int64) { +func (m *xlMetaV1Object) AddTestObjectPart(partNumber int, partSize int64) { partInfo := ObjectPartInfo{ Number: partNumber, Size: partSize, @@ -182,7 +112,7 @@ func (m *xlMetaV1) AddTestObjectPart(partNumber int, partSize int64) { m.Parts[partNumber-1] = partInfo } -// Constructs xlMetaV1{} for given number of parts and converts it into bytes. +// Constructs xlMetaV1Object{} for given number of parts and converts it into bytes. func getXLMetaBytes(totalParts int) []byte { xlSampleMeta := getSampleXLMeta(totalParts) xlMetaBytes, err := json.Marshal(xlSampleMeta) @@ -192,15 +122,15 @@ func getXLMetaBytes(totalParts int) []byte { return xlMetaBytes } -// Returns sample xlMetaV1{} for number of parts. 
-func getSampleXLMeta(totalParts int) xlMetaV1 { +// Returns sample xlMetaV1Object{} for number of parts. +func getSampleXLMeta(totalParts int) xlMetaV1Object { xlMeta := newTestXLMetaV1() // Number of checksum info == total parts. xlMeta.Erasure.Checksums = make([]ChecksumInfo, totalParts) // total number of parts. xlMeta.Parts = make([]ObjectPartInfo, totalParts) for i := 0; i < totalParts; i++ { - // hard coding hash and algo value for the checksum, Since we are benchmarking the parsing of xl.json the magnitude doesn't affect the test, + // hard coding hash and algo value for the checksum, Since we are benchmarking the parsing of xl.meta the magnitude doesn't affect the test, // The magnitude doesn't make a difference, only the size does. xlMeta.AddTestObjectCheckSum(i+1, BLAKE2b512, "a23f5eff248c4372badd9f3b2455a285cd4ca86c3d9a570b091d3fc5cd7ca6d9484bbea3f8c5d8d4f84daae96874419eda578fd736455334afbac2c924b3915a") xlMeta.AddTestObjectPart(i+1, 67108864) @@ -209,8 +139,8 @@ func getSampleXLMeta(totalParts int) xlMetaV1 { } // Compare the unmarshaled XLMetaV1 with the one obtained from jsoniter parsing. -func compareXLMetaV1(t *testing.T, unMarshalXLMeta, jsoniterXLMeta xlMetaV1) { - // Start comparing the fields of xlMetaV1 obtained from jsoniter parsing with one parsed using json unmarshaling. +func compareXLMetaV1(t *testing.T, unMarshalXLMeta, jsoniterXLMeta xlMetaV1Object) { + // Start comparing the fields of xlMetaV1Object obtained from jsoniter parsing with one parsed using json unmarshaling. if unMarshalXLMeta.Version != jsoniterXLMeta.Version { t.Errorf("Expected the Version to be \"%s\", but got \"%s\".", unMarshalXLMeta.Version, jsoniterXLMeta.Version) } @@ -297,13 +227,14 @@ func compareXLMetaV1(t *testing.T, unMarshalXLMeta, jsoniterXLMeta xlMetaV1) { func TestGetXLMetaV1Jsoniter1(t *testing.T) { xlMetaJSON := getXLMetaBytes(1) - var unMarshalXLMeta xlMetaV1 + var unMarshalXLMeta xlMetaV1Object if err := json.Unmarshal(xlMetaJSON, &unMarshalXLMeta); err != nil { t.Errorf("Unmarshalling failed: %v", err) } - jsoniterXLMeta, err := xlMetaV1UnmarshalJSON(GlobalContext, xlMetaJSON) - if err != nil { + var jsoniterXLMeta xlMetaV1Object + var json = jsoniter.ConfigCompatibleWithStandardLibrary + if err := json.Unmarshal(xlMetaJSON, &jsoniterXLMeta); err != nil { t.Errorf("jsoniter parsing of XLMeta failed: %v", err) } compareXLMetaV1(t, unMarshalXLMeta, jsoniterXLMeta) @@ -315,14 +246,17 @@ func TestGetXLMetaV1Jsoniter10(t *testing.T) { xlMetaJSON := getXLMetaBytes(10) - var unMarshalXLMeta xlMetaV1 + var unMarshalXLMeta xlMetaV1Object if err := json.Unmarshal(xlMetaJSON, &unMarshalXLMeta); err != nil { t.Errorf("Unmarshalling failed: %v", err) } - jsoniterXLMeta, err := xlMetaV1UnmarshalJSON(GlobalContext, xlMetaJSON) - if err != nil { + + var jsoniterXLMeta xlMetaV1Object + var json = jsoniter.ConfigCompatibleWithStandardLibrary + if err := json.Unmarshal(xlMetaJSON, &jsoniterXLMeta); err != nil { t.Errorf("jsoniter parsing of XLMeta failed: %v", err) } + compareXLMetaV1(t, unMarshalXLMeta, jsoniterXLMeta) } @@ -382,70 +316,3 @@ func TestGetPartSizeFromIdx(t *testing.T) { } } } - -func TestShuffleDisks(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - nDisks := 16 - disks, err := getRandomDisks(nDisks) - if err != nil { - t.Fatal(err) - } - objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...)) - if err != nil { - removeRoots(disks) - t.Fatal(err) - } - defer removeRoots(disks) - z := objLayer.(*xlZones) - 
testShuffleDisks(t, z) -} - -// Test shuffleDisks which returns shuffled slice of disks for their actual distribution. -func testShuffleDisks(t *testing.T, z *xlZones) { - disks := z.zones[0].GetDisks(0)() - distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15} - shuffledDisks := shuffleDisks(disks, distribution) - // From the "distribution" above you can notice that: - // 1st data block is in the 9th disk (i.e distribution index 8) - // 2nd data block is in the 8th disk (i.e distribution index 7) and so on. - if shuffledDisks[0] != disks[8] || - shuffledDisks[1] != disks[7] || - shuffledDisks[2] != disks[9] || - shuffledDisks[3] != disks[6] || - shuffledDisks[4] != disks[10] || - shuffledDisks[5] != disks[5] || - shuffledDisks[6] != disks[11] || - shuffledDisks[7] != disks[4] || - shuffledDisks[8] != disks[12] || - shuffledDisks[9] != disks[3] || - shuffledDisks[10] != disks[13] || - shuffledDisks[11] != disks[2] || - shuffledDisks[12] != disks[14] || - shuffledDisks[13] != disks[1] || - shuffledDisks[14] != disks[15] || - shuffledDisks[15] != disks[0] { - t.Errorf("shuffleDisks returned incorrect order.") - } -} - -// TestEvalDisks tests the behavior of evalDisks -func TestEvalDisks(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - nDisks := 16 - disks, err := getRandomDisks(nDisks) - if err != nil { - t.Fatal(err) - } - objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...)) - if err != nil { - removeRoots(disks) - t.Fatal(err) - } - defer removeRoots(disks) - z := objLayer.(*xlZones) - testShuffleDisks(t, z) -} diff --git a/cmd/posix.go b/cmd/xl-storage.go similarity index 64% rename from cmd/posix.go rename to cmd/xl-storage.go index cd96bdd8a..42fe7c9a7 100644 --- a/cmd/posix.go +++ b/cmd/xl-storage.go @@ -23,6 +23,7 @@ import ( "crypto/rand" "encoding/hex" "errors" + "fmt" "io" "io/ioutil" "os" @@ -46,6 +47,7 @@ import ( ) const ( + nullVersionID = "null" diskMinFreeSpace = 900 * humanize.MiByte // Min 900MiB free space. diskMinTotalSpace = diskMinFreeSpace // Min 900MiB total space. readBlockSize = 4 * humanize.MiByte // Default read block size 4MiB. @@ -60,6 +62,9 @@ const ( // Wait interval to check if active IO count is low // to proceed crawling to compute data usage lowActiveIOWaitTick = 100 * time.Millisecond + + // XL metadata file carries per object metadata. + xlStorageFormatFile = "xl.meta" ) // isValidVolname verifies a volname name in accordance with object @@ -77,8 +82,8 @@ func isValidVolname(volname string) bool { return true } -// posix - implements StorageAPI interface. -type posix struct { +// xlStorage - implements StorageAPI interface. +type xlStorage struct { // Disk usage metrics totalUsed uint64 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG @@ -96,9 +101,8 @@ type posix struct { formatFileInfo os.FileInfo formatLastCheck time.Time - // Disk usage metrics - stopUsageCh chan struct{} + ctx context.Context sync.RWMutex } @@ -165,7 +169,7 @@ func getValidPath(path string, requireDirectIO bool) (string, error) { } } if fi != nil && !fi.IsDir() { - return path, syscall.ENOTDIR + return path, errDiskNotDir } di, err := getDiskInfo(path) @@ -206,7 +210,7 @@ func getValidPath(path string, requireDirectIO bool) (string, error) { // isDirEmpty - returns whether given directory is empty or not. 
func isDirEmpty(dirname string) bool { - f, err := os.Open((dirname)) + f, err := os.Open(dirname) if err != nil { if !os.IsNotExist(err) { logger.LogIf(GlobalContext, err) @@ -216,12 +220,10 @@ func isDirEmpty(dirname string) bool { } defer f.Close() // List one entry. - _, err = f.Readdirnames(1) - if err != io.EOF { + if _, err = f.Readdirnames(1); err != io.EOF { if !os.IsNotExist(err) { logger.LogIf(GlobalContext, err) } - return false } // Returns true if we have reached EOF, directory is indeed empty. @@ -229,16 +231,13 @@ func isDirEmpty(dirname string) bool { } // Initialize a new storage disk. -func newPosix(path string, hostname string) (*posix, error) { +func newXLStorage(path string, hostname string) (*xlStorage, error) { var err error if path, err = getValidPath(path, true); err != nil { return nil, err } - _, err = os.Stat(path) - if err != nil { - return nil, err - } - p := &posix{ + + p := &xlStorage{ diskPath: path, hostname: hostname, pool: sync.Pool{ @@ -247,13 +246,13 @@ func newPosix(path string, hostname string) (*posix, error) { return &b }, }, - stopUsageCh: make(chan struct{}), - diskMount: mountinfo.IsLikelyMountPoint(path), + diskMount: mountinfo.IsLikelyMountPoint(path), // Allow disk usage crawler to run with up to 2 concurrent // I/O ops, if and when activeIOCount reaches this // value disk usage routine suspends the crawler // and waits until activeIOCount reaches below this threshold. maxActiveIOCount: 3, + ctx: GlobalContext, } // Success. @@ -319,7 +318,7 @@ func checkDiskFree(diskPath string, neededSpace int64) (err error) { } var di disk.Info - di, err = getDiskInfo((diskPath)) + di, err = getDiskInfo(diskPath) if err != nil { return err } @@ -337,34 +336,33 @@ func checkDiskFree(diskPath string, neededSpace int64) (err error) { } // Implements stringer compatible interface. -func (s *posix) String() string { +func (s *xlStorage) String() string { return s.diskPath } -func (s *posix) Hostname() string { +func (s *xlStorage) Hostname() string { return s.hostname } -func (s *posix) Close() error { - close(s.stopUsageCh) +func (*xlStorage) Close() error { return nil } -func (s *posix) IsOnline() bool { +func (s *xlStorage) IsOnline() bool { return true } -func (s *posix) IsLocal() bool { +func (s *xlStorage) IsLocal() bool { return true } -func (s *posix) waitForLowActiveIO() { +func (s *xlStorage) waitForLowActiveIO() { for atomic.LoadInt32(&s.activeIOCount) >= s.maxActiveIOCount { time.Sleep(lowActiveIOWaitTick) } } -func (s *posix) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) (dataUsageCache, error) { +func (s *xlStorage) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) (dataUsageCache, error) { // Check if the current bucket has a configured lifecycle policy lc, err := globalLifecycleSys.Get(cache.Info.Name) if err == nil && lc.HasActiveRules("", true) { @@ -374,21 +372,18 @@ func (s *posix) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) // Get object api objAPI := newObjectLayerWithoutSafeModeFn() if objAPI == nil { - return cache, errors.New("object layer not initialized") + return cache, errServerNotInitialized } + dataUsageInfo, err := crawlDataFolder(ctx, s.diskPath, cache, s.waitForLowActiveIO, func(item crawlItem) (int64, error) { - // Look for `xl.json' at the leaf. - if !strings.HasSuffix(item.Path, SlashSeparator+xlMetaJSONFile) { - // if no xl.json found, skip the file. + // Look for `xl.meta/xl.json' at the leaf. 
+ if !strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFile) && + !strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFileV1) { + // if no xl.meta/xl.json found, skip the file. return 0, errSkipFile } - xlMetaBuf, err := ioutil.ReadFile(item.Path) - if err != nil { - return 0, errSkipFile - } - - meta, err := xlMetaV1UnmarshalJSON(ctx, xlMetaBuf) + buf, err := ioutil.ReadFile(item.Path) if err != nil { return 0, errSkipFile } @@ -396,11 +391,25 @@ func (s *posix) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) // Remove filename which is the meta file. item.transformMetaDir() - return item.applyActions(ctx, objAPI, - actionMeta{oi: meta.ToObjectInfo(item.bucket, item.objectPath()), - meta: meta.Meta, - }), nil + fivs, err := getFileInfoVersions(buf, item.bucket, item.objectPath()) + if err != nil { + return 0, errSkipFile + } + + var totalSize int64 + for _, version := range fivs.Versions { + size := item.applyActions(ctx, objAPI, actionMeta{oi: version.ToObjectInfo(item.bucket, item.objectPath())}) + totalSize += size + } + + // Delete markers have no size, nothing to do here. + for _, deleted := range fivs.Deleted { + item.applyActions(ctx, objAPI, actionMeta{oi: deleted.ToObjectInfo(item.bucket, item.objectPath())}) + } + + return totalSize, nil }) + if err != nil { return dataUsageInfo, err } @@ -411,6 +420,7 @@ func (s *posix) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) total = &dataUsageEntry{} } atomic.StoreUint64(&s.totalUsed, uint64(total.Size)) + return dataUsageInfo, nil } @@ -427,7 +437,7 @@ type DiskInfo struct { // DiskInfo provides current information about disk space usage, // total free inodes and underlying filesystem. -func (s *posix) DiskInfo() (info DiskInfo, err error) { +func (s *xlStorage) DiskInfo() (info DiskInfo, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -461,7 +471,7 @@ func (s *posix) DiskInfo() (info DiskInfo, err error) { // corresponding valid volume names on the backend in a platform // compatible way for all operating systems. If volume is not found // an error is generated. -func (s *posix) getVolDir(volume string) (string, error) { +func (s *xlStorage) getVolDir(volume string) (string, error) { if volume == "" || volume == "." || volume == ".." { return "", errVolumeNotFound } @@ -470,7 +480,7 @@ func (s *posix) getVolDir(volume string) (string, error) { } // GetDiskID - returns the cached disk uuid -func (s *posix) GetDiskID() (string, error) { +func (s *xlStorage) GetDiskID() (string, error) { s.RLock() diskID := s.diskID fileInfo := s.formatFileInfo @@ -510,24 +520,24 @@ func (s *posix) GetDiskID() (string, error) { if err != nil { return "", errCorruptedFormat } - format := &formatXLV3{} + format := &formatErasureV3{} var json = jsoniter.ConfigCompatibleWithStandardLibrary if err = json.Unmarshal(b, &format); err != nil { return "", errCorruptedFormat } - s.diskID = format.XL.This + s.diskID = format.Erasure.This s.formatFileInfo = fi s.formatLastCheck = time.Now() return s.diskID, nil } // Make a volume entry. -func (s *posix) SetDiskID(id string) { - // NO-OP for posix as it is handled either by posixDiskIDCheck{} for local disks or +func (s *xlStorage) SetDiskID(id string) { + // NO-OP for xlStorage as it is handled either by xlStorageDiskIDCheck{} for local disks or // storage rest server for remote disks. 
} -func (s *posix) MakeVolBulk(volumes ...string) (err error) { +func (s *xlStorage) MakeVolBulk(volumes ...string) (err error) { for _, volume := range volumes { if err = s.MakeVol(volume); err != nil { if os.IsPermission(err) { @@ -539,7 +549,7 @@ } // Make a volume entry. -func (s *posix) MakeVol(volume string) (err error) { +func (s *xlStorage) MakeVol(volume string) (err error) { if !isValidVolname(volume) { return errInvalidArgument } @@ -573,7 +583,7 @@ } // ListVols - list volumes. -func (s *posix) ListVols() (volsInfo []VolInfo, err error) { +func (s *xlStorage) ListVols() (volsInfo []VolInfo, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -633,7 +643,7 @@ func listVols(dirPath string) ([]VolInfo, error) { } // StatVol - get volume info. -func (s *posix) StatVol(volume string) (volInfo VolInfo, err error) { +func (s *xlStorage) StatVol(volume string) (volInfo VolInfo, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -665,7 +675,7 @@ func (s *posix) StatVol(volume string) (volInfo VolInfo, err error) { } // DeleteVol - delete a volume. -func (s *posix) DeleteVol(volume string, forceDelete bool) (err error) { +func (s *xlStorage) DeleteVol(volume string, forceDelete bool) (err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -704,7 +714,7 @@ const guidSplunk = "guidSplunk" // ListDirSplunk - return all the entries at the given directory path. // If an entry is a directory it will be returned with a trailing SlashSeparator. -func (s *posix) ListDirSplunk(volume, dirPath string, count int) (entries []string, err error) { +func (s *xlStorage) ListDirSplunk(volume, dirPath string, count int) (entries []string, err error) { guidIndex := strings.Index(dirPath, guidSplunk) if guidIndex != -1 { return nil, nil @@ -746,8 +756,16 @@ func (s *posix) ListDirSplunk(volume, dirPath string, count int) (entries []stri if entry != receiptJSON { continue } - if _, serr := os.Stat(pathJoin(dirPath, entry, xlMetaJSONFile)); serr == nil { + _, err = os.Stat(pathJoin(dirPath, entry, xlStorageFormatFile)) + if err == nil { entries[i] = strings.TrimSuffix(entry, SlashSeparator) + continue + } + if os.IsNotExist(err) { + if err = s.renameLegacyMetadata(volume, entry); err == nil { + // Rename was successful, which means we found an old `xl.json` + entries[i] = strings.TrimSuffix(entry, SlashSeparator) + } } } @@ -758,7 +776,7 @@ func (s *posix) ListDirSplunk(volume, dirPath string, count int) (entries []stri // sorted order, additionally along with metadata about each of those entries. // Implemented specifically for Splunk backend structure and List call with // delimiter as "guidSplunk" -func (s *posix) WalkSplunk(volume, dirPath, marker string, endWalkCh <-chan struct{}) (ch chan FileInfo, err error) { +func (s *xlStorage) WalkSplunk(volume, dirPath, marker string, endWalkCh <-chan struct{}) (ch chan FileInfo, err error) { // Verify if volume is valid and it exists.
volumeDir, err := s.getVolDir(volume) if err != nil { @@ -805,11 +823,20 @@ func (s *posix) WalkSplunk(volume, dirPath, marker string, endWalkCh <-chan stru Mode: os.ModeDir, } } else { - xlMetaBuf, err := ioutil.ReadFile(pathJoin(volumeDir, walkResult.entry, xlMetaJSONFile)) + var err error + var xlMetaBuf []byte + xlMetaBuf, err = ioutil.ReadFile(pathJoin(volumeDir, walkResult.entry, xlStorageFormatFile)) if err != nil { continue } - fi = readMetadata(xlMetaBuf, volume, walkResult.entry) + fi, err = getFileInfo(xlMetaBuf, volume, walkResult.entry, "") + if err != nil { + continue + } + if fi.Deleted { + // Ignore delete markers. + continue + } } select { case ch <- fi: @@ -822,11 +849,89 @@ func (s *posix) WalkSplunk(volume, dirPath, marker string, endWalkCh <-chan stru return ch, nil } +// WalkVersions - is a sorted walker which returns file entries in lexically sorted order, +// additionally along with metadata version info about each of those entries. +func (s *xlStorage) WalkVersions(volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (ch chan FileInfoVersions, err error) { + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + // Verify if volume is valid and it exists. + volumeDir, err := s.getVolDir(volume) + if err != nil { + return nil, err + } + + // Stat a volume entry. + _, err = os.Stat(volumeDir) + if err != nil { + if os.IsNotExist(err) { + return nil, errVolumeNotFound + } else if isSysErrIO(err) { + return nil, errFaultyDisk + } + return nil, err + } + + // buffer channel matches the S3 ListObjects implementation + ch = make(chan FileInfoVersions, maxObjectList) + go func() { + defer close(ch) + listDir := func(volume, dirPath, dirEntry string) (emptyDir bool, entries []string) { + entries, err := s.ListDir(volume, dirPath, -1) + if err != nil { + return false, nil + } + if len(entries) == 0 { + return true, nil + } + sort.Strings(entries) + return false, filterMatchingPrefix(entries, dirEntry) + } + + walkResultCh := startTreeWalk(GlobalContext, volume, dirPath, marker, recursive, listDir, endWalkCh) + for { + walkResult, ok := <-walkResultCh + if !ok { + return + } + var fiv FileInfoVersions + if HasSuffix(walkResult.entry, SlashSeparator) { + fiv = FileInfoVersions{ + Versions: []FileInfo{ + { + Volume: volume, + Name: walkResult.entry, + Mode: os.ModeDir, + }, + }, + } + } else { + xlMetaBuf, err := ioutil.ReadFile(pathJoin(volumeDir, walkResult.entry, xlStorageFormatFile)) + if err != nil { + continue + } + + fiv, err = getFileInfoVersions(xlMetaBuf, volume, walkResult.entry) + if err != nil { + continue + } + } + select { + case ch <- fiv: + case <-endWalkCh: + return + } + } + }() + + return ch, nil +} + // Walk - is a sorted walker which returns file entries in lexically // sorted order, additionally along with metadata about each of those entries. 
-func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile string, - readMetadataFn readMetadataFunc, endWalkCh <-chan struct{}) (ch chan FileInfo, err error) { - +func (s *xlStorage) Walk(volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (ch chan FileInfo, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -853,8 +958,8 @@ func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile st ch = make(chan FileInfo, maxObjectList) go func() { defer close(ch) - listDir := func(volume, dirPath, dirEntry string) (bool, []string) { - entries, err := s.ListDir(volume, dirPath, -1, leafFile) + listDir := func(volume, dirPath, dirEntry string) (emptyDir bool, entries []string) { + entries, err := s.ListDir(volume, dirPath, -1) if err != nil { return false, nil } @@ -879,11 +984,20 @@ func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile st Mode: os.ModeDir, } } else { - xlMetaBuf, err := ioutil.ReadFile(pathJoin(volumeDir, walkResult.entry, leafFile)) + var err error + var xlMetaBuf []byte + xlMetaBuf, err = ioutil.ReadFile(pathJoin(volumeDir, walkResult.entry, xlStorageFormatFile)) if err != nil { continue } - fi = readMetadataFn(xlMetaBuf, volume, walkResult.entry) + fi, err = getFileInfo(xlMetaBuf, volume, walkResult.entry, "") + if err != nil { + continue + } + if fi.Deleted { + // Ignore delete markers. + continue + } } select { case ch <- fi: @@ -898,7 +1012,7 @@ func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile st // ListDir - return all the entries at the given directory path. // If an entry is a directory it will be returned with a trailing SlashSeparator. -func (s *posix) ListDir(volume, dirPath string, count int, leafFile string) (entries []string, err error) { +func (s *xlStorage) ListDir(volume, dirPath string, count int) (entries []string, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -929,16 +1043,241 @@ func (s *posix) ListDir(volume, dirPath string, count int, leafFile string) (ent return nil, err } - // If leaf file is specified, filter out the entries. - if leafFile != "" { - for i, entry := range entries { - if _, serr := os.Stat(pathJoin(dirPath, entry, leafFile)); serr == nil { + for i, entry := range entries { + _, err = os.Stat(pathJoin(dirPath, entry, xlStorageFormatFile)) + if err == nil { + entries[i] = strings.TrimSuffix(entry, SlashSeparator) + continue + } + if os.IsNotExist(err) { + if err = s.renameLegacyMetadata(volume, entry); err == nil { + // if rename was successful, it means we found an old `xl.json` entries[i] = strings.TrimSuffix(entry, SlashSeparator) + continue } } } - return entries, err + return entries, nil +} + +// DeleteVersions deletes a slice of versions; they may belong to the same +// object or to multiple objects.
+func (s *xlStorage) DeleteVersions(volume string, versions []FileInfo) []error { + errs := make([]error, len(versions)) + for i, version := range versions { + if err := s.DeleteVersion(volume, version.Name, version); err != nil { + errs[i] = err + } + } + + return errs +} + +// DeleteVersion - deletes FileInfo metadata for path at `xl.meta` +func (s *xlStorage) DeleteVersion(volume, path string, fi FileInfo) error { + if HasSuffix(path, SlashSeparator) { + return s.DeleteFile(volume, path) + } + + buf, err := s.ReadAll(volume, pathJoin(path, xlStorageFormatFile)) + if err != nil { + return err + } + + if len(buf) == 0 { + return errFileNotFound + } + + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + volumeDir, err := s.getVolDir(volume) + if err != nil { + return err + } + + if !isXL2V1Format(buf) { + // Delete the meta file, if there are no more versions the + // top level parent is automatically removed. + filePath := pathJoin(volumeDir, path, xlStorageFormatFile) + if err = checkPathLength(filePath); err != nil { + return err + } + + return deleteFile(volumeDir, filePath, false) + } + + var xlMeta xlMetaV2 + if err = xlMeta.Load(buf); err != nil { + return err + } + + if fi.Deleted { + if err = xlMeta.AddVersion(fi); err != nil { + return err + } + buf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...)) + if err != nil { + return err + } + return s.WriteAll(volume, pathJoin(path, xlStorageFormatFile), bytes.NewReader(buf)) + } + + dataDir, lastVersion, err := xlMeta.DeleteVersion(fi) + if err != nil { + return err + } + + buf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...)) + if err != nil { + return err + } + + // when data-dir is specified. + if dataDir != "" { + filePath := pathJoin(volumeDir, path, dataDir) + if err = checkPathLength(filePath); err != nil { + return err + } + + if err = removeAll(filePath); err != nil { + return err + } + } + + if !lastVersion { + return s.WriteAll(volume, pathJoin(path, xlStorageFormatFile), bytes.NewReader(buf)) + } + + // Delete the meta file, if there are no more versions the + // top level parent is automatically removed. 
+ filePath := pathJoin(volumeDir, path, xlStorageFormatFile) + if err = checkPathLength(filePath); err != nil { + return err + } + + return deleteFile(volumeDir, filePath, false) +} + +// WriteMetadata - writes FileInfo metadata for path at `xl.meta` +func (s *xlStorage) WriteMetadata(volume, path string, fi FileInfo) error { + buf, err := s.ReadAll(volume, pathJoin(path, xlStorageFormatFile)) + if err != nil && err != errFileNotFound { + return err + } + + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + var xlMeta xlMetaV2 + if !isXL2V1Format(buf) { + xlMeta, err = newXLMetaV2(fi) + if err != nil { + return err + } + buf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...)) + if err != nil { + return err + } + } else { + if err = xlMeta.Load(buf); err != nil { + return err + } + if err = xlMeta.AddVersion(fi); err != nil { + return err + } + buf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...)) + if err != nil { + return err + } + } + + return s.WriteAll(volume, pathJoin(path, xlStorageFormatFile), bytes.NewReader(buf)) +} + +func (s *xlStorage) renameLegacyMetadata(volume, path string) error { + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + volumeDir, err := s.getVolDir(volume) + if err != nil { + return err + } + + // Stat a volume entry. + _, err = os.Stat(volumeDir) + if err != nil { + if os.IsNotExist(err) { + return errVolumeNotFound + } else if isSysErrIO(err) { + return errFaultyDisk + } else if isSysErrTooManyFiles(err) { + return errTooManyOpenFiles + } + return err + } + + // Validate file path length, before reading. + filePath := pathJoin(volumeDir, path) + if err = checkPathLength(filePath); err != nil { + return err + } + + srcFilePath := pathJoin(filePath, xlStorageFormatFileV1) + dstFilePath := pathJoin(filePath, xlStorageFormatFile) + if err = os.Rename(srcFilePath, dstFilePath); err != nil { + switch { + case isSysErrNotDir(err): + return errFileNotFound + case isSysErrPathNotFound(err): + return errFileNotFound + case isSysErrCrossDevice(err): + return fmt.Errorf("%w (%s)->(%s)", errCrossDeviceLink, srcFilePath, dstFilePath) + case os.IsNotExist(err): + return errFileNotFound + case os.IsExist(err): + // This is returned only when destination is a directory and we + // are attempting a rename from file to directory. + return errIsNotRegular + default: + return err + } + } + return nil +} + +// ReadVersion - reads metadata and returns FileInfo at path `xl.meta` +func (s *xlStorage) ReadVersion(volume, path, versionID string) (fi FileInfo, err error) { + buf, err := s.ReadAll(volume, pathJoin(path, xlStorageFormatFile)) + if err != nil { + if err == errFileNotFound { + if err = s.renameLegacyMetadata(volume, path); err != nil { + return fi, err + } + buf, err = s.ReadAll(volume, pathJoin(path, xlStorageFormatFile)) + if err != nil { + return fi, err + } + } else { + return fi, err + } + } + + if len(buf) == 0 { + if versionID != "" { + return fi, errFileVersionNotFound + } + return fi, errFileNotFound + } + + return getFileInfo(buf, volume, path, versionID) } // ReadAll reads from r until an error or EOF and returns the data it read. @@ -947,7 +1286,7 @@ func (s *posix) ListDir(volume, dirPath string, count int, leafFile string) (ent // as an error to be reported. // This API is meant to be used on files which have small memory footprint, do // not use this on large files as it would cause server to crash.
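ReadVersion above is the entry point callers use to resolve a single version from `xl.meta`: it reads the file through ReadAll, transparently migrates a legacy `xl.json` via renameLegacyMetadata on first access, and then delegates to getFileInfo. A hypothetical caller might look like the sketch below (the bucket and object names are illustrative, and it assumes, as the rest of this patch suggests, that an empty version ID resolves to the latest version):

// Sketch only: resolve the latest version of an object's metadata.
func statLatest(s *xlStorage, bucket, object string) (FileInfo, error) {
	// "" selects the latest version; pass an explicit version ID
	// (or nullVersionID for unversioned objects) to pin one.
	return s.ReadVersion(bucket, object, "")
}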
-func (s *posix) ReadAll(volume, path string) (buf []byte, err error) { +func (s *xlStorage) ReadAll(volume, path string) (buf []byte, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -957,8 +1296,9 @@ func (s *posix) ReadAll(volume, path string) (buf []byte, err error) { if err != nil { return nil, err } + // Stat a volume entry. - _, err = os.Stat((volumeDir)) + _, err = os.Stat(volumeDir) if err != nil { if os.IsNotExist(err) { return nil, errVolumeNotFound @@ -972,12 +1312,12 @@ func (s *posix) ReadAll(volume, path string) (buf []byte, err error) { // Validate file path length, before reading. filePath := pathJoin(volumeDir, path) - if err = checkPathLength((filePath)); err != nil { + if err = checkPathLength(filePath); err != nil { return nil, err } // Open the file for reading. - buf, err = ioutil.ReadFile((filePath)) + buf, err = ioutil.ReadFile(filePath) if err != nil { if os.IsNotExist(err) { return nil, errFileNotFound @@ -1009,7 +1349,7 @@ func (s *posix) ReadAll(volume, path string) (buf []byte, err error) { // // Additionally ReadFile also starts reading from an offset. ReadFile // semantics are same as io.ReadFull. -func (s *posix) ReadFile(volume, path string, offset int64, buffer []byte, verifier *BitrotVerifier) (int64, error) { +func (s *xlStorage) ReadFile(volume, path string, offset int64, buffer []byte, verifier *BitrotVerifier) (int64, error) { if offset < 0 { return 0, errInvalidArgument } @@ -1027,7 +1367,7 @@ func (s *posix) ReadFile(volume, path string, offset int64, buffer []byte, verif var n int // Stat a volume entry. - _, err = os.Stat((volumeDir)) + _, err = os.Stat(volumeDir) if err != nil { if os.IsNotExist(err) { return 0, errVolumeNotFound @@ -1039,12 +1379,12 @@ func (s *posix) ReadFile(volume, path string, offset int64, buffer []byte, verif // Validate effective path length before reading. filePath := pathJoin(volumeDir, path) - if err = checkPathLength((filePath)); err != nil { + if err = checkPathLength(filePath); err != nil { return 0, err } // Open the file for reading. - file, err := os.Open((filePath)) + file, err := os.Open(filePath) if err != nil { switch { case os.IsNotExist(err): @@ -1108,13 +1448,13 @@ func (s *posix) ReadFile(volume, path string, offset int64, buffer []byte, verif return int64(len(buffer)), nil } -func (s *posix) openFile(volume, path string, mode int) (f *os.File, err error) { +func (s *xlStorage) openFile(volume, path string, mode int) (f *os.File, err error) { volumeDir, err := s.getVolDir(volume) if err != nil { return nil, err } // Stat a volume entry. - _, err = os.Stat((volumeDir)) + _, err = os.Stat(volumeDir) if err != nil { if os.IsNotExist(err) { return nil, errVolumeNotFound @@ -1125,7 +1465,7 @@ func (s *posix) openFile(volume, path string, mode int) (f *os.File, err error) } filePath := pathJoin(volumeDir, path) - if err = checkPathLength((filePath)); err != nil { + if err = checkPathLength(filePath); err != nil { return nil, err } @@ -1164,7 +1504,7 @@ func (s *posix) openFile(volume, path string, mode int) (f *os.File, err error) } // ReadFileStream - Returns the read stream of the file. 
-func (s *posix) ReadFileStream(volume, path string, offset, length int64) (io.ReadCloser, error) { +func (s *xlStorage) ReadFileStream(volume, path string, offset, length int64) (io.ReadCloser, error) { if offset < 0 { return nil, errInvalidArgument } @@ -1174,7 +1514,7 @@ func (s *posix) ReadFileStream(volume, path string, offset, length int64) (io.Re return nil, err } // Stat a volume entry. - _, err = os.Stat((volumeDir)) + _, err = os.Stat(volumeDir) if err != nil { if os.IsNotExist(err) { return nil, errVolumeNotFound @@ -1186,12 +1526,12 @@ func (s *posix) ReadFileStream(volume, path string, offset, length int64) (io.Re // Validate effective path length before reading. filePath := pathJoin(volumeDir, path) - if err = checkPathLength((filePath)); err != nil { + if err = checkPathLength(filePath); err != nil { return nil, err } // Open the file for reading. - file, err := os.Open((filePath)) + file, err := os.Open(filePath) if err != nil { switch { case os.IsNotExist(err): @@ -1257,7 +1597,7 @@ func (c closeWrapper) Close() error { } // CreateFile - creates the file. -func (s *posix) CreateFile(volume, path string, fileSize int64, r io.Reader) (err error) { +func (s *xlStorage) CreateFile(volume, path string, fileSize int64, r io.Reader) (err error) { if fileSize < -1 { return errInvalidArgument } @@ -1280,7 +1620,7 @@ func (s *posix) CreateFile(volume, path string, fileSize int64, r io.Reader) (er return err } // Stat a volume entry. - _, err = os.Stat((volumeDir)) + _, err = os.Stat(volumeDir) if err != nil { if os.IsNotExist(err) { return errVolumeNotFound @@ -1291,7 +1631,7 @@ func (s *posix) CreateFile(volume, path string, fileSize int64, r io.Reader) (er } filePath := pathJoin(volumeDir, path) - if err = checkPathLength((filePath)); err != nil { + if err = checkPathLength(filePath); err != nil { return err } @@ -1360,15 +1700,13 @@ func (s *posix) CreateFile(volume, path string, fileSize int64, r io.Reader) (er return nil } -func (s *posix) WriteAll(volume, path string, reader io.Reader) (err error) { +func (s *xlStorage) WriteAll(volume, path string, reader io.Reader) (err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) }() - // Create file if not found. Note that it is created with os.O_EXCL flag as the file - // always is supposed to be created in the tmp directory with a unique file name. - w, err := s.openFile(volume, path, os.O_CREATE|os.O_SYNC|os.O_WRONLY|os.O_EXCL) + w, err := s.openFile(volume, path, os.O_CREATE|os.O_SYNC|os.O_WRONLY) if err != nil { return err } @@ -1384,7 +1722,7 @@ func (s *posix) WriteAll(volume, path string, reader io.Reader) (err error) { // AppendFile - append a byte array at path, if file doesn't exist at // path this call explicitly creates it. -func (s *posix) AppendFile(volume, path string, buf []byte) (err error) { +func (s *xlStorage) AppendFile(volume, path string, buf []byte) (err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -1405,8 +1743,8 @@ func (s *posix) AppendFile(volume, path string, buf []byte) (err error) { return w.Close() } -// StatFile - get file info. -func (s *posix) StatFile(volume, path string) (file FileInfo, err error) { +// CheckParts checks if path has necessary parts available.
+func (s *xlStorage) CheckParts(volume, path string, fi FileInfo) error { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -1414,48 +1752,83 @@ func (s *posix) StatFile(volume, path string) (file FileInfo, err error) { volumeDir, err := s.getVolDir(volume) if err != nil { - return FileInfo{}, err - } - // Stat a volume entry. - _, err = os.Stat((volumeDir)) - if err != nil { - if os.IsNotExist(err) { - return FileInfo{}, errVolumeNotFound - } - return FileInfo{}, err + return err } - filePath := slashpath.Join(volumeDir, path) - if err = checkPathLength((filePath)); err != nil { - return FileInfo{}, err + // Stat a volume entry. + if _, err = os.Stat(volumeDir); err != nil { + if os.IsNotExist(err) { + return errVolumeNotFound + } + return err } - st, err := os.Stat((filePath)) - if err != nil { - switch { - case os.IsNotExist(err): - // File is really not found. - return FileInfo{}, errFileNotFound - case isSysErrIO(err): - return FileInfo{}, errFaultyDisk - case isSysErrNotDir(err): - // File path cannot be verified since one of the parents is a file. - return FileInfo{}, errFileNotFound - default: - // Return all errors here. - return FileInfo{}, err + + for _, part := range fi.Parts { + partPath := pathJoin(path, fi.DataDir, fmt.Sprintf("part.%d", part.Number)) + filePath := pathJoin(volumeDir, partPath) + if err = checkPathLength(filePath); err != nil { + return err + } + st, err := os.Stat(filePath) + if err != nil { + return osErrToFileErr(err) + } + if st.Mode().IsDir() { + return errFileNotFound } } + + return nil +} + +// CheckFile checks if path has necessary metadata. +func (s *xlStorage) CheckFile(volume, path string) error { + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + volumeDir, err := s.getVolDir(volume) + if err != nil { + return err + } + + // Stat a volume entry. + _, err = os.Stat(volumeDir) + if err != nil { + if os.IsNotExist(err) { + return errVolumeNotFound + } + return err + } + + filePath := pathJoin(volumeDir, path, xlStorageFormatFile) + if err = checkPathLength(filePath); err != nil { + return err + } + + filePathOld := pathJoin(volumeDir, path, xlStorageFormatFileV1) + if err = checkPathLength(filePathOld); err != nil { + return err + } + + st, err := os.Stat(filePath) + if err != nil && !os.IsNotExist(err) { + return osErrToFileErr(err) + } + if st == nil { + st, err = os.Stat(filePathOld) + if err != nil { + return osErrToFileErr(err) + } + } + + // If its a directory its not a regular file. if st.Mode().IsDir() { - return FileInfo{}, errFileNotFound + return errFileNotFound } - return FileInfo{ - Volume: volume, - Name: path, - ModTime: st.ModTime(), - Size: st.Size(), - Mode: st.Mode(), - }, nil + + return nil } // deleteFile deletes a file or a directory if its empty unless recursive @@ -1467,6 +1840,7 @@ func deleteFile(basePath, deletePath string, recursive bool) error { if basePath == "" || deletePath == "" { return nil } + isObjectDir := HasSuffix(deletePath, SlashSeparator) basePath = filepath.Clean(basePath) deletePath = filepath.Clean(deletePath) if !strings.HasPrefix(deletePath, basePath) || deletePath == basePath { @@ -1482,6 +1856,11 @@ func deleteFile(basePath, deletePath string, recursive bool) error { if err != nil { switch { case isSysErrNotEmpty(err): + // if the object is a directory but is not empty, return + // errFileNotFound to indicate the path is a prefix, not an object.
+ if isObjectDir { + return errFileNotFound + } // Ignore errors if the directory is not empty. The server relies on // this functionality, and sometimes uses recursion that should not // error on parent directories. @@ -1506,53 +1885,8 @@ func deleteFile(basePath, deletePath string, recursive bool) error { return nil } -// DeletePrefixes forcibly deletes all the contents of a set of specified paths. -// Parent directories are automatically removed if they become empty. err can -// bil nil while errs can contain some errors for corresponding objects. No error -// is set if a specified prefix path does not exist. -func (s *posix) DeletePrefixes(volume string, paths []string) (errs []error, err error) { - atomic.AddInt32(&s.activeIOCount, 1) - defer func() { - atomic.AddInt32(&s.activeIOCount, -1) - }() - - volumeDir, err := s.getVolDir(volume) - if err != nil { - return nil, err - } - - // Stat a volume entry. - _, err = os.Stat(volumeDir) - if err != nil { - if os.IsNotExist(err) { - return nil, errVolumeNotFound - } else if os.IsPermission(err) { - return nil, errVolumeAccessDenied - } else if isSysErrIO(err) { - return nil, errFaultyDisk - } - return nil, err - } - - errs = make([]error, len(paths)) - // Following code is needed so that we retain SlashSeparator - // suffix if any in path argument. - for idx, path := range paths { - filePath := pathJoin(volumeDir, path) - errs[idx] = checkPathLength(filePath) - if errs[idx] != nil { - continue - } - // Delete file or a directory recursively, delete parent - // directory as well if its empty. - errs[idx] = deleteFile(volumeDir, filePath, true) - } - - return -} - // DeleteFile - delete a file at path. -func (s *posix) DeleteFile(volume, path string) (err error) { +func (s *xlStorage) DeleteFile(volume, path string) (err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -1587,7 +1921,7 @@ func (s *posix) DeleteFile(volume, path string) (err error) { return deleteFile(volumeDir, filePath, false) } -func (s *posix) DeleteFileBulk(volume string, paths []string) (errs []error, err error) { +func (s *xlStorage) DeleteFileBulk(volume string, paths []string) (errs []error, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -1626,8 +1960,215 @@ func (s *posix) DeleteFileBulk(volume string, paths []string) (errs []error, err return } +// RenameData - rename source path to destination path atomically, metadata and data directory. +func (s *xlStorage) RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath string) (err error) { + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + srcVolumeDir, err := s.getVolDir(srcVolume) + if err != nil { + return err + } + + dstVolumeDir, err := s.getVolDir(dstVolume) + if err != nil { + return err + } + + // Stat a volume entry. 
+ _, err = os.Stat(srcVolumeDir) + if err != nil { + if os.IsNotExist(err) { + return errVolumeNotFound + } else if isSysErrIO(err) { + return errFaultyDisk + } + return err + } + _, err = os.Stat(dstVolumeDir) + if err != nil { + if os.IsNotExist(err) { + return errVolumeNotFound + } else if isSysErrIO(err) { + return errFaultyDisk + } + return err + } + + srcFilePath := slashpath.Join(srcVolumeDir, pathJoin(srcPath, xlStorageFormatFile)) + dstFilePath := slashpath.Join(dstVolumeDir, pathJoin(dstPath, xlStorageFormatFile)) + + var srcDataPath string + var dstDataPath string + if dataDir != "" { + srcDataPath = retainSlash(pathJoin(srcVolumeDir, srcPath, dataDir)) + // make sure to always use path.Join here; do not use pathJoin, as + // it would add a trailing `/` that gets in the way of renameAll() + // and parentDir creation. + dstDataPath = slashpath.Join(dstVolumeDir, dstPath, dataDir) + } + + if err = checkPathLength(srcFilePath); err != nil { + return err + } + + if err = checkPathLength(dstFilePath); err != nil { + return err + } + + srcBuf, err := ioutil.ReadFile(srcFilePath) + if err != nil { + return osErrToFileErr(err) + } + + fi, err := getFileInfo(srcBuf, dstVolume, dstPath, "") + if err != nil { + return err + } + + dstBuf, err := ioutil.ReadFile(dstFilePath) + if err != nil && !os.IsNotExist(err) { + return osErrToFileErr(err) + } + + var xlMeta xlMetaV2 + var legacyPreserved bool + if len(dstBuf) > 0 { + if isXL2V1Format(dstBuf) { + if err = xlMeta.Load(dstBuf); err != nil { + logger.LogIf(s.ctx, err) + return errFileCorrupt + } + } else { + // This code-path is to preserve the legacy data. + xlMetaLegacy := &xlMetaV1Object{} + var json = jsoniter.ConfigCompatibleWithStandardLibrary + if err := json.Unmarshal(dstBuf, xlMetaLegacy); err != nil { + logger.LogIf(s.ctx, err) + return errFileCorrupt + } + if err = xlMeta.AddLegacy(xlMetaLegacy); err != nil { + logger.LogIf(s.ctx, err) + return errFileCorrupt + } + legacyPreserved = true + } + } else { + // It is possible that some drives may not have the `xl.meta` file; + // in such scenarios, check if at least the `part.1` file exists, + // which indicates a legacy version. + currentDataPath := pathJoin(dstVolumeDir, dstPath) + entries, err := readDirN(currentDataPath, 1) + if err != nil && err != errFileNotFound { + return osErrToFileErr(err) + } + for _, entry := range entries { + if entry == xlStorageFormatFile { + continue + } + if strings.HasSuffix(entry, slashSeparator) { + continue + } + if strings.HasPrefix(entry, "part.") { + legacyPreserved = true + break + } + } + } + + if legacyPreserved { + // Preserve all the legacy data, could be slow, but at max there can be 10,000 parts. + currentDataPath := pathJoin(dstVolumeDir, dstPath) + entries, err := readDir(currentDataPath) + if err != nil { + return osErrToFileErr(err) + } + legacyDataPath := pathJoin(dstVolumeDir, dstPath, legacyDataDir) + // the legacy data dir holds old content, honor system umask. + if err = os.Mkdir(legacyDataPath, 0777); err != nil { + if isSysErrIO(err) { + return errFaultyDisk + } + return osErrToFileErr(err) + } + + for _, entry := range entries { + if entry == xlStorageFormatFile { + continue + } + + if err = os.Rename(pathJoin(currentDataPath, entry), pathJoin(legacyDataPath, entry)); err != nil { + if isSysErrIO(err) { + return errFaultyDisk + } + return osErrToFileErr(err) + } + + // Sync all the metadata operations once renames are done.
+ globalSync() + } + } + + var oldDstDataPath string + if fi.VersionID == "" { + // return the latest "null" versionId info + ofi, err := xlMeta.ToFileInfo(dstVolume, dstPath, nullVersionID) + if err == nil { + // Purge the destination path as we are not preserving anything + // versioned object was not requested. + oldDstDataPath = pathJoin(dstVolumeDir, dstPath, ofi.DataDir) + } + } + + if err = xlMeta.AddVersion(fi); err != nil { + return err + } + + dstBuf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...)) + if err != nil { + return errFileCorrupt + } + + if err = s.WriteAll(srcVolume, pathJoin(srcPath, xlStorageFormatFile), bytes.NewReader(dstBuf)); err != nil { + return err + } + + if err = renameAll(srcFilePath, dstFilePath); err != nil { + if isSysErrIO(err) { + return errFaultyDisk + } + return err + } + + if srcDataPath != "" { + removeAll(oldDstDataPath) + removeAll(dstDataPath) + if err = renameAll(srcDataPath, dstDataPath); err != nil { + if isSysErrIO(err) { + return errFaultyDisk + } + return err + } + } + + // Remove parent dir of the source file if empty + if parentDir := slashpath.Dir(srcFilePath); isDirEmpty(parentDir) { + deleteFile(srcVolumeDir, parentDir, false) + } + + if srcDataPath != "" { + if parentDir := slashpath.Dir(srcDataPath); isDirEmpty(parentDir) { + deleteFile(srcVolumeDir, parentDir, false) + } + } + + return nil +} + // RenameFile - rename source path to destination path atomically. -func (s *posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err error) { +func (s *xlStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -1715,40 +2256,11 @@ func (s *posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err e return nil } -func (s *posix) VerifyFile(volume, path string, fileSize int64, algo BitrotAlgorithm, sum []byte, shardSize int64) (err error) { - atomic.AddInt32(&s.activeIOCount, 1) - defer func() { - atomic.AddInt32(&s.activeIOCount, -1) - }() - - volumeDir, err := s.getVolDir(volume) - if err != nil { - return err - } - - // Stat a volume entry. - _, err = os.Stat(volumeDir) - if err != nil { - if os.IsNotExist(err) { - return errVolumeNotFound - } else if isSysErrIO(err) { - return errFaultyDisk - } else if os.IsPermission(err) { - return errVolumeAccessDenied - } - return err - } - - // Validate effective path length before reading. - filePath := pathJoin(volumeDir, path) - if err = checkPathLength(filePath); err != nil { - return err - } - +func (s *xlStorage) bitrotVerify(partPath string, partSize int64, algo BitrotAlgorithm, sum []byte, shardSize int64) error { // Open the file for reading. - file, err := os.Open(filePath) + file, err := os.Open(partPath) if err != nil { - return osErrToFSFileErr(err) + return osErrToFileErr(err) } // Close the file descriptor. @@ -1783,7 +2295,7 @@ func (s *posix) VerifyFile(volume, path string, fileSize int64, algo BitrotAlgor // Calculate the size of the bitrot file and compare // it with the actual file size. 
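The comparison just below checks the on-disk size against bitrotShardFileSize. For streaming bitrot that size exceeds the logical part size, because every shard is stored behind its own hash. A self-contained sketch of the arithmetic, assuming the one-hash-per-shard layout implied by newStreamingBitrotWriter later in this diff (the constants in main are illustrative, not MinIO's defaults):

```go
package main

import "fmt"

// shardFileSize returns the expected on-disk size of a streaming-bitrot
// part: a hash of hashLen bytes precedes every shard of up to shardSize
// bytes of data.
func shardFileSize(partSize, shardSize, hashLen int64) int64 {
	if partSize == 0 {
		return 0
	}
	shards := (partSize + shardSize - 1) / shardSize // ceil division
	return partSize + shards*hashLen
}

func main() {
	// A 1 MiB part split into 64 KiB shards with 32-byte sums:
	// 16 shards * 32 bytes of hashes + 1048576 bytes of data.
	fmt.Println(shardFileSize(1<<20, 64<<10, 32)) // 1049088
}
```

A verifier walking such a file reads a hash, then a shard, recomputes, and compares, which is exactly what the corrupted-file cases in the tests further down provoke.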
- if size != bitrotShardFileSize(fileSize, shardSize, algo) { + if size != bitrotShardFileSize(partSize, shardSize, algo) { return errFileCorrupt } @@ -1814,3 +2326,50 @@ func (s *posix) VerifyFile(volume, path string, fileSize int64, algo BitrotAlgor } } } + +func (s *xlStorage) VerifyFile(volume, path string, fi FileInfo) (err error) { + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + volumeDir, err := s.getVolDir(volume) + if err != nil { + return err + } + + // Stat a volume entry. + _, err = os.Stat(volumeDir) + if err != nil { + if os.IsNotExist(err) { + return errVolumeNotFound + } else if isSysErrIO(err) { + return errFaultyDisk + } else if os.IsPermission(err) { + return errVolumeAccessDenied + } + return err + } + + erasure := fi.Erasure + for _, part := range fi.Parts { + checksumInfo := erasure.GetChecksumInfo(part.Number) + partPath := pathJoin(volumeDir, path, fi.DataDir, fmt.Sprintf("part.%d", part.Number)) + if err := s.bitrotVerify(partPath, + erasure.ShardFileSize(part.Size), + checksumInfo.Algorithm, + checksumInfo.Hash, erasure.ShardSize()); err != nil { + if !IsErr(err, []error{ + errFileNotFound, + errVolumeNotFound, + errFileCorrupt, + }...) { + logger.GetReqInfo(s.ctx).AppendTags("disk", s.String()) + logger.LogIf(s.ctx, err) + } + return err + } + } + + return nil +} diff --git a/cmd/posix_test.go b/cmd/xl-storage_test.go similarity index 69% rename from cmd/posix_test.go rename to cmd/xl-storage_test.go index 0c3cb23f4..dfbaa5ab8 100644 --- a/cmd/posix_test.go +++ b/cmd/xl-storage_test.go @@ -113,15 +113,16 @@ func TestIsValidVolname(t *testing.T) { } } -// creates a temp dir and sets up posix layer. -// returns posix layer, temp dir path to be used for the purpose of tests. -func newPosixTestSetup() (StorageAPI, string, error) { +// creates a temp dir and sets up xlStorage layer. +// returns xlStorage layer, temp dir path to be used for the purpose of tests. +func newXLStorageTestSetup() (*xlStorageDiskIDCheck, string, error) { diskPath, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { return nil, "", err } - // Initialize a new posix layer. - storage, err := newPosix(diskPath, "") + + // Initialize a new xlStorage layer. + storage, err := newXLStorage(diskPath, "") if err != nil { return nil, "", err } @@ -134,7 +135,7 @@ func newPosixTestSetup() (StorageAPI, string, error) { if err != nil { return nil, "", err } - return &posixDiskIDCheck{storage: storage, diskID: "da017d62-70e3-45f1-8a1a-587707e69ad1"}, diskPath, nil + return &xlStorageDiskIDCheck{storage: storage, diskID: "da017d62-70e3-45f1-8a1a-587707e69ad1"}, diskPath, nil } // createPermDeniedFile - creates temporary directory and file with path '/mybucket/myobject' @@ -190,8 +191,8 @@ func removePermDeniedFile(permDeniedDir string) { } } -// TestPosixs posix.getDiskInfo() -func TestPosixGetDiskInfo(t *testing.T) { +// TestXLStorages xlStorage.getDiskInfo() +func TestXLStorageGetDiskInfo(t *testing.T) { path, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatalf("Unable to create a temporary directory, %s", err) @@ -214,7 +215,7 @@ func TestPosixGetDiskInfo(t *testing.T) { } } -func TestPosixIsDirEmpty(t *testing.T) { +func TestXLStorageIsDirEmpty(t *testing.T) { tmp, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatal(err) @@ -250,51 +251,51 @@ func TestPosixIsDirEmpty(t *testing.T) { } } -// TestPosixReadAll - TestPosixs the functionality implemented by posix ReadAll storage API. 
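newXLStorageTestSetup above wraps the raw store in an xlStorageDiskIDCheck carrying a fixed UUID before handing it to tests. A minimal sketch of that decorator idea, written over a reduced stand-in interface rather than MinIO's full StorageAPI:

```go
package main

import (
	"errors"
	"fmt"
)

var errDiskNotFound = errors.New("disk not found")

// store is a reduced stand-in for the storage interface.
type store interface {
	GetDiskID() (string, error)
	ReadAll(volume, path string) ([]byte, error)
}

// diskIDCheck refuses to serve I/O when the backing disk no longer
// reports the ID it was registered with (e.g. after a disk swap).
type diskIDCheck struct {
	backend store
	diskID  string
}

func (d *diskIDCheck) ReadAll(volume, path string) ([]byte, error) {
	id, err := d.backend.GetDiskID()
	if err != nil {
		return nil, err
	}
	if id != d.diskID {
		return nil, errDiskNotFound
	}
	return d.backend.ReadAll(volume, path)
}

// memStore is a trivial in-memory backend for the demo.
type memStore struct{ id string }

func (m *memStore) GetDiskID() (string, error) { return m.id, nil }
func (m *memStore) ReadAll(volume, path string) ([]byte, error) {
	return []byte("Hello, World"), nil
}

func main() {
	d := &diskIDCheck{backend: &memStore{id: "da017d62"}, diskID: "da017d62"}
	b, err := d.ReadAll("exists", "as-file")
	fmt.Println(string(b), err) // Hello, World <nil>
}
```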
-func TestPosixReadAll(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageReadAll - TestXLStorages the functionality implemented by xlStorage ReadAll storage API. +func TestXLStorageReadAll(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) // Create files for the test cases. - if err = posixStorage.MakeVol("exists"); err != nil { + if err = xlStorage.MakeVol("exists"); err != nil { t.Fatalf("Unable to create a volume \"exists\", %s", err) } - if err = posixStorage.AppendFile("exists", "as-directory/as-file", []byte("Hello, World")); err != nil { + if err = xlStorage.AppendFile("exists", "as-directory/as-file", []byte("Hello, World")); err != nil { t.Fatalf("Unable to create a file \"as-directory/as-file\", %s", err) } - if err = posixStorage.AppendFile("exists", "as-file", []byte("Hello, World")); err != nil { + if err = xlStorage.AppendFile("exists", "as-file", []byte("Hello, World")); err != nil { t.Fatalf("Unable to create a file \"as-file\", %s", err) } - if err = posixStorage.AppendFile("exists", "as-file-parent", []byte("Hello, World")); err != nil { + if err = xlStorage.AppendFile("exists", "as-file-parent", []byte("Hello, World")); err != nil { t.Fatalf("Unable to create a file \"as-file-parent\", %s", err) } - // TestPosixcases to validate different conditions for ReadAll API. + // TestXLStoragecases to validate different conditions for ReadAll API. testCases := []struct { volume string path string err error }{ - // TestPosix case - 1. + // TestXLStorage case - 1. // Validate volume does not exist. { volume: "i-dont-exist", path: "", err: errVolumeNotFound, }, - // TestPosix case - 2. + // TestXLStorage case - 2. // Validate bad condition file does not exist. { volume: "exists", path: "as-file-not-found", err: errFileNotFound, }, - // TestPosix case - 3. + // TestXLStorage case - 3. // Validate bad condition file exists as prefix/directory and // we are attempting to read it. { @@ -302,21 +303,21 @@ func TestPosixReadAll(t *testing.T) { path: "as-directory", err: errFileNotFound, }, - // TestPosix case - 4. + // TestXLStorage case - 4. { volume: "exists", path: "as-file-parent/as-file", err: errFileNotFound, }, - // TestPosix case - 5. + // TestXLStorage case - 5. // Validate the good condition file exists and we are able to read it. { volume: "exists", path: "as-file", err: nil, }, - // TestPosix case - 6. - // TestPosix case with invalid volume name. + // TestXLStorage case - 6. + // TestXLStorage case with invalid volume name. { volume: "ab", path: "as-file", @@ -327,20 +328,20 @@ func TestPosixReadAll(t *testing.T) { var dataRead []byte // Run through all the test cases and validate for ReadAll. 
for i, testCase := range testCases { - dataRead, err = posixStorage.ReadAll(testCase.volume, testCase.path) + dataRead, err = xlStorage.ReadAll(testCase.volume, testCase.path) if err != testCase.err { - t.Fatalf("TestPosix %d: Expected err \"%s\", got err \"%s\"", i+1, testCase.err, err) + t.Fatalf("TestXLStorage %d: Expected err \"%s\", got err \"%s\"", i+1, testCase.err, err) } if err == nil { if string(dataRead) != string([]byte("Hello, World")) { - t.Errorf("TestPosix %d: Expected the data read to be \"%s\", but instead got \"%s\"", i+1, "Hello, World", string(dataRead)) + t.Errorf("TestXLStorage %d: Expected the data read to be \"%s\", but instead got \"%s\"", i+1, "Hello, World", string(dataRead)) } } } } -// TestPosixNewPosix all the cases handled in posix storage layer initialization. -func TestPosixNewPosix(t *testing.T) { +// TestNewXLStorage all the cases handled in xlStorage storage layer initialization. +func TestNewXLStorage(t *testing.T) { // Temporary dir name. tmpDirName := globalTestTmpDir + SlashSeparator + "minio-" + nextSuffix() // Temporary file name. @@ -349,7 +350,7 @@ func TestPosixNewPosix(t *testing.T) { f.Close() defer os.Remove(tmpFileName) - // List of all tests for posix initialization. + // List of all tests for xlStorage initialization. testCases := []struct { name string err error @@ -369,27 +370,27 @@ func TestPosixNewPosix(t *testing.T) { // not a directory. { tmpFileName, - syscall.ENOTDIR, + errDiskNotDir, }, } // Validate all test cases. for i, testCase := range testCases { - // Initialize a new posix layer. - _, err := newPosix(testCase.name, "") + // Initialize a new xlStorage layer. + _, err := newXLStorage(testCase.name, "") if err != testCase.err { - t.Fatalf("TestPosix %d failed wanted: %s, got: %s", i+1, err, testCase.err) + t.Fatalf("TestXLStorage %d failed wanted: %s, got: %s", i+1, err, testCase.err) } } } -// TestPosixMakeVol - TestPosix validate the logic for creation of new posix volume. +// TestXLStorageMakeVol - TestXLStorage validate the logic for creation of new xlStorage volume. // Asserts the failures too against the expected failures. -func TestPosixMakeVol(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +func TestXLStorageMakeVol(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -407,25 +408,25 @@ func TestPosixMakeVol(t *testing.T) { volName string expectedErr error }{ - // TestPosix case - 1. + // TestXLStorage case - 1. // A valid case, volume creation is expected to succeed. { volName: "success-vol", expectedErr: nil, }, - // TestPosix case - 2. + // TestXLStorage case - 2. // Case where a file exists by the name of the volume to be created. { volName: "vol-as-file", expectedErr: errVolumeExists, }, - // TestPosix case - 3. + // TestXLStorage case - 3. { volName: "existing-vol", expectedErr: errVolumeExists, }, - // TestPosix case - 5. - // TestPosix case with invalid volume name. + // TestXLStorage case - 5. + // TestXLStorage case with invalid volume name. 
{ volName: "ab", expectedErr: errInvalidArgument, @@ -433,15 +434,12 @@ func TestPosixMakeVol(t *testing.T) { } for i, testCase := range testCases { - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Errorf("Expected the StorageAPI to be of type *posix") - } - if err := posixStorage.MakeVol(testCase.volName); err != testCase.expectedErr { - t.Fatalf("TestPosix %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + if err := xlStorage.MakeVol(testCase.volName); err != testCase.expectedErr { + t.Fatalf("TestXLStorage %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } - // TestPosix for permission denied. + // TestXLStorage for permission denied. if runtime.GOOS != globalWindowsOSName { permDeniedDir, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { @@ -452,19 +450,19 @@ func TestPosixMakeVol(t *testing.T) { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - // Initialize posix storage layer for permission denied error. - _, err = newPosix(permDeniedDir, "") + // Initialize xlStorage storage layer for permission denied error. + _, err = newXLStorage(permDeniedDir, "") if err != nil && !os.IsPermission(err) { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } if err = os.Chmod(permDeniedDir, 0755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - posixStorage, err = newPosix(permDeniedDir, "") + xlStorageNew, err := newXLStorage(permDeniedDir, "") if err != nil { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } // change backend permissions for MakeVol error. @@ -472,27 +470,27 @@ func TestPosixMakeVol(t *testing.T) { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - if err := posixStorage.MakeVol("test-vol"); err != errDiskAccessDenied { + if err := xlStorageNew.MakeVol("test-vol"); err != errDiskAccessDenied { t.Fatalf("expected: %s, got: %s", errDiskAccessDenied, err) } } } -// TestPosixDeleteVol - Validates the expected behavior of posix.DeleteVol for various cases. -func TestPosixDeleteVol(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageDeleteVol - Validates the expected behavior of xlStorage.DeleteVol for various cases. +func TestXLStorageDeleteVol(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) // Setup test environment. - if err = posixStorage.MakeVol("success-vol"); err != nil { + if err = xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - // TestPosix failure cases. + // TestXLStorage failure cases. vol := slashpath.Join(path, "nonempty-vol") if err = os.Mkdir(vol, 0777); err != nil { t.Fatalf("Unable to create directory, %s", err) @@ -505,25 +503,25 @@ func TestPosixDeleteVol(t *testing.T) { volName string expectedErr error }{ - // TestPosix case - 1. + // TestXLStorage case - 1. // A valida case. Empty vol, should be possible to delete. { volName: "success-vol", expectedErr: nil, }, - // TestPosix case - 2. + // TestXLStorage case - 2. // volume is non-existent. { volName: "nonexistent-vol", expectedErr: errVolumeNotFound, }, - // TestPosix case - 3. 
+ // TestXLStorage case - 3. // It shouldn't be possible to delete an non-empty volume, validating the same. { volName: "nonempty-vol", expectedErr: errVolumeNotEmpty, }, - // TestPosix case - 5. + // TestXLStorage case - 5. // Invalid volume name. { volName: "ab", @@ -532,15 +530,12 @@ func TestPosixDeleteVol(t *testing.T) { } for i, testCase := range testCases { - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Errorf("Expected the StorageAPI to be of type *posixDiskIDCheck") - } - if err = posixStorage.DeleteVol(testCase.volName, false); err != testCase.expectedErr { - t.Fatalf("TestPosix: %d, expected: %s, got: %s", i+1, testCase.expectedErr, err) + if err = xlStorage.DeleteVol(testCase.volName, false); err != testCase.expectedErr { + t.Fatalf("TestXLStorage: %d, expected: %s, got: %s", i+1, testCase.expectedErr, err) } } - // TestPosix for permission denied. + // TestXLStorage for permission denied. if runtime.GOOS != globalWindowsOSName { var permDeniedDir string if permDeniedDir, err = ioutil.TempDir(globalTestTmpDir, "minio-"); err != nil { @@ -554,19 +549,19 @@ func TestPosixDeleteVol(t *testing.T) { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - // Initialize posix storage layer for permission denied error. - _, err = newPosix(permDeniedDir, "") + // Initialize xlStorage storage layer for permission denied error. + _, err = newXLStorage(permDeniedDir, "") if err != nil && !os.IsPermission(err) { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } if err = os.Chmod(permDeniedDir, 0755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - posixStorage, err = newPosix(permDeniedDir, "") + xlStorageNew, err := newXLStorage(permDeniedDir, "") if err != nil { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } // change backend permissions for MakeVol error. @@ -574,37 +569,37 @@ func TestPosixDeleteVol(t *testing.T) { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - if err = posixStorage.DeleteVol("mybucket", false); err != errDiskAccessDenied { + if err = xlStorageNew.DeleteVol("mybucket", false); err != errDiskAccessDenied { t.Fatalf("expected: Permission error, got: %s", err) } } - posixDeletedStorage, diskPath, err := newPosixTestSetup() + xlStorageDeletedStorage, diskPath, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } // removing the disk, used to recreate disk not found error. os.RemoveAll(diskPath) - // TestPosix for delete on an removed disk. + // TestXLStorage for delete on an removed disk. // should fail with disk not found. - err = posixDeletedStorage.DeleteVol("Del-Vol", false) + err = xlStorageDeletedStorage.DeleteVol("Del-Vol", false) if err != errDiskNotFound { t.Errorf("Expected: \"Disk not found\", got \"%s\"", err) } } -// TestPosixStatVol - TestPosixs validate the volume info returned by posix.StatVol() for various inputs. -func TestPosixStatVol(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageStatVol - TestXLStorages validate the volume info returned by xlStorage.StatVol() for various inputs. 
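The permission-denied branches above all follow one recipe: chmod the directory to 0400, assert the typed error, then chmod back to 0755 so cleanup can proceed. A compact, self-contained version of that recipe; t.TempDir and t.Cleanup are modern stand-ins for the manual ioutil.TempDir/defer bookkeeping used throughout this diff:

```go
package main

import (
	"os"
	"path/filepath"
	"runtime"
	"testing"
)

func TestPermissionDenied(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("POSIX permission bits do not apply on Windows")
	}
	dir := t.TempDir()
	if err := os.Chmod(dir, 0400); err != nil {
		t.Fatal(err)
	}
	// Restore the mode so the test framework can delete the directory.
	t.Cleanup(func() { os.Chmod(dir, 0755) })

	// Creating a volume-like subdirectory should now fail with EACCES.
	if err := os.Mkdir(filepath.Join(dir, "test-vol"), 0777); !os.IsPermission(err) {
		t.Fatalf("expected permission error, got %v", err)
	}
}
```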
+func TestXLStorageStatVol(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) // Setup test environment. - if err = posixStorage.MakeVol("success-vol"); err != nil { + if err = xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } @@ -612,17 +607,17 @@ func TestPosixStatVol(t *testing.T) { volName string expectedErr error }{ - // TestPosix case - 1. + // TestXLStorage case - 1. { volName: "success-vol", expectedErr: nil, }, - // TestPosix case - 2. + // TestXLStorage case - 2. { volName: "nonexistent-vol", expectedErr: errVolumeNotFound, }, - // TestPosix case - 3. + // TestXLStorage case - 3. { volName: "ab", expectedErr: errVolumeNotFound, @@ -630,58 +625,57 @@ func TestPosixStatVol(t *testing.T) { } for i, testCase := range testCases { - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Errorf("Expected the StorageAPI to be of type *posix") - } - volInfo, err := posixStorage.StatVol(testCase.volName) + var volInfo VolInfo + volInfo, err = xlStorage.StatVol(testCase.volName) if err != testCase.expectedErr { - t.Fatalf("TestPosix case : %d, Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + t.Fatalf("TestXLStorage case : %d, Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } if err == nil { if volInfo.Name != testCase.volName { - t.Errorf("TestPosix case %d: Expected the volume name to be \"%s\", instead found \"%s\"", i+1, volInfo.Name, testCase.volName) + t.Errorf("TestXLStorage case %d: Expected the volume name to be \"%s\", instead found \"%s\"", + i+1, volInfo.Name, testCase.volName) } } } - posixDeletedStorage, diskPath, err := newPosixTestSetup() + xlStorageDeletedStorage, diskPath, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } // removing the disk, used to recreate disk not found error. os.RemoveAll(diskPath) - // TestPosix for delete on an removed disk. + // TestXLStorage for delete on an removed disk. // should fail with disk not found. - _, err = posixDeletedStorage.StatVol("Stat vol") + _, err = xlStorageDeletedStorage.StatVol("Stat vol") if err != errDiskNotFound { t.Errorf("Expected: \"Disk not found\", got \"%s\"", err) } } -// TestPosixListVols - Validates the result and the error output for posix volume listing functionality posix.ListVols(). -func TestPosixListVols(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageListVols - Validates the result and the error output for xlStorage volume listing functionality xlStorage.ListVols(). +func TestXLStorageListVols(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } var volInfos []VolInfo - // TestPosix empty list vols. - if volInfos, err = posixStorage.ListVols(); err != nil { + // TestXLStorage empty list vols. + if volInfos, err = xlStorage.ListVols(); err != nil { t.Fatalf("expected: , got: %s", err) } else if len(volInfos) != 1 { t.Fatalf("expected: one entry, got: %s", volInfos) } - // TestPosix non-empty list vols. 
- if err = posixStorage.MakeVol("success-vol"); err != nil { + // TestXLStorage non-empty list vols. + if err = xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - volInfos, err = posixStorage.ListVols() + volInfos, err = xlStorage.ListVols() if err != nil { t.Fatalf("expected: , got: %s", err) } @@ -702,35 +696,35 @@ func TestPosixListVols(t *testing.T) { // removing the path and simulating disk failure os.RemoveAll(path) // should fail with errDiskNotFound. - if _, err = posixStorage.ListVols(); err != errDiskNotFound { + if _, err = xlStorage.ListVols(); err != errDiskNotFound { t.Errorf("Expected to fail with \"%s\", but instead failed with \"%s\"", errDiskNotFound, err) } } -// TestPosixPosixListDir - TestPosixs validate the directory listing functionality provided by posix.ListDir . -func TestPosixPosixListDir(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageXlStorageListDir - TestXLStorages validate the directory listing functionality provided by xlStorage.ListDir . +func TestXLStorageXlStorageListDir(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) - // create posix test setup. - posixDeletedStorage, diskPath, err := newPosixTestSetup() + // create xlStorage test setup. + xlStorageDeletedStorage, diskPath, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } // removing the disk, used to recreate disk not found error. os.RemoveAll(diskPath) // Setup test environment. - if err = posixStorage.MakeVol("success-vol"); err != nil { + if err = xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err = posixStorage.AppendFile("success-vol", "abc/def/ghi/success-file", []byte("Hello, world")); err != nil { + if err = xlStorage.AppendFile("success-vol", "abc/def/ghi/success-file", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err = posixStorage.AppendFile("success-vol", "abc/xyz/ghi/success-file", []byte("Hello, world")); err != nil { + if err = xlStorage.AppendFile("success-vol", "abc/xyz/ghi/success-file", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } @@ -741,7 +735,7 @@ func TestPosixPosixListDir(t *testing.T) { expectedListDir []string expectedErr error }{ - // TestPosix case - 1. + // TestXLStorage case - 1. // valid case with existing volume and file to delete. { srcVol: "success-vol", @@ -749,7 +743,7 @@ func TestPosixPosixListDir(t *testing.T) { expectedListDir: []string{"def/", "xyz/"}, expectedErr: nil, }, - // TestPosix case - 1. + // TestXLStorage case - 1. // valid case with existing volume and file to delete. { srcVol: "success-vol", @@ -757,7 +751,7 @@ func TestPosixPosixListDir(t *testing.T) { expectedListDir: []string{"ghi/"}, expectedErr: nil, }, - // TestPosix case - 1. + // TestXLStorage case - 1. // valid case with existing volume and file to delete. { srcVol: "success-vol", @@ -765,21 +759,21 @@ func TestPosixPosixListDir(t *testing.T) { expectedListDir: []string{"success-file"}, expectedErr: nil, }, - // TestPosix case - 2. + // TestXLStorage case - 2. 
{ srcVol: "success-vol", srcPath: "abcdef", expectedErr: errFileNotFound, }, - // TestPosix case - 3. - // TestPosix case with invalid volume name. + // TestXLStorage case - 3. + // TestXLStorage case with invalid volume name. { srcVol: "ab", srcPath: "success-file", expectedErr: errVolumeNotFound, }, - // TestPosix case - 4. - // TestPosix case with non existent volume. + // TestXLStorage case - 4. + // TestXLStorage case with non existent volume. { srcVol: "non-existent-vol", srcPath: "success-file", @@ -789,83 +783,80 @@ func TestPosixPosixListDir(t *testing.T) { for i, testCase := range testCases { var dirList []string - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Errorf("Expected the StorageAPI to be of type *posix") - } - dirList, err = posixStorage.ListDir(testCase.srcVol, testCase.srcPath, -1, "") + dirList, err = xlStorage.ListDir(testCase.srcVol, testCase.srcPath, -1) if err != testCase.expectedErr { - t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + t.Fatalf("TestXLStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } if err == nil { for _, expected := range testCase.expectedListDir { if !strings.Contains(strings.Join(dirList, ","), expected) { - t.Errorf("TestPosix case %d: Expected the directory listing to be \"%v\", but got \"%v\"", i+1, testCase.expectedListDir, dirList) + t.Errorf("TestXLStorage case %d: Expected the directory listing to be \"%v\", but got \"%v\"", i+1, testCase.expectedListDir, dirList) } } } } - // TestPosix for permission denied. + // TestXLStorage for permission denied. if runtime.GOOS != globalWindowsOSName { permDeniedDir := createPermDeniedFile(t) defer removePermDeniedFile(permDeniedDir) - // Initialize posix storage layer for permission denied error. - _, err = newPosix(permDeniedDir, "") + // Initialize xlStorage storage layer for permission denied error. + _, err = newXLStorage(permDeniedDir, "") if err != nil && !os.IsPermission(err) { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } if err = os.Chmod(permDeniedDir, 0755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - posixStorage, err = newPosix(permDeniedDir, "") + xlStorageNew, err := newXLStorage(permDeniedDir, "") if err != nil { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = posixStorage.DeleteFile("mybucket", "myobject"); err != errFileAccessDenied { + if err = xlStorageNew.DeleteFile("mybucket", "myobject"); err != errFileAccessDenied { t.Errorf("expected: %s, got: %s", errFileAccessDenied, err) } } - // TestPosix for delete on an removed disk. + // TestXLStorage for delete on an removed disk. // should fail with disk not found. - err = posixDeletedStorage.DeleteFile("del-vol", "my-file") + err = xlStorageDeletedStorage.DeleteFile("del-vol", "my-file") if err != errDiskNotFound { t.Errorf("Expected: \"Disk not found\", got \"%s\"", err) } } -// TestPosixDeleteFile - Series of test cases construct valid and invalid input data and validates the result and the error response. -func TestPosixDeleteFile(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageDeleteFile - Series of test cases construct valid and invalid input data and validates the result and the error response. 
+func TestXLStorageDeleteFile(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) - // create posix test setup - posixDeletedStorage, diskPath, err := newPosixTestSetup() + // create xlStorage test setup + xlStorageDeletedStorage, diskPath, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } // removing the disk, used to recreate disk not found error. os.RemoveAll(diskPath) // Setup test environment. - if err = posixStorage.MakeVol("success-vol"); err != nil { + if err = xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err = posixStorage.AppendFile("success-vol", "success-file", []byte("Hello, world")); err != nil { + if err = xlStorage.AppendFile("success-vol", "success-file", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err = posixStorage.MakeVol("no-permissions"); err != nil { + if err = xlStorage.MakeVol("no-permissions"); err != nil { t.Fatalf("Unable to create volume, %s", err.Error()) } - if err = posixStorage.AppendFile("no-permissions", "dir/file", []byte("Hello, world")); err != nil { + if err = xlStorage.AppendFile("no-permissions", "dir/file", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err.Error()) } // Parent directory must have write permissions, this is read + execute. @@ -878,43 +869,43 @@ func TestPosixDeleteFile(t *testing.T) { srcPath string expectedErr error }{ - // TestPosix case - 1. + // TestXLStorage case - 1. // valid case with existing volume and file to delete. { srcVol: "success-vol", srcPath: "success-file", expectedErr: nil, }, - // TestPosix case - 2. + // TestXLStorage case - 2. // The file was deleted in the last case, so DeleteFile should fail. { srcVol: "success-vol", srcPath: "success-file", expectedErr: errFileNotFound, }, - // TestPosix case - 3. - // TestPosix case with segment of the volume name > 255. + // TestXLStorage case - 3. + // TestXLStorage case with segment of the volume name > 255. { srcVol: "my", srcPath: "success-file", expectedErr: errVolumeNotFound, }, - // TestPosix case - 4. - // TestPosix case with non-existent volume. + // TestXLStorage case - 4. + // TestXLStorage case with non-existent volume. { srcVol: "non-existent-vol", srcPath: "success-file", expectedErr: errVolumeNotFound, }, - // TestPosix case - 5. - // TestPosix case with src path segment > 255. + // TestXLStorage case - 5. + // TestXLStorage case with src path segment > 255. { srcVol: "success-vol", srcPath: "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", expectedErr: errFileNameTooLong, }, - // TestPosix case - 6. - // TestPosix case with undeletable parent directory. + // TestXLStorage case - 6. + // TestXLStorage case with undeletable parent directory. // File can delete, dir cannot delete because no-permissions doesn't have write perms. 
{ srcVol: "no-permissions", @@ -924,59 +915,56 @@ func TestPosixDeleteFile(t *testing.T) { } for i, testCase := range testCases { - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Errorf("Expected the StorageAPI to be of type *posix") - } - if err = posixStorage.DeleteFile(testCase.srcVol, testCase.srcPath); err != testCase.expectedErr { - t.Errorf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + if err = xlStorage.DeleteFile(testCase.srcVol, testCase.srcPath); err != testCase.expectedErr { + t.Errorf("TestXLStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } - // TestPosix for permission denied. + // TestXLStorage for permission denied. if runtime.GOOS != globalWindowsOSName { permDeniedDir := createPermDeniedFile(t) defer removePermDeniedFile(permDeniedDir) - // Initialize posix storage layer for permission denied error. - _, err = newPosix(permDeniedDir, "") + // Initialize xlStorage storage layer for permission denied error. + _, err = newXLStorage(permDeniedDir, "") if err != nil && !os.IsPermission(err) { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } if err = os.Chmod(permDeniedDir, 0755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - posixStorage, err = newPosix(permDeniedDir, "") + xlStorageNew, err := newXLStorage(permDeniedDir, "") if err != nil { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = posixStorage.DeleteFile("mybucket", "myobject"); err != errFileAccessDenied { + if err = xlStorageNew.DeleteFile("mybucket", "myobject"); err != errFileAccessDenied { t.Errorf("expected: %s, got: %s", errFileAccessDenied, err) } } - // TestPosix for delete on an removed disk. + // TestXLStorage for delete on an removed disk. // should fail with disk not found. - err = posixDeletedStorage.DeleteFile("del-vol", "my-file") + err = xlStorageDeletedStorage.DeleteFile("del-vol", "my-file") if err != errDiskNotFound { t.Errorf("Expected: \"Disk not found\", got \"%s\"", err) } } -// TestPosixReadFile - TestPosixs posix.ReadFile with wide range of cases and asserts the result and error response. -func TestPosixReadFile(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageReadFile - TestXLStorages xlStorage.ReadFile with wide range of cases and asserts the result and error response. +func TestXLStorageReadFile(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) volume := "success-vol" // Setup test environment. - if err = posixStorage.MakeVol(volume); err != nil { + if err = xlStorage.MakeVol(volume); err != nil { t.Fatalf("Unable to create volume, %s", err) } @@ -1060,7 +1048,7 @@ func TestPosixReadFile(t *testing.T) { v := NewBitrotVerifier(SHA256, getSHA256Sum([]byte("hello, world"))) // Create test files for further reading. 
for i, appendFile := range appendFiles { - err = posixStorage.AppendFile(volume, appendFile.fileName, []byte("hello, world")) + err = xlStorage.AppendFile(volume, appendFile.fileName, []byte("hello, world")) if err != appendFile.expectedErr { t.Fatalf("Creating file failed: %d %#v, expected: %s, got: %s", i+1, appendFile, appendFile.expectedErr, err) } @@ -1069,7 +1057,7 @@ func TestPosixReadFile(t *testing.T) { { buf := make([]byte, 5) // Test for negative offset. - if _, err = posixStorage.ReadFile(volume, "myobject", -1, buf, v); err == nil { + if _, err = xlStorage.ReadFile(volume, "myobject", -1, buf, v); err == nil { t.Fatalf("expected: error, got: ") } } @@ -1079,7 +1067,7 @@ func TestPosixReadFile(t *testing.T) { var n int64 // Common read buffer. var buf = make([]byte, testCase.bufSize) - n, err = posixStorage.ReadFile(testCase.volume, testCase.fileName, testCase.offset, buf, v) + n, err = xlStorage.ReadFile(testCase.volume, testCase.fileName, testCase.offset, buf, v) if err != nil && testCase.expectedErr != nil { // Validate if the type string of the errors are an exact match. if err.Error() != testCase.expectedErr.Error() { @@ -1130,35 +1118,35 @@ func TestPosixReadFile(t *testing.T) { } } - // TestPosix for permission denied. + // TestXLStorage for permission denied. if runtime.GOOS != globalWindowsOSName { permDeniedDir := createPermDeniedFile(t) defer removePermDeniedFile(permDeniedDir) - // Initialize posix storage layer for permission denied error. - _, err = newPosix(permDeniedDir, "") + // Initialize xlStorage storage layer for permission denied error. + _, err = newXLStorage(permDeniedDir, "") if err != nil && !os.IsPermission(err) { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } if err = os.Chmod(permDeniedDir, 0755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - posixPermStorage, err := newPosix(permDeniedDir, "") + xlStoragePermStorage, err := newXLStorage(permDeniedDir, "") if err != nil { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } // Common read buffer. var buf = make([]byte, 10) - if _, err = posixPermStorage.ReadFile("mybucket", "myobject", 0, buf, v); err != errFileAccessDenied { + if _, err = xlStoragePermStorage.ReadFile("mybucket", "myobject", 0, buf, v); err != errFileAccessDenied { t.Errorf("expected: %s, got: %s", errFileAccessDenied, err) } } } -var posixReadFileWithVerifyTests = []struct { +var xlStorageReadFileWithVerifyTests = []struct { file string offset int length int @@ -1183,18 +1171,18 @@ var posixReadFileWithVerifyTests = []struct { {file: "myobject", offset: 1000, length: 1001, algorithm: BLAKE2b512, expError: nil}, // 15 } -// TestPosixReadFile with bitrot verification - tests the posix level +// TestXLStorageReadFile with bitrot verification - tests the xlStorage level // ReadFile API with a BitrotVerifier. Only tests hashing related // functionality. Other functionality is tested with -// TestPosixReadFile. -func TestPosixReadFileWithVerify(t *testing.T) { +// TestXLStorageReadFile. 
+func TestXLStorageReadFileWithVerify(t *testing.T) { volume, object := "test-vol", "myobject" - posixStorage, path, err := newPosixTestSetup() + xlStorage, path, err := newXLStorageTestSetup() if err != nil { os.RemoveAll(path) - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } - if err = posixStorage.MakeVol(volume); err != nil { + if err = xlStorage.MakeVol(volume); err != nil { os.RemoveAll(path) t.Fatalf("Unable to create volume %s: %v", volume, err) } @@ -1203,12 +1191,12 @@ func TestPosixReadFileWithVerify(t *testing.T) { os.RemoveAll(path) t.Fatalf("Unable to create generate random data: %v", err) } - if err = posixStorage.AppendFile(volume, object, data); err != nil { + if err = xlStorage.AppendFile(volume, object, data); err != nil { os.RemoveAll(path) t.Fatalf("Unable to create object: %v", err) } - for i, test := range posixReadFileWithVerifyTests { + for i, test := range xlStorageReadFileWithVerifyTests { h := test.algorithm.New() h.Write(data) if test.expError != nil { @@ -1216,7 +1204,7 @@ func TestPosixReadFileWithVerify(t *testing.T) { } buffer := make([]byte, test.length) - n, err := posixStorage.ReadFile(volume, test.file, int64(test.offset), buffer, NewBitrotVerifier(test.algorithm, h.Sum(nil))) + n, err := xlStorage.ReadFile(volume, test.file, int64(test.offset), buffer, NewBitrotVerifier(test.algorithm, h.Sum(nil))) switch { case err == nil && test.expError != nil: @@ -1231,40 +1219,40 @@ func TestPosixReadFileWithVerify(t *testing.T) { } } -// TestPosixFormatFileChange - to test if changing the diskID makes the calls fail. -func TestPosixFormatFileChange(t *testing.T) { - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageFormatFileChange - to test if changing the diskID makes the calls fail. +func TestXLStorageFormatFileChange(t *testing.T) { + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) - if err = posixStorage.MakeVol(volume); err != nil { + if err = xlStorage.MakeVol(volume); err != nil { t.Fatalf("MakeVol failed with %s", err) } // Change the format.json such that "this" is changed to "randomid". 
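That tampering step rewrites the "this" field, the entry recording which set member the disk believes it is; the WriteFile call follows below. A sketch of the kind of check such a change trips; the struct mirrors only the JSON fields visible in this test, and both the type and the returned error are assumptions, not MinIO's exact format machinery:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

var errDiskNotFound = errors.New("disk not found")

// formatXLSketch mirrors only the format.json fields used here.
type formatXLSketch struct {
	Version string `json:"version"`
	Format  string `json:"format"`
	ID      string `json:"id"`
	XL      struct {
		This string `json:"this"`
	} `json:"xl"`
}

// checkDiskID fails when the "this" entry no longer matches the ID the
// disk was registered with, the condition the test provokes by writing
// "randomid".
func checkDiskID(formatData []byte, wantDiskID string) error {
	var f formatXLSketch
	if err := json.Unmarshal(formatData, &f); err != nil {
		return err
	}
	if f.XL.This != wantDiskID {
		return errDiskNotFound
	}
	return nil
}

func main() {
	data := []byte(`{"version":"1","format":"xl","id":"592a41c2","xl":{"this":"randomid"}}`)
	fmt.Println(checkDiskID(data, "da017d62")) // disk not found
}
```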
- if err = ioutil.WriteFile(pathJoin(posixStorage.String(), minioMetaBucket, formatConfigFile), []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"randomid","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","randomid","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`), 0644); err != nil { + if err = ioutil.WriteFile(pathJoin(xlStorage.String(), minioMetaBucket, formatConfigFile), []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"randomid","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","randomid","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`), 0644); err != nil { t.Fatalf("ioutil.WriteFile failed with %s", err) } - err = posixStorage.MakeVol(volume) + err = xlStorage.MakeVol(volume) if err != errVolumeExists { t.Fatalf("MakeVol expected to fail with errDiskNotFound but failed with %s", err) } } -// TestPosix posix.AppendFile() -func TestPosixAppendFile(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorage xlStorage.AppendFile() +func TestXLStorageAppendFile(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) // Setup test environment. - if err = posixStorage.MakeVol("success-vol"); err != nil { + if err = xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } @@ -1279,11 +1267,11 @@ func TestPosixAppendFile(t *testing.T) { }{ {"myobject", nil}, {"path/to/my/object", nil}, - // TestPosix to append to previously created file. + // TestXLStorage to append to previously created file. {"myobject", nil}, - // TestPosix to use same path of previously created file. + // TestXLStorage to use same path of previously created file. {"path/to/my/testobject", nil}, - // TestPosix to use object is a directory now. + // TestXLStorage to use object is a directory now. {"object-as-dir", errIsNotRegular}, // path segment uses previously uploaded object. {"myobject/testobject", errFileAccessDenied}, @@ -1294,81 +1282,81 @@ func TestPosixAppendFile(t *testing.T) { } for i, testCase := range testCases { - if err = posixStorage.AppendFile("success-vol", testCase.fileName, []byte("hello, world")); err != testCase.expectedErr { + if err = xlStorage.AppendFile("success-vol", testCase.fileName, []byte("hello, world")); err != testCase.expectedErr { t.Errorf("Case: %d, expected: %s, got: %s", i+1, testCase.expectedErr, err) } } - // TestPosix for permission denied. + // TestXLStorage for permission denied. if runtime.GOOS != globalWindowsOSName { permDeniedDir := createPermDeniedFile(t) defer removePermDeniedFile(permDeniedDir) - var posixPermStorage StorageAPI - // Initialize posix storage layer for permission denied error. - _, err = newPosix(permDeniedDir, "") + var xlStoragePermStorage StorageAPI + // Initialize xlStorage storage layer for permission denied error. 
+ _, err = newXLStorage(permDeniedDir, "") if err != nil && !os.IsPermission(err) { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } if err = os.Chmod(permDeniedDir, 0755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - posixPermStorage, err = newPosix(permDeniedDir, "") + xlStoragePermStorage, err = newXLStorage(permDeniedDir, "") if err != nil { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = posixPermStorage.AppendFile("mybucket", "myobject", []byte("hello, world")); err != errFileAccessDenied { + if err = xlStoragePermStorage.AppendFile("mybucket", "myobject", []byte("hello, world")); err != errFileAccessDenied { t.Fatalf("expected: Permission error, got: %s", err) } } - // TestPosix case with invalid volume name. + // TestXLStorage case with invalid volume name. // A valid volume name should be atleast of size 3. - err = posixStorage.AppendFile("bn", "yes", []byte("hello, world")) + err = xlStorage.AppendFile("bn", "yes", []byte("hello, world")) if err != errVolumeNotFound { t.Fatalf("expected: \"Invalid argument error\", got: \"%s\"", err) } } -// TestPosix posix.RenameFile() -func TestPosixRenameFile(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorage xlStorage.RenameFile() +func TestXLStorageRenameFile(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) // Setup test environment. - if err := posixStorage.MakeVol("src-vol"); err != nil { + if err := xlStorage.MakeVol("src-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err := posixStorage.MakeVol("dest-vol"); err != nil { + if err := xlStorage.MakeVol("dest-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err := posixStorage.AppendFile("src-vol", "file1", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("src-vol", "file1", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := posixStorage.AppendFile("src-vol", "file2", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("src-vol", "file2", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := posixStorage.AppendFile("src-vol", "file3", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("src-vol", "file3", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := posixStorage.AppendFile("src-vol", "file4", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("src-vol", "file4", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := posixStorage.AppendFile("src-vol", "file5", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("src-vol", "file5", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := posixStorage.AppendFile("src-vol", "path/to/file1", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("src-vol", "path/to/file1", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } @@ -1379,7 +1367,7 @@ func TestPosixRenameFile(t *testing.T) { destPath 
string expectedErr error }{ - // TestPosix case - 1. + // TestXLStorage case - 1. { srcVol: "src-vol", destVol: "dest-vol", @@ -1387,7 +1375,7 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-one", expectedErr: nil, }, - // TestPosix case - 2. + // TestXLStorage case - 2. { srcVol: "src-vol", destVol: "dest-vol", @@ -1395,8 +1383,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: nil, }, - // TestPosix case - 3. - // TestPosix to overwrite destination file. + // TestXLStorage case - 3. + // TestXLStorage to overwrite destination file. { srcVol: "src-vol", destVol: "dest-vol", @@ -1404,8 +1392,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-one", expectedErr: nil, }, - // TestPosix case - 4. - // TestPosix case with io error count set to 1. + // TestXLStorage case - 4. + // TestXLStorage case with io error count set to 1. // expected not to fail. { srcVol: "src-vol", @@ -1414,8 +1402,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-two", expectedErr: nil, }, - // TestPosix case - 5. - // TestPosix case with io error count set to maximum allowed count. + // TestXLStorage case - 5. + // TestXLStorage case with io error count set to maximum allowed count. // expected not to fail. { srcVol: "src-vol", @@ -1424,8 +1412,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-three", expectedErr: nil, }, - // TestPosix case - 6. - // TestPosix case with non-existent source file. + // TestXLStorage case - 6. + // TestXLStorage case with non-existent source file. { srcVol: "src-vol", destVol: "dest-vol", @@ -1433,8 +1421,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-three", expectedErr: errFileNotFound, }, - // TestPosix case - 7. - // TestPosix to check failure of source and destination are not same type. + // TestXLStorage case - 7. + // TestXLStorage to check failure of source and destination are not same type. { srcVol: "src-vol", destVol: "dest-vol", @@ -1442,8 +1430,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-one", expectedErr: errFileAccessDenied, }, - // TestPosix case - 8. - // TestPosix to check failure of destination directory exists. + // TestXLStorage case - 8. + // TestXLStorage to check failure of destination directory exists. { srcVol: "src-vol", destVol: "dest-vol", @@ -1451,8 +1439,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errFileAccessDenied, }, - // TestPosix case - 9. - // TestPosix case with source being a file and destination being a directory. + // TestXLStorage case - 9. + // TestXLStorage case with source being a file and destination being a directory. // Either both have to be files or directories. // Expecting to fail with `errFileAccessDenied`. { @@ -1462,8 +1450,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errFileAccessDenied, }, - // TestPosix case - 10. - // TestPosix case with non-existent source volume. + // TestXLStorage case - 10. + // TestXLStorage case with non-existent source volume. // Expecting to fail with `errVolumeNotFound`. { srcVol: "src-vol-non-existent", @@ -1472,8 +1460,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errVolumeNotFound, }, - // TestPosix case - 11. - // TestPosix case with non-existent destination volume. + // TestXLStorage case - 11. + // TestXLStorage case with non-existent destination volume. // Expecting to fail with `errVolumeNotFound`. 
{ srcVol: "src-vol", @@ -1482,8 +1470,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errVolumeNotFound, }, - // TestPosix case - 12. - // TestPosix case with invalid src volume name. Length should be atleast 3. + // TestXLStorage case - 12. + // TestXLStorage case with invalid src volume name. Length should be atleast 3. // Expecting to fail with `errInvalidArgument`. { srcVol: "ab", @@ -1492,8 +1480,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errVolumeNotFound, }, - // TestPosix case - 13. - // TestPosix case with invalid destination volume name. Length should be atleast 3. + // TestXLStorage case - 13. + // TestXLStorage case with invalid destination volume name. Length should be atleast 3. // Expecting to fail with `errInvalidArgument`. { srcVol: "abcd", @@ -1502,8 +1490,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errVolumeNotFound, }, - // TestPosix case - 14. - // TestPosix case with invalid destination volume name. Length should be atleast 3. + // TestXLStorage case - 14. + // TestXLStorage case with invalid destination volume name. Length should be atleast 3. // Expecting to fail with `errInvalidArgument`. { srcVol: "abcd", @@ -1512,8 +1500,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errVolumeNotFound, }, - // TestPosix case - 15. - // TestPosix case with the parent of the destination being a file. + // TestXLStorage case - 15. + // TestXLStorage case with the parent of the destination being a file. // expected to fail with `errFileAccessDenied`. { srcVol: "src-vol", @@ -1522,8 +1510,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-one/parent-is-file", expectedErr: errFileAccessDenied, }, - // TestPosix case - 16. - // TestPosix case with segment of source file name more than 255. + // TestXLStorage case - 16. + // TestXLStorage case with segment of source file name more than 255. // expected not to fail. { srcVol: "src-vol", @@ -1532,8 +1520,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-six", expectedErr: errFileNameTooLong, }, - // TestPosix case - 17. - // TestPosix case with segment of destination file name more than 255. + // TestXLStorage case - 17. + // TestXLStorage case with segment of destination file name more than 255. // expected not to fail. 
{ srcVol: "src-vol", @@ -1545,35 +1533,31 @@ func TestPosixRenameFile(t *testing.T) { } for i, testCase := range testCases { - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Fatalf("Expected the StorageAPI to be of type *posix") - } - - if err := posixStorage.RenameFile(testCase.srcVol, testCase.srcPath, testCase.destVol, testCase.destPath); err != testCase.expectedErr { - t.Fatalf("TestPosix %d: Expected the error to be : \"%v\", got: \"%v\".", i+1, testCase.expectedErr, err) + if err := xlStorage.RenameFile(testCase.srcVol, testCase.srcPath, testCase.destVol, testCase.destPath); err != testCase.expectedErr { + t.Fatalf("TestXLStorage %d: Expected the error to be : \"%v\", got: \"%v\".", i+1, testCase.expectedErr, err) } } } -// TestPosix posix.StatFile() -func TestPosixStatFile(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorage xlStorage.CheckFile() +func TestXLStorageCheckFile(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) // Setup test environment. - if err := posixStorage.MakeVol("success-vol"); err != nil { + if err := xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err := posixStorage.AppendFile("success-vol", "success-file", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("success-vol", pathJoin("success-file", xlStorageFormatFile), []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := posixStorage.AppendFile("success-vol", "path/to/success-file", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("success-vol", pathJoin("path/to/success-file", xlStorageFormatFile), []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } @@ -1582,43 +1566,43 @@ func TestPosixStatFile(t *testing.T) { srcPath string expectedErr error }{ - // TestPosix case - 1. - // TestPosix case with valid inputs, expected to pass. + // TestXLStorage case - 1. + // TestXLStorage case with valid inputs, expected to pass. { srcVol: "success-vol", srcPath: "success-file", expectedErr: nil, }, - // TestPosix case - 2. - // TestPosix case with valid inputs, expected to pass. + // TestXLStorage case - 2. + // TestXLStorage case with valid inputs, expected to pass. { srcVol: "success-vol", srcPath: "path/to/success-file", expectedErr: nil, }, - // TestPosix case - 3. - // TestPosix case with non-existent file. + // TestXLStorage case - 3. + // TestXLStorage case with non-existent file. { srcVol: "success-vol", srcPath: "nonexistent-file", expectedErr: errFileNotFound, }, - // TestPosix case - 4. - // TestPosix case with non-existent file path. + // TestXLStorage case - 4. + // TestXLStorage case with non-existent file path. { srcVol: "success-vol", srcPath: "path/2/success-file", expectedErr: errFileNotFound, }, - // TestPosix case - 5. - // TestPosix case with path being a directory. + // TestXLStorage case - 5. + // TestXLStorage case with path being a directory. { srcVol: "success-vol", srcPath: "path", expectedErr: errFileNotFound, }, - // TestPosix case - 6. - // TestPosix case with non existent volume. + // TestXLStorage case - 6. + // TestXLStorage case with non existent volume. 
{ srcVol: "non-existent-vol", srcPath: "success-file", @@ -1627,33 +1611,30 @@ func TestPosixStatFile(t *testing.T) { } for i, testCase := range testCases { - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Errorf("Expected the StorageAPI to be of type *posix") - } - if _, err := posixStorage.StatFile(testCase.srcVol, testCase.srcPath); err != testCase.expectedErr { - t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + if err := xlStorage.CheckFile(testCase.srcVol, testCase.srcPath); err != testCase.expectedErr { + t.Fatalf("TestXLStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } } -// Test posix.VerifyFile() -func TestPosixVerifyFile(t *testing.T) { +// Test xlStorage.VerifyFile() +func TestXLStorageVerifyFile(t *testing.T) { // We test 4 cases: // 1) Whole-file bitrot check on proper file // 2) Whole-file bitrot check on corrupted file // 3) Streaming bitrot check on proper file // 4) Streaming bitrot check on corrupted file - // create posix test setup - posixStorage, path, err := newPosixTestSetup() + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) volName := "testvol" fileName := "testfile" - if err := posixStorage.MakeVol(volName); err != nil { + if err := xlStorage.MakeVol(volName); err != nil { t.Fatal(err) } @@ -1667,29 +1648,29 @@ func TestPosixVerifyFile(t *testing.T) { h := algo.New() h.Write(data) hashBytes := h.Sum(nil) - if err := posixStorage.WriteAll(volName, fileName, bytes.NewBuffer(data)); err != nil { + if err := xlStorage.WriteAll(volName, fileName, bytes.NewBuffer(data)); err != nil { t.Fatal(err) } - if err := posixStorage.VerifyFile(volName, fileName, size, algo, hashBytes, 0); err != nil { + if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err != nil { t.Fatal(err) } // 2) Whole-file bitrot check on corrupted file - if err := posixStorage.AppendFile(volName, fileName, []byte("a")); err != nil { + if err := xlStorage.AppendFile(volName, fileName, []byte("a")); err != nil { t.Fatal(err) } // Check if VerifyFile reports the incorrect file length (the correct length is `size+1`) - if err := posixStorage.VerifyFile(volName, fileName, size, algo, hashBytes, 0); err == nil { + if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err == nil { t.Fatal("expected to fail bitrot check") } // Check if bitrot fails - if err := posixStorage.VerifyFile(volName, fileName, size+1, algo, hashBytes, 0); err == nil { + if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size+1, algo, hashBytes, 0); err == nil { t.Fatal("expected to fail bitrot check") } - if err := posixStorage.DeleteFile(volName, fileName); err != nil { + if err := xlStorage.DeleteFile(volName, fileName); err != nil { t.Fatal(err) } @@ -1697,7 +1678,7 @@ func TestPosixVerifyFile(t *testing.T) { algo = HighwayHash256S shardSize := int64(1024 * 1024) shard := make([]byte, shardSize) - w := newStreamingBitrotWriter(posixStorage, volName, fileName, size, algo, shardSize) + w := newStreamingBitrotWriter(xlStorage, volName, fileName, size, algo, shardSize) reader := bytes.NewReader(data) for { // Using io.CopyBuffer instead of this loop will not work for us as io.CopyBuffer @@ -1713,12 +1694,12 @@ func 
TestPosixVerifyFile(t *testing.T) {
 		t.Fatal(err)
 	}
 	w.Close()
-	if err := posixStorage.VerifyFile(volName, fileName, size, algo, nil, shardSize); err != nil {
+	if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size, algo, nil, shardSize); err != nil {
 		t.Fatal(err)
 	}

 	// 4) Streaming bitrot check on corrupted file
-	filePath := pathJoin(posixStorage.String(), volName, fileName)
+	filePath := pathJoin(xlStorage.String(), volName, fileName)
 	f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0644)
 	if err != nil {
 		t.Fatal(err)
@@ -1727,10 +1708,10 @@ func TestPosixVerifyFile(t *testing.T) {
 		t.Fatal(err)
 	}
 	f.Close()
-	if err := posixStorage.VerifyFile(volName, fileName, size, algo, nil, shardSize); err == nil {
+	if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size, algo, nil, shardSize); err == nil {
 		t.Fatal("expected to fail bitrot check")
 	}
-	if err := posixStorage.VerifyFile(volName, fileName, size+1, algo, nil, shardSize); err == nil {
+	if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size+1, algo, nil, shardSize); err == nil {
 		t.Fatal("expected to fail bitrot check")
 	}
 }
diff --git a/cmd/posix_unix_test.go b/cmd/xl-storage_unix_test.go
similarity index 79%
rename from cmd/posix_unix_test.go
rename to cmd/xl-storage_unix_test.go
index a6dedd847..8dc76b02b 100644
--- a/cmd/posix_unix_test.go
+++ b/cmd/xl-storage_unix_test.go
@@ -1,7 +1,7 @@
 // +build linux darwin dragonfly freebsd netbsd openbsd

 /*
- * MinIO Cloud Storage, (C) 2016 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -48,10 +48,10 @@ func TestIsValidUmaskVol(t *testing.T) {
 	}
 	testCase := testCases[0]

-	// Initialize a new posix layer.
-	disk, err := newPosix(tmpPath, "")
+	// Initialize a new xlStorage layer.
+	disk, err := newXLStorage(tmpPath, "")
 	if err != nil {
-		t.Fatalf("Initializing posix failed with %s.", err)
+		t.Fatalf("Initializing xlStorage failed with %s.", err)
 	}

 	// Attempt to create a volume to verify the permissions later.
@@ -90,10 +90,10 @@ func TestIsValidUmaskFile(t *testing.T) {
 	}
 	testCase := testCases[0]

-	// Initialize a new posix layer.
-	disk, err := newPosix(tmpPath, "")
+	// Initialize a new xlStorage layer.
+	disk, err := newXLStorage(tmpPath, "")
 	if err != nil {
-		t.Fatalf("Initializing posix failed with %s.", err)
+		t.Fatalf("Initializing xlStorage failed with %s.", err)
 	}

 	// Attempt to create a volume to verify the permissions later.
@@ -106,21 +106,12 @@ func TestIsValidUmaskFile(t *testing.T) {

 	// Attempt to create a file to verify the permissions later.
 	// AppendFile creates file with 0666 perms.
-	if err = disk.AppendFile(testCase.volName, "hello-world.txt", []byte("Hello World")); err != nil {
+	if err = disk.AppendFile(testCase.volName, pathJoin("hello-world.txt", xlStorageFormatFile), []byte("Hello World")); err != nil {
 		t.Fatalf("Create a file `test` failed with %s expected to pass.", err)
 	}

-	// StatFile - stat the file.
-	fi, err := disk.StatFile(testCase.volName, "hello-world.txt")
-	if err != nil {
+	// CheckFile - check that the file exists.
+	if err := disk.CheckFile(testCase.volName, "hello-world.txt"); err != nil {
 		t.Fatalf("Stat failed with %s expected to pass.", err)
 	}
-
-	// Get umask of the bits stored.
-	currentUmask := 0666 - uint32(fi.Mode.Perm())
-
-	// Verify if umask is correct.
- if int(currentUmask) != testCase.expectedUmask { - t.Fatalf("Umask check failed expected %d, got %d", testCase.expectedUmask, currentUmask) - } } diff --git a/cmd/posix_windows_test.go b/cmd/xl-storage_windows_test.go similarity index 92% rename from cmd/posix_windows_test.go rename to cmd/xl-storage_windows_test.go index 2ae38d8c7..38e82db51 100644 --- a/cmd/posix_windows_test.go +++ b/cmd/xl-storage_windows_test.go @@ -1,7 +1,7 @@ // +build windows /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -48,7 +48,7 @@ func TestUNCPaths(t *testing.T) { // Instantiate posix object to manage a disk var fs StorageAPI - fs, err = newPosix(dir, "") + fs, err = newXLStorage(dir, "") if err != nil { t.Fatal(err) } @@ -72,7 +72,7 @@ func TestUNCPaths(t *testing.T) { } } -// Test to validate posix behavior on windows when a non-final path component is a file. +// Test to validate xlStorage behavior on windows when a non-final path component is a file. func TestUNCPathENOTDIR(t *testing.T) { // Instantiate posix object to manage a disk dir, err := ioutil.TempDir("", "testdisk-") @@ -83,7 +83,7 @@ func TestUNCPathENOTDIR(t *testing.T) { defer os.RemoveAll(dir) var fs StorageAPI - fs, err = newPosix(dir, "") + fs, err = newXLStorage(dir, "") if err != nil { t.Fatal(err) } diff --git a/cmd/xl-v1-list-objects-heal.go b/cmd/xl-v1-list-objects-heal.go deleted file mode 100644 index 4974b7721..000000000 --- a/cmd/xl-v1-list-objects-heal.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "context" - - "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/madmin" -) - -// This is not implemented/needed anymore, look for xl-sets.ListBucketHeal() -func (xl xlObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { - logger.LogIf(ctx, NotImplemented{}) - return nil, NotImplemented{} -} - -// This is not implemented/needed anymore, look for xl-sets.HealObjects() -func (xl xlObjects) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn healObjectFn) error { - logger.LogIf(ctx, NotImplemented{}) - return NotImplemented{} -} - -// this is not implemented/needed anymore, look for xl-sets.Walk() -func (xl xlObjects) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error { - logger.LogIf(ctx, NotImplemented{}) - return NotImplemented{} -} diff --git a/cmd/xl-v1-list-objects.go b/cmd/xl-v1-list-objects.go deleted file mode 100644 index 16e267293..000000000 --- a/cmd/xl-v1-list-objects.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "context" -) - -// ListObjects - list all objects at prefix, delimited by '/', shouldn't be called. -func (xl xlObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { - // Shouldn't be called - return loi, NotImplemented{} -} diff --git a/cmd/xl-v1-metadata.go b/cmd/xl-v1-metadata.go deleted file mode 100644 index 4941d2685..000000000 --- a/cmd/xl-v1-metadata.go +++ /dev/null @@ -1,459 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "bytes" - "context" - "encoding/hex" - "encoding/json" - "fmt" - "net/http" - "path" - "sort" - "time" - - jsoniter "github.com/json-iterator/go" - xhttp "github.com/minio/minio/cmd/http" - "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/sync/errgroup" - "github.com/minio/sha256-simd" -) - -const erasureAlgorithmKlauspost = "klauspost/reedsolomon/vandermonde" - -// ObjectPartInfo Info of each part kept in the multipart metadata -// file after CompleteMultipartUpload() is called. -type ObjectPartInfo struct { - ETag string `json:"etag,omitempty"` - Number int `json:"number"` - Size int64 `json:"size"` - ActualSize int64 `json:"actualSize"` -} - -// byObjectPartNumber is a collection satisfying sort.Interface. -type byObjectPartNumber []ObjectPartInfo - -func (t byObjectPartNumber) Len() int { return len(t) } -func (t byObjectPartNumber) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t byObjectPartNumber) Less(i, j int) bool { return t[i].Number < t[j].Number } - -// ChecksumInfo - carries checksums of individual scattered parts per disk. -type ChecksumInfo struct { - PartNumber int - Algorithm BitrotAlgorithm - Hash []byte -} - -type checksumInfoJSON struct { - Name string `json:"name"` - Algorithm string `json:"algorithm"` - Hash string `json:"hash,omitempty"` -} - -// MarshalJSON marshals the ChecksumInfo struct -func (c ChecksumInfo) MarshalJSON() ([]byte, error) { - info := checksumInfoJSON{ - Name: fmt.Sprintf("part.%d", c.PartNumber), - Algorithm: c.Algorithm.String(), - Hash: hex.EncodeToString(c.Hash), - } - return json.Marshal(info) -} - -// UnmarshalJSON - should never be called, instead xlMetaV1UnmarshalJSON() should be used. 
-func (c *ChecksumInfo) UnmarshalJSON(data []byte) error { - var info checksumInfoJSON - var json = jsoniter.ConfigCompatibleWithStandardLibrary - if err := json.Unmarshal(data, &info); err != nil { - return err - } - sum, err := hex.DecodeString(info.Hash) - if err != nil { - return err - } - c.Algorithm = BitrotAlgorithmFromString(info.Algorithm) - c.Hash = sum - if _, err = fmt.Sscanf(info.Name, "part.%d", &c.PartNumber); err != nil { - return err - } - - if !c.Algorithm.Available() { - logger.LogIf(GlobalContext, errBitrotHashAlgoInvalid) - return errBitrotHashAlgoInvalid - } - return nil -} - -// ErasureInfo holds erasure coding and bitrot related information. -type ErasureInfo struct { - // Algorithm is the string representation of erasure-coding-algorithm - Algorithm string `json:"algorithm"` - // DataBlocks is the number of data blocks for erasure-coding - DataBlocks int `json:"data"` - // ParityBlocks is the number of parity blocks for erasure-coding - ParityBlocks int `json:"parity"` - // BlockSize is the size of one erasure-coded block - BlockSize int64 `json:"blockSize"` - // Index is the index of the current disk - Index int `json:"index"` - // Distribution is the distribution of the data and parity blocks - Distribution []int `json:"distribution"` - // Checksums holds all bitrot checksums of all erasure encoded blocks - Checksums []ChecksumInfo `json:"checksum,omitempty"` -} - -// AddChecksumInfo adds a checksum of a part. -func (e *ErasureInfo) AddChecksumInfo(ckSumInfo ChecksumInfo) { - for i, sum := range e.Checksums { - if sum.PartNumber == ckSumInfo.PartNumber { - e.Checksums[i] = ckSumInfo - return - } - } - e.Checksums = append(e.Checksums, ckSumInfo) -} - -// GetChecksumInfo - get checksum of a part. -func (e ErasureInfo) GetChecksumInfo(partNumber int) (ckSum ChecksumInfo) { - for _, sum := range e.Checksums { - if sum.PartNumber == partNumber { - // Return the checksum - return sum - } - } - return ChecksumInfo{} -} - -// ShardFileSize - returns final erasure size from original size. -func (e ErasureInfo) ShardFileSize(totalLength int64) int64 { - if totalLength == 0 { - return 0 - } - if totalLength == -1 { - return -1 - } - numShards := totalLength / e.BlockSize - lastBlockSize := totalLength % e.BlockSize - lastShardSize := ceilFrac(lastBlockSize, int64(e.DataBlocks)) - return numShards*e.ShardSize() + lastShardSize -} - -// ShardSize - returns actual shared size from erasure blockSize. -func (e ErasureInfo) ShardSize() int64 { - return ceilFrac(e.BlockSize, int64(e.DataBlocks)) -} - -// statInfo - carries stat information of the object. -type statInfo struct { - Size int64 `json:"size"` // Size of the object `xl.json`. - ModTime time.Time `json:"modTime"` // ModTime of the object `xl.json`. -} - -// A xlMetaV1 represents `xl.json` metadata header. -type xlMetaV1 struct { - Version string `json:"version"` // Version of the current `xl.json`. - Format string `json:"format"` // Format of the current `xl.json`. - Stat statInfo `json:"stat"` // Stat of the current object `xl.json`. - // Erasure coded info for the current object `xl.json`. - Erasure ErasureInfo `json:"erasure"` - // MinIO release tag for current object `xl.json`. - Minio struct { - Release string `json:"release"` - } `json:"minio"` - // Metadata map for current object `xl.json`. - Meta map[string]string `json:"meta,omitempty"` - // Captures all the individual object `xl.json`. - Parts []ObjectPartInfo `json:"parts,omitempty"` -} - -// XL metadata constants. -const ( - // XL meta version. 
- xlMetaVersion = "1.0.1" - - // XL meta version. - xlMetaVersion100 = "1.0.0" - - // XL meta format string. - xlMetaFormat = "xl" - - // Add new constants here. -) - -// newXLMetaV1 - initializes new xlMetaV1, adds version, allocates a fresh erasure info. -func newXLMetaV1(object string, dataBlocks, parityBlocks int) (xlMeta xlMetaV1) { - xlMeta = xlMetaV1{} - xlMeta.Version = xlMetaVersion - xlMeta.Format = xlMetaFormat - xlMeta.Minio.Release = ReleaseTag - xlMeta.Erasure = ErasureInfo{ - Algorithm: erasureAlgorithmKlauspost, - DataBlocks: dataBlocks, - ParityBlocks: parityBlocks, - BlockSize: blockSizeV1, - Distribution: hashOrder(object, dataBlocks+parityBlocks), - } - return xlMeta -} - -// Return a new xlMetaV1 initialized using the given xlMetaV1. Used in healing to make sure that we do not copy -// over any part's checksum info which will differ for different disks. -func newXLMetaFromXLMeta(meta xlMetaV1) xlMetaV1 { - xlMeta := meta - xlMeta.Erasure.Checksums = nil - xlMeta.Parts = nil - return xlMeta -} - -// IsValid - tells if the format is sane by validating the version -// string, format and erasure info fields. -func (m xlMetaV1) IsValid() bool { - return isXLMetaFormatValid(m.Version, m.Format) && - isXLMetaErasureInfoValid(m.Erasure.DataBlocks, m.Erasure.ParityBlocks) -} - -// Verifies if the backend format metadata is sane by validating -// the version string and format style. -func isXLMetaFormatValid(version, format string) bool { - return ((version == xlMetaVersion || version == xlMetaVersion100) && - format == xlMetaFormat) -} - -// Verifies if the backend format metadata is sane by validating -// the ErasureInfo, i.e. data and parity blocks. -func isXLMetaErasureInfoValid(data, parity int) bool { - return ((data >= parity) && (data != 0) && (parity != 0)) -} - -// Converts metadata to object info. -func (m xlMetaV1) ToObjectInfo(bucket, object string) ObjectInfo { - objInfo := ObjectInfo{ - IsDir: false, - Bucket: bucket, - Name: object, - Size: m.Stat.Size, - ModTime: m.Stat.ModTime, - ContentType: m.Meta["content-type"], - ContentEncoding: m.Meta["content-encoding"], - } - // Update expires - var ( - t time.Time - e error - ) - if exp, ok := m.Meta["expires"]; ok { - if t, e = time.Parse(http.TimeFormat, exp); e == nil { - objInfo.Expires = t.UTC() - } - } - objInfo.backendType = BackendErasure - - // Extract etag from metadata. - objInfo.ETag = extractETag(m.Meta) - - // Add user tags to the object info - objInfo.UserTags = m.Meta[xhttp.AmzObjectTagging] - - // etag/md5Sum has already been extracted. We need to - // remove to avoid it from appearing as part of - // response headers. e.g, X-Minio-* or X-Amz-*. - // Tags have also been extracted, we remove that as well. - objInfo.UserDefined = cleanMetadata(m.Meta) - - // All the parts per object. - objInfo.Parts = m.Parts - - // Update storage class - if sc, ok := m.Meta[xhttp.AmzStorageClass]; ok { - objInfo.StorageClass = sc - } else { - objInfo.StorageClass = globalMinioDefaultStorageClass - } - - // Success. - return objInfo -} - -// objectPartIndex - returns the index of matching object part number. -func objectPartIndex(parts []ObjectPartInfo, partNumber int) int { - for i, part := range parts { - if partNumber == part.Number { - return i - } - } - return -1 -} - -// AddObjectPart - add a new object part in order. 
-func (m *xlMetaV1) AddObjectPart(partNumber int, partETag string, partSize int64, actualSize int64) { - partInfo := ObjectPartInfo{ - Number: partNumber, - ETag: partETag, - Size: partSize, - ActualSize: actualSize, - } - - // Update part info if it already exists. - for i, part := range m.Parts { - if partNumber == part.Number { - m.Parts[i] = partInfo - return - } - } - - // Proceed to include new part info. - m.Parts = append(m.Parts, partInfo) - - // Parts in xlMeta should be in sorted order by part number. - sort.Sort(byObjectPartNumber(m.Parts)) -} - -// ObjectToPartOffset - translate offset of an object to offset of its individual part. -func (m xlMetaV1) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) { - if offset == 0 { - // Special case - if offset is 0, then partIndex and partOffset are always 0. - return 0, 0, nil - } - partOffset = offset - // Seek until object offset maps to a particular part offset. - for i, part := range m.Parts { - partIndex = i - // Offset is smaller than size we have reached the proper part offset. - if partOffset < part.Size { - return partIndex, partOffset, nil - } - // Continue to towards the next part. - partOffset -= part.Size - } - logger.LogIf(ctx, InvalidRange{}) - // Offset beyond the size of the object return InvalidRange. - return 0, 0, InvalidRange{} -} - -func getXLMetaInQuorum(ctx context.Context, metaArr []xlMetaV1, modTime time.Time, quorum int) (xmv xlMetaV1, e error) { - metaHashes := make([]string, len(metaArr)) - for i, meta := range metaArr { - if meta.IsValid() && meta.Stat.ModTime.Equal(modTime) { - h := sha256.New() - for _, part := range meta.Parts { - h.Write([]byte(fmt.Sprintf("part.%d", part.Number))) - } - metaHashes[i] = hex.EncodeToString(h.Sum(nil)) - } - } - - metaHashCountMap := make(map[string]int) - for _, hash := range metaHashes { - if hash == "" { - continue - } - metaHashCountMap[hash]++ - } - - maxHash := "" - maxCount := 0 - for hash, count := range metaHashCountMap { - if count > maxCount { - maxCount = count - maxHash = hash - } - } - - if maxCount < quorum { - return xlMetaV1{}, errXLReadQuorum - } - - for i, hash := range metaHashes { - if hash == maxHash { - return metaArr[i], nil - } - } - - return xlMetaV1{}, errXLReadQuorum -} - -// pickValidXLMeta - picks one valid xlMeta content and returns from a -// slice of xlmeta content. -func pickValidXLMeta(ctx context.Context, metaArr []xlMetaV1, modTime time.Time, quorum int) (xmv xlMetaV1, e error) { - return getXLMetaInQuorum(ctx, metaArr, modTime, quorum) -} - -// writeXLMetadata - writes `xl.json` to a single disk. -func writeXLMetadata(ctx context.Context, disk StorageAPI, bucket, prefix string, xlMeta xlMetaV1) error { - jsonFile := path.Join(prefix, xlMetaJSONFile) - - // Marshal json. - metadataBytes, err := json.Marshal(&xlMeta) - if err != nil { - logger.LogIf(ctx, err) - return err - } - - // Persist marshaled data. - err = disk.WriteAll(bucket, jsonFile, bytes.NewReader(metadataBytes)) - logger.LogIf(ctx, err) - return err -} - -// Rename `xl.json` content to destination location for each disk in order. 
-func renameXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, quorum int) ([]StorageAPI, error) { - isDir := false - srcXLJSON := path.Join(srcEntry, xlMetaJSONFile) - dstXLJSON := path.Join(dstEntry, xlMetaJSONFile) - return rename(ctx, disks, srcBucket, srcXLJSON, dstBucket, dstXLJSON, isDir, quorum, []error{errFileNotFound}) -} - -// writeUniqueXLMetadata - writes unique `xl.json` content for each disk in order. -func writeUniqueXLMetadata(ctx context.Context, disks []StorageAPI, bucket, prefix string, xlMetas []xlMetaV1, quorum int) ([]StorageAPI, error) { - g := errgroup.WithNErrs(len(disks)) - - // Start writing `xl.json` to all disks in parallel. - for index := range disks { - index := index - g.Go(func() error { - if disks[index] == nil { - return errDiskNotFound - } - // Pick one xlMeta for a disk at index. - xlMetas[index].Erasure.Index = index + 1 - return writeXLMetadata(ctx, disks[index], bucket, prefix, xlMetas[index]) - }, index) - } - - // Wait for all the routines. - mErrs := g.Wait() - - err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum) - return evalDisks(disks, mErrs), err -} - -// Returns per object readQuorum and writeQuorum -// readQuorum is the min required disks to read data. -// writeQuorum is the min required disks to write data. -func objectQuorumFromMeta(ctx context.Context, xl xlObjects, partsMetaData []xlMetaV1, errs []error) (objectReadQuorum, objectWriteQuorum int, err error) { - // get the latest updated Metadata and a count of all the latest updated xlMeta(s) - latestXLMeta, err := getLatestXLMeta(ctx, partsMetaData, errs) - - if err != nil { - return 0, 0, err - } - - // Since all the valid erasure code meta updated at the same time are equivalent, pass dataBlocks - // from latestXLMeta to get the quorum - return latestXLMeta.Erasure.DataBlocks, latestXLMeta.Erasure.DataBlocks + 1, nil -} diff --git a/cmd/xl-v1-metadata_test.go b/cmd/xl-v1-metadata_test.go deleted file mode 100644 index fad2cd3cb..000000000 --- a/cmd/xl-v1-metadata_test.go +++ /dev/null @@ -1,249 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "testing" - "time" - - humanize "github.com/dustin/go-humanize" -) - -const ActualSize = 1000 - -// Test xlMetaV1.AddObjectPart() -func TestAddObjectPart(t *testing.T) { - testCases := []struct { - partNum int - expectedIndex int - }{ - {1, 0}, - {2, 1}, - {4, 2}, - {5, 3}, - {7, 4}, - // Insert part. - {3, 2}, - // Replace existing part. - {4, 3}, - // Missing part. - {6, -1}, - } - - // Setup. - xlMeta := newXLMetaV1("test-object", 8, 8) - if !xlMeta.IsValid() { - t.Fatalf("unable to get xl meta") - } - - // Test them. 
- for _, testCase := range testCases { - if testCase.expectedIndex > -1 { - xlMeta.AddObjectPart(testCase.partNum, "", int64(testCase.partNum+humanize.MiByte), ActualSize) - } - - if index := objectPartIndex(xlMeta.Parts, testCase.partNum); index != testCase.expectedIndex { - t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index) - } - } -} - -// Test objectPartIndex(). -// generates a sample xlMeta data and asserts the output of objectPartIndex() with the expected value. -func TestObjectPartIndex(t *testing.T) { - testCases := []struct { - partNum int - expectedIndex int - }{ - {2, 1}, - {1, 0}, - {5, 3}, - {4, 2}, - {7, 4}, - } - - // Setup. - xlMeta := newXLMetaV1("test-object", 8, 8) - if !xlMeta.IsValid() { - t.Fatalf("unable to get xl meta") - } - - // Add some parts for testing. - for _, testCase := range testCases { - xlMeta.AddObjectPart(testCase.partNum, "", int64(testCase.partNum+humanize.MiByte), ActualSize) - } - - // Add failure test case. - testCases = append(testCases, struct { - partNum int - expectedIndex int - }{6, -1}) - - // Test them. - for _, testCase := range testCases { - if index := objectPartIndex(xlMeta.Parts, testCase.partNum); index != testCase.expectedIndex { - t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index) - } - } -} - -// Test xlMetaV1.ObjectToPartOffset(). -func TestObjectToPartOffset(t *testing.T) { - // Setup. - xlMeta := newXLMetaV1("test-object", 8, 8) - if !xlMeta.IsValid() { - t.Fatalf("unable to get xl meta") - } - - // Add some parts for testing. - // Total size of all parts is 5,242,899 bytes. - for _, partNum := range []int{1, 2, 4, 5, 7} { - xlMeta.AddObjectPart(partNum, "", int64(partNum+humanize.MiByte), ActualSize) - } - - testCases := []struct { - offset int64 - expectedIndex int - expectedOffset int64 - expectedErr error - }{ - {0, 0, 0, nil}, - {1 * humanize.MiByte, 0, 1 * humanize.MiByte, nil}, - {1 + humanize.MiByte, 1, 0, nil}, - {2 + humanize.MiByte, 1, 1, nil}, - // Its valid for zero sized object. - {-1, 0, -1, nil}, - // Max fffset is always (size - 1). - {(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte) - 1, 4, 1048582, nil}, - // Error if offset is size. - {(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte), 0, 0, InvalidRange{}}, - } - - // Test them. - for _, testCase := range testCases { - index, offset, err := xlMeta.ObjectToPartOffset(GlobalContext, testCase.offset) - if err != testCase.expectedErr { - t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err) - } - if index != testCase.expectedIndex { - t.Fatalf("%+v: index: expected = %d, got: %d", testCase, testCase.expectedIndex, index) - } - if offset != testCase.expectedOffset { - t.Fatalf("%+v: offset: expected = %d, got: %d", testCase, testCase.expectedOffset, offset) - } - } -} - -// Helper function to check if two xlMetaV1 values are similar. 
-func isXLMetaSimilar(m, n xlMetaV1) bool { - if m.Version != n.Version { - return false - } - if m.Format != n.Format { - return false - } - if len(m.Parts) != len(n.Parts) { - return false - } - return true -} - -func TestPickValidXLMeta(t *testing.T) { - obj := "object" - x1 := newXLMetaV1(obj, 4, 4) - now := UTCNow() - x1.Stat.ModTime = now - invalidX1 := x1 - invalidX1.Version = "invalid-version" - xs := []xlMetaV1{x1, x1, x1, x1} - invalidXS := []xlMetaV1{invalidX1, invalidX1, invalidX1, invalidX1} - testCases := []struct { - metaArr []xlMetaV1 - modTime time.Time - xlMeta xlMetaV1 - expectedErr error - }{ - { - metaArr: xs, - modTime: now, - xlMeta: x1, - expectedErr: nil, - }, - { - metaArr: invalidXS, - modTime: now, - xlMeta: invalidX1, - expectedErr: errXLReadQuorum, - }, - } - for i, test := range testCases { - xlMeta, err := pickValidXLMeta(GlobalContext, test.metaArr, test.modTime, len(test.metaArr)/2) - if test.expectedErr != nil { - if err.Error() != test.expectedErr.Error() { - t.Errorf("Test %d: Expected to fail with %v but received %v", - i+1, test.expectedErr, err) - } - } else { - if !isXLMetaSimilar(xlMeta, test.xlMeta) { - t.Errorf("Test %d: Expected %v but received %v", - i+1, test.xlMeta, xlMeta) - } - } - } -} - -func TestIsXLMetaFormatValid(t *testing.T) { - tests := []struct { - name int - version string - format string - want bool - }{ - {1, "123", "fs", false}, - {2, "123", xlMetaFormat, false}, - {3, xlMetaVersion, "test", false}, - {4, xlMetaVersion100, "hello", false}, - {5, xlMetaVersion, xlMetaFormat, true}, - {6, xlMetaVersion100, xlMetaFormat, true}, - } - for _, tt := range tests { - if got := isXLMetaFormatValid(tt.version, tt.format); got != tt.want { - t.Errorf("Test %d: Expected %v but received %v", tt.name, got, tt.want) - } - } -} - -func TestIsXLMetaErasureInfoValid(t *testing.T) { - tests := []struct { - name int - data int - parity int - want bool - }{ - {1, 5, 6, false}, - {2, 5, 5, true}, - {3, 0, 5, false}, - {4, 5, 0, false}, - {5, 5, 0, false}, - {6, 5, 4, true}, - } - for _, tt := range tests { - if got := isXLMetaErasureInfoValid(tt.data, tt.parity); got != tt.want { - t.Errorf("Test %d: Expected %v but received %v", tt.name, got, tt.want) - } - } -} diff --git a/cmd/xl-v1-multipart_test.go b/cmd/xl-v1-multipart_test.go deleted file mode 100644 index f2f14ed96..000000000 --- a/cmd/xl-v1-multipart_test.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2014, 2015, 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "context" - "sync" - "testing" - "time" -) - -// Tests cleanup multipart uploads for erasure coded backend. 
-func TestXLCleanupStaleMultipartUploads(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Create an instance of xl backend - obj, fsDirs, err := prepareXL16(ctx) - if err != nil { - t.Fatal(err) - } - // Defer cleanup of backend directories - defer removeRoots(fsDirs) - - z := obj.(*xlZones) - xl := z.zones[0].sets[0] - - bucketName := "bucket" - objectName := "object" - var opts ObjectOptions - - obj.MakeBucketWithLocation(ctx, bucketName, "", false) - uploadID, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, opts) - if err != nil { - t.Fatal("Unexpected err: ", err) - } - - var cleanupWg sync.WaitGroup - cleanupWg.Add(1) - go func() { - defer cleanupWg.Done() - xl.cleanupStaleMultipartUploads(GlobalContext, time.Millisecond, 0, ctx.Done()) - }() - - // Wait for 100ms such that - we have given enough time for cleanup routine to kick in. - // Flaky on slow systems :/ - time.Sleep(100 * time.Millisecond) - - // Exit cleanup.. - cancel() - cleanupWg.Wait() - - // Check if upload id was already purged. - if err = obj.AbortMultipartUpload(context.Background(), bucketName, objectName, uploadID); err != nil { - if _, ok := err.(InvalidUploadID); !ok { - t.Fatal("Unexpected err: ", err) - } - } else { - t.Error("Item was not cleaned up.") - } -} diff --git a/cmd/xl-v1.go b/cmd/xl-v1.go deleted file mode 100644 index b5c154390..000000000 --- a/cmd/xl-v1.go +++ /dev/null @@ -1,391 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "context" - "errors" - "fmt" - "sort" - "sync" - "time" - - "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/bpool" - "github.com/minio/minio/pkg/color" - "github.com/minio/minio/pkg/dsync" - "github.com/minio/minio/pkg/madmin" - "github.com/minio/minio/pkg/sync/errgroup" -) - -// XL constants. -const ( - // XL metadata file carries per object metadata. - xlMetaJSONFile = "xl.json" -) - -// OfflineDisk represents an unavailable disk. -var OfflineDisk StorageAPI // zero value is nil - -// partialUpload is a successful upload of an object -// but not written in all disks (having quorum) -type partialUpload struct { - bucket string - object string - failedSet int -} - -// xlObjects - Implements XL object layer. -type xlObjects struct { - GatewayUnsupported - - // getDisks returns list of storageAPIs. - getDisks func() []StorageAPI - - // getLockers returns list of remote and local lockers. - getLockers func() []dsync.NetLocker - - // getEndpoints returns list of endpoint strings belonging this set. - // some may be local and some remote. - getEndpoints func() []string - - // Locker mutex map. - nsMutex *nsLockMap - - // Byte pools used for temporary i/o buffers. - bp *bpool.BytePoolCap - - mrfUploadCh chan partialUpload -} - -// NewNSLock - initialize a new namespace RWLocker instance. 
-func (xl xlObjects) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { - return xl.nsMutex.NewNSLock(ctx, xl.getLockers, bucket, objects...) -} - -// Shutdown function for object storage interface. -func (xl xlObjects) Shutdown(ctx context.Context) error { - // Add any object layer shutdown activities here. - closeStorageDisks(xl.getDisks()) - return nil -} - -// byDiskTotal is a collection satisfying sort.Interface. -type byDiskTotal []DiskInfo - -func (d byDiskTotal) Len() int { return len(d) } -func (d byDiskTotal) Swap(i, j int) { d[i], d[j] = d[j], d[i] } -func (d byDiskTotal) Less(i, j int) bool { - return d[i].Total < d[j].Total -} - -// getDisksInfo - fetch disks info across all other storage API. -func getDisksInfo(disks []StorageAPI, endpoints []string) (disksInfo []DiskInfo, errs []error, onlineDisks, offlineDisks madmin.BackendDisks) { - disksInfo = make([]DiskInfo, len(disks)) - onlineDisks = make(madmin.BackendDisks) - offlineDisks = make(madmin.BackendDisks) - - for _, ep := range endpoints { - if _, ok := offlineDisks[ep]; !ok { - offlineDisks[ep] = 0 - } - if _, ok := onlineDisks[ep]; !ok { - onlineDisks[ep] = 0 - } - } - - g := errgroup.WithNErrs(len(disks)) - for index := range disks { - index := index - g.Go(func() error { - if disks[index] == OfflineDisk { - // Storage disk is empty, perhaps ignored disk or not available. - return errDiskNotFound - } - info, err := disks[index].DiskInfo() - if err != nil { - if !IsErr(err, baseErrs...) { - reqInfo := (&logger.ReqInfo{}).AppendTags("disk", disks[index].String()) - ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogIf(ctx, err) - } - return err - } - disksInfo[index] = info - return nil - }, index) - } - - errs = g.Wait() - // Wait for the routines. - for i, diskInfoErr := range errs { - if disks[i] == OfflineDisk { - continue - } - ep := endpoints[i] - if diskInfoErr != nil { - offlineDisks[ep]++ - continue - } - onlineDisks[ep]++ - } - - // Success. - return disksInfo, errs, onlineDisks, offlineDisks -} - -// Get an aggregated storage info across all disks. -func getStorageInfo(disks []StorageAPI, endpoints []string) (StorageInfo, []error) { - disksInfo, errs, onlineDisks, offlineDisks := getDisksInfo(disks, endpoints) - - // Sort so that the first element is the smallest. - sort.Sort(byDiskTotal(disksInfo)) - - // Combine all disks to get total usage - usedList := make([]uint64, len(disksInfo)) - totalList := make([]uint64, len(disksInfo)) - availableList := make([]uint64, len(disksInfo)) - mountPaths := make([]string, len(disksInfo)) - - for i, di := range disksInfo { - usedList[i] = di.Used - totalList[i] = di.Total - availableList[i] = di.Free - mountPaths[i] = di.MountPath - } - - storageInfo := StorageInfo{ - Used: usedList, - Total: totalList, - Available: availableList, - MountPaths: mountPaths, - } - - storageInfo.Backend.Type = BackendErasure - storageInfo.Backend.OnlineDisks = onlineDisks - storageInfo.Backend.OfflineDisks = offlineDisks - - return storageInfo, errs -} - -// StorageInfo - returns underlying storage statistics. 
-func (xl xlObjects) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { - - disks := xl.getDisks() - endpoints := xl.getEndpoints() - if local { - var localDisks []StorageAPI - var localEndpoints []string - for i, disk := range disks { - if disk != nil { - if disk.IsLocal() { - // Append this local disk since local flag is true - localDisks = append(localDisks, disk) - localEndpoints = append(localEndpoints, endpoints[i]) - } - } - } - disks = localDisks - endpoints = localEndpoints - } - return getStorageInfo(disks, endpoints) -} - -// GetMetrics - is not implemented and shouldn't be called. -func (xl xlObjects) GetMetrics(ctx context.Context) (*Metrics, error) { - logger.LogIf(ctx, NotImplemented{}) - return &Metrics{}, NotImplemented{} -} - -// CrawlAndGetDataUsage will start crawling buckets and send updated totals as they are traversed. -// Updates are sent on a regular basis and the caller *must* consume them. -func (xl xlObjects) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { - // This should only be called from runDataCrawler and this setup should not happen (zones). - return errors.New("xlObjects CrawlAndGetDataUsage not implemented") -} - -// CrawlAndGetDataUsage will start crawling buckets and send updated totals as they are traversed. -// Updates are sent on a regular basis and the caller *must* consume them. -func (xl xlObjects) crawlAndGetDataUsage(ctx context.Context, buckets []BucketInfo, bf *bloomFilter, updates chan<- dataUsageCache) error { - var disks []StorageAPI - - for _, d := range xl.getLoadBalancedDisks() { - if d == nil || !d.IsOnline() { - continue - } - disks = append(disks, d) - } - if len(disks) == 0 || len(buckets) == 0 { - return nil - } - - // Load bucket totals - oldCache := dataUsageCache{} - err := oldCache.load(ctx, xl, dataUsageCacheName) - if err != nil { - return err - } - - // New cache.. - cache := dataUsageCache{ - Info: dataUsageCacheInfo{ - Name: dataUsageRoot, - NextCycle: oldCache.Info.NextCycle, - }, - Cache: make(map[string]dataUsageEntry, len(oldCache.Cache)), - } - - // Put all buckets into channel. - bucketCh := make(chan BucketInfo, len(buckets)) - // Add new buckets first - for _, b := range buckets { - if oldCache.find(b.Name) == nil { - bucketCh <- b - } - } - // Add existing buckets. - for _, b := range buckets { - e := oldCache.find(b.Name) - if e != nil { - if bf == nil || bf.containsDir(b.Name) { - bucketCh <- b - cache.replace(b.Name, dataUsageRoot, *e) - } else { - if intDataUpdateTracker.debug { - logger.Info(color.Green("crawlAndGetDataUsage:")+" Skipping bucket %v, not updated", b.Name) - } - } - } - } - - close(bucketCh) - bucketResults := make(chan dataUsageEntryInfo, len(disks)) - - // Start async collector/saver. - // This goroutine owns the cache. - var saverWg sync.WaitGroup - saverWg.Add(1) - go func() { - const updateTime = 30 * time.Second - t := time.NewTicker(updateTime) - defer t.Stop() - defer saverWg.Done() - var lastSave time.Time - - saveLoop: - for { - select { - case <-ctx.Done(): - // Return without saving. - return - case <-t.C: - if cache.Info.LastUpdate.Equal(lastSave) { - continue - } - logger.LogIf(ctx, cache.save(ctx, xl, dataUsageCacheName)) - updates <- cache.clone() - lastSave = cache.Info.LastUpdate - case v, ok := <-bucketResults: - if !ok { - break saveLoop - } - cache.replace(v.Name, v.Parent, v.Entry) - cache.Info.LastUpdate = time.Now() - } - } - // Save final state... 
- cache.Info.NextCycle++ - cache.Info.LastUpdate = time.Now() - logger.LogIf(ctx, cache.save(ctx, xl, dataUsageCacheName)) - if intDataUpdateTracker.debug { - logger.Info(color.Green("crawlAndGetDataUsage:")+" Cache saved, Next Cycle: %d", cache.Info.NextCycle) - } - updates <- cache - }() - - // Start one crawler per disk - var wg sync.WaitGroup - wg.Add(len(disks)) - for i := range disks { - go func(i int) { - defer wg.Done() - disk := disks[i] - - for bucket := range bucketCh { - select { - case <-ctx.Done(): - return - default: - } - - // Load cache for bucket - cacheName := pathJoin(bucket.Name, dataUsageCacheName) - cache := dataUsageCache{} - logger.LogIf(ctx, cache.load(ctx, xl, cacheName)) - if cache.Info.Name == "" { - cache.Info.Name = bucket.Name - } - if cache.Info.Name != bucket.Name { - logger.LogIf(ctx, fmt.Errorf("cache name mismatch: %s != %s", cache.Info.Name, bucket.Name)) - cache.Info = dataUsageCacheInfo{ - Name: bucket.Name, - LastUpdate: time.Time{}, - NextCycle: 0, - } - } - - // Calc usage - before := cache.Info.LastUpdate - if bf != nil { - cache.Info.BloomFilter = bf.bytes() - } - cache, err = disk.CrawlAndGetDataUsage(ctx, cache) - cache.Info.BloomFilter = nil - if err != nil { - logger.LogIf(ctx, err) - if cache.Info.LastUpdate.After(before) { - logger.LogIf(ctx, cache.save(ctx, xl, cacheName)) - } - continue - } - - var root dataUsageEntry - if r := cache.root(); r != nil { - root = cache.flatten(*r) - } - bucketResults <- dataUsageEntryInfo{ - Name: cache.Info.Name, - Parent: dataUsageRoot, - Entry: root, - } - // Save cache - logger.LogIf(ctx, cache.save(ctx, xl, cacheName)) - } - }(i) - } - wg.Wait() - close(bucketResults) - saverWg.Wait() - - return nil -} - -// IsReady - shouldn't be called will panic. -func (xl xlObjects) IsReady(ctx context.Context) bool { - logger.CriticalIf(ctx, NotImplemented{}) - return true -} diff --git a/docs/bucket/versioning/DESIGN.md b/docs/bucket/versioning/DESIGN.md new file mode 100644 index 000000000..4593df048 --- /dev/null +++ b/docs/bucket/versioning/DESIGN.md @@ -0,0 +1,100 @@ +# Bucket Versioning Design Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) + +Example of a version enabled bucket `engineering` +``` +/mnt/data02/engineering/backup.tar.gz +├─ 0379f361-695c-4509-b0b8-a4842d414579 +│ └─ part.1 +├─ 804fea2c-c21e-490b-98ff-cdb647047cb6 +│ └─ part.1 +├─ e063d138-4d6e-4e68-8576-12d1b4509cc3 +│ └─ part.1 +├─ e675c1f6-476d-4b46-be31-281c887aea7b +│ └─ part.1 +└─ xl.meta + +/mnt/data03/engineering/backup.tar.gz +├─ 0379f361-695c-4509-b0b8-a4842d414579 +│ └─ part.1 +├─ 804fea2c-c21e-490b-98ff-cdb647047cb6 +│ └─ part.1 +├─ e063d138-4d6e-4e68-8576-12d1b4509cc3 +│ └─ part.1 +├─ e675c1f6-476d-4b46-be31-281c887aea7b +│ └─ part.1 +└─ xl.meta + +/mnt/data04/engineering/backup.tar.gz +├─ 0379f361-695c-4509-b0b8-a4842d414579 +│ └─ part.1 +├─ 804fea2c-c21e-490b-98ff-cdb647047cb6 +│ └─ part.1 +├─ e063d138-4d6e-4e68-8576-12d1b4509cc3 +│ └─ part.1 +├─ e675c1f6-476d-4b46-be31-281c887aea7b +│ └─ part.1 +└─ xl.meta + +/mnt/data05/engineering/backup.tar.gz +├─ 0379f361-695c-4509-b0b8-a4842d414579 +│ └─ part.1 +├─ 804fea2c-c21e-490b-98ff-cdb647047cb6 +│ └─ part.1 +├─ e063d138-4d6e-4e68-8576-12d1b4509cc3 +│ └─ part.1 +├─ e675c1f6-476d-4b46-be31-281c887aea7b +│ └─ part.1 +└─ xl.meta +``` + +`xl.meta` is a msgpack file with following data structure, this is converted from binary format to 
JSON for convenience.
```json
{
  "Versions": [
    {
      "Type": 1,
      "V2Obj": {
        "ID": "KWUs8S+8RZq4Vp5TWy6KFg==",
        "DDir": "X3pDAFu8Rjyft7QD6t7W5g==",
        "EcAlgo": 1,
        "EcM": 2,
        "EcN": 2,
        "EcBSize": 10485760,
        "EcIndex": 3,
        "EcDist": [
          3,
          4,
          1,
          2
        ],
        "CSumAlgo": 1,
        "PartNums": [
          1
        ],
        "PartETags": [
          ""
        ],
        "PartSizes": [
          314
        ],
        "PartASizes": [
          282
        ],
        "Size": 314,
        "MTime": 1591820730,
        "MetaSys": {
          "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "bXktbWluaW8ta2V5",
          "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "ZXlKaFpXRmtJam9pUVVWVExUSTFOaTFIUTAwdFNFMUJReTFUU0VFdE1qVTJJaXdpYVhZaU9pSkJMMVZzZFVnelZYVjZSR2N6UkhGWUwycEViRmRCUFQwaUxDSnViMjVqWlNJNklpdE9lbkJXVWtseFlWSlNVa2t2UVhNaUxDSmllWFJsY3lJNklrNDBabVZsZG5WU1NWVnRLMFoyUWpBMVlYTk9aMU41YVhoU1RrNUpkMDlhTkdKa2RuaGpLMjFuVDNnMFFYbFJhbE15V0hkU1pEZzNRMk54ZUN0SFFuSWlmUT09",
          "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "REFSRXYyLUhNQUMtU0hBMjU2",
          "X-Minio-Internal-Server-Side-Encryption-Iv": "bW5YRDhRUGczMVhkc2pJT1V1UVlnbWJBcndIQVhpTUN1dnVBS0QwNUVpaz0=",
          "X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "SUFBZkFPeUo5ZHVVSEkxYXFLU0NSRkJTTnM0QkVJNk9JWU1QcFVTSXFhK2dHVThXeE9oSHJCZWwwdnRvTldUNE8zS1BtcWluR0cydmlNNFRWa0N0Mmc9PQ=="
        },
        "MetaUsr": {
          "content-type": "application/octet-stream",
          "etag": "20000f00f58c508b40720270929bd90e9f07b9bd78fb605e5432a67635fc34722e4fc53b1d5fab9ff8400eb9ded4fba2"
        }
      }
    }
  ]
}
```
diff --git a/docs/bucket/versioning/README.md b/docs/bucket/versioning/README.md
new file mode 100644
index 000000000..40b6d2229
--- /dev/null
+++ b/docs/bucket/versioning/README.md
@@ -0,0 +1,37 @@
+# Bucket Versioning Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
+
+MinIO versioning is designed to keep multiple versions of an object in one bucket. For example, you could store `spark.csv` (version `ede336f2`) and `spark.csv` (version `fae684da`) in a single bucket. Versioning protects you from unintended overwrites and deletions, and lets you apply retention policies and archive your objects.
+
+To customize data retention and storage usage, use object versioning together with object lifecycle management. If you have an object expiration lifecycle policy on your non-versioned bucket and you want to maintain the same permanent delete behavior on a versioning-enabled bucket, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle policy manages the deletes of the noncurrent object versions in the versioning-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.)
+
+Versioning must be explicitly enabled on a bucket; it is not enabled by default. Buckets with object locking enabled have versioning enabled automatically. Enabling and suspending versioning is done at the bucket level.
+
+Only MinIO generates version IDs, and they cannot be edited. Version IDs are `DCE 1.1 v4` UUIDs (based on random data): 128-bit numbers intended to have a high likelihood of uniqueness over space and time, and to be computationally difficult to guess. They are globally unique identifiers which can be generated locally without contacting a global registration authority.
UUIDs are suitable as unique identifiers both for mass tagging objects with an extremely short lifetime and for reliably identifying very persistent objects across a network.
+
+When you PUT an object in a versioning-enabled bucket, the noncurrent version is not overwritten. The following figure shows that when a new version of `spark.csv` is PUT into a bucket that already contains an object with the same name, the original object (ID = `ede336f2`) remains in the bucket, MinIO generates a new version (ID = `fae684da`), and adds the newer version to the bucket.
+
+![put](versioning_PUT_versionEnabled.png)
+
+This protects objects against accidental overwrites and deletes, and allows a previous version of an object to be retrieved.
+
+When you DELETE an object, all versions remain in the bucket and MinIO adds a delete marker, as shown below:
+
+![delete](versioning_DELETE_versionEnabled.png)
+
+Now the delete marker becomes the current version of the object. GET requests by default always retrieve the latest stored version, so a simple GET object request made while the current version is a delete marker returns `404` `The specified key does not exist`, as shown below:
+
+![get](versioning_GET_versionEnabled.png)
+
+By specifying a version ID in the GET request, as shown below, you can retrieve the specific object version `fae684da`.
+
+![get_version_id](versioning_GET_versionEnabled_id.png)
+
+To permanently delete an object, you need to specify the version you want to delete, and only a user with the appropriate permissions can permanently delete a version. As shown below, a DELETE request with a specific version ID permanently deletes an object from the bucket. No delete marker is added for DELETE requests with a version ID.
+
+![delete_version_id](versioning_DELETE_versionEnabled_id.png)
+
+## Features
+- All buckets on MinIO are always in one of the following states: unversioned (the default), versioning-enabled, or versioning-suspended.
+- The versioning state applies to all of the objects in a versioning-enabled bucket. Once you enable versioning on a bucket, objects in it are thereafter always versioned and given a unique version ID.
+- Buckets can be created with versioning enabled, and versioning can later be suspended; existing object versions stay as they are and can still be accessed using their version IDs.
+- All versions, including delete markers, must be deleted before the bucket itself can be deleted. (See the example below.)
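+
+## Example
+
+As a quick sketch of the workflow described above, the commands below use the AWS CLI to enable versioning and work with individual versions. They assume a MinIO server running at `http://localhost:9000` and an AWS CLI profile already configured with credentials for it; the bucket name, object name, and `<version-id>` placeholder are illustrative only.
+
+```sh
+# Enable versioning on the bucket.
+aws --endpoint-url http://localhost:9000 s3api put-bucket-versioning \
+    --bucket engineering --versioning-configuration Status=Enabled
+
+# PUT the same key twice; each upload gets its own version ID.
+aws --endpoint-url http://localhost:9000 s3api put-object \
+    --bucket engineering --key spark.csv --body spark.csv
+aws --endpoint-url http://localhost:9000 s3api put-object \
+    --bucket engineering --key spark.csv --body spark.csv
+
+# List all versions of the key; each entry carries a VersionId and an IsLatest flag.
+aws --endpoint-url http://localhost:9000 s3api list-object-versions \
+    --bucket engineering --prefix spark.csv
+
+# GET a specific (noncurrent) version by its version ID.
+aws --endpoint-url http://localhost:9000 s3api get-object \
+    --bucket engineering --key spark.csv --version-id <version-id> spark.csv.old
+
+# Permanently delete that version; no delete marker is added for this request.
+aws --endpoint-url http://localhost:9000 s3api delete-object \
+    --bucket engineering --key spark.csv --version-id <version-id>
+```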
diff --git a/docs/bucket/versioning/versioning_DELETE_versionEnabled.png b/docs/bucket/versioning/versioning_DELETE_versionEnabled.png
new file mode 100644
index 000000000..6394dc8ac
Binary files /dev/null and b/docs/bucket/versioning/versioning_DELETE_versionEnabled.png differ
diff --git a/docs/bucket/versioning/versioning_DELETE_versionEnabled_id.png b/docs/bucket/versioning/versioning_DELETE_versionEnabled_id.png
new file mode 100644
index 000000000..c3beeffd9
Binary files /dev/null and b/docs/bucket/versioning/versioning_DELETE_versionEnabled_id.png differ
diff --git a/docs/bucket/versioning/versioning_GET_versionEnabled.png b/docs/bucket/versioning/versioning_GET_versionEnabled.png
new file mode 100644
index 000000000..b7f2b3077
Binary files /dev/null and b/docs/bucket/versioning/versioning_GET_versionEnabled.png differ
diff --git a/docs/bucket/versioning/versioning_GET_versionEnabled_id.png b/docs/bucket/versioning/versioning_GET_versionEnabled_id.png
new file mode 100644
index 000000000..570ba4718
Binary files /dev/null and b/docs/bucket/versioning/versioning_GET_versionEnabled_id.png differ
diff --git a/docs/bucket/versioning/versioning_PUT_versionEnabled.png b/docs/bucket/versioning/versioning_PUT_versionEnabled.png
new file mode 100644
index 000000000..388ab4924
Binary files /dev/null and b/docs/bucket/versioning/versioning_PUT_versionEnabled.png differ
diff --git a/docs/bucket/versioning/xl-meta-to-json.go b/docs/bucket/versioning/xl-meta-to-json.go
new file mode 100644
index 000000000..4ce63a57a
--- /dev/null
+++ b/docs/bucket/versioning/xl-meta-to-json.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+	"io"
+	"os"
+
+	"github.com/minio/cli"
+	"github.com/tinylib/msgp/msgp"
+)
+
+func main() {
+	app := cli.NewApp()
+	app.Copyright = "MinIO, Inc."
+	app.Usage = "xl.meta to JSON"
+	app.Version = "0.0.1"
+	app.HideHelpCommand = true
+
+	app.Flags = []cli.Flag{
+		cli.StringFlag{
+			Usage: "path to xl.meta file",
+			Name:  "f, file",
+		},
+	}
+
+	app.Action = func(c *cli.Context) error {
+		r, err := os.Open(c.String("file"))
+		if err != nil {
+			return err
+		}
+		r.Seek(8, io.SeekStart) // skip the 8-byte header that precedes the msgpack payload in xl.meta
+		defer r.Close()
+		_, err = msgp.CopyToJSON(os.Stdout, r)
+		return err
+	}
+	app.Run(os.Args)
+}
diff --git a/docs/minio-limits.md b/docs/minio-limits.md
index 41190fe56..82a1734e4 100644
--- a/docs/minio-limits.md
+++ b/docs/minio-limits.md
@@ -39,7 +39,6 @@ We found the following APIs to be redundant or less useful outside of AWS S3.
If - BucketACL (Use [bucket policies](https://docs.min.io/docs/minio-client-complete-guide#policy) instead) - BucketCORS (CORS enabled by default on all buckets for all HTTP verbs) -- BucketReplication (Use [`mc mirror`](https://docs.min.io/docs/minio-client-complete-guide#mirror) instead) - BucketWebsite (Use [`caddy`](https://github.com/mholt/caddy) or [`nginx`](https://www.nginx.com/resources/wiki/)) - BucketAnalytics, BucketMetrics, BucketLogging (Use [bucket notification](https://docs.min.io/docs/minio-client-complete-guide#events) APIs) - BucketRequestPayment diff --git a/docs/zh_CN/backend/README.md b/docs/zh_CN/backend/README.md deleted file mode 100644 index c4fd882f8..000000000 --- a/docs/zh_CN/backend/README.md +++ /dev/null @@ -1,8 +0,0 @@ -## 后端 - -Minio目前支持两种类型的后端。 - -| MinIO | FS | Erasure | Stability | -|:-----------:|:----:|:----:|:---:| -| Standalone | x | x | Stable | -| Distributed | x | x | Stable | diff --git a/docs/zh_CN/backend/fs/README.md b/docs/zh_CN/backend/fs/README.md deleted file mode 100644 index b2a753517..000000000 --- a/docs/zh_CN/backend/fs/README.md +++ /dev/null @@ -1,24 +0,0 @@ -### Backend format `fs.json` - -```go -// ObjectPartInfo Info of each part kept in the multipart metadata -// file after CompleteMultipartUpload() is called. -type ObjectPartInfo struct { - Number int `json:"number"` - Name string `json:"name"` - ETag string `json:"etag"` - Size int64 `json:"size"` -} - -// A fsMetaV1 represents a metadata header mapping keys to sets of values. -type fsMetaV1 struct { - Version string `json:"version"` - Format string `json:"format"` - MinIO struct { - Release string `json:"release"` - } `json:"minio"` - // Metadata map for current object `fs.json`. - Meta map[string]string `json:"meta,omitempty"` - Parts []ObjectPartInfo `json:"parts,omitempty"` -} -``` diff --git a/docs/zh_CN/backend/xl/README.md b/docs/zh_CN/backend/xl/README.md deleted file mode 100644 index 095ca6cca..000000000 --- a/docs/zh_CN/backend/xl/README.md +++ /dev/null @@ -1,54 +0,0 @@ -### Backend format `xl.json` - -```go -// ObjectPartInfo Info of each part kept in the multipart metadata -// file after CompleteMultipartUpload() is called. -type ObjectPartInfo struct { - Number int `json:"number"` - Name string `json:"name"` - ETag string `json:"etag"` - Size int64 `json:"size"` -} - -// checkSumInfo - carries checksums of individual scattered parts per disk. -type checkSumInfo struct { - Name string `json:"name"` - Algorithm string `json:"algorithm"` - Hash string `json:"hash"` -} - -// erasureInfo - carries erasure coding related information, block -// distribution and checksums. -type erasureInfo struct { - Algorithm string `json:"algorithm"` - DataBlocks int `json:"data"` - ParityBlocks int `json:"parity"` - BlockSize int64 `json:"blockSize"` - Index int `json:"index"` - Distribution []int `json:"distribution"` - Checksum []checkSumInfo `json:"checksum,omitempty"` -} - -// statInfo - carries stat information of the object. -type statInfo struct { - Size int64 `json:"size"` // Size of the object `xl.json`. - ModTime time.Time `json:"modTime"` // ModTime of the object `xl.json`. -} - -// A xlMetaV1 represents `xl.json` metadata header. -type xlMetaV1 struct { - Version string `json:"version"` // Version of the current `xl.json`. - Format string `json:"format"` // Format of the current `xl.json`. - Stat statInfo `json:"stat"` // Stat of the current object `xl.json`. - // Erasure coded info for the current object `xl.json`. 
- Erasure erasureInfo `json:"erasure"` - // MinIO release tag for current object `xl.json`. - MinIO struct { - Release string `json:"release"` - } `json:"minio"` - // Metadata map for current object `xl.json`. - Meta map[string]string `json:"meta,omitempty"` - // Captures all the individual object `xl.json`. - Parts []ObjectPartInfo `json:"parts,omitempty"` -} -``` diff --git a/go.mod b/go.mod index 8ea64a9c1..beebe94d2 100644 --- a/go.mod +++ b/go.mod @@ -23,6 +23,7 @@ require ( github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect + github.com/dchest/siphash v1.2.1 github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/djherbis/atime v1.0.0 github.com/dustin/go-humanize v1.0.0 @@ -102,7 +103,6 @@ require ( github.com/soheilhy/cmux v0.1.4 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 - github.com/stretchr/testify v1.5.1 // indirect github.com/tinylib/msgp v1.1.1 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect github.com/ugorji/go v1.1.5-pre // indirect diff --git a/go.sum b/go.sum index f55cf5583..ee2523c10 100644 --- a/go.sum +++ b/go.sum @@ -71,6 +71,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dchest/siphash v1.2.1 h1:4cLinnzVJDKxTCl9B01807Yiy+W7ZzVHj/KIroQRvT4= +github.com/dchest/siphash v1.2.1/go.mod h1:q+IRvb2gOSrUnYoPqHiyHXS0FOBBOdl6tONBlVnOnt4= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= diff --git a/mint/preinstall.sh b/mint/preinstall.sh index 44d7a0b8f..52c56c218 100755 --- a/mint/preinstall.sh +++ b/mint/preinstall.sh @@ -38,7 +38,7 @@ $APT update $APT install gnupg ca-certificates # download and install golang -GO_VERSION="1.13.10" +GO_VERSION="1.14.3" GO_INSTALL_PATH="/usr/local" download_url="https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" if ! $WGET --output-document=- "$download_url" | tar -C "${GO_INSTALL_PATH}" -zxf -; then diff --git a/mint/run/core/awscli/test.sh b/mint/run/core/awscli/test.sh index 096f4e5d9..78e623365 100755 --- a/mint/run/core/awscli/test.sh +++ b/mint/run/core/awscli/test.sh @@ -1518,177 +1518,177 @@ function test_serverside_encryption_error() { return $rv } -# WORM bucket tests. -function test_worm_bucket() { - # log start time - start_time=$(get_time) +# # WORM bucket tests. +# function test_worm_bucket() { +# # log start time +# start_time=$(get_time) - # Make bucket - bucket_name="awscli-mint-test-bucket-$RANDOM" - function="${AWS} s3api create-bucket --bucket ${bucket_name} --object-lock-enabled-for-bucket" +# # Make bucket +# bucket_name="awscli-mint-test-bucket-$RANDOM" +# function="${AWS} s3api create-bucket --bucket ${bucket_name} --object-lock-enabled-for-bucket" - # execute the test - out=$($function 2>&1) - rv=$? +# # execute the test +# out=$($function 2>&1) +# rv=$? 
- if [ $rv -ne 0 ]; then - # if this functionality is not implemented return right away. - if echo "$out" | grep -q "NotImplemented"; then - ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 - return 0 - fi - fi +# if [ $rv -ne 0 ]; then +# # if this functionality is not implemented return right away. +# if echo "$out" | grep -q "NotImplemented"; then +# ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 +# return 0 +# fi +# fi - # if make bucket succeeds set object lock configuration - if [ $rv -eq 0 ]; then - function="${AWS} s3api put-object-lock-configuration --bucket ${bucket_name} --object-lock-configuration ObjectLockEnabled=Enabled" - out=$($function 2>&1) - rv=$? - if [ $rv -ne 0 ]; then - # if this functionality is not implemented return right away. - if echo "$out" | grep -q "NotImplemented"; then - ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 - return 0 - fi - fi - else - # if make bucket fails, $bucket_name has the error output - out="${bucket_name}" - fi +# # if make bucket succeeds set object lock configuration +# if [ $rv -eq 0 ]; then +# function="${AWS} s3api put-object-lock-configuration --bucket ${bucket_name} --object-lock-configuration ObjectLockEnabled=Enabled" +# out=$($function 2>&1) +# rv=$? +# if [ $rv -ne 0 ]; then +# # if this functionality is not implemented return right away. +# if echo "$out" | grep -q "NotImplemented"; then +# ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 +# return 0 +# fi +# fi +# else +# # if make bucket fails, $bucket_name has the error output +# out="${bucket_name}" +# fi - # if setting object lock configuration succeeds, upload a file first time - if [ $rv -eq 0 ]; then - function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB" - out=$($function 2>&1) - rv=$? - else - # if make bucket fails, $bucket_name has the error output - out="${bucket_name}" - fi +# # if setting object lock configuration succeeds, upload a file first time +# if [ $rv -eq 0 ]; then +# function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB" +# out=$($function 2>&1) +# rv=$? +# else +# # if make bucket fails, $bucket_name has the error output +# out="${bucket_name}" +# fi - # second time upload of same file should fail due to WORM setting - if [ $rv -eq 0 ]; then - function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB" - out=$($function 2>&1) - rv=$? - else - out="First time object upload failed" - fi +# # second time upload will succeed and there shall be now two versions of the object +# if [ $rv -eq 0 ]; then +# function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB" +# out=$($function 2>&1) +# rv=$? +# else +# out="First time object upload failed" +# fi - if [ $rv -eq 0 ]; then - log_success "$(get_duration "$start_time")" "${test_function}" - else - # cleanup is not possible due to one day validity of object lock configurataion - log_failure "$(get_duration "$start_time")" "${function}" "${out}" - fi +# if [ $rv -eq 0 ]; then +# log_success "$(get_duration "$start_time")" "${test_function}" +# else +# # cleanup is not possible due to one day validity of object lock configurataion +# log_failure "$(get_duration "$start_time")" "${function}" "${out}" +# fi - return $rv -} +# return $rv +# } -# Tests creating and deleting an object with legal hold. 
-function test_legal_hold() { - # log start time - start_time=$(get_time) +# # Tests creating and deleting an object with legal hold. +# function test_legal_hold() { +# # log start time +# start_time=$(get_time) - # Make bucket - bucket_name="awscli-mint-test-bucket-$RANDOM" - function="${AWS} s3api create-bucket --bucket ${bucket_name} --object-lock-enabled-for-bucket" +# # Make bucket +# bucket_name="awscli-mint-test-bucket-$RANDOM" +# function="${AWS} s3api create-bucket --bucket ${bucket_name} --object-lock-enabled-for-bucket" - # execute the test - out=$($function 2>&1) - rv=$? +# # execute the test +# out=$($function 2>&1) +# rv=$? - if [ $rv -ne 0 ]; then - # if this functionality is not implemented return right away. - if echo "$out" | grep -q "NotImplemented"; then - ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 - return 0 - fi - fi +# if [ $rv -ne 0 ]; then +# # if this functionality is not implemented return right away. +# if echo "$out" | grep -q "NotImplemented"; then +# ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 +# return 0 +# fi +# fi - # if make bucket succeeds upload a file - if [ $rv -eq 0 ]; then - function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB --object-lock-legal-hold-status ON" - out=$($function 2>&1) - errcnt=$(echo "$out" | sed -n '/Bucket is missing ObjectLockConfiguration/p' | wc -l) - # skip test for gateways - if [ "$errcnt" -eq 1 ]; then - return 0 - fi - rv=$? - else - # if make bucket fails, $bucket_name has the error output - out="${bucket_name}" - fi +# # if make bucket succeeds upload a file +# if [ $rv -eq 0 ]; then +# function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB --object-lock-legal-hold-status ON" +# out=$($function 2>&1) +# errcnt=$(echo "$out" | sed -n '/Bucket is missing ObjectLockConfiguration/p' | wc -l) +# # skip test for gateways +# if [ "$errcnt" -eq 1 ]; then +# return 0 +# fi +# rv=$? +# else +# # if make bucket fails, $bucket_name has the error output +# out="${bucket_name}" +# fi - # if upload succeeds stat the file - if [ $rv -eq 0 ]; then - function="${AWS} s3api head-object --bucket ${bucket_name} --key datafile-1-kB" - # save the ref to function being tested, so it can be logged - test_function=${function} - out=$($function 2>&1) - lhold=$(echo "$out" | jq -r .ObjectLockLegalHoldStatus) - rv=$? - fi +# # if upload succeeds stat the file +# if [ $rv -eq 0 ]; then +# function="${AWS} s3api head-object --bucket ${bucket_name} --key datafile-1-kB" +# # save the ref to function being tested, so it can be logged +# test_function=${function} +# out=$($function 2>&1) +# lhold=$(echo "$out" | jq -r .ObjectLockLegalHoldStatus) +# rv=$? +# fi - # if head-object succeeds, verify metadata has legal hold status - if [ $rv -eq 0 ]; then - if [ "${lhold}" == "" ]; then - rv=1 - out="Legal hold was not applied" - fi - if [ "${lhold}" == "OFF" ]; then - rv=1 - out="Legal hold was not applied" - fi - fi - if [ $rv -eq 0 ]; then - function="${AWS} s3api put-object-legal-hold --bucket ${bucket_name} --key datafile-1-kB --legal-hold Status=OFF" - out=$($function 2>&1) - rv=$? 
- else - # if make bucket fails, $bucket_name has the error output - out="${bucket_name}" - fi - # if upload succeeds download the file - if [ $rv -eq 0 ]; then - function="${AWS} s3api get-object-legal-hold --bucket ${bucket_name} --key datafile-1-kB" - # save the ref to function being tested, so it can be logged - test_function=${function} - out=$($function 2>&1) - lhold=$(echo "$out" | jq -r .LegalHold.Status) - rv=$? - fi +# # if head-object succeeds, verify metadata has legal hold status +# if [ $rv -eq 0 ]; then +# if [ "${lhold}" == "" ]; then +# rv=1 +# out="Legal hold was not applied" +# fi +# if [ "${lhold}" == "OFF" ]; then +# rv=1 +# out="Legal hold was not applied" +# fi +# fi +# if [ $rv -eq 0 ]; then +# function="${AWS} s3api put-object-legal-hold --bucket ${bucket_name} --key datafile-1-kB --legal-hold Status=OFF" +# out=$($function 2>&1) +# rv=$? +# else +# # if make bucket fails, $bucket_name has the error output +# out="${bucket_name}" +# fi +# # if upload succeeds download the file +# if [ $rv -eq 0 ]; then +# function="${AWS} s3api get-object-legal-hold --bucket ${bucket_name} --key datafile-1-kB" +# # save the ref to function being tested, so it can be logged +# test_function=${function} +# out=$($function 2>&1) +# lhold=$(echo "$out" | jq -r .LegalHold.Status) +# rv=$? +# fi - # if head-object succeeds, verify metadata has legal hold status - if [ $rv -eq 0 ]; then - if [ "${lhold}" == "" ]; then - rv=1 - out="Legal hold was not applied" - fi - if [ "${lhold}" == "ON" ]; then - rv=1 - out="Legal hold status not turned off" - fi - fi - # Attempt a delete on prefix shouldn't delete the directory since we have an object inside it. - if [ $rv -eq 0 ]; then - function="${AWS} s3api delete-object --bucket ${bucket_name} --key datafile-1-kB" - # save the ref to function being tested, so it can be logged - test_function=${function} - out=$($function 2>&1) - rv=$? - fi - if [ $rv -eq 0 ]; then - log_success "$(get_duration "$start_time")" "${test_function}" - else - # clean up and log error - ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 - log_failure "$(get_duration "$start_time")" "${function}" "${out}" - fi +# # if head-object succeeds, verify metadata has legal hold status +# if [ $rv -eq 0 ]; then +# if [ "${lhold}" == "" ]; then +# rv=1 +# out="Legal hold was not applied" +# fi +# if [ "${lhold}" == "ON" ]; then +# rv=1 +# out="Legal hold status not turned off" +# fi +# fi +# # Attempt a delete on prefix shouldn't delete the directory since we have an object inside it. +# if [ $rv -eq 0 ]; then +# function="${AWS} s3api delete-object --bucket ${bucket_name} --key datafile-1-kB" +# # save the ref to function being tested, so it can be logged +# test_function=${function} +# out=$($function 2>&1) +# rv=$? +# fi +# if [ $rv -eq 0 ]; then +# log_success "$(get_duration "$start_time")" "${test_function}" +# else +# # clean up and log error +# ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 +# log_failure "$(get_duration "$start_time")" "${function}" "${out}" +# fi - return $rv -} +# return $rv +# } # main handler for all the tests. main() { @@ -1716,9 +1716,9 @@ main() { # Error tests test_list_objects_error && \ test_put_object_error && \ - test_serverside_encryption_error && \ - test_worm_bucket && \ - test_legal_hold + test_serverside_encryption_error + # test_worm_bucket && \ + # test_legal_hold return $? 
 }
diff --git a/pkg/bucket/lifecycle/expiration.go b/pkg/bucket/lifecycle/expiration.go
index 2748d7efd..f220e3fa8 100644
--- a/pkg/bucket/lifecycle/expiration.go
+++ b/pkg/bucket/lifecycle/expiration.go
@@ -22,10 +22,11 @@ import (
 )
 
 var (
-	errLifecycleInvalidDate       = Errorf("Date must be provided in ISO 8601 format")
-	errLifecycleInvalidDays       = Errorf("Days must be positive integer when used with Expiration")
-	errLifecycleInvalidExpiration = Errorf("At least one of Days or Date should be present inside Expiration")
-	errLifecycleDateNotMidnight   = Errorf("'Date' must be at midnight GMT")
+	errLifecycleInvalidDate         = Errorf("Date must be provided in ISO 8601 format")
+	errLifecycleInvalidDays         = Errorf("Days must be positive integer when used with Expiration")
+	errLifecycleInvalidExpiration   = Errorf("At least one of Days or Date should be present inside Expiration")
+	errLifecycleInvalidDeleteMarker = Errorf("Delete marker cannot be specified with Days or Date in a Lifecycle Expiration Policy")
+	errLifecycleDateNotMidnight     = Errorf("'Date' must be at midnight GMT")
 )
 
 // ExpirationDays is a type alias to unmarshal Days in Expiration
@@ -96,17 +97,49 @@ func (eDate *ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartEl
 	return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
 }
 
+// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element.
+type ExpireDeleteMarker bool
+
 // Expiration - expiration actions for a rule in lifecycle configuration.
 type Expiration struct {
-	XMLName xml.Name       `xml:"Expiration"`
-	Days    ExpirationDays `xml:"Days,omitempty"`
-	Date    ExpirationDate `xml:"Date,omitempty"`
+	XMLName      xml.Name           `xml:"Expiration"`
+	Days         ExpirationDays     `xml:"Days,omitempty"`
+	Date         ExpirationDate     `xml:"Date,omitempty"`
+	DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty"`
+}
+
+// UnmarshalXML parses the ExpiredObjectDeleteMarker boolean from the XML element.
+func (b *ExpireDeleteMarker) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error {
+	var deleteMarker bool
+	err := d.DecodeElement(&deleteMarker, &startElement)
+	if err != nil {
+		return err
+	}
+	*b = ExpireDeleteMarker(deleteMarker)
+	return nil
+}
+
+// MarshalXML encodes delete marker boolean into an XML form.
+func (b *ExpireDeleteMarker) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+	if !*b {
+		return nil
+	}
+	return e.EncodeElement(*b, startElement)
+}
 
 // Validate - validates the "Expiration" element
 func (e Expiration) Validate() error {
+	// DeleteMarker cannot be specified if Days or Date is specified.
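+	// For example, an Expiration element that combines both is rejected
+	// with errLifecycleInvalidDeleteMarker:
+	//
+	//   <Expiration>
+	//      <Days>30</Days>
+	//      <ExpiredObjectDeleteMarker>true</ExpiredObjectDeleteMarker>
+	//   </Expiration>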
+	if (!e.IsDaysNull() || !e.IsDateNull()) && bool(e.DeleteMarker) {
+		return errLifecycleInvalidDeleteMarker
+	}
+
 	// Neither expiration days or date is specified
-	if e.IsDaysNull() && e.IsDateNull() {
+	// if delete marker is false, one of them should be specified
+	if !bool(e.DeleteMarker) && e.IsDaysNull() && e.IsDateNull() {
 		return errLifecycleInvalidExpiration
 	}
@@ -114,6 +147,7 @@ func (e Expiration) Validate() error {
 	if !e.IsDaysNull() && !e.IsDateNull() {
 		return errLifecycleInvalidExpiration
 	}
+
 	return nil
 }
diff --git a/pkg/bucket/lifecycle/lifecycle.go b/pkg/bucket/lifecycle/lifecycle.go
index fd401d3c1..5e62c2752 100644
--- a/pkg/bucket/lifecycle/lifecycle.go
+++ b/pkg/bucket/lifecycle/lifecycle.go
@@ -132,8 +132,8 @@ func (lc Lifecycle) Validate() error {
 
 // FilterActionableRules returns the rules actions that need to be executed
 // after evaluating prefix/tag filtering
-func (lc Lifecycle) FilterActionableRules(objName, objTags string) []Rule {
-	if objName == "" {
+func (lc Lifecycle) FilterActionableRules(obj ObjectOpts) []Rule {
+	if obj.Name == "" {
 		return nil
 	}
 	var rules []Rule
@@ -141,30 +141,86 @@
 		if rule.Status == Disabled {
 			continue
 		}
-		if !strings.HasPrefix(objName, rule.Prefix()) {
+		if !strings.HasPrefix(obj.Name, rule.Prefix()) {
 			continue
 		}
-		tags := strings.Split(objTags, "&")
-		if rule.Filter.TestTags(tags) {
+		// Indicates whether MinIO will remove a delete marker with no
+		// noncurrent versions. If set to true, the delete marker will
+		// be expired; if set to false the policy takes no action. This
+		// cannot be specified with Days or Date in a Lifecycle
+		// Expiration Policy.
+		if rule.Expiration.DeleteMarker {
+			rules = append(rules, rule)
+			continue
+		}
+		// The NoncurrentVersionExpiration action requests MinIO to expire
+		// noncurrent versions of objects the configured number of days after
+		// the objects become noncurrent.
+		if !rule.NoncurrentVersionExpiration.IsDaysNull() {
+			rules = append(rules, rule)
+			continue
+		}
+		if rule.Filter.TestTags(strings.Split(obj.UserTags, "&")) {
 			rules = append(rules, rule)
 		}
 	}
 	return rules
 }
 
+// ObjectOpts provides information to deduce the lifecycle actions
+// which can be triggered on the resultant object.
+type ObjectOpts struct {
+	Name         string
+	UserTags     string
+	ModTime      time.Time
+	VersionID    string
+	IsLatest     bool
+	DeleteMarker bool
+}
+
 // ComputeAction returns the action to perform by evaluating all lifecycle rules
 // against the object name and its modification time.
-func (lc Lifecycle) ComputeAction(objName, objTags string, modTime time.Time) (action Action) {
-	action = NoneAction
-	if modTime.IsZero() {
-		return
+func (lc Lifecycle) ComputeAction(obj ObjectOpts) Action {
+	var action = NoneAction
+	if obj.ModTime.IsZero() {
+		return action
 	}
-	_, expiryTime := lc.PredictExpiryTime(objName, modTime, objTags)
-	if !expiryTime.IsZero() && time.Now().After(expiryTime) {
-		return DeleteAction
+	for _, rule := range lc.FilterActionableRules(obj) {
+		if obj.DeleteMarker && obj.IsLatest && bool(rule.Expiration.DeleteMarker) {
+			// Indicates whether MinIO will remove a delete marker with no noncurrent versions.
+			// Only latest marker is removed. If set to true, the delete marker will be expired;
+			// if set to false the policy takes no action. This cannot be specified with Days or
+			// Date in a Lifecycle Expiration Policy.
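+			// Example: once all regular versions of an object have been
+			// removed, the delete marker left behind is itself the latest
+			// "version"; a rule carrying ExpiredObjectDeleteMarker=true lets
+			// lifecycle processing clean up that marker at this point.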
+ return DeleteAction + } + + if !rule.NoncurrentVersionExpiration.IsDaysNull() { + if obj.VersionID != "" && !obj.IsLatest { + // Non current versions should be deleted. + if time.Now().After(expectedExpiryTime(obj.ModTime, rule.NoncurrentVersionExpiration.NoncurrentDays)) { + return DeleteAction + } + return NoneAction + } + return NoneAction + } + + // All other expiration only applies to latest versions. + if obj.IsLatest { + switch { + case !rule.Expiration.IsDateNull(): + if time.Now().UTC().After(rule.Expiration.Date.Time) { + action = DeleteAction + } + case !rule.Expiration.IsDaysNull(): + if time.Now().UTC().After(expectedExpiryTime(obj.ModTime, rule.Expiration.Days)) { + action = DeleteAction + } + } + } } - return + return action } // expectedExpiryTime calculates the expiry date/time based on a object modtime. @@ -179,13 +235,22 @@ func expectedExpiryTime(modTime time.Time, days ExpirationDays) time.Time { // PredictExpiryTime returns the expiry date/time of a given object // after evaluting the current lifecycle document. -func (lc Lifecycle) PredictExpiryTime(objName string, modTime time.Time, objTags string) (string, time.Time) { +func (lc Lifecycle) PredictExpiryTime(obj ObjectOpts) (string, time.Time) { + if obj.DeleteMarker { + // We don't need to send any x-amz-expiration for delete marker. + return "", time.Time{} + } + var finalExpiryDate time.Time var finalExpiryRuleID string // Iterate over all actionable rules and find the earliest // expiration date and its associated rule ID. - for _, rule := range lc.FilterActionableRules(objName, objTags) { + for _, rule := range lc.FilterActionableRules(obj) { + if !rule.NoncurrentVersionExpiration.IsDaysNull() && !obj.IsLatest && obj.VersionID != "" { + return rule.ID, expectedExpiryTime(time.Now(), ExpirationDays(rule.NoncurrentVersionExpiration.NoncurrentDays)) + } + if !rule.Expiration.IsDateNull() { if finalExpiryDate.IsZero() || finalExpiryDate.After(rule.Expiration.Date.Time) { finalExpiryRuleID = rule.ID @@ -193,7 +258,7 @@ func (lc Lifecycle) PredictExpiryTime(objName string, modTime time.Time, objTags } } if !rule.Expiration.IsDaysNull() { - expectedExpiry := expectedExpiryTime(modTime, rule.Expiration.Days) + expectedExpiry := expectedExpiryTime(obj.ModTime, rule.Expiration.Days) if finalExpiryDate.IsZero() || finalExpiryDate.After(expectedExpiry) { finalExpiryRuleID = rule.ID finalExpiryDate = expectedExpiry diff --git a/pkg/bucket/lifecycle/lifecycle_test.go b/pkg/bucket/lifecycle/lifecycle_test.go index 35b243841..2c27b897c 100644 --- a/pkg/bucket/lifecycle/lifecycle_test.go +++ b/pkg/bucket/lifecycle/lifecycle_test.go @@ -263,21 +263,21 @@ func TestComputeActions(t *testing.T) { }, // Too early to remove (test Date) { - inputConfig: `foodir/Enabled` + time.Now().Truncate(24*time.Hour).UTC().Add(24*time.Hour).Format(time.RFC3339) + ``, + inputConfig: `foodir/Enabled` + time.Now().UTC().Truncate(24*time.Hour).Add(24*time.Hour).Format(time.RFC3339) + ``, objectName: "foodir/fooobject", objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago expectedAction: NoneAction, }, // Should remove (test Days) { - inputConfig: `foodir/Enabled` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + ``, + inputConfig: `foodir/Enabled` + time.Now().UTC().Truncate(24*time.Hour).Add(-24*time.Hour).Format(time.RFC3339) + ``, objectName: "foodir/fooobject", objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago expectedAction: DeleteAction, }, // Should remove (Tags 
match) { - inputConfig: `foodir/tag1value1Enabled` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + ``, + inputConfig: `foodir/tag1value1Enabled` + time.Now().UTC().Truncate(24*time.Hour).Add(-24*time.Hour).Format(time.RFC3339) + ``, objectName: "foodir/fooobject", objectTags: "tag1=value1&tag2=value2", objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago @@ -310,7 +310,7 @@ func TestComputeActions(t *testing.T) { // Should not remove (Tags don't match) { - inputConfig: `foodir/tagvalue1Enabled` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + ``, + inputConfig: `foodir/tagvalue1Enabled` + time.Now().UTC().Truncate(24*time.Hour).Add(-24*time.Hour).Format(time.RFC3339) + ``, objectName: "foodir/fooobject", objectTags: "tag1=value1", objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago @@ -333,14 +333,20 @@ func TestComputeActions(t *testing.T) { }, } - for i, tc := range testCases { - t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) { + for _, tc := range testCases { + tc := tc + t.Run("", func(t *testing.T) { lc, err := ParseLifecycleConfig(bytes.NewReader([]byte(tc.inputConfig))) if err != nil { - t.Fatalf("%d: Got unexpected error: %v", i+1, err) + t.Fatalf("Got unexpected error: %v", err) } - if resultAction := lc.ComputeAction(tc.objectName, tc.objectTags, tc.objectModTime); resultAction != tc.expectedAction { - t.Fatalf("%d: Expected action: `%v`, got: `%v`", i+1, tc.expectedAction, resultAction) + if resultAction := lc.ComputeAction(ObjectOpts{ + Name: tc.objectName, + UserTags: tc.objectTags, + ModTime: tc.objectModTime, + IsLatest: true, + }); resultAction != tc.expectedAction { + t.Fatalf("Expected action: `%v`, got: `%v`", tc.expectedAction, resultAction) } }) diff --git a/pkg/bucket/lifecycle/noncurrentversion.go b/pkg/bucket/lifecycle/noncurrentversion.go index d879c12c4..a1bfa28df 100644 --- a/pkg/bucket/lifecycle/noncurrentversion.go +++ b/pkg/bucket/lifecycle/noncurrentversion.go @@ -22,26 +22,31 @@ import ( // NoncurrentVersionExpiration - an action for lifecycle configuration rule. type NoncurrentVersionExpiration struct { - XMLName xml.Name `xml:"NoncurrentVersionExpiration"` - NoncurrentDays int `xml:"NoncurrentDays,omitempty"` + XMLName xml.Name `xml:"NoncurrentVersionExpiration"` + NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"` } // NoncurrentVersionTransition - an action for lifecycle configuration rule. 
 type NoncurrentVersionTransition struct {
-	NoncurrentDays int    `xml:"NoncurrentDays"`
-	StorageClass   string `xml:"StorageClass"`
+	NoncurrentDays ExpirationDays `xml:"NoncurrentDays"`
+	StorageClass   string         `xml:"StorageClass"`
 }
 
 var (
-	errNoncurrentVersionExpirationUnsupported = Errorf("Specifying <NoncurrentVersionExpiration></NoncurrentVersionExpiration> is not supported")
 	errNoncurrentVersionTransitionUnsupported = Errorf("Specifying <NoncurrentVersionTransition></NoncurrentVersionTransition> is not supported")
 )
 
-// UnmarshalXML is extended to indicate lack of support for
-// NoncurrentVersionExpiration xml tag in object lifecycle
-// configuration
-func (n NoncurrentVersionExpiration) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error {
-	return errNoncurrentVersionExpirationUnsupported
+// MarshalXML leaves the tag out entirely when non-current days is not set
+func (n *NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	if n.NoncurrentDays == ExpirationDays(0) {
+		return nil
+	}
+	// Encode through a local alias type so EncodeElement does not call
+	// this MarshalXML again and recurse indefinitely.
+	type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration
+	return e.EncodeElement(noncurrentVersionExpirationWrapper(*n), start)
+}
+
+// IsDaysNull returns true if days field is null
+func (n NoncurrentVersionExpiration) IsDaysNull() bool {
+	return n.NoncurrentDays == ExpirationDays(0)
 }
 
 // UnmarshalXML is extended to indicate lack of support for
@@ -54,11 +59,8 @@ func (n NoncurrentVersionTransition) UnmarshalXML(d *xml.Decoder, startElement x
 
 // MarshalXML is extended to leave out
 // <NoncurrentVersionTransition></NoncurrentVersionTransition> tags
 func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
-	return nil
-}
-
-// MarshalXML is extended to leave out
-// <NoncurrentVersionExpiration></NoncurrentVersionExpiration> tags
-func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
-	return nil
+	if n.NoncurrentDays == ExpirationDays(0) {
+		return nil
+	}
+	// Same aliasing trick as above to avoid marshal recursion.
+	type noncurrentVersionTransitionWrapper NoncurrentVersionTransition
+	return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start)
 }
diff --git a/pkg/bucket/lifecycle/rule_test.go b/pkg/bucket/lifecycle/rule_test.go
index 5a2da208d..7534fba17 100644
--- a/pkg/bucket/lifecycle/rule_test.go
+++ b/pkg/bucket/lifecycle/rule_test.go
@@ -25,8 +25,7 @@ import (
 // TestUnsupportedRules checks if Rule xml with unsuported tags return
 // appropriate errors on parsing
 func TestUnsupportedRules(t *testing.T) {
-	// NoncurrentVersionTransition, NoncurrentVersionExpiration
-	// and Transition tags aren't supported
+	// NoncurrentVersionTransition and Transition tags aren't supported
 	unsupportedTestCases := []struct {
 		inputXML    string
 		expectedErr error
@@ -37,13 +36,6 @@ func TestUnsupportedRules(t *testing.T) {
 			</Rule>`,
 			expectedErr: errNoncurrentVersionTransitionUnsupported,
 		},
-		{ // Rule with unsupported NoncurrentVersionExpiration
-			inputXML: `<Rule>
-			<NoncurrentVersionExpiration></NoncurrentVersionExpiration>
-			</Rule>`,
-			expectedErr: errNoncurrentVersionExpirationUnsupported,
-		},
 		{ // Rule with unsupported Transition action
 			inputXML: `<Rule>
diff --git a/pkg/bucket/policy/action.go b/pkg/bucket/policy/action.go
index 0e0a3a55e..43bebf18a 100644
--- a/pkg/bucket/policy/action.go
+++ b/pkg/bucket/policy/action.go
@@ -125,23 +125,48 @@ const (
 	PutBucketEncryptionAction = "s3:PutEncryptionConfiguration"
 	// GetBucketEncryptionAction - GetBucketEncryption REST API action
 	GetBucketEncryptionAction = "s3:GetEncryptionConfiguration"
+
+	// PutBucketVersioningAction - PutBucketVersioning REST API action
+	PutBucketVersioningAction = "s3:PutBucketVersioning"
+	// GetBucketVersioningAction - GetBucketVersioning REST API action
+	GetBucketVersioningAction = "s3:GetBucketVersioning"
+
+	// DeleteObjectVersionAction - DeleteObjectVersion Rest API action.
+	DeleteObjectVersionAction = "s3:DeleteObjectVersion"
+
+	// DeleteObjectVersionTaggingAction - DeleteObjectVersionTagging Rest API action.
+ DeleteObjectVersionTaggingAction = "s3:DeleteObjectVersionTagging" + + // GetObjectVersionAction - GetObjectVersionAction Rest API action. + GetObjectVersionAction = "s3:GetObjectVersion" + + // GetObjectVersionTaggingAction - GetObjectVersionTagging Rest API action. + GetObjectVersionTaggingAction = "s3:GetObjectVersionTagging" + + // PutObjectVersionTaggingAction - PutObjectVersionTagging Rest API action. + PutObjectVersionTaggingAction = "s3:PutObjectVersionTagging" ) // List of all supported object actions. var supportedObjectActions = map[Action]struct{}{ - AbortMultipartUploadAction: {}, - DeleteObjectAction: {}, - GetObjectAction: {}, - ListMultipartUploadPartsAction: {}, - PutObjectAction: {}, - BypassGovernanceRetentionAction: {}, - PutObjectRetentionAction: {}, - GetObjectRetentionAction: {}, - PutObjectLegalHoldAction: {}, - GetObjectLegalHoldAction: {}, - GetObjectTaggingAction: {}, - PutObjectTaggingAction: {}, - DeleteObjectTaggingAction: {}, + AbortMultipartUploadAction: {}, + DeleteObjectAction: {}, + GetObjectAction: {}, + ListMultipartUploadPartsAction: {}, + PutObjectAction: {}, + BypassGovernanceRetentionAction: {}, + PutObjectRetentionAction: {}, + GetObjectRetentionAction: {}, + PutObjectLegalHoldAction: {}, + GetObjectLegalHoldAction: {}, + GetObjectTaggingAction: {}, + PutObjectTaggingAction: {}, + DeleteObjectTaggingAction: {}, + GetObjectVersionAction: {}, + GetObjectVersionTaggingAction: {}, + DeleteObjectVersionAction: {}, + DeleteObjectVersionTaggingAction: {}, + PutObjectVersionTaggingAction: {}, } // isObjectAction - returns whether action is object type or not. @@ -181,12 +206,19 @@ var supportedActions = map[Action]struct{}{ GetBucketObjectLockConfigurationAction: {}, PutBucketTaggingAction: {}, GetBucketTaggingAction: {}, + GetObjectVersionAction: {}, + GetObjectVersionTaggingAction: {}, + DeleteObjectVersionAction: {}, + DeleteObjectVersionTaggingAction: {}, + PutObjectVersionTaggingAction: {}, BypassGovernanceRetentionAction: {}, GetObjectTaggingAction: {}, PutObjectTaggingAction: {}, DeleteObjectTaggingAction: {}, PutBucketEncryptionAction: {}, GetBucketEncryptionAction: {}, + PutBucketVersioningAction: {}, + GetBucketVersioningAction: {}, } // IsValid - checks if action is valid or not. 
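The new version-scoped actions pair with the `s3:versionid` condition key added in the `condition/key.go` hunk below. As a sketch only: the bucket name and version ID here are placeholders, this assumes the package's existing `ParseConfig(reader, bucketName)` entry point, and it assumes `s3:versionid` is registered among the supported condition keys. A policy that only permits reading one pinned object version could look like:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/minio/minio/pkg/bucket/policy"
)

func main() {
	// Hypothetical policy: anonymous read of a single, pinned object version.
	doc := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"AWS": ["*"]},
	    "Action": ["s3:GetObjectVersion"],
	    "Resource": ["arn:aws:s3:::mybucket/*"],
	    "Condition": {"StringEquals": {"s3:versionid": "00000000-0000-0000-0000-000000000000"}}
	  }]
	}`

	p, err := policy.ParseConfig(strings.NewReader(doc), "mybucket")
	if err != nil {
		log.Fatalln("policy rejected:", err)
	}
	fmt.Printf("parsed policy with %d statement(s)\n", len(p.Statements))
}
```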
@@ -246,7 +278,6 @@ var actionConditionKeyMap = map[Action]condition.KeySet{ append([]condition.Key{ condition.S3XAmzServerSideEncryption, condition.S3XAmzServerSideEncryptionCustomerAlgorithm, - condition.S3XAmzStorageClass, }, condition.CommonKeys...)...), HeadBucketAction: condition.NewKeySet(condition.CommonKeys...), @@ -308,4 +339,22 @@ var actionConditionKeyMap = map[Action]condition.KeySet{ PutObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), GetObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), DeleteObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), + + PutObjectVersionTaggingAction: condition.NewKeySet(condition.CommonKeys...), + GetObjectVersionAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), + GetObjectVersionTaggingAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), + DeleteObjectVersionAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), + DeleteObjectVersionTaggingAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), } diff --git a/pkg/bucket/policy/condition/key.go b/pkg/bucket/policy/condition/key.go index 08f490a9e..b6fc6f531 100644 --- a/pkg/bucket/policy/condition/key.go +++ b/pkg/bucket/policy/condition/key.go @@ -59,6 +59,10 @@ const ( // S3Delimiter - key representing delimiter query parameter of ListBucket API only. S3Delimiter Key = "s3:delimiter" + // S3VersionID - Enables you to limit the permission for the + // s3:PutObjectVersionTagging action to a specific object version. + S3VersionID Key = "s3:versionid" + // S3MaxKeys - key representing max-keys query parameter of ListBucket API only. S3MaxKeys Key = "s3:max-keys" diff --git a/pkg/bucket/versioning/error.go b/pkg/bucket/versioning/error.go new file mode 100644 index 000000000..e1f8dcc13 --- /dev/null +++ b/pkg/bucket/versioning/error.go @@ -0,0 +1,44 @@ +/* + * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package versioning + +import ( + "fmt" +) + +// Error is the generic type for any error happening during tag +// parsing. +type Error struct { + err error +} + +// Errorf - formats according to a format specifier and returns +// the string as a value that satisfies error of type tagging.Error +func Errorf(format string, a ...interface{}) error { + return Error{err: fmt.Errorf(format, a...)} +} + +// Unwrap the internal error. +func (e Error) Unwrap() error { return e.err } + +// Error 'error' compatible method. 
+func (e Error) Error() string {
+	if e.err == nil {
+		return "versioning: cause <nil>"
+	}
+	return e.err.Error()
+}
diff --git a/pkg/bucket/versioning/versioning.go b/pkg/bucket/versioning/versioning.go
new file mode 100644
index 000000000..078525b4f
--- /dev/null
+++ b/pkg/bucket/versioning/versioning.go
@@ -0,0 +1,79 @@
+/*
+ * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package versioning
+
+import (
+	"encoding/xml"
+	"io"
+)
+
+// State - enabled/disabled/suspended states
+// for multifactor and status of versioning.
+type State string
+
+// Various supported states
+const (
+	Enabled State = "Enabled"
+	// Disabled State = "Disabled" only used by MFA Delete not supported yet.
+	Suspended State = "Suspended"
+)
+
+// Versioning - Configuration for bucket versioning.
+type Versioning struct {
+	XMLNS   string   `xml:"xmlns,attr,omitempty"`
+	XMLName xml.Name `xml:"VersioningConfiguration"`
+	// MFADelete State `xml:"MFADelete,omitempty"` // not supported yet.
+	Status State `xml:"Status,omitempty"`
+}
+
+// Validate - validates the versioning configuration
+func (v Versioning) Validate() error {
+	// Not supported yet
+	// switch v.MFADelete {
+	// case Enabled, Disabled:
+	// default:
+	// 	return Errorf("unsupported MFADelete state %s", v.MFADelete)
+	// }
+	switch v.Status {
+	case Enabled, Suspended:
+	default:
+		return Errorf("unsupported Versioning status %s", v.Status)
+	}
+	return nil
+}
+
+// Enabled - returns true if versioning is enabled
+func (v Versioning) Enabled() bool {
+	return v.Status == Enabled
+}
+
+// Suspended - returns true if versioning is suspended
+func (v Versioning) Suspended() bool {
+	return v.Status == Suspended
+}
+
+// ParseConfig - parses data in given reader to VersioningConfiguration.
+func ParseConfig(reader io.Reader) (*Versioning, error) {
+	var v Versioning
+	if err := xml.NewDecoder(reader).Decode(&v); err != nil {
+		return nil, err
+	}
+	if err := v.Validate(); err != nil {
+		return nil, err
+	}
+	return &v, nil
+}
diff --git a/pkg/event/name.go b/pkg/event/name.go
index 03d000944..74ea8802f 100644
--- a/pkg/event/name.go
+++ b/pkg/event/name.go
@@ -41,6 +41,7 @@ const (
 	ObjectCreatedPutLegalHold
 	ObjectRemovedAll
 	ObjectRemovedDelete
+	ObjectRemovedDeleteMarkerCreated
 )
 
 // Expand - returns expanded values of abbreviated event type.
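A quick sketch of feeding a bucket versioning document through the new `pkg/bucket/versioning` package above; the XML mirrors the S3 wire format the struct tags expect:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/minio/minio/pkg/bucket/versioning"
)

func main() {
	conf := `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
	  <Status>Enabled</Status>
	</VersioningConfiguration>`

	// ParseConfig decodes and then validates, so an unknown Status
	// (anything other than Enabled or Suspended) is rejected here.
	v, err := versioning.ParseConfig(strings.NewReader(conf))
	if err != nil {
		log.Fatalln("invalid versioning configuration:", err)
	}
	fmt.Println("enabled:", v.Enabled(), "suspended:", v.Suspended()) // enabled: true suspended: false
}
```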
@@ -88,6 +89,8 @@ func (name Name) String() string { return "s3:ObjectRemoved:*" case ObjectRemovedDelete: return "s3:ObjectRemoved:Delete" + case ObjectRemovedDeleteMarkerCreated: + return "s3:ObjectRemoved:DeleteMarkerCreated" } return "" @@ -166,6 +169,8 @@ func ParseName(s string) (Name, error) { return ObjectRemovedAll, nil case "s3:ObjectRemoved:Delete": return ObjectRemovedDelete, nil + case "s3:ObjectRemoved:DeleteMarkerCreated": + return ObjectRemovedDeleteMarkerCreated, nil default: return 0, &ErrInvalidEventName{s} } diff --git a/pkg/iam/policy/action.go b/pkg/iam/policy/action.go index d5022b386..1c911e160 100644 --- a/pkg/iam/policy/action.go +++ b/pkg/iam/policy/action.go @@ -92,6 +92,21 @@ const ( // PutObjectAction - PutObject Rest API action. PutObjectAction = "s3:PutObject" + // DeleteObjectVersionAction - DeleteObjectVersion Rest API action. + DeleteObjectVersionAction = "s3:DeleteObjectVersion" + + // DeleteObjectVersionTaggingAction - DeleteObjectVersionTagging Rest API action. + DeleteObjectVersionTaggingAction = "s3:DeleteObjectVersionTagging" + + // GetObjectVersionAction - GetObjectVersionAction Rest API action. + GetObjectVersionAction = "s3:GetObjectVersion" + + // GetObjectVersionTaggingAction - GetObjectVersionTagging Rest API action. + GetObjectVersionTaggingAction = "s3:GetObjectVersionTagging" + + // PutObjectVersionTaggingAction - PutObjectVersionTagging Rest API action. + PutObjectVersionTaggingAction = "s3:PutObjectVersionTagging" + // BypassGovernanceRetentionAction - bypass governance retention for PutObjectRetention, PutObject and DeleteObject Rest API action. BypassGovernanceRetentionAction = "s3:BypassGovernanceRetention" @@ -134,6 +149,12 @@ const ( // GetBucketEncryptionAction - GetBucketEncryption REST API action GetBucketEncryptionAction = "s3:GetEncryptionConfiguration" + // PutBucketVersioningAction - PutBucketVersioning REST API action + PutBucketVersioningAction = "s3:PutBucketVersioning" + + // GetBucketVersioningAction - GetBucketVersioning REST API action + GetBucketVersioningAction = "s3:GetBucketVersioning" + // AllActions - all API actions AllActions = "s3:*" ) @@ -170,30 +191,42 @@ var supportedActions = map[Action]struct{}{ PutBucketObjectLockConfigurationAction: {}, GetBucketTaggingAction: {}, PutBucketTaggingAction: {}, + GetObjectVersionAction: {}, + GetObjectVersionTaggingAction: {}, + DeleteObjectVersionAction: {}, + DeleteObjectVersionTaggingAction: {}, + PutObjectVersionTaggingAction: {}, GetObjectTaggingAction: {}, PutObjectTaggingAction: {}, DeleteObjectTaggingAction: {}, PutBucketEncryptionAction: {}, GetBucketEncryptionAction: {}, + PutBucketVersioningAction: {}, + GetBucketVersioningAction: {}, AllActions: {}, } // List of all supported object actions. 
var supportedObjectActions = map[Action]struct{}{ - AllActions: {}, - AbortMultipartUploadAction: {}, - DeleteObjectAction: {}, - GetObjectAction: {}, - ListMultipartUploadPartsAction: {}, - PutObjectAction: {}, - BypassGovernanceRetentionAction: {}, - PutObjectRetentionAction: {}, - GetObjectRetentionAction: {}, - PutObjectLegalHoldAction: {}, - GetObjectLegalHoldAction: {}, - GetObjectTaggingAction: {}, - PutObjectTaggingAction: {}, - DeleteObjectTaggingAction: {}, + AllActions: {}, + AbortMultipartUploadAction: {}, + DeleteObjectAction: {}, + GetObjectAction: {}, + ListMultipartUploadPartsAction: {}, + PutObjectAction: {}, + BypassGovernanceRetentionAction: {}, + PutObjectRetentionAction: {}, + GetObjectRetentionAction: {}, + PutObjectLegalHoldAction: {}, + GetObjectLegalHoldAction: {}, + GetObjectTaggingAction: {}, + PutObjectTaggingAction: {}, + DeleteObjectTaggingAction: {}, + GetObjectVersionAction: {}, + GetObjectVersionTaggingAction: {}, + DeleteObjectVersionAction: {}, + DeleteObjectVersionTaggingAction: {}, + PutObjectVersionTaggingAction: {}, } // isObjectAction - returns whether action is object type or not. @@ -235,7 +268,6 @@ var actionConditionKeyMap = map[Action]condition.KeySet{ append([]condition.Key{ condition.S3XAmzServerSideEncryption, condition.S3XAmzServerSideEncryptionCustomerAlgorithm, - condition.S3XAmzStorageClass, }, condition.CommonKeys...)...), HeadBucketAction: condition.NewKeySet(condition.CommonKeys...), @@ -303,4 +335,22 @@ var actionConditionKeyMap = map[Action]condition.KeySet{ PutObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), GetObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), DeleteObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), + + PutObjectVersionTaggingAction: condition.NewKeySet(condition.CommonKeys...), + GetObjectVersionAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), + GetObjectVersionTaggingAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), + DeleteObjectVersionAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), + DeleteObjectVersionTaggingAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), } diff --git a/pkg/madmin/info-commands.go b/pkg/madmin/info-commands.go index b3a8e3a43..88a897471 100644 --- a/pkg/madmin/info-commands.go +++ b/pkg/madmin/info-commands.go @@ -254,8 +254,8 @@ type FSBackend struct { Type backendType `json:"backendType,omitempty"` } -// XLBackend contains specific erasure storage information -type XLBackend struct { +// ErasureBackend contains specific erasure storage information +type ErasureBackend struct { Type backendType `json:"backendType,omitempty"` OnlineDisks int `json:"onlineDisks,omitempty"` OfflineDisks int `json:"offlineDisks,omitempty"`
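Since the renamed struct keeps the old JSON field names, the `XLBackend` to `ErasureBackend` rename should be invisible on the wire to consumers of the admin info API. A minimal check, with made-up disk counts:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	b := madmin.ErasureBackend{
		Type:        madmin.ErasureType,
		OnlineDisks: 4,
	}
	out, err := json.Marshal(b)
	if err != nil {
		log.Fatal(err)
	}
	// With the omitempty tags above, zero-valued fields are dropped;
	// expect something like: {"backendType":"Erasure","onlineDisks":4}
	fmt.Println(string(out))
}
```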