diff --git a/cmd/disk-cache.go b/cmd/disk-cache.go index 936582732..7cf2bff4d 100644 --- a/cmd/disk-cache.go +++ b/cmd/disk-cache.go @@ -254,8 +254,7 @@ func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, cleanupBackend := func() { bkReader.Close() } cleanupPipe := func() { pipeReader.Close() } - gr = NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, cleanupBackend, cleanupPipe) - return gr, nil + return NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, opts.CheckCopyPrecondFn, cleanupBackend, cleanupPipe) } // Uses cached-object to serve the request. If object is not cached it serves the request from the backend and also diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index ad83e1999..080158f5d 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -499,7 +499,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, if hasSuffix(object, slashSeparator) { // The lock taken above is released when // objReader.Close() is called by the caller. - return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, nsUnlocker), nil + return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn, nsUnlocker) } // Take a rwPool lock for NFS gateway type deployment rwPoolUnlocker := func() {} @@ -516,7 +516,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rwPoolUnlocker = func() { fs.rwPool.Close(fsMetaPath) } } - objReaderFn, off, length, rErr := NewGetObjectReader(rs, objInfo, nsUnlocker, rwPoolUnlocker) + objReaderFn, off, length, rErr := NewGetObjectReader(rs, objInfo, opts.CheckCopyPrecondFn, nsUnlocker, rwPoolUnlocker) if rErr != nil { return nil, rErr } @@ -544,7 +544,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, return nil, err } - return objReaderFn(reader, h, closeFn) + return objReaderFn(reader, h, opts.CheckCopyPrecondFn, closeFn) } // GetObject - reads an object from the disk. 
diff --git a/cmd/gateway/azure/gateway-azure.go b/cmd/gateway/azure/gateway-azure.go index 879b352af..fc7750ace 100644 --- a/cmd/gateway/azure/gateway-azure.go +++ b/cmd/gateway/azure/gateway-azure.go @@ -651,7 +651,7 @@ func (a *azureObjects) GetObjectNInfo(ctx context.Context, bucket, object string // Setup cleanup function to cause the above go-routine to // exit in case of partial read pipeCloser := func() { pr.Close() } - return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil + return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser) } // GetObject - reads an object from azure. Supports additional @@ -829,6 +829,9 @@ func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, r * // CopyObject - Copies a blob from source container to destination container. // Uses Azure equivalent CopyBlob API. func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { + if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") { + return minio.ObjectInfo{}, minio.PreConditionFailed{} + } srcBlobURL := a.client.GetContainerReference(srcBucket).GetBlobReference(srcObject).GetURL() destBlob := a.client.GetContainerReference(destBucket).GetBlobReference(destObject) azureMeta, props, err := s3MetaToAzureProperties(ctx, srcInfo.UserDefined) diff --git a/cmd/gateway/b2/gateway-b2.go b/cmd/gateway/b2/gateway-b2.go index a01e29196..03c547162 100644 --- a/cmd/gateway/b2/gateway-b2.go +++ b/cmd/gateway/b2/gateway-b2.go @@ -417,7 +417,7 @@ func (l *b2Objects) GetObjectNInfo(ctx context.Context, bucket, object string, r // Setup cleanup function to cause the above go-routine to // exit in case of partial read pipeCloser := func() { pr.Close() } - return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil + return 
minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser) } // GetObject reads an object from B2. Supports additional diff --git a/cmd/gateway/gcs/gateway-gcs.go b/cmd/gateway/gcs/gateway-gcs.go index 4bff7305b..f45964af9 100644 --- a/cmd/gateway/gcs/gateway-gcs.go +++ b/cmd/gateway/gcs/gateway-gcs.go @@ -753,7 +753,7 @@ func (l *gcsGateway) GetObjectNInfo(ctx context.Context, bucket, object string, // Setup cleanup function to cause the above go-routine to // exit in case of partial read pipeCloser := func() { pr.Close() } - return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil + return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser) } // GetObject - reads an object from GCS. Supports additional @@ -929,7 +929,9 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r // CopyObject - Copies a blob from source container to destination container. func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) { - + if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") { + return minio.ObjectInfo{}, minio.PreConditionFailed{} + } src := l.client.Bucket(srcBucket).Object(srcObject) dst := l.client.Bucket(destBucket).Object(destObject) diff --git a/cmd/gateway/oss/gateway-oss.go b/cmd/gateway/oss/gateway-oss.go index cdb6c4d65..8271ce317 100644 --- a/cmd/gateway/oss/gateway-oss.go +++ b/cmd/gateway/oss/gateway-oss.go @@ -26,7 +26,7 @@ import ( "strings" "github.com/aliyun/aliyun-oss-go-sdk/oss" - "github.com/dustin/go-humanize" + humanize "github.com/dustin/go-humanize" "github.com/minio/cli" miniogopolicy "github.com/minio/minio-go/pkg/policy" @@ -570,7 +570,7 @@ func (l *ossObjects) GetObjectNInfo(ctx context.Context, bucket, object string, // Setup cleanup function to cause the above 
go-routine to // exit in case of partial read pipeCloser := func() { pr.Close() } - return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil + return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser) } // GetObject reads an object on OSS. Supports additional @@ -665,6 +665,9 @@ func (l *ossObjects) PutObject(ctx context.Context, bucket, object string, r *mi // CopyObject copies an object from source bucket to a destination bucket. func (l *ossObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { + if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") { + return minio.ObjectInfo{}, minio.PreConditionFailed{} + } bkt, err := l.Client.Bucket(srcBucket) if err != nil { logger.LogIf(ctx, err) diff --git a/cmd/gateway/s3/gateway-s3-sse.go b/cmd/gateway/s3/gateway-s3-sse.go index 3b51285c2..0d64701f1 100644 --- a/cmd/gateway/s3/gateway-s3-sse.go +++ b/cmd/gateway/s3/gateway-s3-sse.go @@ -313,7 +313,7 @@ func (l *s3EncObjects) GetObjectNInfo(ctx context.Context, bucket, object string return l.s3Objects.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts) } objInfo.UserDefined = minio.CleanMinioInternalMetadataKeys(objInfo.UserDefined) - fn, off, length, err := minio.NewGetObjectReader(rs, objInfo) + fn, off, length, err := minio.NewGetObjectReader(rs, objInfo, o.CheckCopyPrecondFn) if err != nil { return nil, minio.ErrorRespToObjectError(err) } @@ -329,7 +329,7 @@ func (l *s3EncObjects) GetObjectNInfo(ctx context.Context, bucket, object string // Setup cleanup function to cause the above go-routine to // exit in case of partial read pipeCloser := func() { pr.Close() } - return fn(pr, h, pipeCloser) + return fn(pr, h, o.CheckCopyPrecondFn, pipeCloser) } // GetObjectInfo reads object info and replies back ObjectInfo diff --git 
a/cmd/gateway/s3/gateway-s3.go b/cmd/gateway/s3/gateway-s3.go index 841571ef5..9ec42b3ad 100644 --- a/cmd/gateway/s3/gateway-s3.go +++ b/cmd/gateway/s3/gateway-s3.go @@ -400,7 +400,7 @@ func (l *s3Objects) GetObjectNInfo(ctx context.Context, bucket, object string, r // Setup cleanup function to cause the above go-routine to // exit in case of partial read pipeCloser := func() { pr.Close() } - return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil + return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser) } // GetObject reads an object from S3. Supports additional @@ -463,6 +463,9 @@ func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string, // CopyObject copies an object from source bucket to a destination bucket. func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject string, dstBucket string, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { + if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") { + return minio.ObjectInfo{}, minio.PreConditionFailed{} + } // Set this header such that following CopyObject() always sets the right metadata on the destination. // metadata input is already a trickled down value from interpreting x-amz-metadata-directive at // handler layer. So what we have right now is supposed to be applied on the destination object anyways. @@ -533,6 +536,9 @@ func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object str // existing object or a part of it. 
func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) { + if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") { + return minio.PartInfo{}, minio.PreConditionFailed{} + } srcInfo.UserDefined = map[string]string{ "x-amz-copy-source-if-match": srcInfo.ETag, } diff --git a/cmd/object-api-errors.go b/cmd/object-api-errors.go index b0f7aca4d..8bf75f12f 100644 --- a/cmd/object-api-errors.go +++ b/cmd/object-api-errors.go @@ -383,3 +383,15 @@ func isErrObjectNotFound(err error) bool { _, ok := err.(ObjectNotFound) return ok } + +// PreConditionFailed - error returned when a copy source precondition check fails +type PreConditionFailed struct{} + +func (e PreConditionFailed) Error() string { + return "At least one of the pre-conditions you specified did not hold" +} + +func isErrPreconditionFailed(err error) bool { + _, ok := err.(PreConditionFailed) + return ok +} diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go index 933403dd7..134d8319b 100644 --- a/cmd/object-api-interface.go +++ b/cmd/object-api-interface.go @@ -26,10 +26,14 @@ import ( "github.com/minio/minio/pkg/policy" ) +// CheckCopyPreconditionFn returns true if copy precondition check failed. 
+type CheckCopyPreconditionFn func(o ObjectInfo, encETag string) bool + // ObjectOptions represents object options for ObjectLayer operations type ObjectOptions struct { ServerSideEncryption encrypt.ServerSide UserDefined map[string]string + CheckCopyPrecondFn CheckCopyPreconditionFn } // LockType represents required locking for ObjectLayer operations diff --git a/cmd/object-api-utils.go b/cmd/object-api-utils.go index 1bd9fc1b3..f2af7b2b4 100644 --- a/cmd/object-api-utils.go +++ b/cmd/object-api-utils.go @@ -429,30 +429,41 @@ type GetObjectReader struct { pReader io.Reader cleanUpFns []func() + precondFn func(ObjectInfo, string) bool once sync.Once } // NewGetObjectReaderFromReader sets up a GetObjectReader with a given // reader. This ignores any object properties. -func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, cleanupFns ...func()) *GetObjectReader { +func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, pcfn CheckCopyPreconditionFn, cleanupFns ...func()) (*GetObjectReader, error) { + if pcfn != nil { + if ok := pcfn(oi, ""); ok { + // Call the cleanup funcs + for i := len(cleanupFns) - 1; i >= 0; i-- { + cleanupFns[i]() + } + return nil, PreConditionFailed{} + } + } return &GetObjectReader{ ObjInfo: oi, pReader: r, cleanUpFns: cleanupFns, - } + precondFn: pcfn, + }, nil } // ObjReaderFn is a function type that takes a reader and returns // GetObjectReader and an error. Request headers are passed to provide // encryption parameters. cleanupFns allow cleanup funcs to be // registered for calling after usage of the reader. -type ObjReaderFn func(inputReader io.Reader, h http.Header, cleanupFns ...func()) (r *GetObjectReader, err error) +type ObjReaderFn func(inputReader io.Reader, h http.Header, pcfn CheckCopyPreconditionFn, cleanupFns ...func()) (r *GetObjectReader, err error) // NewGetObjectReader creates a new GetObjectReader. The cleanUpFns // are called on Close() in reverse order as passed here. 
NOTE: It is // assumed that clean up functions do not panic (otherwise, they may // not all run!). -func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func()) ( +func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, pcfn CheckCopyPreconditionFn, cleanUpFns ...func()) ( fn ObjReaderFn, off, length int64, err error) { // Call the clean-up functions immediately in case of exit @@ -493,7 +504,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func()) // a reader that returns the desired range of // encrypted bytes. The header parameter is used to // provide encryption parameters. - fn = func(inputReader io.Reader, h http.Header, cFns ...func()) (r *GetObjectReader, err error) { + fn = func(inputReader io.Reader, h http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) { copySource := h.Get(crypto.SSECopyAlgorithm) != "" cFns = append(cleanUpFns, cFns...) @@ -508,9 +519,18 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func()) } return nil, err } + encETag := oi.ETag + oi.ETag = getDecryptedETag(h, oi, copySource) // Decrypt the ETag before top layer consumes this value. - // Decrypt the ETag before top layer consumes this value. 
- oi.ETag = getDecryptedETag(h, oi, copySource) + if pcfn != nil { + if ok := pcfn(oi, encETag); ok { + // Call the cleanup funcs + for i := len(cFns) - 1; i >= 0; i-- { + cFns[i]() + } + return nil, PreConditionFailed{} + } + } // Apply the skipLen and limit on the // decrypted stream @@ -521,6 +541,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func()) ObjInfo: oi, pReader: decReader, cleanUpFns: cFns, + precondFn: pcfn, } return r, nil } @@ -552,7 +573,17 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func()) return nil, 0, 0, errInvalidRange } } - fn = func(inputReader io.Reader, _ http.Header, cFns ...func()) (r *GetObjectReader, err error) { + fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) { + cFns = append(cleanUpFns, cFns...) + if pcfn != nil { + if ok := pcfn(oi, ""); ok { + // Call the cleanup funcs + for i := len(cFns) - 1; i >= 0; i-- { + cFns[i]() + } + return nil, PreConditionFailed{} + } + } // Decompression reader. snappyReader := snappy.NewReader(inputReader) // Apply the skipLen and limit on the @@ -564,7 +595,8 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func()) r = &GetObjectReader{ ObjInfo: oi, pReader: decReader, - cleanUpFns: append(cleanUpFns, cFns...), + cleanUpFns: cFns, + precondFn: pcfn, } return r, nil } @@ -574,11 +606,22 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func()) if err != nil { return nil, 0, 0, err } - fn = func(inputReader io.Reader, _ http.Header, cFns ...func()) (r *GetObjectReader, err error) { + fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) { + cFns = append(cleanUpFns, cFns...) 
+ if pcfn != nil { + if ok := pcfn(oi, ""); ok { + // Call the cleanup funcs + for i := len(cFns) - 1; i >= 0; i-- { + cFns[i]() + } + return nil, PreConditionFailed{} + } + } r = &GetObjectReader{ ObjInfo: oi, pReader: inputReader, - cleanUpFns: append(cleanUpFns, cFns...), + cleanUpFns: cFns, + precondFn: pcfn, } return r, nil } diff --git a/cmd/object-handlers-common.go b/cmd/object-handlers-common.go index 9e06f4edb..e514e337d 100644 --- a/cmd/object-handlers-common.go +++ b/cmd/object-handlers-common.go @@ -23,6 +23,7 @@ import ( "strings" "time" + "github.com/minio/minio/cmd/crypto" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/handlers" ) @@ -33,8 +34,8 @@ import ( // x-amz-copy-source-if-unmodified-since // x-amz-copy-source-if-match // x-amz-copy-source-if-none-match -func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool { - return checkCopyObjectPreconditions(ctx, w, r, objInfo) +func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, encETag string) bool { + return checkCopyObjectPreconditions(ctx, w, r, objInfo, encETag) } // Validates the preconditions for CopyObject, returns true if CopyObject operation should not proceed. @@ -43,11 +44,14 @@ func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter // x-amz-copy-source-if-unmodified-since // x-amz-copy-source-if-match // x-amz-copy-source-if-none-match -func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool { +func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, encETag string) bool { // Return false for methods other than GET and HEAD. 
if r.Method != "PUT" { return false } + if encETag == "" { + encETag = objInfo.ETag + } // If the object doesn't have a modtime (IsZero), or the modtime // is obviously garbage (Unix time == 0), then ignore modtimes // and don't process the If-Modified-Since header. @@ -95,11 +99,16 @@ func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r } } + ssec := crypto.SSECopy.IsRequested(r.Header) // x-amz-copy-source-if-match : Return the object only if its entity tag (ETag) is the // same as the one specified; otherwise return a 412 (precondition failed). ifMatchETagHeader := r.Header.Get("x-amz-copy-source-if-match") if ifMatchETagHeader != "" { - if objInfo.ETag != "" && !isETagEqual(objInfo.ETag, ifMatchETagHeader) { + etag := objInfo.ETag + if ssec { + etag = encETag[len(encETag)-32:] + } + if objInfo.ETag != "" && !isETagEqual(etag, ifMatchETagHeader) { // If the object ETag does not match with the specified ETag. writeHeaders() writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL, guessIsBrowserReq(r)) @@ -111,7 +120,11 @@ func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r // one specified otherwise, return a 304 (not modified). ifNoneMatchETagHeader := r.Header.Get("x-amz-copy-source-if-none-match") if ifNoneMatchETagHeader != "" { - if objInfo.ETag != "" && isETagEqual(objInfo.ETag, ifNoneMatchETagHeader) { + etag := objInfo.ETag + if ssec { + etag = encETag[len(encETag)-32:] + } + if objInfo.ETag != "" && isETagEqual(etag, ifNoneMatchETagHeader) { // If the object ETag matches with the specified ETag. 
writeHeaders() writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL, guessIsBrowserReq(r)) diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 2e2812ae9..f59515180 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -760,21 +760,23 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re if !cpSrcDstSame { lock = readLock } - + checkCopyPrecondFn := func(o ObjectInfo, encETag string) bool { + return checkCopyObjectPreconditions(ctx, w, r, o, encETag) + } + getOpts.CheckCopyPrecondFn = checkCopyPrecondFn + srcOpts.CheckCopyPrecondFn = checkCopyPrecondFn var rs *HTTPRangeSpec gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, lock, getOpts) if err != nil { + if isErrPreconditionFailed(err) { + return + } writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } defer gr.Close() srcInfo := gr.ObjInfo - // Verify before x-amz-copy-source preconditions before continuing with CopyObject. - if checkCopyObjectPreconditions(ctx, w, r, srcInfo) { - return - } - /// maximum Upload size for object in a single CopyObject operation. 
if isMaxObjectSize(srcInfo.Size) { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r)) @@ -1573,9 +1575,17 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt } } + checkCopyPartPrecondFn := func(o ObjectInfo, encETag string) bool { + return checkCopyObjectPartPreconditions(ctx, w, r, o, encETag) + } + getOpts.CheckCopyPrecondFn = checkCopyPartPrecondFn + srcOpts.CheckCopyPrecondFn = checkCopyPartPrecondFn gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, readLock, getOpts) if err != nil { + if isErrPreconditionFailed(err) { + return + } writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } @@ -1597,11 +1607,6 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt return } - // Verify before x-amz-copy-source preconditions before continuing with CopyObject. - if checkCopyObjectPartPreconditions(ctx, w, r, srcInfo) { - return - } - // Get the object offset & length startOffset, length, err := rs.GetOffsetLength(actualPartSize) if err != nil { diff --git a/cmd/xl-v1-object.go b/cmd/xl-v1-object.go index 0a4050312..5b6cf0253 100644 --- a/cmd/xl-v1-object.go +++ b/cmd/xl-v1-object.go @@ -182,7 +182,7 @@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r nsUnlocker() return nil, toObjectErr(err, bucket, object) } - return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, nsUnlocker), nil + return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn, nsUnlocker) } var objInfo ObjectInfo @@ -192,7 +192,7 @@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r return nil, toObjectErr(err, bucket, object) } - fn, off, length, nErr := NewGetObjectReader(rs, objInfo, nsUnlocker) + fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts.CheckCopyPrecondFn, nsUnlocker) if nErr != nil { return nil, nErr } @@ -206,7 +206,7 
@@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r // case of incomplete read. pipeCloser := func() { pr.Close() } - return fn(pr, h, pipeCloser) + return fn(pr, h, opts.CheckCopyPrecondFn, pipeCloser) } // GetObject - reads an object erasured coded across multiple