fix: return Range errors after If-Matches (#10045)

closes #7292
Harshavardhana 2020-07-17 13:01:22 -07:00 committed by GitHub
parent d84fc58cac
commit 14b1c9f8e4
15 changed files with 208 additions and 183 deletions


@@ -916,7 +916,7 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
// case of incomplete read.
pipeCloser := func() { pr.Close() }
gr, gerr := fn(pr, h, opts.CheckCopyPrecondFn, pipeCloser)
gr, gerr := fn(pr, h, opts.CheckPrecondFn, pipeCloser)
if gerr != nil {
return gr, numHits, gerr
}


@@ -613,7 +613,7 @@ func getDecryptedETag(headers http.Header, objInfo ObjectInfo, copySource bool)
// As per AWS S3 Spec, ETag for SSE-C encrypted objects need not be MD5Sum of the data.
// Since server side copy with same source and dest just replaces the ETag, we save
// encrypted content MD5Sum as ETag for both SSE-C and SSE-S3, we standardize the ETag
//encryption across SSE-C and SSE-S3, and only return last 32 bytes for SSE-C
// encryption across SSE-C and SSE-S3, and only return last 32 bytes for SSE-C
if crypto.SSEC.IsEncrypted(objInfo.UserDefined) && !copySource {
return objInfo.ETag[len(objInfo.ETag)-32:]
}
@@ -777,34 +777,6 @@ func (o *ObjectInfo) EncryptedSize() int64 {
return int64(size)
}
// DecryptCopyObjectInfo tries to decrypt the provided object if it is encrypted.
// It fails if the object is encrypted and the HTTP headers don't contain
// SSE-C headers or the object is not encrypted but SSE-C headers are provided. (AWS behavior)
// DecryptObjectInfo returns 'ErrNone' if the object is not encrypted or the
// decryption succeeded.
//
// DecryptCopyObjectInfo also returns whether the object is encrypted or not.
func DecryptCopyObjectInfo(info *ObjectInfo, headers http.Header) (errCode APIErrorCode, encrypted bool) {
// Directories are never encrypted.
if info.IsDir {
return ErrNone, false
}
if errCode, encrypted = ErrNone, crypto.IsEncrypted(info.UserDefined); !encrypted && crypto.SSECopy.IsRequested(headers) {
errCode = ErrInvalidEncryptionParameters
} else if encrypted {
if (!crypto.SSECopy.IsRequested(headers) && crypto.SSEC.IsEncrypted(info.UserDefined)) ||
(crypto.SSECopy.IsRequested(headers) && crypto.S3.IsEncrypted(info.UserDefined)) {
errCode = ErrSSEEncryptedObject
return
}
var err error
if info.Size, err = info.DecryptedSize(); err != nil {
errCode = toAPIErrorCode(GlobalContext, err)
}
}
return
}
// DecryptObjectInfo tries to decrypt the provided object if it is encrypted.
// It fails if the object is encrypted and the HTTP headers don't contain
// SSE-C headers or the object is not encrypted but SSE-C headers are provided. (AWS behavior)
@@ -812,32 +784,46 @@ func DecryptCopyObjectInfo(info *ObjectInfo, headers http.Header) (errCode APIEr
// decryption succeeded.
//
// DecryptObjectInfo also returns whether the object is encrypted or not.
func DecryptObjectInfo(info *ObjectInfo, headers http.Header) (encrypted bool, err error) {
func DecryptObjectInfo(info *ObjectInfo, r *http.Request) (encrypted bool, err error) {
// Directories are never encrypted.
if info.IsDir {
return false, nil
}
// disallow X-Amz-Server-Side-Encryption header on HEAD and GET
if crypto.S3.IsRequested(headers) {
err = errInvalidEncryptionParameters
return
if r == nil {
return false, errInvalidArgument
}
if err, encrypted = nil, crypto.IsEncrypted(info.UserDefined); !encrypted && crypto.SSEC.IsRequested(headers) {
err = errInvalidEncryptionParameters
} else if encrypted {
headers := r.Header
// disallow X-Amz-Server-Side-Encryption header on HEAD and GET
switch r.Method {
case http.MethodGet, http.MethodHead:
if crypto.S3.IsRequested(headers) {
return false, errInvalidEncryptionParameters
}
}
encrypted = crypto.IsEncrypted(info.UserDefined)
if !encrypted && crypto.SSEC.IsRequested(headers) {
return false, errInvalidEncryptionParameters
}
if encrypted {
if (crypto.SSEC.IsEncrypted(info.UserDefined) && !crypto.SSEC.IsRequested(headers)) ||
(crypto.S3.IsEncrypted(info.UserDefined) && crypto.SSEC.IsRequested(headers)) {
err = errEncryptedObject
return
return encrypted, errEncryptedObject
}
if _, err = info.DecryptedSize(); err != nil {
return encrypted, err
}
_, err = info.DecryptedSize()
if crypto.IsEncrypted(info.UserDefined) && !crypto.IsMultiPart(info.UserDefined) {
info.ETag = getDecryptedETag(headers, *info, false)
info.ETag = getDecryptedETag(headers, *info, headers.Get(crypto.SSECopyAlgorithm) != "")
}
}
return
return encrypted, nil
}
// The customer key in the header is used by the gateway for encryption in the case of
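DecryptObjectInfo now takes the whole *http.Request rather than just its headers, so it can reject the X-Amz-Server-Side-Encryption header only on GET and HEAD and guard against a nil request itself. Call sites across the handlers are updated to the shape below (excerpted from the SelectObjectContent/GetObject changes in this commit; the surrounding error plumbing belongs to the handlers):

    if objectAPI.IsEncryptionSupported() {
        if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
            return
        }
    }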


@@ -80,44 +80,49 @@ func TestEncryptRequest(t *testing.T) {
var decryptObjectInfoTests = []struct {
info ObjectInfo
headers http.Header
request *http.Request
expErr error
}{
{
info: ObjectInfo{Size: 100},
headers: http.Header{},
request: &http.Request{Header: http.Header{}},
expErr: nil,
},
{
info: ObjectInfo{Size: 100, UserDefined: map[string]string{crypto.SSESealAlgorithm: crypto.InsecureSealAlgorithm}},
headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
request: &http.Request{Header: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}}},
expErr: nil,
},
{
info: ObjectInfo{Size: 0, UserDefined: map[string]string{crypto.SSESealAlgorithm: crypto.InsecureSealAlgorithm}},
headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
request: &http.Request{Header: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}}},
expErr: nil,
},
{
info: ObjectInfo{Size: 100, UserDefined: map[string]string{crypto.SSECSealedKey: "EAAfAAAAAAD7v1hQq3PFRUHsItalxmrJqrOq6FwnbXNarxOOpb8jTWONPPKyM3Gfjkjyj6NCf+aB/VpHCLCTBA=="}},
headers: http.Header{},
request: &http.Request{Header: http.Header{}},
expErr: errEncryptedObject,
},
{
info: ObjectInfo{Size: 100, UserDefined: map[string]string{}},
headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
request: &http.Request{Method: http.MethodGet, Header: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}}},
expErr: errInvalidEncryptionParameters,
},
{
info: ObjectInfo{Size: 100, UserDefined: map[string]string{}},
request: &http.Request{Method: http.MethodHead, Header: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}}},
expErr: errInvalidEncryptionParameters,
},
{
info: ObjectInfo{Size: 31, UserDefined: map[string]string{crypto.SSESealAlgorithm: crypto.InsecureSealAlgorithm}},
headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
request: &http.Request{Header: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}}},
expErr: errObjectTampered,
},
}
func TestDecryptObjectInfo(t *testing.T) {
for i, test := range decryptObjectInfoTests {
if encrypted, err := DecryptObjectInfo(&test.info, test.headers); err != test.expErr {
if encrypted, err := DecryptObjectInfo(&test.info, test.request); err != test.expErr {
t.Errorf("Test %d: Decryption returned wrong error code: got %d , want %d", i, err, test.expErr)
} else if enc := crypto.IsEncrypted(test.info.UserDefined); encrypted && enc != encrypted {
t.Errorf("Test %d: Decryption thinks object is encrypted but it is not", i)


@@ -173,7 +173,7 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
// case of incomplete read.
pipeCloser := func() { pr.Close() }
return fn(pr, h, opts.CheckCopyPrecondFn, pipeCloser)
return fn(pr, h, opts.CheckPrecondFn, pipeCloser)
}
// GetObject - reads an object erasured coded across multiple


@@ -752,7 +752,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
return nil, err
}
return objReaderFn(reader, h, opts.CheckCopyPrecondFn, closeFn)
return objReaderFn(reader, h, opts.CheckPrecondFn, closeFn)
}
// GetObject - reads an object from the disk.


@@ -908,7 +908,7 @@ func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, r *
// CopyObject - Copies a blob from source container to destination container.
// Uses Azure equivalent `BlobURL.StartCopyFromURL`.
func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") {
if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
return minio.ObjectInfo{}, minio.PreConditionFailed{}
}
srcBlob := a.client.NewContainerURL(srcBucket).NewBlobURL(srcObject)


@@ -940,7 +940,7 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r
// CopyObject - Copies a blob from source container to destination container.
func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) {
if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") {
if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
return minio.ObjectInfo{}, minio.PreConditionFailed{}
}
src := l.client.Bucket(srcBucket).Object(srcObject)


@@ -330,7 +330,7 @@ func (l *s3EncObjects) GetObjectNInfo(ctx context.Context, bucket, object string
// Setup cleanup function to cause the above go-routine to
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return fn(pr, h, o.CheckCopyPrecondFn, pipeCloser)
return fn(pr, h, o.CheckPrecondFn, pipeCloser)
}
// GetObjectInfo reads object info and replies back ObjectInfo


@@ -490,7 +490,7 @@ func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string,
// CopyObject copies an object from source bucket to a destination bucket.
func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject string, dstBucket string, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") {
if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
return minio.ObjectInfo{}, minio.PreConditionFailed{}
}
// Set this header such that following CopyObject() always sets the right metadata on the destination.
@@ -594,7 +594,7 @@ func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object str
// existing object or a part of it.
func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) {
if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") {
if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
return minio.PartInfo{}, minio.PreConditionFailed{}
}
srcInfo.UserDefined = map[string]string{


@@ -28,8 +28,8 @@ import (
"github.com/minio/minio/pkg/madmin"
)
// CheckCopyPreconditionFn returns true if copy precondition check failed.
type CheckCopyPreconditionFn func(o ObjectInfo, encETag string) bool
// CheckPreconditionFn returns true if precondition check failed.
type CheckPreconditionFn func(o ObjectInfo) bool
// GetObjectInfoFn is the signature of GetObjectInfo function.
type GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
@@ -37,13 +37,13 @@ type GetObjectInfoFn func(ctx context.Context, bucket, object string, opts Objec
// ObjectOptions represents object options for ObjectLayer object operations
type ObjectOptions struct {
ServerSideEncryption encrypt.ServerSide
Versioned bool // indicates if the bucket is versioned
WalkVersions bool // indicates if the we are interested in walking versions
VersionID string // Specifies the versionID which needs to be overwritten or read
MTime time.Time // Is only set in POST/PUT operations
UserDefined map[string]string // only set in case of POST/PUT operations
PartNumber int // only useful in case of GetObject/HeadObject
CheckCopyPrecondFn CheckCopyPreconditionFn // only set during CopyObject preconditional valuation
Versioned bool // indicates if the bucket is versioned
WalkVersions bool // indicates if the we are interested in walking versions
VersionID string // Specifies the versionID which needs to be overwritten or read
MTime time.Time // Is only set in POST/PUT operations
UserDefined map[string]string // only set in case of POST/PUT operations
PartNumber int // only useful in case of GetObject/HeadObject
CheckPrecondFn CheckPreconditionFn // only set during GetObject/HeadObject/CopyObjectPart preconditional valuation
}
// BucketOptions represents bucket options for ObjectLayer bucket operations
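With the narrower CheckPreconditionFn signature, handlers install one precondition callback on ObjectOptions before calling GetObjectNInfo, and the reader constructors invoke it with the already-decrypted ObjectInfo. A minimal sketch of the wiring, modeled on the GetObjectHandler change later in this commit:

    opts.CheckPrecondFn = func(oi ObjectInfo) bool {
        // Returning true aborts the read; the caller receives PreConditionFailed{}
        // and must not write a second error response.
        return checkPreconditions(ctx, w, r, oi, opts)
    }
    gr, err := getObjectNInfo(ctx, bucket, object, rs, r.Header, readLock, opts)
    if err != nil && isErrPreconditionFailed(err) {
        return // the callback already wrote the response
    }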


@@ -509,14 +509,12 @@ type GetObjectReader struct {
// NewGetObjectReaderFromReader sets up a GetObjectReader with a given
// reader. This ignores any object properties.
func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, opts ObjectOptions, cleanupFns ...func()) (*GetObjectReader, error) {
if opts.CheckCopyPrecondFn != nil {
if ok := opts.CheckCopyPrecondFn(oi, ""); ok {
// Call the cleanup funcs
for i := len(cleanupFns) - 1; i >= 0; i-- {
cleanupFns[i]()
}
return nil, PreConditionFailed{}
if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
// Call the cleanup funcs
for i := len(cleanupFns) - 1; i >= 0; i-- {
cleanupFns[i]()
}
return nil, PreConditionFailed{}
}
return &GetObjectReader{
ObjInfo: oi,
@@ -530,7 +528,7 @@ func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, opts ObjectOptions
// GetObjectReader and an error. Request headers are passed to provide
// encryption parameters. cleanupFns allow cleanup funcs to be
// registered for calling after usage of the reader.
type ObjReaderFn func(inputReader io.Reader, h http.Header, pcfn CheckCopyPreconditionFn, cleanupFns ...func()) (r *GetObjectReader, err error)
type ObjReaderFn func(inputReader io.Reader, h http.Header, pcfn CheckPreconditionFn, cleanupFns ...func()) (r *GetObjectReader, err error)
// NewGetObjectReader creates a new GetObjectReader. The cleanUpFns
// are called on Close() in reverse order as passed here. NOTE: It is
@@ -581,7 +579,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
// a reader that returns the desired range of
// encrypted bytes. The header parameter is used to
// provide encryption parameters.
fn = func(inputReader io.Reader, h http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
fn = func(inputReader io.Reader, h http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
copySource := h.Get(crypto.SSECopyAlgorithm) != ""
cFns = append(cleanUpFns, cFns...)
@@ -596,17 +594,14 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
}
return nil, err
}
encETag := oi.ETag
oi.ETag = getDecryptedETag(h, oi, copySource) // Decrypt the ETag before top layer consumes this value.
if opts.CheckCopyPrecondFn != nil {
if ok := opts.CheckCopyPrecondFn(oi, encETag); ok {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, PreConditionFailed{}
if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, PreConditionFailed{}
}
// Apply the skipLen and limit on the
@@ -650,16 +645,14 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
return nil, 0, 0, errInvalidRange
}
}
fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
fn = func(inputReader io.Reader, _ http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
cFns = append(cleanUpFns, cFns...)
if opts.CheckCopyPrecondFn != nil {
if ok := opts.CheckCopyPrecondFn(oi, ""); ok {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, PreConditionFailed{}
if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, PreConditionFailed{}
}
// Decompression reader.
s2Reader := s2.NewReader(inputReader)
@@ -700,16 +693,14 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
if err != nil {
return nil, 0, 0, err
}
fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
fn = func(inputReader io.Reader, _ http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
cFns = append(cleanUpFns, cFns...)
if opts.CheckCopyPrecondFn != nil {
if ok := opts.CheckCopyPrecondFn(oi, ""); ok {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, PreConditionFailed{}
if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
// Call the cleanup funcs
for i := len(cFns) - 1; i >= 0; i-- {
cFns[i]()
}
return nil, PreConditionFailed{}
}
r = &GetObjectReader{
ObjInfo: oi,
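Each reader constructor in these hunks now follows the same shape: when opts.CheckPrecondFn reports a failed precondition, the registered cleanup functions are run in reverse order and PreConditionFailed{} is returned. A self-contained sketch of that unwind pattern (the names here are illustrative, not the cmd package's):

    package main

    import "fmt"

    // unwind mimics how the GetObjectReader constructors call their cleanup
    // funcs: last registered, first called, before returning the precondition error.
    func unwind(cleanupFns []func()) {
        for i := len(cleanupFns) - 1; i >= 0; i-- {
            cleanupFns[i]()
        }
    }

    func main() {
        cleanups := []func(){
            func() { fmt.Println("release read lock") },
            func() { fmt.Println("close pipe reader") },
        }
        unwind(cleanups) // prints "close pipe reader", then "release read lock"
    }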


@@ -24,7 +24,6 @@ import (
"strconv"
"time"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/pkg/bucket/lifecycle"
"github.com/minio/minio/pkg/event"
@@ -41,8 +40,8 @@ var (
// x-amz-copy-source-if-unmodified-since
// x-amz-copy-source-if-match
// x-amz-copy-source-if-none-match
func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, encETag string) bool {
return checkCopyObjectPreconditions(ctx, w, r, objInfo, encETag)
func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool {
return checkCopyObjectPreconditions(ctx, w, r, objInfo)
}
// Validates the preconditions for CopyObject, returns true if CopyObject operation should not proceed.
@@ -51,14 +50,11 @@ func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter
// x-amz-copy-source-if-unmodified-since
// x-amz-copy-source-if-match
// x-amz-copy-source-if-none-match
func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, encETag string) bool {
func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool {
// Return false for methods other than GET and HEAD.
if r.Method != http.MethodPut {
return false
}
if encETag == "" {
encETag = objInfo.ETag
}
// If the object doesn't have a modtime (IsZero), or the modtime
// is obviously garbage (Unix time == 0), then ignore modtimes
// and don't process the If-Modified-Since header.
@@ -106,17 +102,11 @@ func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r
}
}
shouldDecryptEtag := crypto.SSECopy.IsRequested(r.Header) && !crypto.IsMultiPart(objInfo.UserDefined)
// x-amz-copy-source-if-match : Return the object only if its entity tag (ETag) is the
// same as the one specified; otherwise return a 412 (precondition failed).
ifMatchETagHeader := r.Header.Get(xhttp.AmzCopySourceIfMatch)
if ifMatchETagHeader != "" {
etag := objInfo.ETag
if shouldDecryptEtag {
etag = encETag[len(encETag)-32:]
}
if objInfo.ETag != "" && !isETagEqual(etag, ifMatchETagHeader) {
if !isETagEqual(objInfo.ETag, ifMatchETagHeader) {
// If the object ETag does not match with the specified ETag.
writeHeaders()
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL, guessIsBrowserReq(r))
@@ -128,11 +118,7 @@ func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r
// one specified otherwise, return a 304 (not modified).
ifNoneMatchETagHeader := r.Header.Get(xhttp.AmzCopySourceIfNoneMatch)
if ifNoneMatchETagHeader != "" {
etag := objInfo.ETag
if shouldDecryptEtag {
etag = encETag[len(encETag)-32:]
}
if objInfo.ETag != "" && isETagEqual(etag, ifNoneMatchETagHeader) {
if isETagEqual(objInfo.ETag, ifNoneMatchETagHeader) {
// If the object ETag matches with the specified ETag.
writeHeaders()
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL, guessIsBrowserReq(r))
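Since NewGetObjectReader now decrypts the ETag before the precondition callback runs, checkCopyObjectPreconditions compares objInfo.ETag against x-amz-copy-source-if-match / if-none-match directly and the encETag plumbing goes away. A hedged, standalone sketch of a quote-insensitive comparison in the spirit of isETagEqual (the real helper lives in the cmd package and may differ in detail):

    package main

    import (
        "fmt"
        "strings"
    )

    // etagEqual compares two ETags while ignoring surrounding double quotes,
    // which clients may or may not include in conditional headers.
    func etagEqual(a, b string) bool {
        trim := func(s string) string { return strings.Trim(s, `"`) }
        return trim(a) == trim(b)
    }

    func main() {
        fmt.Println(etagEqual(`"d41d8cd98f00b204e9800998ecf8427e"`, "d41d8cd98f00b204e9800998ecf8427e")) // true
    }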


@@ -213,7 +213,7 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)
if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
@@ -371,23 +371,43 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
// Get request range.
var rs *HTTPRangeSpec
var rangeErr error
rangeHeader := r.Header.Get(xhttp.Range)
if rangeHeader != "" {
if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
// Handle only errInvalidRange. Ignore other
// parse error and treat it as regular Get
// request like Amazon S3.
if err == errInvalidRange {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRange), r.URL, guessIsBrowserReq(r))
return
}
rs, rangeErr = parseRequestRangeSpec(rangeHeader)
}
logger.LogIf(ctx, err, logger.Application)
// Validate pre-conditions if any.
opts.CheckPrecondFn = func(oi ObjectInfo) bool {
if objectAPI.IsEncryptionSupported() {
if _, err := DecryptObjectInfo(&oi, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return true
}
}
if checkPreconditions(ctx, w, r, oi, opts) {
return true
}
// Handle only errInvalidRange. Ignore other
// parse error and treat it as regular Get
// request like Amazon S3.
if rangeErr == errInvalidRange {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRange), r.URL, guessIsBrowserReq(r))
return true
}
if rangeErr != nil {
logger.LogIf(ctx, rangeErr, logger.Application)
}
return false
}
gr, err := getObjectNInfo(ctx, bucket, object, rs, r.Header, readLock, opts)
if err != nil {
if isErrPreconditionFailed(err) {
return
}
if globalBucketVersioningSys.Enabled(bucket) && gr != nil {
// Versioning enabled quite possibly object is deleted might be delete-marker
// if present set the headers, no idea why AWS S3 sets these headers.
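The net effect for GetObject is the ordering the title describes: conditional headers are evaluated on decrypted metadata before an invalid Range header is turned into an error, so a failed If-Match returns 412 even when the Range is also bad, and 416 is reserved for requests whose preconditions pass. A self-contained sketch of that ordering with simplified types (checkGetPreconditions is illustrative, not the server's code):

    package main

    import (
        "errors"
        "fmt"
        "net/http"
    )

    var errInvalidRange = errors.New("invalid range")

    // checkGetPreconditions sketches the evaluation order the handler now uses:
    // conditional headers first, and only then the invalid Range error.
    func checkGetPreconditions(r *http.Request, etag string, rangeErr error) (status int, failed bool) {
        if im := r.Header.Get("If-Match"); im != "" && im != etag {
            return http.StatusPreconditionFailed, true // 412 wins over 416
        }
        if inm := r.Header.Get("If-None-Match"); inm != "" && inm == etag {
            return http.StatusNotModified, true
        }
        if errors.Is(rangeErr, errInvalidRange) {
            return http.StatusRequestedRangeNotSatisfiable, true // 416 only after preconditions pass
        }
        return 0, false
    }

    func main() {
        r, _ := http.NewRequest(http.MethodGet, "/bucket/object", nil)
        r.Header.Set("If-Match", `"other"`)
        r.Header.Set("Range", "bytes=abc")
        status, _ := checkGetPreconditions(r, `"abc123"`, errInvalidRange)
        fmt.Println(status) // 412: the conditional failure is reported, not the bad Range
    }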
@@ -409,18 +429,6 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
// filter object lock metadata if permission does not permit
objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)
if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
// Validate pre-conditions if any.
if checkPreconditions(ctx, w, r, objInfo, opts) {
return
}
// Set encryption response headers
if objectAPI.IsEncryptionSupported() {
if crypto.IsEncrypted(objInfo.UserDefined) {
@@ -455,7 +463,8 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
// Write object content to response body
if _, err = io.Copy(httpWriter, gr); err != nil {
if !httpWriter.HasWritten() && !statusCodeWritten { // write error response only if no data or headers has been written to client yet
if !httpWriter.HasWritten() && !statusCodeWritten {
// write error response only if no data or headers has been written to client yet
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
}
return
@@ -551,23 +560,6 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
return
}
// Get request range.
var rs *HTTPRangeSpec
rangeHeader := r.Header.Get(xhttp.Range)
if rangeHeader != "" {
if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
// Handle only errInvalidRange. Ignore other
// parse error and treat it as regular Get
// request like Amazon S3.
if err == errInvalidRange {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInvalidRange))
return
}
logger.LogIf(ctx, err)
}
}
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
if err != nil {
if globalBucketVersioningSys.Enabled(bucket) {
@@ -590,14 +582,40 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)
if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
}
// Validate pre-conditions if any.
if checkPreconditions(ctx, w, r, objInfo, opts) {
return
}
// Get request range.
var rs *HTTPRangeSpec
rangeHeader := r.Header.Get(xhttp.Range)
if rangeHeader != "" {
if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
// Handle only errInvalidRange. Ignore other
// parse error and treat it as regular Get
// request like Amazon S3.
if err == errInvalidRange {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInvalidRange))
return
}
logger.LogIf(ctx, err)
}
}
// Set encryption response headers
if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
if crypto.IsEncrypted(objInfo.UserDefined) {
switch {
case crypto.S3.IsEncrypted(objInfo.UserDefined):
@@ -614,11 +632,6 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
}
}
// Validate pre-conditions if any.
if checkPreconditions(ctx, w, r, objInfo, opts) {
return
}
// Set standard object headers.
if err = setObjectHeaders(w, objInfo, rs); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
@@ -898,16 +911,25 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
getObjectNInfo := objectAPI.GetObjectNInfo
if api.CacheAPI() != nil {
getObjectNInfo = api.CacheAPI().GetObjectNInfo
}
var lock = noLock
if !cpSrcDstSame {
lock = readLock
}
checkCopyPrecondFn := func(o ObjectInfo, encETag string) bool {
return checkCopyObjectPreconditions(ctx, w, r, o, encETag)
checkCopyPrecondFn := func(o ObjectInfo) bool {
if objectAPI.IsEncryptionSupported() {
if _, err := DecryptObjectInfo(&o, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return true
}
}
return checkCopyObjectPreconditions(ctx, w, r, o)
}
getOpts.CheckCopyPrecondFn = checkCopyPrecondFn
srcOpts.CheckCopyPrecondFn = checkCopyPrecondFn
getOpts.CheckPrecondFn = checkCopyPrecondFn
var rs *HTTPRangeSpec
gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, lock, getOpts)
if err != nil {
@@ -1790,22 +1812,31 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
// Get request range.
var rs *HTTPRangeSpec
rangeHeader := r.Header.Get(xhttp.AmzCopySourceRange)
if rangeHeader != "" {
var parseRangeErr error
if rs, parseRangeErr = parseCopyPartRangeSpec(rangeHeader); parseRangeErr != nil {
logger.GetReqInfo(ctx).AppendTags("rangeHeader", rangeHeader)
var parseRangeErr error
if rangeHeader := r.Header.Get(xhttp.AmzCopySourceRange); rangeHeader != "" {
rs, parseRangeErr = parseCopyPartRangeSpec(rangeHeader)
}
checkCopyPartPrecondFn := func(o ObjectInfo) bool {
if objectAPI.IsEncryptionSupported() {
if _, err := DecryptObjectInfo(&o, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return true
}
}
if checkCopyObjectPartPreconditions(ctx, w, r, o) {
return true
}
if parseRangeErr != nil {
logger.LogIf(ctx, parseRangeErr)
writeCopyPartErr(ctx, w, parseRangeErr, r.URL, guessIsBrowserReq(r))
return
// Range header mismatch is pre-condition like failure
// so return true to indicate Range precondition failed.
return true
}
return false
}
checkCopyPartPrecondFn := func(o ObjectInfo, encETag string) bool {
return checkCopyObjectPartPreconditions(ctx, w, r, o, encETag)
}
getOpts.CheckCopyPrecondFn = checkCopyPartPrecondFn
srcOpts.CheckCopyPrecondFn = checkCopyPartPrecondFn
getOpts.CheckPrecondFn = checkCopyPartPrecondFn
gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, readLock, getOpts)
if err != nil {
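CopyObjectPart gets the same treatment: x-amz-copy-source-range is parsed up front, but a parse failure is reported from inside the precondition callback, after the encryption and If-Match checks, so a bad copy-source range no longer preempts a conditional failure. A condensed view of the callback installed above (logging elided):

    getOpts.CheckPrecondFn = func(o ObjectInfo) bool {
        if objectAPI.IsEncryptionSupported() {
            if _, err := DecryptObjectInfo(&o, r); err != nil {
                writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
                return true
            }
        }
        if checkCopyObjectPartPreconditions(ctx, w, r, o) {
            return true
        }
        if parseRangeErr != nil {
            writeCopyPartErr(ctx, w, parseRangeErr, r.URL, guessIsBrowserReq(r))
            return true // a bad copy-source range surfaces only after the conditional checks
        }
        return false
    }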


@@ -1269,7 +1269,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)
if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
writeWebErrorResponse(w, err)
return
}


@@ -48,6 +48,19 @@ type FilterRule struct {
Value string `xml:"Value"`
}
func (filter FilterRule) isEmpty() bool {
return filter.Name == "" && filter.Value == ""
}
// MarshalXML implements a custom marshaller to support `omitempty` feature.
func (filter FilterRule) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if filter.isEmpty() {
return nil
}
type filterRuleWrapper FilterRule
return e.EncodeElement(filterRuleWrapper(filter), start)
}
// UnmarshalXML - decodes XML data.
func (filter *FilterRule) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// Make subtype to avoid recursive UnmarshalXML().
@@ -102,6 +115,10 @@ func (ruleList *FilterRuleList) UnmarshalXML(d *xml.Decoder, start xml.StartElem
return nil
}
func (ruleList FilterRuleList) isEmpty() bool {
return len(ruleList.Rules) == 0
}
// Pattern - returns pattern using prefix and suffix values.
func (ruleList FilterRuleList) Pattern() string {
var prefix string
@@ -124,6 +141,15 @@ type S3Key struct {
RuleList FilterRuleList `xml:"S3Key,omitempty" json:"S3Key,omitempty"`
}
// MarshalXML implements a custom marshaller to support `omitempty` feature.
func (s3Key S3Key) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if s3Key.RuleList.isEmpty() {
return nil
}
type s3KeyWrapper S3Key
return e.EncodeElement(s3KeyWrapper(s3Key), start)
}
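These MarshalXML methods give FilterRule and S3Key struct-level omitempty behavior, which encoding/xml does not provide on its own: returning nil without encoding anything suppresses the element entirely, and the local wrapper type keeps EncodeElement from recursing back into MarshalXML. A small self-contained demonstration of the pattern (the Config container is illustrative, not a type from the codebase):

    package main

    import (
        "encoding/xml"
        "fmt"
    )

    // FilterRule mirrors the notification element; MarshalXML returning nil for
    // an empty value acts as a struct-level "omitempty".
    type FilterRule struct {
        Name  string `xml:"Name"`
        Value string `xml:"Value"`
    }

    func (f FilterRule) isEmpty() bool { return f.Name == "" && f.Value == "" }

    func (f FilterRule) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
        if f.isEmpty() {
            return nil // emit nothing for an empty rule
        }
        type wrapper FilterRule // plain subtype, so EncodeElement does not recurse
        return e.EncodeElement(wrapper(f), start)
    }

    // Config is an illustrative container, not a type from the codebase.
    type Config struct {
        XMLName xml.Name   `xml:"Config"`
        Rule    FilterRule `xml:"FilterRule"`
    }

    func main() {
        out, _ := xml.Marshal(Config{})
        fmt.Println(string(out)) // <Config></Config> -- the empty rule is omitted

        out, _ = xml.Marshal(Config{Rule: FilterRule{Name: "prefix", Value: "logs/"}})
        fmt.Println(string(out)) // <Config><FilterRule><Name>prefix</Name>... -- the rule is emitted
    }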
// common - represents common elements inside <QueueConfiguration>, <CloudFunctionConfiguration>
// and <TopicConfiguration>
type common struct {