diff --git a/cmd/api-errors.go b/cmd/api-errors.go
index 6ab738f91..c0879ef7c 100644
--- a/cmd/api-errors.go
+++ b/cmd/api-errors.go
@@ -20,9 +20,11 @@ package cmd
 import (
 	"context"
 	"encoding/xml"
+	"errors"
 	"fmt"
 	"net/http"
 	"net/url"
+	"strconv"
 	"strings"
 
 	"github.com/Azure/azure-storage-blob-go/azblob"
@@ -2177,10 +2179,30 @@ func toAPIError(ctx context.Context, err error) APIError {
 			}
 			// Add more Gateway SDKs here if any in future.
 		default:
-			apiErr = APIError{
-				Code:           apiErr.Code,
-				Description:    fmt.Sprintf("%s: cause(%v)", apiErr.Description, err),
-				HTTPStatusCode: apiErr.HTTPStatusCode,
+			if errors.Is(err, errMalformedEncoding) {
+				apiErr = APIError{
+					Code:           "BadRequest",
+					Description:    err.Error(),
+					HTTPStatusCode: http.StatusBadRequest,
+				}
+			} else if errors.Is(err, errChunkTooBig) {
+				apiErr = APIError{
+					Code:           "BadRequest",
+					Description:    err.Error(),
+					HTTPStatusCode: http.StatusBadRequest,
+				}
+			} else if errors.Is(err, strconv.ErrRange) {
+				apiErr = APIError{
+					Code:           "BadRequest",
+					Description:    err.Error(),
+					HTTPStatusCode: http.StatusBadRequest,
+				}
+			} else {
+				apiErr = APIError{
+					Code:           apiErr.Code,
+					Description:    fmt.Sprintf("%s: cause(%v)", apiErr.Description, err),
+					HTTPStatusCode: apiErr.HTTPStatusCode,
+				}
 			}
 		}
 	}
diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go
index 9499d50a2..c5d67d42b 100644
--- a/cmd/object-handlers_test.go
+++ b/cmd/object-handlers_test.go
@@ -1113,7 +1113,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
 			dataLen:            1024,
 			chunkSize:          1024,
 			expectedContent:    []byte{},
-			expectedRespStatus: http.StatusInternalServerError,
+			expectedRespStatus: http.StatusBadRequest,
 			accessKey:          credentials.AccessKey,
 			secretKey:          credentials.SecretKey,
 			shouldPass:         false,
@@ -1174,7 +1174,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
 			dataLen:            1024,
 			chunkSize:          1024,
 			expectedContent:    []byte{},
-			expectedRespStatus: http.StatusInternalServerError,
+			expectedRespStatus: http.StatusBadRequest,
 			accessKey:          credentials.AccessKey,
 			secretKey:          credentials.SecretKey,
 			shouldPass:         false,
@@ -3381,7 +3381,7 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN
 	noAPIErr := APIError{}
 	missingDateHeaderErr := getAPIError(ErrMissingDateHeader)
-	internalErr := getAPIError(ErrInternalError)
+	internalErr := getAPIError(ErrBadRequest)
 	testCases := []struct {
 		fault       Fault
 		expectedErr APIError
diff --git a/cmd/streaming-signature-v4.go b/cmd/streaming-signature-v4.go
index c161d7c6c..c749bc1e0 100644
--- a/cmd/streaming-signature-v4.go
+++ b/cmd/streaming-signature-v4.go
@@ -145,9 +145,12 @@ const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4Ki
 // lineTooLong is generated as chunk header is bigger than 4KiB.
 var errLineTooLong = errors.New("header line too long")
 
-// Malformed encoding is generated when chunk header is wrongly formed.
+// malformed encoding is generated when chunk header is wrongly formed.
 var errMalformedEncoding = errors.New("malformed chunked encoding")
 
+// A chunk is considered too big if it is bigger than 16MiB.
+var errChunkTooBig = errors.New("chunk too big: choose chunk size <= 16MiB")
+
 // newSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r
 // out of HTTP "chunked" format before returning it.
 // The s3ChunkedReader returns io.EOF when the final 0-length chunk is read.
@@ -190,6 +193,22 @@ func (cr *s3ChunkedReader) Close() (err error) {
 	return nil
 }
 
+// Now, we read one chunk from the underlying reader.
+// A chunk has the following format:
+//   <chunk-size-as-hex> + ";chunk-signature=" + <signature-as-hex> + "\r\n" + <payload> + "\r\n"
+//
+// First, we read the chunk size but fail if it is larger
+// than 16 MiB. We must not accept arbitrarily large chunks.
+// 16 MiB is a reasonable max limit.
+//
+// Then we read the signature and payload data. We compute the SHA256 checksum
+// of the payload and verify that it matches the expected signature value.
+//
+// The last chunk is *always* 0-sized. So, we must only return io.EOF if we have encountered
+// a chunk with a chunk size = 0. However, this chunk still has a signature and we must
+// verify it.
+const maxChunkSize = 16 << 20 // 16 MiB
+
 // Read - implements `io.Reader`, which transparently decodes
 // the incoming AWS Signature V4 streaming signature.
 func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
@@ -205,21 +224,6 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
 		buf = buf[n:]
 	}
 
-	// Now, we read one chunk from the underlying reader.
-	// A chunk has the following format:
-	//   <chunk-size-as-hex> + ";chunk-signature=" + <signature-as-hex> + "\r\n" + <payload> + "\r\n"
-	//
-	// Frist, we read the chunk size but fail if it is larger
-	// than 1 MB. We must not accept arbitrary large chunks.
-	// One 1 MB is a reasonable max limit.
-	//
-	// Then we read the signature and payload data. We compute the SHA256 checksum
-	// of the payload and verify that it matches the expected signature value.
-	//
-	// The last chunk is *always* 0-sized. So, we must only return io.EOF if we have encountered
-	// a chunk with a chunk size = 0. However, this chunk still has a signature and we must
-	// verify it.
-	const MaxSize = 1 << 20 // 1 MB
 	var size int
 	for {
 		b, err := cr.reader.ReadByte()
@@ -249,8 +253,8 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
 			cr.err = errMalformedEncoding
 			return n, cr.err
 		}
-		if size > MaxSize {
-			cr.err = errMalformedEncoding
+		if size > maxChunkSize {
+			cr.err = errChunkTooBig
 			return n, cr.err
 		}
 	}
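
A note on the toAPIError change: strconv functions never return strconv.ErrRange directly; they return a *strconv.NumError wrapping it, and errors.Is sees through that wrapper because NumError implements Unwrap (Go 1.14+). Below is a minimal standalone sketch of the classification logic, with stand-in sentinel errors and a hypothetical classify helper that condenses the patch's if/else chain into one switch:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"strconv"
)

// Stand-ins for the sentinels referenced by the patch.
var (
	errMalformedEncoding = errors.New("malformed chunked encoding")
	errChunkTooBig       = errors.New("chunk too big: choose chunk size <= 16MiB")
)

// classify condenses the patched default branch of toAPIError:
// the three client-caused errors map to 400, everything else
// keeps the previous 500 behavior.
func classify(err error) int {
	switch {
	case errors.Is(err, errMalformedEncoding),
		errors.Is(err, errChunkTooBig),
		errors.Is(err, strconv.ErrRange):
		return http.StatusBadRequest
	default:
		return http.StatusInternalServerError
	}
}

func main() {
	// 17 hex digits overflow uint64, so ParseUint returns a
	// *strconv.NumError wrapping strconv.ErrRange.
	_, err := strconv.ParseUint("fffffffffffffffff", 16, 64)
	fmt.Println(classify(err))                 // 400
	fmt.Println(classify(errChunkTooBig))      // 400
	fmt.Println(classify(errors.New("other"))) // 500
}
```

Using errors.Is rather than == also keeps the mapping intact if the sentinels are later wrapped with fmt.Errorf("...: %w", err) somewhere up the call stack.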
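Likewise for the streaming reader: the patched Read accumulates the hex chunk size one byte at a time and now fails with the dedicated errChunkTooBig once the running value exceeds maxChunkSize, instead of lumping it under errMalformedEncoding. A condensed sketch of just that size loop, assuming a hypothetical readChunkSize helper (the real reader goes on to verify the chunk signature and payload):

```go
package main

import (
	"bufio"
	"errors"
	"fmt"
	"strings"
)

const maxChunkSize = 16 << 20 // 16 MiB, as in the patch

var (
	errMalformedEncoding = errors.New("malformed chunked encoding")
	errChunkTooBig       = errors.New("chunk too big: choose chunk size <= 16MiB")
)

// readChunkSize consumes lowercase hex digits until the ';' that
// introduces ";chunk-signature=", checking the cap on every digit so
// a hostile header cannot drive the size arbitrarily high.
func readChunkSize(r *bufio.Reader) (int, error) {
	var size int
	for {
		b, err := r.ReadByte()
		if err != nil {
			return 0, err
		}
		if b == ';' {
			return size, nil
		}
		switch {
		case b >= '0' && b <= '9':
			size = size<<4 | int(b-'0')
		case b >= 'a' && b <= 'f':
			size = size<<4 | int(b-'a'+10)
		default:
			return 0, errMalformedEncoding
		}
		if size > maxChunkSize {
			return 0, errChunkTooBig
		}
	}
}

func main() {
	// 0x400 = 1024 bytes: accepted.
	n, err := readChunkSize(bufio.NewReader(strings.NewReader("400;chunk-signature=abc")))
	fmt.Println(n, err) // 1024 <nil>

	// 0x1000001 is one byte over the 16 MiB cap: rejected.
	_, err = readChunkSize(bufio.NewReader(strings.NewReader("1000001;chunk-signature=abc")))
	fmt.Println(err) // chunk too big: choose chunk size <= 16MiB
}
```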