handler/PUT: Handle signature verification through a custom reader. (#2066)

This change brings in a new signVerifyReader which provides an io.Reader
compatible reader and additionally implements a Verify() function.

The Verify() function validates the signature present in the incoming
request. This approach was chosen to avoid the complexities involved
in using io.Pipe().
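
Roughly, the pattern is a reader wrapper that hashes bytes as they pass through, so verification can happen after the stream is drained, with no pipe or goroutine. A minimal, self-contained sketch of that pattern (names here are hypothetical; the actual implementation is in signature-verify-reader.go below):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"
)

// hashingReader is a hypothetical sketch of the wrapper pattern: it hashes
// everything read through it, so the caller can check the digest once the
// stream is consumed.
type hashingReader struct {
	src io.Reader
	h   hash.Hash
}

func newHashingReader(src io.Reader) *hashingReader {
	return &hashingReader{src: src, h: sha256.New()}
}

func (r *hashingReader) Read(p []byte) (int, error) {
	n, err := r.src.Read(p)
	if n > 0 {
		r.h.Write(p[:n]) // hash.Hash writes never return an error.
	}
	return n, err
}

// Verify compares the computed digest against an expected hex value.
func (r *hashingReader) Verify(expectedHex string) error {
	if got := hex.EncodeToString(r.h.Sum(nil)); got != expectedHex {
		return fmt.Errorf("sha256 mismatch: got %s", got)
	}
	return nil
}

func main() {
	r := newHashingReader(strings.NewReader("hello"))
	io.Copy(io.Discard, r) // consume the stream.
	// The constant below is the well-known sha256 of "hello".
	fmt.Println(r.Verify("2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"))
}
```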

Thanks to Krishna for his input on this.

Fixes #2058
Fixes #2054
Fixes #2087
Harshavardhana 2016-07-05 01:04:50 -07:00 committed by Anand Babu (AB) Periasamy
parent 0540863663
commit 8a028a9efb
18 changed files with 380 additions and 518 deletions

View file

@ -105,12 +105,19 @@ const (
ErrBucketAlreadyOwnedByYou
// Add new error codes here.
// S3 extended errors.
ErrContentSHA256Mismatch
// Add new extended error codes here.
// Minio extended errors.
ErrReadQuorum
ErrWriteQuorum
ErrStorageFull
ErrObjectExistsAsDirectory
ErrPolicyNesting
// Add new extended error codes here.
// Please open an issue at https://github.com/minio/minio/issues before adding
// new error codes here.
)
// error code to APIError structure, these fields carry respective
@ -401,6 +408,14 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "Your previous request to create the named bucket succeeded and you already own it.",
HTTPStatusCode: http.StatusConflict,
},
/// S3 extensions.
ErrContentSHA256Mismatch: {
Code: "XAmzContentSHA256Mismatch",
Description: "The provided 'x-amz-content-sha256' header does not match what was computed.",
HTTPStatusCode: http.StatusBadRequest,
},
/// Minio extensions.
ErrStorageFull: {
Code: "XMinioStorageFull",
@ -438,8 +453,15 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
return ErrNone
}
// Verify if the underlying error is signature mismatch.
if err == errSignatureMismatch {
return ErrSignatureDoesNotMatch
switch err {
case errSignatureMismatch:
apiErr = ErrSignatureDoesNotMatch
case errContentSHA256Mismatch:
apiErr = ErrContentSHA256Mismatch
}
if apiErr != ErrNone {
// If there was a match in the above switch case.
return apiErr
}
switch err.(type) {
case StorageFull:

View file

@ -27,10 +27,6 @@ import (
"strings"
)
// http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the
// client did not calculate sha256 of the payload.
const unsignedPayload = "UNSIGNED-PAYLOAD"
// Verify if the request http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD"
func isRequestUnsignedPayload(r *http.Request) bool {
return r.Header.Get("x-amz-content-sha256") == unsignedPayload
@ -136,7 +132,9 @@ func isReqAuthenticated(r *http.Request) (s3Error APIErrorCode) {
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
validateRegion := true // Validate region.
var sha256sum string
if skipSHA256Calculation(r) {
// Skip calculating sha256 of the payload on the server,
// if the client requested it.
if skipContentSha256Cksum(r) {
sha256sum = unsignedPayload
} else {
sha256sum = hex.EncodeToString(sum256(payload))

View file

@ -62,18 +62,20 @@ func erasureCreateFile(disks []StorageAPI, volume string, path string, partName
if rErr != nil && rErr != io.ErrUnexpectedEOF {
return nil, 0, rErr
}
// Returns encoded blocks.
var enErr error
blocks, enErr = encodeData(buf[0:n], eInfo.DataBlocks, eInfo.ParityBlocks)
if enErr != nil {
return nil, 0, enErr
}
if n > 0 {
// Returns encoded blocks.
var enErr error
blocks, enErr = encodeData(buf[0:n], eInfo.DataBlocks, eInfo.ParityBlocks)
if enErr != nil {
return nil, 0, enErr
}
// Write to all disks.
if err = appendFile(disks, volume, path, blocks, eInfo.Distribution, hashWriters, writeQuorum); err != nil {
return nil, 0, err
// Write to all disks.
if err = appendFile(disks, volume, path, blocks, eInfo.Distribution, hashWriters, writeQuorum); err != nil {
return nil, 0, err
}
size += int64(n)
}
size += int64(n)
}
// Save the checksums.
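
The new `if n > 0` guard above matters because io.ReadFull reports a short final read as io.ErrUnexpectedEOF while still returning the bytes it read, and returns n == 0 with io.EOF when nothing is left; a quick self-contained illustration:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	buf := make([]byte, 8)

	// Fewer bytes than the buffer: io.ReadFull still hands back the
	// bytes it managed to read, flagged with io.ErrUnexpectedEOF.
	n, err := io.ReadFull(strings.NewReader("abc"), buf)
	fmt.Println(n, err) // prints: 3 unexpected EOF (the 3 bytes must still be encoded)

	// Nothing left at all: n == 0 with io.EOF, so there is no block
	// to encode or append.
	n, err = io.ReadFull(strings.NewReader(""), buf)
	fmt.Println(n, err) // prints: 0 EOF
}
```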

View file

@ -294,10 +294,22 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// Initialize md5 writer.
md5Writer := md5.New()
// Allocate 32KiB buffer for staging buffer.
// Limit the reader to its provided size if specified.
var limitDataReader io.Reader
if size > 0 {
// This is done to guard against erroneous clients sending more data than the set content size.
limitDataReader = io.LimitReader(data, size)
} else {
// else we read till EOF.
limitDataReader = data
}
// Allocate 128KiB buffer for staging buffer.
var buf = make([]byte, readSizeV1)
// Read till io.EOF.
for {
n, err := io.ReadFull(data, buf)
n, err := io.ReadFull(limitDataReader, buf)
if err == io.EOF {
break
}
@ -311,9 +323,22 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
}
}
// Validate if payload is valid.
if isSignVerify(data) {
if err := data.(*signVerifyReader).Verify(); err != nil {
// Incoming payload wrong, delete the temporary object.
fs.storage.DeleteFile(minioMetaBucket, tmpPartPath)
// Error return.
return "", toObjectErr(err, bucket, object)
}
}
newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
if md5Hex != "" {
if newMD5Hex != md5Hex {
// MD5 mismatch, delete the temporary object.
fs.storage.DeleteFile(minioMetaBucket, tmpPartPath)
// Returns md5 mismatch.
return "", BadDigest{md5Hex, newMD5Hex}
}
}
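
The io.LimitReader introduced above simply caps how many bytes can be drained from the client; anything beyond the declared size is never read. A tiny demonstration of that behavior:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// Client "declared" 5 bytes but sends 11.
	body := strings.NewReader("hello world")
	limited := io.LimitReader(body, 5)

	data, _ := io.ReadAll(limited)
	fmt.Printf("%q\n", data) // "hello" (the excess is simply not read)
}
```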

View file

@ -315,6 +315,16 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
// Initialize md5 writer.
md5Writer := md5.New()
// Limit the reader to its provided size if specified.
var limitDataReader io.Reader
if size > 0 {
// This is done to guard against erroneous clients sending more data than the set content size.
limitDataReader = io.LimitReader(data, size)
} else {
// else we read till EOF.
limitDataReader = data
}
if size == 0 {
// For size 0 we write a 0-byte file.
err := fs.storage.AppendFile(minioMetaBucket, tempObj, []byte(""))
@ -324,17 +334,17 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
} else {
// Allocate a buffer to Read() the object upload stream.
buf := make([]byte, readSizeV1)
// Read the buffer till io.EOF and append the read data to
// the temporary file.
// Read the buffer till io.EOF and append the read data to the temporary file.
for {
n, rErr := data.Read(buf)
n, rErr := limitDataReader.Read(buf)
if rErr != nil && rErr != io.EOF {
return "", toObjectErr(rErr, bucket, object)
}
if n > 0 {
// Update md5 writer.
md5Writer.Write(buf[:n])
wErr := fs.storage.AppendFile(minioMetaBucket, tempObj, buf[:n])
md5Writer.Write(buf[0:n])
wErr := fs.storage.AppendFile(minioMetaBucket, tempObj, buf[0:n])
if wErr != nil {
return "", toObjectErr(wErr, bucket, object)
}
@ -351,14 +361,27 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
if len(metadata) != 0 {
md5Hex = metadata["md5Sum"]
}
// Validate if payload is valid.
if isSignVerify(data) {
if vErr := data.(*signVerifyReader).Verify(); vErr != nil {
// Incoming payload wrong, delete the temporary object.
fs.storage.DeleteFile(minioMetaBucket, tempObj)
// Error return.
return "", toObjectErr(vErr, bucket, object)
}
}
if md5Hex != "" {
if newMD5Hex != md5Hex {
// MD5 mismatch, delete the temporary object.
fs.storage.DeleteFile(minioMetaBucket, tempObj)
// Returns md5 mismatch.
return "", BadDigest{md5Hex, newMD5Hex}
}
}
// Entire object was written to the temp location, now it's safe to rename it
// to the actual location.
// Entire object was written to the temp location, now it's safe to rename it to the actual location.
err := fs.storage.RenameFile(minioMetaBucket, tempObj, bucket, object)
if err != nil {
return "", toObjectErr(err, bucket, object)

View file

@ -158,8 +158,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
}
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize,
bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -270,7 +269,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t *testing
// Test case - 14.
// Input with size less than the size of actual data inside the reader.
{bucket, object, uploadID, 1, "abcd", "a35", int64(len("abcd") - 1), false, "",
fmt.Errorf("%s", "Bad digest: Expected a35 is not valid with what we calculated e2fc714c4727ee9395f324cd2e7f331f")},
fmt.Errorf("%s", "Bad digest: Expected a35 is not valid with what we calculated 900150983cd24fb0d6963f7d28e17f72")},
// Test case - 15-18.
// Validating for success cases.
{bucket, object, uploadID, 1, "abcd", "e2fc714c4727ee9395f324cd2e7f331f", int64(len("abcd")), true, "", nil},
@ -292,8 +291,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t *testing
// Failed as expected, but does it fail for the expected reason.
if actualErr != nil && !testCase.shouldPass {
if testCase.expectedError.Error() != actualErr.Error() {
t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1,
instanceType, testCase.expectedError.Error(), actualErr.Error())
t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1, instanceType, testCase.expectedError.Error(), actualErr.Error())
}
}
// Test passes as expected, but the output values are verified for correctness here.
@ -415,8 +413,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t *testing.T
}
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize,
bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1263,8 +1260,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
}
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize,
bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1503,8 +1499,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t *testing.T) {
}
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize,
bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1751,8 +1746,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t *
}
// Iterating over parts to generate multipart chunks.
for _, part := range parts {
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID, part.intputDataSize,
bytes.NewBufferString(part.inputReaderData), part.inputMd5)
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID, part.intputDataSize, bytes.NewBufferString(part.inputReaderData), part.inputMd5)
if err != nil {
t.Fatalf("%s : %s", instanceType, err)
}

View file

@ -88,7 +88,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t *testing.T)
// Test case - 9.
// Input with size less than the size of actual data inside the reader.
{bucket, object, "abcd", map[string]string{"md5Sum": "a35"}, int64(len("abcd") - 1), false, "",
fmt.Errorf("%s", "Bad digest: Expected a35 is not valid with what we calculated e2fc714c4727ee9395f324cd2e7f331f")},
fmt.Errorf("%s", "Bad digest: Expected a35 is not valid with what we calculated 900150983cd24fb0d6963f7d28e17f72")},
// Test case - 10-13.
// Validating for success cases.
{bucket, object, "abcd", map[string]string{"md5Sum": "e2fc714c4727ee9395f324cd2e7f331f"}, int64(len("abcd")), true, "", nil},
@ -110,8 +110,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t *testing.T)
// Failed as expected, but does it fail for the expected reason.
if actualErr != nil && !testCase.shouldPass {
if testCase.expectedError.Error() != actualErr.Error() {
t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1,
instanceType, testCase.expectedError.Error(), actualErr.Error())
t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1, instanceType, testCase.expectedError.Error(), actualErr.Error())
}
}
// Test passes as expected, but the output values are verified for correctness here.

View file

@ -17,10 +17,8 @@
package main
import (
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
@ -28,7 +26,6 @@ import (
"sort"
"strconv"
"strings"
"sync"
"time"
mux "github.com/gorilla/mux"
@ -51,14 +48,6 @@ func setGetRespHeaders(w http.ResponseWriter, reqParams url.Values) {
}
}
// http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the
// client did not calculate sha256 of the payload. Hence we skip calculating sha256.
// We also skip calculating sha256 for presigned requests without "x-amz-content-sha256" header.
func skipSHA256Calculation(r *http.Request) bool {
shaHeader := r.Header.Get("X-Amz-Content-Sha256")
return isRequestUnsignedPayload(r) || (isRequestPresignedSignatureV4(r) && shaHeader == "")
}
// errAllowableNotFound - For an anon user, return 404 if they have ListBucket permission, 403 otherwise;
// this is in keeping with the permissions sections of the docs of both:
// HEAD Object: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
@ -604,73 +593,10 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
// Create anonymous object.
md5Sum, err = api.ObjectAPI.PutObject(bucket, object, size, r.Body, metadata)
case authTypePresigned, authTypeSigned:
validateRegion := true // Validate region.
if skipSHA256Calculation(r) {
// Either sha256-header is "UNSIGNED-PAYLOAD" or this is a presigned PUT
// request without sha256-header.
var s3Error APIErrorCode
if isRequestSignatureV4(r) {
s3Error = doesSignatureMatch(unsignedPayload, r, validateRegion)
} else if isRequestPresignedSignatureV4(r) {
s3Error = doesPresignedSignatureMatch(unsignedPayload, r, validateRegion)
}
if s3Error != ErrNone {
if s3Error == ErrSignatureDoesNotMatch {
err = errSignatureMismatch
} else {
err = fmt.Errorf("%v", getAPIError(s3Error))
}
} else {
md5Sum, err = api.ObjectAPI.PutObject(bucket, object, size, r.Body, metadata)
}
} else {
// Sha256 of payload has to be calculated and matched with what was sent in the header.
// Initialize a pipe for data pipe line.
reader, writer := io.Pipe()
var wg = &sync.WaitGroup{}
// Start writing in a routine.
wg.Add(1)
go func() {
defer wg.Done()
shaWriter := sha256.New()
multiWriter := io.MultiWriter(shaWriter, writer)
if _, wErr := io.CopyN(multiWriter, r.Body, size); wErr != nil {
// Pipe closed.
if wErr == io.ErrClosedPipe {
return
}
errorIf(wErr, "Unable to read from HTTP body.")
writer.CloseWithError(wErr)
return
}
shaPayload := shaWriter.Sum(nil)
var s3Error APIErrorCode
if isRequestSignatureV4(r) {
s3Error = doesSignatureMatch(hex.EncodeToString(shaPayload), r, validateRegion)
} else if isRequestPresignedSignatureV4(r) {
s3Error = doesPresignedSignatureMatch(hex.EncodeToString(shaPayload), r, validateRegion)
}
var sErr error
if s3Error != ErrNone {
if s3Error == ErrSignatureDoesNotMatch {
sErr = errSignatureMismatch
} else {
sErr = fmt.Errorf("%v", getAPIError(s3Error))
}
writer.CloseWithError(sErr)
return
}
writer.Close()
}()
// Create object.
md5Sum, err = api.ObjectAPI.PutObject(bucket, object, size, reader, metadata)
// Close the pipe.
reader.Close()
// Wait for all the routines to finish.
wg.Wait()
}
// Initialize signature verifier.
reader := newSignVerify(r)
// Create object.
md5Sum, err = api.ObjectAPI.PutObject(bucket, object, size, reader, metadata)
}
if err != nil {
errorIf(err, "Unable to create an object.")
@ -781,6 +707,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
}
var partMD5 string
incomingMD5 := hex.EncodeToString(md5Bytes)
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
@ -792,77 +719,12 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
// No need to verify signature, anonymous request access is
// already allowed.
hexMD5 := hex.EncodeToString(md5Bytes)
partMD5, err = api.ObjectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, hexMD5)
// No need to verify signature, anonymous request access is already allowed.
partMD5, err = api.ObjectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5)
case authTypePresigned, authTypeSigned:
validateRegion := true // Validate region.
if skipSHA256Calculation(r) {
// Either sha256-header is "UNSIGNED-PAYLOAD" or this is a presigned
// request without sha256-header.
var s3Error APIErrorCode
if isRequestSignatureV4(r) {
s3Error = doesSignatureMatch(unsignedPayload, r, validateRegion)
} else if isRequestPresignedSignatureV4(r) {
s3Error = doesPresignedSignatureMatch(unsignedPayload, r, validateRegion)
}
if s3Error != ErrNone {
if s3Error == ErrSignatureDoesNotMatch {
err = errSignatureMismatch
} else {
err = fmt.Errorf("%v", getAPIError(s3Error))
}
} else {
md5SumHex := hex.EncodeToString(md5Bytes)
partMD5, err = api.ObjectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, md5SumHex)
}
} else {
// Initialize a pipe for data pipe line.
reader, writer := io.Pipe()
var wg = &sync.WaitGroup{}
// Start writing in a routine.
wg.Add(1)
go func() {
defer wg.Done()
shaWriter := sha256.New()
multiWriter := io.MultiWriter(shaWriter, writer)
if _, wErr := io.CopyN(multiWriter, r.Body, size); wErr != nil {
// Pipe closed, just ignore it.
if wErr == io.ErrClosedPipe {
return
}
errorIf(wErr, "Unable to read from HTTP request body.")
writer.CloseWithError(wErr)
return
}
shaPayload := shaWriter.Sum(nil)
var s3Error APIErrorCode
if isRequestSignatureV4(r) {
s3Error = doesSignatureMatch(hex.EncodeToString(shaPayload), r, validateRegion)
} else if isRequestPresignedSignatureV4(r) {
s3Error = doesPresignedSignatureMatch(hex.EncodeToString(shaPayload), r, validateRegion)
}
if s3Error != ErrNone {
if s3Error == ErrSignatureDoesNotMatch {
err = errSignatureMismatch
} else {
err = fmt.Errorf("%v", getAPIError(s3Error))
}
writer.CloseWithError(err)
return
}
// Close the writer.
writer.Close()
}()
md5SumHex := hex.EncodeToString(md5Bytes)
partMD5, err = api.ObjectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, md5SumHex)
// Close the pipe.
reader.Close()
// Wait for all the routines to finish.
wg.Wait()
}
// Initialize signature verifier.
reader := newSignVerify(r)
partMD5, err = api.ObjectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, incomingMD5)
}
if err != nil {
errorIf(err, "Unable to create object part.")

View file

@ -20,6 +20,7 @@ package objcache
import (
"errors"
"fmt"
"io"
"sync"
"time"
@ -85,10 +86,21 @@ func (c *Cache) Size(key string) int64 {
}
// Create validates and returns an in-memory writer referencing the entry.
func (c *Cache) Create(key string, size int64) (io.Writer, error) {
func (c *Cache) Create(key string, size int64) (writer io.Writer, err error) {
c.mutex.Lock()
defer c.mutex.Unlock()
// Recovers any panic generated and returns errors appropriately.
defer func() {
if r := recover(); r != nil {
var ok bool
err, ok = r.(error)
if !ok {
err = fmt.Errorf("objcache: %v", r)
}
}
}() // Do not crash the server.
valueLen := uint64(size)
if c.maxSize > 0 {
// Check if the size of the object is not bigger than the capacity of the cache.
@ -105,8 +117,8 @@ func (c *Cache) Create(key string, size int64) (io.Writer, error) {
return c.entries[key], nil
}
// Open - open the in-memory file, returns an memory reader.
// returns error ErrNotFoundInCache if fsPath does not exist.
// Open - opens the in-memory file and returns an in-memory read seeker.
// Returns ErrNotFoundInCache if the key does not exist.
func (c *Cache) Open(key string) (io.ReadSeeker, error) {
c.mutex.RLock()
defer c.mutex.RUnlock()
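
The deferred recover added to Create above converts a panic inside the allocation path into an ordinary error return instead of crashing the server. The same idiom in isolation (a hedged sketch, not the objcache code):

```go
package main

import (
	"errors"
	"fmt"
)

// safeAlloc demonstrates the defer/recover idiom used above: a panic inside
// the function body is converted into the named error return.
func safeAlloc(size int64) (buf []byte, err error) {
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			if err, ok = r.(error); !ok {
				err = fmt.Errorf("objcache: %v", r)
			}
		}
	}()
	if size < 0 {
		panic(errors.New("negative size")) // simulated internal panic.
	}
	return make([]byte, size), nil
}

func main() {
	_, err := safeAlloc(-1)
	fmt.Println(err) // "negative size": recovered, not a crash.
}
```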

View file

@ -779,6 +779,66 @@ func (s *TestSuiteCommon) TestListBuckets(c *C) {
c.Assert(err, IsNil)
}
// This test validates whether the PUT handler can successfully detect a signature mismatch.
func (s *TestSuiteCommon) TestValidateSignature(c *C) {
// generate a random bucket name.
bucketName := getRandomBucketName()
// HTTP request to create the bucket.
request, err := newTestRequest("PUT", getMakeBucketURL(s.endPoint, bucketName),
0, nil, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
client := http.Client{}
// Execute the HTTP request to create bucket.
response, err := client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
objName := "test-object"
// The body is deliberately set to nil so that the payload is generated for empty bytes.
// Create a new HTTP request with an incorrect secretKey to generate an incorrect signature.
secretKey := s.secretKey + "a"
request, err = newTestRequest("PUT", getPutObjectURL(s.endPoint, bucketName, objName), 0, nil, s.accessKey, secretKey)
c.Assert(err, IsNil)
response, err = client.Do(request)
c.Assert(err, IsNil)
verifyError(c, response, "SignatureDoesNotMatch", "The request signature we calculated does not match the signature you provided. Check your key and signing method.", http.StatusForbidden)
}
// This test validates whether the PUT handler can successfully detect a SHA256 mismatch.
func (s *TestSuiteCommon) TestSHA256Mismatch(c *C) {
// generate a random bucket name.
bucketName := getRandomBucketName()
// HTTP request to create the bucket.
request, err := newTestRequest("PUT", getMakeBucketURL(s.endPoint, bucketName),
0, nil, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
client := http.Client{}
// Execute the HTTP request to create bucket.
response, err := client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
objName := "test-object"
// The body is deliberately set to nil so that the payload is generated for empty bytes.
// Create a new HTTP request with an incorrect secretKey to generate an incorrect signature.
secretKey := s.secretKey + "a"
request, err = newTestRequest("PUT", getPutObjectURL(s.endPoint, bucketName, objName), 0, nil, s.accessKey, secretKey)
c.Assert(request.Header.Get("x-amz-content-sha256"), Equals, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
// Set the body after signing to generate a SHA256 mismatch.
request.Body = ioutil.NopCloser(bytes.NewReader([]byte("Hello, World")))
c.Assert(err, IsNil)
// execute the HTTP request.
response, err = client.Do(request)
c.Assert(err, IsNil)
verifyError(c, response, "XAmzContentSHA256Mismatch", "The provided 'x-amz-content-sha256' header does not match what was computed.", http.StatusBadRequest)
}
// TestPutObjectLongName - Validates the error response
// on an attempt to upload an object with a very long name.
func (s *TestSuiteCommon) TestPutObjectLongName(c *C) {
@ -790,11 +850,11 @@ func (s *TestSuiteCommon) TestPutObjectLongName(c *C) {
c.Assert(err, IsNil)
client := http.Client{}
// execute the HTTP request to create bucket.
// Execute the HTTP request to create bucket.
response, err := client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
// content for the object to be uploaded.
// Content for the object to be uploaded.
buffer := bytes.NewReader([]byte("hello world"))
// make long object name.
longObjName := fmt.Sprintf("%0255d/%0255d/%0255d", 1, 1, 1)
@ -808,6 +868,7 @@ func (s *TestSuiteCommon) TestPutObjectLongName(c *C) {
c.Assert(response.StatusCode, Equals, http.StatusOK)
// make long object name.
longObjName = fmt.Sprintf("%0256d", 1)
buffer = bytes.NewReader([]byte("hello world"))
request, err = newTestRequest("PUT", getPutObjectURL(s.endPoint, bucketName, longObjName),
int64(buffer.Len()), buffer, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
@ -1916,7 +1977,7 @@ func (s *TestSuiteCommon) TestObjectValidMD5(c *C) {
client = http.Client{}
response, err = client.Do(request)
c.Assert(err, IsNil)
// exepcting a successful upload.
// expecting a successful upload.
c.Assert(response.StatusCode, Equals, http.StatusOK)
objectName = "test-2-object"
buffer1 = bytes.NewReader(data)
@ -1925,7 +1986,8 @@ func (s *TestSuiteCommon) TestObjectValidMD5(c *C) {
int64(buffer1.Len()), buffer1, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
// set Content-Md5 to invalid value.
request.Header.Set("Content-Md5", "WvLTlMrX9NpYDQlEIFlnDw==")
request.Header.Set("Content-Md5", "kvLTlMrX9NpYDQlEIFlnDA==")
// expecting a failure during upload.
client = http.Client{}
response, err = client.Do(request)
c.Assert(err, IsNil)
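
The long constant asserted in TestSHA256Mismatch is simply the SHA-256 digest of an empty payload, which is why a request signed with a nil body must carry it; easy to confirm:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha256.Sum256(nil) // digest of zero bytes.
	fmt.Println(hex.EncodeToString(sum[:]))
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}
```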

View file

@ -1,284 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"bytes"
"crypto/md5"
"encoding/hex"
"io/ioutil"
"net/http"
. "gopkg.in/check.v1"
)
// API suite container for XL specific tests.
type TestSuiteXL struct {
testServer TestServer
endPoint string
accessKey string
secretKey string
}
// Initializing the test suite.
var _ = Suite(&TestSuiteXL{})
// Setting up the test suite.
// Starting the Test server with temporary XL backend.
func (s *TestSuiteXL) SetUpSuite(c *C) {
s.testServer = StartTestServer(c, "XL")
s.endPoint = s.testServer.Server.URL
s.accessKey = s.testServer.AccessKey
s.secretKey = s.testServer.SecretKey
}
// Called implicitly by "gopkg.in/check.v1" after all tests are run.
func (s *TestSuiteXL) TearDownSuite(c *C) {
s.testServer.Stop()
}
// TestGetOnObject - Asserts properties for GET on an object.
// GET requests on an object retrieves the object from server.
// Tests behaviour when If-Match/If-None-Match headers are set on the request.
func (s *TestSuiteXL) TestGetOnObject(c *C) {
// generate a random bucket name.
bucketName := getRandomBucketName()
// make HTTP request to create the bucket.
request, err := newTestRequest("PUT", getMakeBucketURL(s.endPoint, bucketName),
0, nil, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
client := http.Client{}
// execute the HTTP request to create bucket.
response, err := client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
buffer1 := bytes.NewReader([]byte("hello world"))
request, err = newTestRequest("PUT", s.endPoint+"/"+bucketName+"/object1",
int64(buffer1.Len()), buffer1, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
// GetObject with If-Match sending correct etag in request headers
// is expected to return the object
md5Writer := md5.New()
md5Writer.Write([]byte("hello world"))
etag := hex.EncodeToString(md5Writer.Sum(nil))
request, err = newTestRequest("GET", s.endPoint+"/"+bucketName+"/object1",
0, nil, s.accessKey, s.secretKey)
request.Header.Set("If-Match", etag)
response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
var body []byte
body, err = ioutil.ReadAll(response.Body)
c.Assert(err, IsNil)
c.Assert(string(body), Equals, "hello world")
// GetObject with If-Match sending mismatching etag in request headers
// is expected to return an error response with ErrPreconditionFailed.
request, err = newTestRequest("GET", s.endPoint+"/"+bucketName+"/object1",
0, nil, s.accessKey, s.secretKey)
request.Header.Set("If-Match", etag[1:])
response, err = client.Do(request)
verifyError(c, response, "PreconditionFailed", "At least one of the preconditions you specified did not hold.", http.StatusPreconditionFailed)
// GetObject with If-None-Match sending mismatching etag in request headers
// is expected to return the object.
request, err = newTestRequest("GET", s.endPoint+"/"+bucketName+"/object1",
0, nil, s.accessKey, s.secretKey)
request.Header.Set("If-None-Match", etag[1:])
response, err = client.Do(request)
c.Assert(response.StatusCode, Equals, http.StatusOK)
body, err = ioutil.ReadAll(response.Body)
c.Assert(err, IsNil)
c.Assert(string(body), Equals, "hello world")
// GetObject with If-None-Match sending matching etag in request headers
// is expected to return (304) Not-Modified.
request, err = newTestRequest("GET", s.endPoint+"/"+bucketName+"/object1",
0, nil, s.accessKey, s.secretKey)
request.Header.Set("If-None-Match", etag)
response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusNotModified)
}
// TestCopyObject - Validates copy object.
// The following is the test flow.
// 1. Create bucket.
// 2. Insert Object.
// 3. Use "X-Amz-Copy-Source" header to copy the previously inserted object.
// 4. Validate the content of copied object.
func (s *TestSuiteXL) TestCopyObject(c *C) {
// generate a random bucket name.
bucketName := getRandomBucketName()
// HTTP request to create the bucket.
request, err := newTestRequest("PUT", getMakeBucketURL(s.endPoint, bucketName),
0, nil, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
client := http.Client{}
// execute the HTTP request to create bucket.
response, err := client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
// content for the object to be inserted.
buffer1 := bytes.NewReader([]byte("hello world"))
objectName := "testObject"
// create HTTP request for object upload.
request, err = newTestRequest("PUT", getPutObjectURL(s.endPoint, bucketName, objectName),
int64(buffer1.Len()), buffer1, s.accessKey, s.secretKey)
request.Header.Set("Content-Type", "application/json")
c.Assert(err, IsNil)
// execute the HTTP request for object upload.
response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
objectName2 := "testObject2"
// creating HTTP request for uploading the object.
request, err = newTestRequest("PUT", getPutObjectURL(s.endPoint, bucketName, objectName2),
0, nil, s.accessKey, s.secretKey)
// setting the "X-Amz-Copy-Source" to allow copying the content of
// previously uploaded object.
request.Header.Set("X-Amz-Copy-Source", "/"+bucketName+"/"+objectName)
c.Assert(err, IsNil)
// execute the HTTP request.
// the copied object is expected to have the content of the previously uploaded object.
response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
// creating HTTP request to fetch the previously uploaded object.
request, err = newTestRequest("GET", getGetObjectURL(s.endPoint, bucketName, objectName2),
0, nil, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
// executing the HTTP request.
response, err = client.Do(request)
c.Assert(err, IsNil)
// validating the response status code.
c.Assert(response.StatusCode, Equals, http.StatusOK)
// reading the response body.
// response body is expected to have the copied content of the first uploaded object.
object, err := ioutil.ReadAll(response.Body)
c.Assert(err, IsNil)
c.Assert(string(object), Equals, "hello world")
c.Assert(response.Header.Get("Content-Type"), Equals, "application/json")
}
// TestContentTypePersists - Objects are first uploaded with different Content-Type values.
// Then HEAD and GET requests on these objects validate whether the Content-Type set during upload persists.
func (s *TestSuiteXL) TestContentTypePersists(c *C) {
// generate a random bucket name.
bucketName := getRandomBucketName()
// HTTP request to create the bucket.
request, err := newTestRequest("PUT", getMakeBucketURL(s.endPoint, bucketName),
0, nil, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
client := http.Client{}
// execute the HTTP request to create bucket.
response, err := client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
// Uploading a new object with Content-Type "application/zip".
// content for the object to be uploaded.
buffer1 := bytes.NewReader([]byte("hello world"))
objectName := "test-1-object"
// constructing HTTP request for object upload.
request, err = newTestRequest("PUT", getPutObjectURL(s.endPoint, bucketName, objectName),
int64(buffer1.Len()), buffer1, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
// setting the Content-Type header to be application/zip.
// After object upload a validation will be done to see if the Content-Type set persists.
request.Header.Set("Content-Type", "application/zip")
client = http.Client{}
// execute the HTTP request for object upload.
response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
// Fetching the object info using HEAD request for the object which was uploaded above.
request, err = newTestRequest("HEAD", getHeadObjectURL(s.endPoint, bucketName, objectName),
0, nil, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
// Execute the HTTP request.
response, err = client.Do(request)
c.Assert(err, IsNil)
// Verify if the Content-Type header set during the object upload persists.
c.Assert(response.Header.Get("Content-Type"), Equals, "application/zip")
// Fetching the object itself and then verify the Content-Type header.
request, err = newTestRequest("GET", getGetObjectURL(s.endPoint, bucketName, objectName),
0, nil, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
client = http.Client{}
// Execute the HTTP to fetch the object.
response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
// Verify if the Content-Type header set during the object upload persists.
c.Assert(response.Header.Get("Content-Type"), Equals, "application/zip")
// Uploading a new object with Content-Type "application/json".
objectName = "test-2-object"
buffer2 := bytes.NewReader([]byte("hello world"))
request, err = newTestRequest("PUT", getPutObjectURL(s.endPoint, bucketName, objectName),
int64(buffer2.Len()), buffer2, s.accessKey, s.secretKey)
// deleting the old header value.
delete(request.Header, "Content-Type")
// setting the request header to be application/json.
request.Header.Add("Content-Type", "application/json")
c.Assert(err, IsNil)
// Execute the HTTP request to upload the object.
response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
// Obtain the info of the object which was uploaded above using HEAD request.
request, err = newTestRequest("HEAD", getHeadObjectURL(s.endPoint, bucketName, objectName),
0, nil, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
// Execute the HTTP request.
response, err = client.Do(request)
c.Assert(err, IsNil)
// Assert if the content-type header set during the object upload persists.
c.Assert(response.Header.Get("Content-Type"), Equals, "application/json")
// Fetch the object and assert whether the Content-Type header persists.
request, err = newTestRequest("GET", getGetObjectURL(s.endPoint, bucketName, objectName),
0, nil, s.accessKey, s.secretKey)
c.Assert(err, IsNil)
// Execute the HTTP request.
response, err = client.Do(request)
c.Assert(err, IsNil)
// Assert if the content-type header set during the object upload persists.
c.Assert(response.Header.Get("Content-Type"), Equals, "application/json")
}

View file

@ -26,6 +26,18 @@ import (
"unicode/utf8"
)
// http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the
// client did not calculate sha256 of the payload.
const unsignedPayload = "UNSIGNED-PAYLOAD"
// http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the
// client did not calculate sha256 of the payload. Hence we skip calculating sha256.
// We also skip calculating sha256 for presigned requests without "x-amz-content-sha256" header.
func skipContentSha256Cksum(r *http.Request) bool {
contentSha256 := r.Header.Get("X-Amz-Content-Sha256")
return isRequestUnsignedPayload(r) || (isRequestPresignedSignatureV4(r) && contentSha256 == "")
}
// isValidRegion - verify if incoming region value is valid with configured Region.
func isValidRegion(reqRegion string, confRegion string) bool {
if confRegion == "" || confRegion == "US" {
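
A standalone sketch of the predicate above; the presigned-request check is simplified here to "X-Amz-Credential present in the query", which is an assumption made for illustration:

```go
package main

import (
	"fmt"
	"net/http"
)

const unsignedPayload = "UNSIGNED-PAYLOAD"

// skipContentSha256 mirrors the predicate above. The presigned check is
// simplified to "X-Amz-Credential present in the query", an assumption
// made for this sketch.
func skipContentSha256(r *http.Request) bool {
	sha := r.Header.Get("X-Amz-Content-Sha256")
	presigned := r.URL.Query().Get("X-Amz-Credential") != ""
	return sha == unsignedPayload || (presigned && sha == "")
}

func main() {
	r1, _ := http.NewRequest("PUT", "http://localhost/bucket/object", nil)
	r1.Header.Set("X-Amz-Content-Sha256", unsignedPayload)
	fmt.Println(skipContentSha256(r1)) // true: client opted out of hashing.

	r2, _ := http.NewRequest("PUT", "http://localhost/bucket/object?X-Amz-Credential=AKIAEXAMPLE", nil)
	fmt.Println(skipContentSha256(r2)) // true: presigned, no sha header.
}
```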

View file

@ -216,6 +216,11 @@ func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, validate
return ErrInvalidAccessKeyID
}
// Hashed payload mismatch, return content sha256 mismatch.
if hashedPayload != req.URL.Query().Get("X-Amz-Content-Sha256") {
return ErrContentSHA256Mismatch
}
// Verify if region is valid.
sRegion := preSignValues.Credential.scope.region
// Should validate region, only if region is set. Some operations
@ -235,9 +240,8 @@ func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, validate
query := make(url.Values)
if req.URL.Query().Get("X-Amz-Content-Sha256") != "" {
query.Set("X-Amz-Content-Sha256", hashedPayload)
} else {
hashedPayload = "UNSIGNED-PAYLOAD"
}
query.Set("X-Amz-Algorithm", signV4Algorithm)
if time.Now().UTC().Sub(preSignValues.Date) > time.Duration(preSignValues.Expires) {
@ -331,6 +335,11 @@ func doesSignatureMatch(hashedPayload string, r *http.Request, validateRegion bo
return err
}
// Hashed payload mismatch, return content sha256 mismatch.
if hashedPayload != req.Header.Get("X-Amz-Content-Sha256") {
return ErrContentSHA256Mismatch
}
// Extract all the signed headers along with its values.
extractedSignedHeaders := extractSignedHeaders(signV4Values.SignedHeaders, req.Header)

signature-verify-reader.go Normal file
View file

@ -0,0 +1,104 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"hash"
"io"
"net/http"
)
// signVerifyReader represents an io.Reader compatible interface which
// transparently calculates sha256; the caller should call `Verify()` to
// validate the signature header.
type signVerifyReader struct {
Request *http.Request // HTTP request to be validated and read.
HashWriter hash.Hash // sha256 hash writer.
}
// Initializes a new signature verify reader.
func newSignVerify(req *http.Request) *signVerifyReader {
return &signVerifyReader{
Request: req, // Save the request.
HashWriter: sha256.New(), // Initialize sha256.
}
}
// isSignVerify - is given reader a `signVerifyReader`.
func isSignVerify(reader io.Reader) bool {
_, ok := reader.(*signVerifyReader)
return ok
}
// Verify - verifies signature and returns error upon signature mismatch.
func (v *signVerifyReader) Verify() error {
validateRegion := true // Defaults to validating region.
shaPayloadHex := hex.EncodeToString(v.HashWriter.Sum(nil))
if skipContentSha256Cksum(v.Request) {
// Sets 'UNSIGNED-PAYLOAD' if the client requested not to calculate sha256.
shaPayloadHex = unsignedPayload
}
// Signature verification block.
var s3Error APIErrorCode
if isRequestSignatureV4(v.Request) {
s3Error = doesSignatureMatch(shaPayloadHex, v.Request, validateRegion)
} else if isRequestPresignedSignatureV4(v.Request) {
s3Error = doesPresignedSignatureMatch(shaPayloadHex, v.Request, validateRegion)
} else {
// Couldn't figure out the request type, set the error as AccessDenied.
s3Error = ErrAccessDenied
}
// Set signature error as 'errSignatureMismatch' if possible.
var sErr error
// Validate if we have received signature mismatch or sha256 mismatch.
if s3Error != ErrNone {
switch s3Error {
case ErrContentSHA256Mismatch:
sErr = errContentSHA256Mismatch
case ErrSignatureDoesNotMatch:
sErr = errSignatureMismatch
default:
sErr = fmt.Errorf("%v", getAPIError(s3Error))
}
return sErr
}
return nil
}
// Reads from the request body and writes to the hash writer. All reads
// performed through it are matched with corresponding writes to the hash
// writer. There is no internal buffering; the write must complete before
// the read completes. Any error encountered while writing is reported as
// a read error. As a special case, `Read()` skips writing to the hash
// writer if the client requested the sha256 calculation to be skipped.
func (v *signVerifyReader) Read(b []byte) (n int, err error) {
n, err = v.Request.Body.Read(b)
if n > 0 {
// Skip calculating the hash.
if skipContentSha256Cksum(v.Request) {
return
}
// Match each read with a corresponding write to the hash writer.
if n, err = v.HashWriter.Write(b[:n]); err != nil {
return n, err
}
}
return
}
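
The reader relies on hash.Hash being incremental: feeding chunks as they arrive yields the same digest as hashing the whole payload at once, so Verify() can run after the final Read without buffering the body. A quick check of that property:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

func main() {
	payload := []byte("some object payload")

	// One-shot digest.
	oneShot := sha256.Sum256(payload)

	// Incremental digest, written in arbitrary chunk sizes.
	h := sha256.New()
	h.Write(payload[:5])
	h.Write(payload[5:])

	fmt.Println(bytes.Equal(oneShot[:], h.Sum(nil))) // true
}
```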

View file

@ -29,3 +29,6 @@ var errSignatureMismatch = errors.New("Signature does not match")
// used when token used for authentication by the MinioBrowser has expired
var errInvalidToken = errors.New("Invalid token")
// If the x-amz-content-sha256 header value does not match what we calculate.
var errContentSHA256Mismatch = errors.New("sha256 mismatch")

View file

@ -356,6 +356,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
bucket := vars["bucket"]
object := vars["object"]
// FIXME: Allow file upload handler to set content-type, content-encoding.
if _, err := web.ObjectAPI.PutObject(bucket, object, -1, r.Body, nil); err != nil {
writeWebErrorResponse(w, err)
}

View file

@ -302,10 +302,23 @@ func (xl xlObjects) NewMultipartUpload(bucket, object string, meta map[string]st
return xl.newMultipartUpload(bucket, object, meta)
}
// putObjectPart - reads incoming data until EOF for the part file on
// an ongoing multipart transaction. Internally incoming data is
// erasure coded and written across all disks.
func (xl xlObjects) putObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error) {
// PutObjectPart - reads incoming stream and internally erasure codes
// them. This call is similar to single put operation but it is part
// of the multipart transaction.
//
// Implements S3 compatible Upload Part API.
func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return "", BucketNameInvalid{Bucket: bucket}
}
// Verify whether the bucket exists.
if !xl.isBucketExist(bucket) {
return "", BucketNotFound{Bucket: bucket}
}
if !IsValidObjectName(object) {
return "", ObjectNameInvalid{Bucket: bucket, Object: object}
}
// Hold the lock and start the operation.
uploadIDPath := pathJoin(mpartMetaPrefix, bucket, object, uploadID)
nsMutex.Lock(minioMetaBucket, uploadIDPath)
@ -345,7 +358,7 @@ func (xl xlObjects) putObjectPart(bucket string, object string, uploadID string,
if size > 0 {
// This is done to guard against erroneous clients sending
// more data than the set content size.
data = io.LimitReader(data, size+1)
data = io.LimitReader(data, size)
} // else we read till EOF.
// Construct a tee reader for md5sum.
@ -369,6 +382,16 @@ func (xl xlObjects) putObjectPart(bucket string, object string, uploadID string,
size = sizeWritten
}
// Validate if payload is valid.
if isSignVerify(data) {
if err = data.(*signVerifyReader).Verify(); err != nil {
// Incoming payload wrong, delete the temporary object.
xl.deleteObject(minioMetaBucket, tmpPartPath)
// Error return.
return "", toObjectErr(err, bucket, object)
}
}
// Calculate new md5sum.
newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
if md5Hex != "" {
@ -422,26 +445,6 @@ func (xl xlObjects) putObjectPart(bucket string, object string, uploadID string,
return newMD5Hex, nil
}
// PutObjectPart - reads incoming stream and internally erasure codes
// them. This call is similar to single put operation but it is part
// of the multipart transaction.
//
// Implements S3 compatible Upload Part API.
func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return "", BucketNameInvalid{Bucket: bucket}
}
// Verify whether the bucket exists.
if !xl.isBucketExist(bucket) {
return "", BucketNotFound{Bucket: bucket}
}
if !IsValidObjectName(object) {
return "", ObjectNameInvalid{Bucket: bucket, Object: object}
}
return xl.putObjectPart(bucket, object, uploadID, partID, size, data, md5Hex)
}
// listObjectParts - wrapper reading `xl.json` for a given object and
// uploadID. Lists all the parts captured inside `xl.json` content.
func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
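
The tee reader mentioned in the hunk above makes the md5 accumulate as a side effect of the normal read path, which is also why Verify() can only run once the stream is fully drained. A minimal io.TeeReader demonstration:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	md5Writer := md5.New()
	// Every byte read from tee is also written into md5Writer.
	tee := io.TeeReader(strings.NewReader("hello world"), md5Writer)

	data, _ := io.ReadAll(tee) // drain the stream.
	fmt.Printf("%q md5=%s\n", data, hex.EncodeToString(md5Writer.Sum(nil)))
}
```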

View file

@ -117,18 +117,20 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
return err
} // Cache has not been found, fill the cache.
// Proceed to set the cache.
var newBuffer io.Writer
// Cache is only set if whole object is being read.
if startOffset == 0 && length == xlMeta.Stat.Size {
// Proceed to set the cache.
var newBuffer io.Writer
// Create a new entry in memory of length.
newBuffer, err = xl.objCache.Create(path.Join(bucket, object), length)
if err != nil {
if err == nil {
// Create a multi writer to write to both memory and client response.
mw = io.MultiWriter(newBuffer, writer)
}
if err != nil && err != objcache.ErrCacheFull {
// Perhaps cache is full, returns here.
return err
}
// Create a multi writer to write to both memory and client response.
mw = io.MultiWriter(newBuffer, writer)
}
}
@ -373,15 +375,18 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
md5Writer := md5.New()
// Limit the reader to its provided size if specified.
var limitDataReader io.Reader
if size > 0 {
// This is done to guard against erroneous clients sending
// more data than the set content size.
data = io.LimitReader(data, size+1)
} // else we read till EOF.
limitDataReader = io.LimitReader(data, size)
} else {
// else we read till EOF.
limitDataReader = data
}
// Tee reader combines incoming data stream and md5, data read
// from input stream is written to md5.
teeReader := io.TeeReader(data, md5Writer)
// Tee reader combines incoming data stream and md5, data read from input stream is written to md5.
teeReader := io.TeeReader(limitDataReader, md5Writer)
// Collect all the previous erasure infos across the disk.
var eInfos []erasureInfo
@ -419,6 +424,16 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
}
}
// Validate if payload is valid.
if isSignVerify(data) {
if vErr := data.(*signVerifyReader).Verify(); vErr != nil {
// Incoming payload wrong, delete the temporary object.
xl.deleteObject(minioMetaTmpBucket, tempObj)
// Error return.
return "", toObjectErr(vErr, bucket, object)
}
}
// md5Hex representation.
md5Hex := metadata["md5Sum"]
if md5Hex != "" {