refactor ObjectLayer PutObject and PutObjectPart (#4925)

This change refactors the ObjectLayer PutObject and PutObjectPart
functions. Instead of passing an io.Reader and a size to PUT operations,
ObjectLayer now expects a *HashReader.
A HashReader verifies the MD5 sum (and the SHA256 sum, if required) of the object.
This change updates all PutObject(Part) calls and removes now-unnecessary code
from all ObjectLayer implementations.
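
For illustration, a minimal sketch of the new call shape (objLayer, bucket,
and the payload below are hypothetical; error handling is elided):

    // Before: reader, size, metadata, and sha256sum were passed separately.
    // objInfo, err := objLayer.PutObject(bucket, "object", size, reader, metadata, sha256sum)

    // After: the reader is wrapped in a HashReader that carries the size and
    // the expected checksums; the implementation calls data.Verify() itself.
    payload := []byte("hello")
    hr := NewHashReader(bytes.NewReader(payload), int64(len(payload)), "", "")
    objInfo, err := objLayer.PutObject(bucket, "object", hr, nil)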

Fixes #4923
Andreas Auernhammer 2017-09-19 12:40:27 -07:00 committed by Dee Koder
parent f8024cadbb
commit 79ba4d3f33
38 changed files with 310 additions and 663 deletions


@ -943,7 +943,7 @@ func TestHealObjectHandler(t *testing.T) {
}
_, err = adminTestBed.objLayer.PutObject(bucketName, objName,
int64(len("hello")), bytes.NewReader([]byte("hello")), nil, "")
NewHashReader(bytes.NewReader([]byte("hello")), int64(len("hello")), "", ""), nil)
if err != nil {
t.Fatalf("Failed to create %s - %v", objName, err)
}
@ -1083,7 +1083,7 @@ func TestHealUploadHandler(t *testing.T) {
// Upload a part.
partID := 1
_, err = adminTestBed.objLayer.PutObjectPart(bucketName, objName, uploadID,
partID, int64(len("hello")), bytes.NewReader([]byte("hello")), "", "")
partID, NewHashReader(bytes.NewReader([]byte("hello")), int64(len("hello")), "", ""))
if err != nil {
t.Fatalf("Failed to upload part %d of %s/%s - %v", partID,
bucketName, objName, err)


@ -58,7 +58,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata, sha256sum)
objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i), NewHashReader(bytes.NewBuffer(textData), int64(len(textData)), metadata["etag"], sha256sum), metadata)
if err != nil {
b.Fatal(err)
}
@ -118,7 +118,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
metadata := make(map[string]string)
metadata["etag"] = getMD5Hash([]byte(textPartData))
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(bucket, object, uploadID, j, int64(len(textPartData)), bytes.NewBuffer(textPartData), metadata["etag"], sha256sum)
partInfo, err = obj.PutObjectPart(bucket, object, uploadID, j, NewHashReader(bytes.NewBuffer(textPartData), int64(len(textPartData)), metadata["etag"], sha256sum))
if err != nil {
b.Fatal(err)
}
@ -216,7 +216,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
metadata["etag"] = getMD5Hash(textData)
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata, sha256sum)
objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), NewHashReader(bytes.NewBuffer(textData), int64(len(textData)), metadata["etag"], sha256sum), metadata)
if err != nil {
b.Fatal(err)
}
@ -329,7 +329,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
i := 0
for pb.Next() {
// insert the object.
objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata, sha256sum)
objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i), NewHashReader(bytes.NewBuffer(textData), int64(len(textData)), metadata["etag"], sha256sum), metadata)
if err != nil {
b.Fatal(err)
}
@ -372,7 +372,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
sha256sum := ""
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata, sha256sum)
objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), NewHashReader(bytes.NewBuffer(textData), int64(len(textData)), metadata["etag"], sha256sum), metadata)
if err != nil {
b.Fatal(err)
}


@ -562,7 +562,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
defer objectLock.Unlock()
objInfo, err := objectAPI.PutObject(bucket, object, fileSize, fileBody, metadata, sha256sum)
objInfo, err := objectAPI.PutObject(bucket, object, NewHashReader(fileBody, fileSize, metadata["etag"], sha256sum), metadata)
if err != nil {
errorIf(err, "Unable to create object.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)


@ -632,8 +632,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
for i := 0; i < 10; i++ {
objectName := "test-object-" + strconv.Itoa(i)
// uploading the object.
_, err = obj.PutObject(bucketName, objectName, int64(len(contentBytes)), bytes.NewBuffer(contentBytes),
make(map[string]string), sha256sum)
_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)


@ -218,7 +218,7 @@ func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy *bucketPolicy) err
return err
}
defer objLock.Unlock()
if _, err := objAPI.PutObject(minioMetaBucket, policyPath, int64(len(buf)), bytes.NewReader(buf), nil, ""); err != nil {
if _, err := objAPI.PutObject(minioMetaBucket, policyPath, NewHashReader(bytes.NewReader(buf), int64(len(buf)), "", ""), nil); err != nil {
errorIf(err, "Unable to set policy for the bucket %s", bucket)
return errorCause(err)
}


@ -465,7 +465,7 @@ func persistNotificationConfig(bucket string, ncfg *notificationConfig, obj Obje
// write object to path
sha256Sum := getSHA256Hash(buf)
_, err = obj.PutObject(minioMetaBucket, ncPath, int64(len(buf)), bytes.NewReader(buf), nil, sha256Sum)
_, err = obj.PutObject(minioMetaBucket, ncPath, NewHashReader(bytes.NewReader(buf), int64(len(buf)), "", sha256Sum), nil)
if err != nil {
errorIf(err, "Unable to write bucket notification configuration.")
return err
@ -492,7 +492,7 @@ func persistListenerConfig(bucket string, lcfg []listenerConfig, obj ObjectLayer
// write object to path
sha256Sum := getSHA256Hash(buf)
_, err = obj.PutObject(minioMetaBucket, lcPath, int64(len(buf)), bytes.NewReader(buf), nil, sha256Sum)
_, err = obj.PutObject(minioMetaBucket, lcPath, NewHashReader(bytes.NewReader(buf), int64(len(buf)), "", sha256Sum), nil)
if err != nil {
errorIf(err, "Unable to write bucket listener configuration to object layer.")
}


@ -62,7 +62,7 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) {
notificationXML += "</NotificationConfiguration>"
size := int64(len([]byte(notificationXML)))
reader := bytes.NewReader([]byte(notificationXML))
if _, err := xl.PutObject(minioMetaBucket, bucketConfigPrefix+"/"+bucketName+"/"+bucketNotificationConfig, size, reader, nil, ""); err != nil {
if _, err := xl.PutObject(minioMetaBucket, bucketConfigPrefix+"/"+bucketName+"/"+bucketNotificationConfig, NewHashReader(reader, size, "", ""), nil); err != nil {
t.Fatal("Unexpected error:", err)
}


@ -235,9 +235,8 @@ func prepareFormatXLHealFreshDisks(obj ObjectLayer) ([]StorageAPI, error) {
bucket := "bucket"
object := "object"
sha256sum := ""
_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
return []StorageAPI{}, err
}
@ -326,9 +325,8 @@ func TestFormatXLHealCorruptedDisks(t *testing.T) {
bucket := "bucket"
object := "object"
sha256sum := ""
_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatal(err)
}
@ -401,9 +399,8 @@ func TestFormatXLReorderByInspection(t *testing.T) {
bucket := "bucket"
object := "object"
sha256sum := ""
_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatal(err)
}


@ -52,9 +52,7 @@ func TestReadFSMetadata(t *testing.T) {
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
t.Fatal("Unexpected err: ", err)
}
sha256sum := ""
if _, err := obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")),
map[string]string{"X-Amz-Meta-AppId": "a"}, sha256sum); err != nil {
if _, err := obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
t.Fatal("Unexpected err: ", err)
}
@ -89,9 +87,7 @@ func TestWriteFSMetadata(t *testing.T) {
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
t.Fatal("Unexpected err: ", err)
}
sha256sum := ""
if _, err := obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")),
map[string]string{"X-Amz-Meta-AppId": "a"}, sha256sum); err != nil {
if _, err := obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
t.Fatal("Unexpected err: ", err)
}


@ -17,10 +17,8 @@
package cmd
import (
"crypto/md5"
"encoding/hex"
"fmt"
"hash"
"io"
"os"
pathutil "path"
@ -28,7 +26,6 @@ import (
"time"
"github.com/minio/minio/pkg/lock"
"github.com/minio/sha256-simd"
)
const (
@ -459,7 +456,7 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
}()
partInfo, err := fs.PutObjectPart(dstBucket, dstObject, uploadID, partID, length, pipeReader, "", "")
partInfo, err := fs.PutObjectPart(dstBucket, dstObject, uploadID, partID, NewHashReader(pipeReader, length, "", ""))
if err != nil {
return pi, toObjectErr(err, dstBucket, dstObject)
}
@ -474,7 +471,7 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
// an ongoing multipart transaction. Internally incoming data is
// written to '.minio.sys/tmp' location and safely renamed to
// '.minio.sys/multipart' for each part.
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) {
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (pi PartInfo, e error) {
if err := checkPutObjectPartArgs(bucket, object, fs); err != nil {
return pi, err
}
@ -523,36 +520,14 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
partSuffix := fmt.Sprintf("object%d", partID)
tmpPartPath := uploadID + "." + mustGetUUID() + "." + partSuffix
// Initialize md5 writer.
md5Writer := md5.New()
hashWriters := []io.Writer{md5Writer}
var sha256Writer hash.Hash
if sha256sum != "" {
sha256Writer = sha256.New()
hashWriters = append(hashWriters, sha256Writer)
}
multiWriter := io.MultiWriter(hashWriters...)
// Limit the reader to its provided size if specified.
var limitDataReader io.Reader
if size > 0 {
// This is done so that we can avoid erroneous clients sending more data than the set content size.
limitDataReader = io.LimitReader(data, size)
} else {
// else we read till EOF.
limitDataReader = data
}
teeReader := io.TeeReader(limitDataReader, multiWriter)
bufSize := int64(readSizeV1)
if size > 0 && bufSize > size {
if size := data.Size(); size > 0 && bufSize > size {
bufSize = size
}
buf := make([]byte, int(bufSize))
buf := make([]byte, bufSize)
fsPartPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tmpPartPath)
bytesWritten, cErr := fsCreateFile(fsPartPath, teeReader, buf, size)
bytesWritten, cErr := fsCreateFile(fsPartPath, data, buf, data.Size())
if cErr != nil {
fsRemoveFile(fsPartPath)
return pi, toObjectErr(cErr, minioMetaTmpBucket, tmpPartPath)
@ -560,7 +535,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header.
if bytesWritten < size {
if bytesWritten < data.Size() {
fsRemoveFile(fsPartPath)
return pi, traceError(IncompleteBody{})
}
@ -570,18 +545,8 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// delete.
defer fsRemoveFile(fsPartPath)
newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
if md5Hex != "" {
if newMD5Hex != md5Hex {
return pi, traceError(BadDigest{md5Hex, newMD5Hex})
}
}
if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
return pi, traceError(SHA256Mismatch{})
}
if err = data.Verify(); err != nil {
return pi, toObjectErr(err, minioMetaTmpBucket, tmpPartPath)
}
partPath := pathJoin(bucket, object, uploadID, partSuffix)
@ -599,7 +564,8 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
}
// Save the object part info in `fs.json`.
fsMeta.AddObjectPart(partID, partSuffix, newMD5Hex, size)
md5Hex := hex.EncodeToString(data.MD5())
fsMeta.AddObjectPart(partID, partSuffix, md5Hex, data.Size())
if _, err = fsMeta.WriteTo(rwlk); err != nil {
partLock.Unlock()
return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
@ -625,7 +591,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
return PartInfo{
PartNumber: partID,
LastModified: fi.ModTime(),
ETag: newMD5Hex,
ETag: md5Hex,
Size: fi.Size(),
}, nil
}


@ -183,7 +183,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
sha256sum := ""
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
_, err = fs.PutObjectPart(bucketName, objectName, uploadID, 1, dataLen, bytes.NewReader(data), md5Hex, sha256sum)
_, err = fs.PutObjectPart(bucketName, objectName, uploadID, 1, NewHashReader(bytes.NewReader(data), dataLen, md5Hex, sha256sum))
if !isSameType(errorCause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}
@ -211,9 +211,8 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
}
md5Hex := getMD5Hash(data)
sha256sum := ""
if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, 5, bytes.NewReader(data), md5Hex, sha256sum); err != nil {
if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, NewHashReader(bytes.NewReader(data), 5, md5Hex, "")); err != nil {
t.Fatal("Unexpected error ", err)
}
@ -252,7 +251,7 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) {
md5Hex := getMD5Hash(data)
sha256sum := ""
if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, 5, bytes.NewReader(data), md5Hex, sha256sum); err != nil {
if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, NewHashReader(bytes.NewReader(data), 5, md5Hex, sha256sum)); err != nil {
t.Fatal("Unexpected error ", err)
}


@ -17,10 +17,8 @@
package cmd
import (
"crypto/md5"
"encoding/hex"
"fmt"
"hash"
"io"
"io/ioutil"
"os"
@ -30,7 +28,6 @@ import (
"syscall"
"github.com/minio/minio/pkg/lock"
"github.com/minio/sha256-simd"
)
// fsObjects - Implements fs object layer.
@ -364,7 +361,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
}()
objInfo, err := fs.PutObject(dstBucket, dstObject, length, pipeReader, metadata, "")
objInfo, err := fs.PutObject(dstBucket, dstObject, NewHashReader(pipeReader, length, metadata["etag"], ""), metadata)
if err != nil {
return oi, toObjectErr(err, dstBucket, dstObject)
}
@ -511,7 +508,7 @@ func (fs fsObjects) parentDirIsObject(bucket, parent string) bool {
// until EOF, writes data directly to configured filesystem path.
// Additionally writes `fs.json` which carries the necessary metadata
// for future object operations.
func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, retErr error) {
func (fs fsObjects) PutObject(bucket string, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
var err error
// Validate if bucket name is valid and exists.
@ -522,12 +519,12 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
// This is a special case with size as '0' and object ends
// with a slash separator, we treat it like a valid operation
// and return success.
if isObjectDir(object, size) {
if isObjectDir(object, data.Size()) {
// Check if an object is present as one of the parent dir.
if fs.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
}
return dirObjectInfo(bucket, object, size, metadata), nil
return dirObjectInfo(bucket, object, data.Size(), metadata), nil
}
if err = checkPutObjectArgs(bucket, object, fs); err != nil {
@ -571,37 +568,14 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
// so that cleaning it up will be easy if the server goes down.
tempObj := mustGetUUID()
// Initialize md5 writer.
md5Writer := md5.New()
hashWriters := []io.Writer{md5Writer}
var sha256Writer hash.Hash
if sha256sum != "" {
sha256Writer = sha256.New()
hashWriters = append(hashWriters, sha256Writer)
}
multiWriter := io.MultiWriter(hashWriters...)
// Limit the reader to its provided size if specified.
var limitDataReader io.Reader
if size > 0 {
// This is done so that we can avoid erroneous clients sending more data than the set content size.
limitDataReader = io.LimitReader(data, size)
} else {
// else we read till EOF.
limitDataReader = data
}
// Allocate a buffer to Read() from request body
bufSize := int64(readSizeV1)
if size > 0 && bufSize > size {
if size := data.Size(); size > 0 && bufSize > size {
bufSize = size
}
buf := make([]byte, int(bufSize))
teeReader := io.TeeReader(limitDataReader, multiWriter)
fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj)
bytesWritten, err := fsCreateFile(fsTmpObjPath, teeReader, buf, size)
bytesWritten, err := fsCreateFile(fsTmpObjPath, data, buf, data.Size())
if err != nil {
fsRemoveFile(fsTmpObjPath)
errorIf(err, "Failed to create object %s/%s", bucket, object)
@ -610,7 +584,7 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
// Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header.
if bytesWritten < size {
if bytesWritten < data.Size() {
fsRemoveFile(fsTmpObjPath)
return ObjectInfo{}, traceError(IncompleteBody{})
}
@ -620,27 +594,11 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
// nothing to delete.
defer fsRemoveFile(fsTmpObjPath)
newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
// Update the md5sum if not set with the newly calculated one.
if len(metadata["etag"]) == 0 {
metadata["etag"] = newMD5Hex
if err = data.Verify(); err != nil { // verify MD5 and SHA256
return ObjectInfo{}, traceError(err)
}
// md5Hex representation.
md5Hex := metadata["etag"]
if md5Hex != "" {
if newMD5Hex != md5Hex {
// Returns md5 mismatch.
return ObjectInfo{}, traceError(BadDigest{md5Hex, newMD5Hex})
}
}
if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
return ObjectInfo{}, traceError(SHA256Mismatch{})
}
}
metadata["etag"] = hex.EncodeToString(data.MD5())
// Entire object was written to the temp location, now it's safe to rename it to the actual location.
fsNSObjPath := pathJoin(fs.fsPath, bucket, object)


@ -64,8 +64,7 @@ func TestFSShutdown(t *testing.T) {
fs := obj.(*fsObjects)
objectContent := "12345"
obj.MakeBucketWithLocation(bucketName, "")
sha256sum := ""
obj.PutObject(bucketName, objectName, int64(len(objectContent)), bytes.NewReader([]byte(objectContent)), nil, sha256sum)
obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
return fs, disk
}
@ -134,10 +133,8 @@ func TestFSPutObject(t *testing.T) {
t.Fatal(err)
}
sha256sum := ""
// With a regular object.
_, err := obj.PutObject(bucketName+"non-existent", objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
_, err := obj.PutObject(bucketName+"non-existent", objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist")
}
@ -146,7 +143,7 @@ func TestFSPutObject(t *testing.T) {
}
// With a directory object.
_, err = obj.PutObject(bucketName+"non-existent", objectName+"/", int64(0), bytes.NewReader([]byte("")), nil, sha256sum)
_, err = obj.PutObject(bucketName+"non-existent", objectName+"/", NewHashReader(bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist")
}
@ -154,11 +151,11 @@ func TestFSPutObject(t *testing.T) {
t.Fatalf("Expected error type BucketNotFound, got %#v", err)
}
_, err = obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatal(err)
}
_, err = obj.PutObject(bucketName, objectName+"/1", int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
_, err = obj.PutObject(bucketName, objectName+"/1", NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, backend corruption occurred")
}
@ -173,7 +170,7 @@ func TestFSPutObject(t *testing.T) {
}
}
_, err = obj.PutObject(bucketName, objectName+"/1/", 0, bytes.NewReader([]byte("")), nil, sha256sum)
_, err = obj.PutObject(bucketName, objectName+"/1/", NewHashReader(bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, backned corruption occurred")
}
@ -201,8 +198,7 @@ func TestFSDeleteObject(t *testing.T) {
objectName := "object"
obj.MakeBucketWithLocation(bucketName, "")
sha256sum := ""
obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
// Test with invalid bucket name
if err := fs.DeleteObject("fo", objectName); !isSameType(errorCause(err), BucketNameInvalid{}) {


@ -17,11 +17,8 @@
package cmd
import (
"crypto/md5"
"encoding/base64"
"encoding/hex"
"fmt"
"hash"
"io"
"net/http"
"net/url"
@ -32,7 +29,6 @@ import (
"github.com/Azure/azure-sdk-for-go/storage"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/sha256-simd"
)
const globalAzureAPIVersion = "2016-05-31"
@ -408,52 +404,16 @@ func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo,
// PutObject - Create a new blob with the incoming data,
// uses Azure equivalent CreateBlockBlobFromReader.
func (a *azureObjects) PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
var sha256Writer hash.Hash
var md5sumWriter hash.Hash
var writers []io.Writer
md5sum := metadata["etag"]
func (a *azureObjects) PutObject(bucket, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error) {
delete(metadata, "etag")
teeReader := data
if sha256sum != "" {
sha256Writer = sha256.New()
writers = append(writers, sha256Writer)
}
if md5sum != "" {
md5sumWriter = md5.New()
writers = append(writers, md5sumWriter)
}
if len(writers) > 0 {
teeReader = io.TeeReader(data, io.MultiWriter(writers...))
}
err = a.client.CreateBlockBlobFromReader(bucket, object, uint64(size), teeReader, s3ToAzureHeaders(metadata))
err = a.client.CreateBlockBlobFromReader(bucket, object, uint64(data.Size()), data, s3ToAzureHeaders(metadata))
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
}
if md5sum != "" {
newMD5sum := hex.EncodeToString(md5sumWriter.Sum(nil))
if newMD5sum != md5sum {
a.client.DeleteBlob(bucket, object, nil)
return ObjectInfo{}, azureToObjectError(traceError(BadDigest{md5sum, newMD5sum}))
}
if err = data.Verify(); err != nil {
a.client.DeleteBlob(bucket, object, nil)
return ObjectInfo{}, azureToObjectError(traceError(err))
}
if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
a.client.DeleteBlob(bucket, object, nil)
return ObjectInfo{}, azureToObjectError(traceError(SHA256Mismatch{}))
}
}
return a.GetObjectInfo(bucket, object)
}
@ -537,39 +497,19 @@ func azureParseBlockID(blockID string) (partID, subPartNumber int, md5Hex string
}
// PutObjectPart - Use Azure equivalent PutBlockWithLength.
func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (info PartInfo, err error) {
func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (info PartInfo, err error) {
if meta := a.metaInfo.get(uploadID); meta == nil {
return info, traceError(InvalidUploadID{})
}
var sha256Writer hash.Hash
var md5sumWriter hash.Hash
var etag string
var writers []io.Writer
if sha256sum != "" {
sha256Writer = sha256.New()
writers = append(writers, sha256Writer)
}
if md5Hex != "" {
md5sumWriter = md5.New()
writers = append(writers, md5sumWriter)
etag = md5Hex
} else {
etag := data.md5Sum
if etag == "" {
// Generate random ETag.
etag = getMD5Hash([]byte(mustGetUUID()))
}
teeReader := data
if len(writers) > 0 {
teeReader = io.TeeReader(data, io.MultiWriter(writers...))
}
subPartSize := int64(azureBlockSize)
subPartNumber := 1
for remainingSize := size; remainingSize >= 0; remainingSize -= subPartSize {
subPartSize, subPartNumber := int64(azureBlockSize), 1
for remainingSize := data.Size(); remainingSize >= 0; remainingSize -= subPartSize {
// Allow to create zero sized part.
if remainingSize == 0 && subPartNumber > 1 {
break
@ -580,33 +520,21 @@ func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int
}
id := azureGetBlockID(partID, subPartNumber, etag)
err = a.client.PutBlockWithLength(bucket, object, id, uint64(subPartSize), io.LimitReader(teeReader, subPartSize), nil)
err = a.client.PutBlockWithLength(bucket, object, id, uint64(subPartSize), io.LimitReader(data, subPartSize), nil)
if err != nil {
return info, azureToObjectError(traceError(err), bucket, object)
}
subPartNumber++
}
if md5Hex != "" {
newMD5sum := hex.EncodeToString(md5sumWriter.Sum(nil))
if newMD5sum != md5Hex {
a.client.DeleteBlob(bucket, object, nil)
return PartInfo{}, azureToObjectError(traceError(BadDigest{md5Hex, newMD5sum}))
}
}
if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
return PartInfo{}, azureToObjectError(traceError(SHA256Mismatch{}))
}
if err = data.Verify(); err != nil {
a.client.DeleteBlob(bucket, object, nil)
return info, azureToObjectError(traceError(err), bucket, object)
}
info.PartNumber = partID
info.ETag = etag
info.LastModified = UTCNow()
info.Size = size
info.Size = data.Size()
return info, nil
}


@ -18,12 +18,10 @@ package cmd
import (
"context"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"hash"
"io"
"math"
"regexp"
@ -720,8 +718,7 @@ func (l *gcsGateway) GetObjectInfo(bucket string, object string) (ObjectInfo, er
}
// PutObject - Create a new object with the incoming data,
func (l *gcsGateway) PutObject(bucket string, key string, size int64, data io.Reader,
metadata map[string]string, sha256sum string) (ObjectInfo, error) {
func (l *gcsGateway) PutObject(bucket string, key string, data *HashReader, metadata map[string]string) (ObjectInfo, error) {
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket
@ -729,15 +726,9 @@ func (l *gcsGateway) PutObject(bucket string, key string, size int64, data io.Re
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket)
}
reader := data
var sha256Writer hash.Hash
if sha256sum != "" {
sha256Writer = sha256.New()
reader = io.TeeReader(data, sha256Writer)
if _, err := hex.DecodeString(metadata["etag"]); err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
}
md5sum := metadata["etag"]
delete(metadata, "etag")
object := l.client.Bucket(bucket).Object(key)
@ -747,17 +738,8 @@ func (l *gcsGateway) PutObject(bucket string, key string, size int64, data io.Re
w.ContentType = metadata["content-type"]
w.ContentEncoding = metadata["content-encoding"]
w.Metadata = metadata
if md5sum != "" {
var err error
w.MD5, err = hex.DecodeString(md5sum)
if err != nil {
// Close the object writer upon error.
w.Close()
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
}
}
if _, err := io.CopyN(w, reader, size); err != nil {
if _, err := io.Copy(w, data); err != nil {
// Close the object writer upon error.
w.Close()
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
@ -765,12 +747,9 @@ func (l *gcsGateway) PutObject(bucket string, key string, size int64, data io.Re
// Close the object writer upon success.
w.Close()
// Verify sha256sum after close.
if sha256sum != "" {
if hex.EncodeToString(sha256Writer.Sum(nil)) != sha256sum {
object.Delete(l.ctx)
return ObjectInfo{}, traceError(SHA256Mismatch{})
}
if err := data.Verify(); err != nil { // Verify sha256sum after close.
object.Delete(l.ctx)
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
}
attrs, err := object.Attrs(l.ctx)
@ -855,65 +834,37 @@ func (l *gcsGateway) checkUploadIDExists(bucket string, key string, uploadID str
}
// PutObjectPart puts a part of object in bucket
func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, partNumber int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, partNumber int, data *HashReader) (PartInfo, error) {
if err := l.checkUploadIDExists(bucket, key, uploadID); err != nil {
return PartInfo{}, err
}
var sha256Writer hash.Hash
var etag string
// Honor etag if client did send md5Hex.
if md5Hex != "" {
etag = md5Hex
} else {
etag := data.md5Sum
if etag == "" {
// Generate random ETag.
etag = getMD5Hash([]byte(mustGetUUID()))
}
reader := data
if sha256sum != "" {
sha256Writer = sha256.New()
reader = io.TeeReader(data, sha256Writer)
}
object := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, partNumber, etag))
w := object.NewWriter(l.ctx)
// Disable "chunked" uploading in GCS client. If enabled, it can cause a corner case
// where it tries to upload 0 bytes in the last chunk and get error from server.
w.ChunkSize = 0
if md5Hex != "" {
var err error
w.MD5, err = hex.DecodeString(md5Hex)
if err != nil {
// Make sure to close object writer upon error.
w.Close()
return PartInfo{}, gcsToObjectError(traceError(err), bucket, key)
}
}
if _, err := io.CopyN(w, reader, size); err != nil {
if _, err := io.Copy(w, data); err != nil {
// Make sure to close object writer upon error.
w.Close()
return PartInfo{}, gcsToObjectError(traceError(err), bucket, key)
}
// Make sure to close the object writer upon success.
w.Close()
// Verify sha256sum after Close().
if sha256sum != "" {
if hex.EncodeToString(sha256Writer.Sum(nil)) != sha256sum {
object.Delete(l.ctx)
return PartInfo{}, traceError(SHA256Mismatch{})
}
if err := data.Verify(); err != nil {
object.Delete(l.ctx)
return PartInfo{}, gcsToObjectError(traceError(err), bucket, key)
}
return PartInfo{
PartNumber: partNumber,
ETag: etag,
LastModified: UTCNow(),
Size: size,
Size: data.Size(),
}, nil
}


@ -281,7 +281,7 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
writeErrorResponse(w, s3Error, r.URL)
return
}
objInfo, err = objectAPI.PutObject(bucket, object, size, reader, metadata, "")
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(reader, size, "", ""), metadata)
case authTypeSignedV2, authTypePresignedV2:
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
@ -289,7 +289,7 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
writeErrorResponse(w, s3Error, r.URL)
return
}
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, "")
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, "", ""), metadata)
case authTypePresigned, authTypeSigned:
if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
@ -303,7 +303,7 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
}
// Create object.
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, "", sha256sum), metadata)
default:
// For all unknown auth types return error.
writeErrorResponse(w, ErrAccessDenied, r.URL)


@ -330,32 +330,18 @@ func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
return fromMinioClientObjectInfo(bucket, oi), nil
}
// Decodes hex encoded md5, sha256 into their raw byte representations.
func getMD5AndSha256SumBytes(md5Hex, sha256Hex string) (md5Bytes, sha256Bytes []byte, err error) {
if md5Hex != "" {
md5Bytes, err = hex.DecodeString(md5Hex)
if err != nil {
return nil, nil, err
}
}
if sha256Hex != "" {
sha256Bytes, err = hex.DecodeString(sha256Hex)
if err != nil {
return nil, nil, err
}
}
return md5Bytes, sha256Bytes, nil
}
// PutObject creates a new object with the incoming data,
func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, e error) {
md5Bytes, sha256Bytes, err := getMD5AndSha256SumBytes(metadata["etag"], sha256sum)
func (l *s3Objects) PutObject(bucket string, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error) {
sha256sumBytes, err := hex.DecodeString(data.sha256Sum)
if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
md5sumBytes, err := hex.DecodeString(metadata["etag"])
if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
delete(metadata, "etag")
oi, err := l.Client.PutObject(bucket, object, size, data, md5Bytes, sha256Bytes, toMinioClientMetadata(metadata))
oi, err := l.Client.PutObject(bucket, object, data.Size(), data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata))
if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
@ -492,13 +478,18 @@ func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
}
// PutObjectPart puts a part of object in bucket
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) {
md5Bytes, sha256Bytes, err := getMD5AndSha256SumBytes(md5Hex, sha256sum)
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *HashReader) (pi PartInfo, e error) {
md5HexBytes, err := hex.DecodeString(data.md5Sum)
if err != nil {
return pi, s3ToObjectError(traceError(err), bucket, object)
return pi, err
}
info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, size, data, md5Bytes, sha256Bytes)
sha256sumBytes, err := hex.DecodeString(data.sha256Sum)
if err != nil {
return pi, err
}
info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data.Size(), data, md5HexBytes, sha256sumBytes)
if err != nil {
return pi, err
}


@ -1,58 +0,0 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"testing"
)
// Tests extracting md5/sha256 bytes.
func TestGetMD5AndSha256Bytes(t *testing.T) {
testCases := []struct {
md5Hex string
sha256Hex string
success bool
}{
// Test 1: Hex encoding failure.
{
md5Hex: "a",
sha256Hex: "b",
success: false,
},
// Test 2: Hex encoding success.
{
md5Hex: "91be0b892e47ede9de06aac14ca0369e",
sha256Hex: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
success: true,
},
// Test 3: hex values are empty should return success.
{
md5Hex: "",
sha256Hex: "",
success: true,
},
}
for i, testCase := range testCases {
_, _, err := getMD5AndSha256SumBytes(testCase.md5Hex, testCase.sha256Hex)
if err != nil && testCase.success {
t.Errorf("Test %d: Expected success, but got failure %s", i+1, err)
}
if err == nil && !testCase.success {
t.Errorf("Test %d: Expected failure, but got success", i+1)
}
}
}


@ -65,11 +65,10 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
// case - 1.
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -215,11 +214,10 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
{bucketName, "test-object2", int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
{bucketName, "dir/test-object3", int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -329,11 +327,10 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
// case - 1.
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)


@ -33,8 +33,7 @@ func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
sha256sum := ""
_, err = obj.PutObject("test-getobjectinfo", "Asia/asiapics.jpg", int64(len("asiapics")), bytes.NewBufferString("asiapics"), nil, sha256sum)
_, err = obj.PutObject("test-getobjectinfo", "Asia/asiapics.jpg", NewHashReader(bytes.NewBufferString("asiapics"), int64(len("asiapics")), "", ""), nil)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}


@ -16,7 +16,15 @@
package cmd
import "io"
import (
"bytes"
"crypto/md5"
"encoding/hex"
"hash"
"io"
sha256 "github.com/minio/sha256-simd"
)
// ObjectLayer implements primitives for object API layer.
type ObjectLayer interface {
@ -34,7 +42,7 @@ type ObjectLayer interface {
// Object operations.
GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error)
GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)
PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error)
PutObject(bucket, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error)
CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error)
DeleteObject(bucket, object string) error
@ -42,7 +50,7 @@ type ObjectLayer interface {
ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error)
CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error)
PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (info PartInfo, err error)
PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (info PartInfo, err error)
ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error)
AbortMultipartUpload(bucket, object, uploadID string) error
CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (objInfo ObjectInfo, err error)
@ -55,3 +63,81 @@ type ObjectLayer interface {
ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (ListMultipartsInfo, error)
}
// HashReader writes what it reads from an io.Reader to an MD5 and SHA256 hash.Hash.
// HashReader verifies that the content of the io.Reader matches the expected checksums.
type HashReader struct {
src io.Reader
size int64
md5Hash, sha256Hash hash.Hash
md5Sum, sha256Sum string // hex representation
}
// NewHashReader returns a new HashReader computing the MD5 sum and SHA256 sum
// (if set) of the provided io.Reader.
func NewHashReader(src io.Reader, size int64, md5Sum, sha256Sum string) *HashReader {
var sha256Hash hash.Hash
if sha256Sum != "" {
sha256Hash = sha256.New()
}
if size >= 0 {
src = io.LimitReader(src, size)
} else {
size = -1
}
return &HashReader{
src: src,
size: size,
md5Sum: md5Sum,
sha256Sum: sha256Sum,
md5Hash: md5.New(),
sha256Hash: sha256Hash,
}
}
func (r *HashReader) Read(p []byte) (n int, err error) {
n, err = r.src.Read(p)
if err != nil && err != io.EOF {
return
}
if r.md5Hash != nil {
r.md5Hash.Write(p[:n])
}
if r.sha256Hash != nil {
r.sha256Hash.Write(p[:n])
}
return
}
// Size returns the absolute number of bytes the HashReader
// will return during reading. It returns -1 for unlimited
// data.
func (r *HashReader) Size() int64 { return r.size }
// MD5 returns the MD5 sum of the processed data. Any
// further reads will change the MD5 sum.
func (r *HashReader) MD5() []byte { return r.md5Hash.Sum(nil) }
// Verify verifies if the computed MD5 sum - and SHA256 sum - are
// equal to the ones specified when creating the HashReader.
func (r *HashReader) Verify() error {
if r.sha256Hash != nil {
sha256Sum, err := hex.DecodeString(r.sha256Sum)
if err != nil {
return SHA256Mismatch{}
}
if !bytes.Equal(sha256Sum, r.sha256Hash.Sum(nil)) {
return errContentSHA256Mismatch
}
}
if r.md5Hash != nil && r.md5Sum != "" {
md5Sum, err := hex.DecodeString(r.md5Sum)
if err != nil {
return BadDigest{r.md5Sum, hex.EncodeToString(r.md5Hash.Sum(nil))}
}
if sum := r.md5Hash.Sum(nil); !bytes.Equal(md5Sum, sum) {
return BadDigest{r.md5Sum, hex.EncodeToString(sum)}
}
}
return nil
}
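
A hedged usage sketch of the reader (the payload and discard sink are
illustrative; getMD5Hash is the test helper already used elsewhere in this
change):

    // Wrap a payload with its expected hex-encoded MD5, drain the reader,
    // then verify the checksums accumulated during reading.
    payload := []byte("hello, world")
    r := NewHashReader(bytes.NewReader(payload), int64(len(payload)), getMD5Hash(payload), "")
    if _, err := io.Copy(ioutil.Discard, r); err != nil {
        // handle read error
    }
    if err := r.Verify(); err != nil {
        // BadDigest (MD5 mismatch) or errContentSHA256Mismatch
    }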


@ -64,9 +64,8 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
{"obj1", "obj1", nil},
{"obj2", "obj2", nil},
}
sha256sum := ""
for _, object := range testObjects {
_, err = obj.PutObject(testBuckets[0], object.name, int64(len(object.content)), bytes.NewBufferString(object.content), object.meta, sha256sum)
_, err = obj.PutObject(testBuckets[0], object.name, NewHashReader(bytes.NewBufferString(object.content), int64(len(object.content)), object.meta["etag"], ""), object.meta)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -605,11 +604,10 @@ func BenchmarkListObjects(b *testing.B) {
b.Fatal(err)
}
sha256sum := ""
// Insert objects to be listed and benchmarked later.
for i := 0; i < 20000; i++ {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject(bucket, key, int64(len(key)), bytes.NewBufferString(key), nil, sha256sum)
_, err = obj.PutObject(bucket, key, NewHashReader(bytes.NewBufferString(key), int64(len(key)), "", ""), nil)
if err != nil {
b.Fatal(err)
}


@ -218,7 +218,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, sha256sum)
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -232,7 +232,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
// Object part upload should fail with quorum not available.
testCase := createPartCases[len(createPartCases)-1]
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, sha256sum)
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err == nil {
t.Fatalf("Test %s: expected to fail but passed instead", instanceType)
}
@ -347,7 +347,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
// Validate all the test cases.
for i, testCase := range testCases {
actualInfo, actualErr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, testCase.inputSHA256)
actualInfo, actualErr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, testCase.inputSHA256))
// All the test cases above are expected to fail.
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
@ -481,7 +481,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, sha256sum)
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1336,7 +1336,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, sha256sum)
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1576,7 +1576,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, sha256sum)
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1825,7 +1825,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, part := range parts {
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID, part.intputDataSize, bytes.NewBufferString(part.inputReaderData), part.inputMd5, sha256sum)
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID, NewHashReader(bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err)
}


@ -154,7 +154,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
}
for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewReader(testCase.inputData), testCase.inputMeta, testCase.inputSHA256)
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, NewHashReader(bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta)
actualErr = errorCause(actualErr)
if actualErr != nil && testCase.expectedError == nil {
t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error())
@ -228,7 +228,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
sha256sum := ""
for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewReader(testCase.inputData), testCase.inputMeta, sha256sum)
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, NewHashReader(bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errorCause(actualErr)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
@ -278,7 +278,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
InsufficientWriteQuorum{},
}
_, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewReader(testCase.inputData), testCase.inputMeta, sha256sum)
_, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, NewHashReader(bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errorCause(actualErr)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error())
@ -310,9 +310,8 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk
}
data := []byte("hello, world")
sha256sum := ""
// Create object.
_, err = obj.PutObject(bucket, object, int64(len(data)), bytes.NewReader(data), nil, sha256sum)
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""), nil)
if err != nil {
// Failed to create object, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -357,7 +356,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
md5Writer.Write(fiveMBBytes)
etag1 := hex.EncodeToString(md5Writer.Sum(nil))
sha256sum := ""
_, err = obj.PutObjectPart(bucket, object, uploadID, 1, int64(len(fiveMBBytes)), bytes.NewReader(fiveMBBytes), etag1, sha256sum)
_, err = obj.PutObjectPart(bucket, object, uploadID, 1, NewHashReader(bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum))
if err != nil {
// Failed to upload object part, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -368,7 +367,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
md5Writer = md5.New()
md5Writer.Write(data)
etag2 := hex.EncodeToString(md5Writer.Sum(nil))
_, err = obj.PutObjectPart(bucket, object, uploadID, 2, int64(len(data)), bytes.NewReader(data), etag2, sha256sum)
_, err = obj.PutObjectPart(bucket, object, uploadID, 2, NewHashReader(bytes.NewReader(data), int64(len(data)), etag2, sha256sum))
if err != nil {
// Failed to upload object part, abort.
t.Fatalf("%s : %s", instanceType, err.Error())


@ -19,7 +19,6 @@ package cmd
import (
"encoding/hex"
"fmt"
"io"
"path"
"runtime"
"strings"
@ -269,27 +268,3 @@ type byBucketName []BucketInfo
func (d byBucketName) Len() int { return len(d) }
func (d byBucketName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d byBucketName) Less(i, j int) bool { return d[i].Name < d[j].Name }
// rangeReader returns a Reader that reads from r
// but returns error after Max bytes read as errDataTooLarge.
// but returns error if reader exits before reading Min bytes
// errDataTooSmall.
type rangeReader struct {
Reader io.Reader // underlying reader
Min int64 // min bytes remaining
Max int64 // max bytes remaining
}
func (l *rangeReader) Read(p []byte) (n int, err error) {
n, err = l.Reader.Read(p)
l.Max -= int64(n)
l.Min -= int64(n)
if l.Max < 0 {
// If more data is available than what is expected we return error.
return 0, errDataTooLarge
}
if err == io.EOF && l.Min > 0 {
return 0, errDataTooSmall
}
return
}


@ -17,8 +17,6 @@
package cmd
import (
"io/ioutil"
"strings"
"testing"
)
@ -131,34 +129,6 @@ func TestIsValidObjectName(t *testing.T) {
}
}
// Tests rangeReader.
func TestRangeReader(t *testing.T) {
testCases := []struct {
data string
minLen int64
maxLen int64
err error
}{
{"1234567890", 0, 15, nil},
{"1234567890", 0, 10, nil},
{"1234567890", 0, 5, toObjectErr(errDataTooLarge, "test", "test")},
{"123", 5, 10, toObjectErr(errDataTooSmall, "test", "test")},
{"123", 2, 10, nil},
}
for i, test := range testCases {
r := strings.NewReader(test.data)
_, err := ioutil.ReadAll(&rangeReader{
Reader: r,
Min: test.minLen,
Max: test.maxLen,
})
if toObjectErr(err, "test", "test") != test.err {
t.Fatalf("test %d failed: expected %v, got %v", i+1, test.err, err)
}
}
}
// Tests getCompleteMultipartMD5
func TestGetCompleteMultipartMD5(t *testing.T) {
testCases := []struct {


@ -546,7 +546,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
return
}
// Create anonymous object.
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, metadata["etag"], sha256sum), metadata)
case authTypeStreamingSigned:
// Initialize stream signature verifier.
reader, s3Error := newSignV4ChunkedReader(r)
@ -555,7 +555,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(w, s3Error, r.URL)
return
}
objInfo, err = objectAPI.PutObject(bucket, object, size, reader, metadata, sha256sum)
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(reader, size, metadata["etag"], sha256sum), metadata)
case authTypeSignedV2, authTypePresignedV2:
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
@ -563,7 +563,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(w, s3Error, r.URL)
return
}
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, metadata["etag"], sha256sum), metadata)
case authTypePresigned, authTypeSigned:
if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
@ -574,7 +574,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
sha256sum = r.Header.Get("X-Amz-Content-Sha256")
}
// Create object.
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, metadata["etag"], sha256sum), metadata)
}
if err != nil {
errorIf(err, "Unable to create an object. %s", r.URL.Path)
@ -836,7 +836,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return
}
// No need to verify signature, anonymous request access is already allowed.
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum)
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(r.Body, size, incomingMD5, sha256sum))
case authTypeStreamingSigned:
// Initialize stream signature verifier.
reader, s3Error := newSignV4ChunkedReader(r)
@ -845,7 +845,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
writeErrorResponse(w, s3Error, r.URL)
return
}
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, incomingMD5, sha256sum)
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(reader, size, incomingMD5, sha256sum))
case authTypeSignedV2, authTypePresignedV2:
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
@ -853,7 +853,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
writeErrorResponse(w, s3Error, r.URL)
return
}
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum)
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(r.Body, size, incomingMD5, sha256sum))
case authTypePresigned, authTypeSigned:
if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
@ -864,7 +864,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
if !skipContentSha256Cksum(r) {
sha256sum = r.Header.Get("X-Amz-Content-Sha256")
}
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum)
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(r.Body, size, incomingMD5, sha256sum))
}
if err != nil {
errorIf(err, "Unable to create object part.")
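For orientation, here is the flow a handler sets in motion, spelled out end to end. This is a hypothetical sketch, not code from this commit; in the refactor the ObjectLayer drains the reader and calls Verify itself:

// Hypothetical sketch; assumes "io", "io/ioutil", "net/http".
// The client-declared SHA256 travels inside the HashReader and is
// only checked once the body has been fully consumed.
func drainAndVerify(r *http.Request) error {
	sha256sum := r.Header.Get("X-Amz-Content-Sha256")
	hr := NewHashReader(r.Body, r.ContentLength, "", sha256sum)
	if _, err := io.Copy(ioutil.Discard, hr); err != nil {
		return err
	}
	return hr.Verify() // a tampered body surfaces as SHA256Mismatch
}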

View file

@ -72,11 +72,10 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string,
}{
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err := obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err := obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -220,11 +219,10 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
// case - 1.
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err := obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err := obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -1054,12 +1052,11 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
// case - 1.
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength,
bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err = obj.PutObject(input.bucketName, input.objectName,
NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -1170,11 +1167,10 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
// case - 1.
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -1512,11 +1508,10 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
// used for anonymous HTTP request test.
{bucketName, anonObject, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -2158,8 +2153,8 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
}
// Iterating over createPartCases to generate multipart chunks.
for _, part := range parts {
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID, part.intputDataSize,
bytes.NewBufferString(part.inputReaderData), part.inputMd5, "")
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID,
NewHashReader(bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
if err != nil {
t.Fatalf("%s : %s", instanceType, err)
}
@ -2513,8 +2508,8 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
}
// Iterating over createPartCases to generate multipart chunks.
for _, part := range parts {
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID, part.intputDataSize,
bytes.NewBufferString(part.inputReaderData), part.inputMd5, "")
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID,
NewHashReader(bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
if err != nil {
t.Fatalf("%s : %s", instanceType, err)
}
@ -2657,7 +2652,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, "")
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -3361,8 +3356,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
uploadIDCopy := uploadID
// Create an object part; it will be used to test listing object parts.
_, err = obj.PutObjectPart(bucketName, testObject, uploadID, 1, int64(len("hello")), bytes.NewReader([]byte("hello")),
"5d41402abc4b2a76b9719d911017c592", "")
_, err = obj.PutObjectPart(bucketName, testObject, uploadID, 1, NewHashReader(bytes.NewReader([]byte("hello")), int64(len("hello")), "5d41402abc4b2a76b9719d911017c592", ""))
if err != nil {
t.Fatalf("Minio %s : %s.", instanceType, err)
}

View file

@ -107,7 +107,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, c TestErr
expectedETaghex := getMD5Hash(data)
var calcPartInfo PartInfo
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(data)), bytes.NewBuffer(data), expectedETaghex, "")
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, NewHashReader(bytes.NewBuffer(data), int64(len(data)), expectedETaghex, ""))
if err != nil {
c.Errorf("%s: <ERROR> %s", instanceType, err)
}
@ -157,7 +157,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, c TestErrHan
metadata["md5"] = expectedETaghex
var calcPartInfo PartInfo
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), expectedETaghex, "")
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, NewHashReader(bytes.NewBufferString(randomString), int64(len(randomString)), expectedETaghex, ""))
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -198,7 +198,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, c TestErrH
metadata := make(map[string]string)
metadata["etag"] = expectedETaghex
var objInfo ObjectInfo
objInfo, err = obj.PutObject("bucket", key, int64(len(randomString)), bytes.NewBufferString(randomString), metadata, "")
objInfo, err = obj.PutObject("bucket", key, NewHashReader(bytes.NewBufferString(randomString), int64(len(randomString)), metadata["etag"], ""), metadata)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -251,7 +251,7 @@ func testPaging(obj ObjectLayer, instanceType string, c TestErrHandler) {
// check before paging occurs.
for i := 0; i < 5; i++ {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject("bucket", key, int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", key, NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -271,7 +271,7 @@ func testPaging(obj ObjectLayer, instanceType string, c TestErrHandler) {
// check after paging occurs pages work.
for i := 6; i <= 10; i++ {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject("bucket", key, int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", key, NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -288,11 +288,11 @@ func testPaging(obj ObjectLayer, instanceType string, c TestErrHandler) {
}
// check paging with prefix at end returns fewer objects.
{
_, err = obj.PutObject("bucket", "newPrefix", int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", "newPrefix", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
_, err = obj.PutObject("bucket", "newPrefix2", int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", "newPrefix2", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -330,11 +330,11 @@ func testPaging(obj ObjectLayer, instanceType string, c TestErrHandler) {
// check delimited results with delimiter and prefix.
{
_, err = obj.PutObject("bucket", "this/is/delimited", int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", "this/is/delimited", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
_, err = obj.PutObject("bucket", "this/is/also/a/delimited/file", int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", "this/is/also/a/delimited/file", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -443,14 +443,16 @@ func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, c TestErrHan
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
_, err = obj.PutObject("bucket", "object", int64(len("The list of parts was not in ascending order. The parts list must be specified in order by part number.")), bytes.NewBufferString("The list of parts was not in ascending order. The parts list must be specified in order by part number."), nil, "")
uploadContent := "The list of parts was not in ascending order. The parts list must be specified in order by part number."
length := int64(len(uploadContent))
_, err = obj.PutObject("bucket", "object", NewHashReader(bytes.NewBufferString(uploadContent), length, "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
length := int64(len(uploadContent))
_, err = obj.PutObject("bucket", "object", length, bytes.NewBufferString(uploadContent), nil, "")
uploadContent = "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
length = int64(len(uploadContent))
_, err = obj.PutObject("bucket", "object", NewHashReader(bytes.NewBufferString(uploadContent), length, "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -472,7 +474,7 @@ func (s *ObjectLayerAPISuite) TestNonExistantBucketOperations(c *C) {
// Tests validate that bucket operation on non-existent bucket fails.
func testNonExistantBucketOperations(obj ObjectLayer, instanceType string, c TestErrHandler) {
_, err := obj.PutObject("bucket1", "object", int64(len("one")), bytes.NewBufferString("one"), nil, "")
_, err := obj.PutObject("bucket1", "object", NewHashReader(bytes.NewBufferString("one"), int64(len("one")), "", ""), nil)
if err == nil {
c.Fatal("Expected error but found nil")
}
@ -519,7 +521,7 @@ func testPutObject(obj ObjectLayer, instanceType string, c TestErrHandler) {
}
var bytesBuffer1 bytes.Buffer
_, err = obj.PutObject("bucket", "object", length, readerEOF, nil, "")
_, err = obj.PutObject("bucket", "object", NewHashReader(readerEOF, length, "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -532,7 +534,7 @@ func testPutObject(obj ObjectLayer, instanceType string, c TestErrHandler) {
}
var bytesBuffer2 bytes.Buffer
_, err = obj.PutObject("bucket", "object", length, readerNoEOF, nil, "")
_, err = obj.PutObject("bucket", "object", NewHashReader(readerNoEOF, length, "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -560,7 +562,7 @@ func testPutObjectInSubdir(obj ObjectLayer, instanceType string, c TestErrHandle
uploadContent := `The specified multipart upload does not exist. The upload ID might be invalid, or the multipart
upload might have been aborted or completed.`
length := int64(len(uploadContent))
_, err = obj.PutObject("bucket", "dir1/dir2/object", length, bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", "dir1/dir2/object", NewHashReader(bytes.NewBufferString(uploadContent), length, "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -740,10 +742,9 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string,
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
_, err = obj.PutObject(bucketName, "dir1/dir3/object",
int64(len("The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.")),
bytes.NewBufferString("One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."), nil, "")
content := "One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."
length := int64(len(content))
_, err = obj.PutObject(bucketName, "dir1/dir3/object", NewHashReader(bytes.NewBufferString(content), length, "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
@ -787,7 +788,7 @@ func testContentType(obj ObjectLayer, instanceType string, c TestErrHandler) {
}
uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
// Test empty.
_, err = obj.PutObject("bucket", "minio.png", int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", "minio.png", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}

View file

@ -542,7 +542,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
defer objectLock.Unlock()
sha256sum := ""
objInfo, err := objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
objInfo, err := objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, metadata["etag"], sha256sum), metadata)
if err != nil {
writeWebErrorResponse(w, err)
return

View file

@ -382,8 +382,8 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
}
data := bytes.Repeat([]byte("a"), objectSize)
_, err = obj.PutObject(bucketName, objectName, int64(len(data)), bytes.NewReader(data), map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}, "")
metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
@ -476,16 +476,15 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH
}
data := bytes.Repeat([]byte("a"), objectSize)
_, err = obj.PutObject(bucketName, objectName, int64(len(data)), bytes.NewReader(data),
map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}, "")
metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
}
objectName = "a/object"
_, err = obj.PutObject(bucketName, objectName, int64(len(data)), bytes.NewReader(data),
map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}, "")
metadata = map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
}
@ -865,7 +864,8 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl
}
content := []byte("temporary file's content")
_, err = obj.PutObject(bucketName, objectName, int64(len(content)), bytes.NewReader(content), map[string]string{"etag": "01ce59706106fe5e02e7f55fffda7f34"}, "")
metadata := map[string]string{"etag": "01ce59706106fe5e02e7f55fffda7f34"}
_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(content), int64(len(content)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
}
@ -957,9 +957,9 @@ func testWebHandlerDownloadZip(obj ObjectLayer, instanceType string, t TestErrHa
t.Fatalf("%s : %s", instanceType, err)
}
obj.PutObject(bucket, "a/one", int64(len(fileOne)), strings.NewReader(fileOne), nil, "")
obj.PutObject(bucket, "a/b/two", int64(len(fileTwo)), strings.NewReader(fileTwo), nil, "")
obj.PutObject(bucket, "a/c/three", int64(len(fileThree)), strings.NewReader(fileThree), nil, "")
obj.PutObject(bucket, "a/one", NewHashReader(strings.NewReader(fileOne), int64(len(fileOne)), "", ""), nil)
obj.PutObject(bucket, "a/b/two", NewHashReader(strings.NewReader(fileTwo), int64(len(fileTwo)), "", ""), nil)
obj.PutObject(bucket, "a/c/three", NewHashReader(strings.NewReader(fileThree), int64(len(fileThree)), "", ""), nil)
test := func(token string) (int, []byte) {
rec := httptest.NewRecorder()
@ -1043,7 +1043,8 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH
}
data := bytes.Repeat([]byte("a"), objectSize)
_, err = obj.PutObject(bucketName, objectName, int64(len(data)), bytes.NewReader(data), map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}, "")
metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
}

View file

@ -220,7 +220,7 @@ func TestListOnlineDisks(t *testing.T) {
t.Fatalf("Failed to make a bucket %v", err)
}
_, err = obj.PutObject(bucket, object, int64(len(data)), bytes.NewReader(data), nil, "")
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""), nil)
if err != nil {
t.Fatalf("Failed to putObject %v", err)
}
@ -358,7 +358,7 @@ func TestDisksWithAllParts(t *testing.T) {
t.Fatalf("Failed to make a bucket %v", err)
}
_, err = obj.PutObject(bucket, object, int64(len(data)), bytes.NewReader(data), nil, "")
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""), nil)
if err != nil {
t.Fatalf("Failed to putObject %v", err)
}

View file

@ -491,7 +491,7 @@ func TestHealObjectXL(t *testing.T) {
var uploadedParts []completePart
for _, partID := range []int{2, 1} {
pInfo, err1 := obj.PutObjectPart(bucket, object, uploadID, partID, int64(len(data)), bytes.NewReader(data), "", "")
pInfo, err1 := obj.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""))
if err1 != nil {
t.Fatalf("Failed to upload a part - %v", err1)
}

View file

@ -56,14 +56,14 @@ func TestListObjectsHeal(t *testing.T) {
// Put 5 objects under sane dir
for i := 0; i < 5; i++ {
_, err = xl.PutObject(bucketName, "sane/"+objName+strconv.Itoa(i), int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
_, err = xl.PutObject(bucketName, "sane/"+objName+strconv.Itoa(i), NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatalf("XL Object upload failed: <ERROR> %s", err)
}
}
// Put 5 objects under unsane/subdir dir
for i := 0; i < 5; i++ {
_, err = xl.PutObject(bucketName, "unsane/subdir/"+objName+strconv.Itoa(i), int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
_, err = xl.PutObject(bucketName, "unsane/subdir/"+objName+strconv.Itoa(i), NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatalf("XL Object upload failed: <ERROR> %s", err)
}
@ -181,7 +181,7 @@ func TestListUploadsHeal(t *testing.T) {
// Upload a part.
data := bytes.Repeat([]byte("a"), 1024)
_, err = xl.PutObjectPart(bucketName, objName, uploadID, 1,
int64(len(data)), bytes.NewReader(data), "", "")
NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""))
if err != nil {
t.Fatal(err)
}

View file

@ -64,11 +64,10 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *tes
// case - 1.
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -151,7 +150,7 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, perr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, sha256sum)
_, perr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if perr != nil {
t.Fatalf("%s : %s", instanceType, perr)
}

View file

@ -17,10 +17,8 @@
package cmd
import (
"crypto/md5"
"encoding/hex"
"fmt"
"hash"
"io"
"io/ioutil"
"path"
@ -29,7 +27,6 @@ import (
"time"
"github.com/minio/minio/pkg/mimedb"
"github.com/minio/sha256-simd"
)
// updateUploadJSON - add or remove upload ID info in all `uploads.json`.
@ -558,7 +555,7 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
}()
partInfo, err := xl.PutObjectPart(dstBucket, dstObject, uploadID, partID, length, pipeReader, "", "")
partInfo, err := xl.PutObjectPart(dstBucket, dstObject, uploadID, partID, NewHashReader(pipeReader, length, "", ""))
if err != nil {
return pi, toObjectErr(err, dstBucket, dstObject)
}
@ -575,7 +572,7 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
// of the multipart transaction.
//
// Implements S3 compatible Upload Part API.
func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) {
func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (pi PartInfo, e error) {
if err := checkPutObjectPartArgs(bucket, object, xl); err != nil {
return pi, err
}
@ -623,31 +620,10 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
tmpPart := mustGetUUID()
tmpPartPath := path.Join(tmpPart, partSuffix)
// Initialize md5 writer.
md5Writer := md5.New()
writers := []io.Writer{md5Writer}
var sha256Writer hash.Hash
if sha256sum != "" {
sha256Writer = sha256.New()
writers = append(writers, sha256Writer)
}
mw := io.MultiWriter(writers...)
var lreader = data
// Limit the reader to its provided size > 0.
if size > 0 {
// This is done so that we can avoid erroneous clients sending
// more data than the set content size.
lreader = io.LimitReader(data, size)
}
// Delete the temporary object part. If PutObjectPart succeeds there would be nothing to delete.
defer xl.deleteObject(minioMetaTmpBucket, tmpPart)
if size > 0 {
if pErr := xl.prepareFile(minioMetaTmpBucket, tmpPartPath, size, onlineDisks, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks); err != nil {
if data.Size() > 0 {
if pErr := xl.prepareFile(minioMetaTmpBucket, tmpPartPath, data.Size(), onlineDisks, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks); pErr != nil {
return pi, toObjectErr(pErr, bucket, object)
}
@ -658,37 +634,19 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
return pi, toObjectErr(err, bucket, object)
}
buffer := make([]byte, xlMeta.Erasure.BlockSize, 2*xlMeta.Erasure.BlockSize) // alloc additional space for parity blocks created while erasure coding
file, err := storage.CreateFile(io.TeeReader(lreader, mw), minioMetaTmpBucket, tmpPartPath, buffer, DefaultBitrotAlgorithm, xl.writeQuorum)
file, err := storage.CreateFile(data, minioMetaTmpBucket, tmpPartPath, buffer, DefaultBitrotAlgorithm, xl.writeQuorum)
if err != nil {
return pi, toObjectErr(err, bucket, object)
}
// Should return IncompleteBody{} error when reader has fewer bytes
// than specified in request header.
if file.Size < size {
if file.Size < data.Size() {
return pi, traceError(IncompleteBody{})
}
// For size == -1, perhaps the client is sending chunked encoding;
// set the size to what was actually written.
if size == -1 {
size = file.Size
}
// Calculate new md5sum.
newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
if md5Hex != "" {
if newMD5Hex != md5Hex {
// Returns md5 mismatch.
return pi, traceError(BadDigest{md5Hex, newMD5Hex})
}
}
if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
return pi, traceError(SHA256Mismatch{})
}
if err = data.Verify(); err != nil {
return pi, toObjectErr(err, bucket, object)
}
// post-upload check (write) lock
@ -730,7 +688,8 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
xlMeta.Stat.ModTime = UTCNow()
// Add the current part.
xlMeta.AddObjectPart(partID, partSuffix, newMD5Hex, size)
md5Hex := hex.EncodeToString(data.MD5())
xlMeta.AddObjectPart(partID, partSuffix, md5Hex, file.Size)
for i, disk := range onlineDisks {
if disk == OfflineDisk {
@ -762,7 +721,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
return PartInfo{
PartNumber: partID,
LastModified: fi.ModTime,
ETag: newMD5Hex,
ETag: md5Hex,
Size: fi.Size,
}, nil
}
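After this refactor every call site follows the same construct-and-store shape, with digest and size enforcement riding inside the reader. A test-style usage sketch under the same assumptions as the tests in this diff (an initialized ObjectLayer and an open multipart upload):

// Test-style sketch; getMD5Hash and humanize.MiByte are the helpers
// already used by the tests in this diff; xl and uploadID are assumed.
func uploadOnePart(xl ObjectLayer, uploadID string) error {
	data := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
	// The expected MD5 rides along in the HashReader; a corrupted
	// stream is rejected inside PutObjectPart as BadDigest.
	_, err := xl.PutObjectPart("bucket1", "mpartObj1", uploadID, 1,
		NewHashReader(bytes.NewReader(data), int64(len(data)), getMD5Hash(data), ""))
	return err
}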

View file

@ -17,9 +17,7 @@
package cmd
import (
"crypto/md5"
"encoding/hex"
"hash"
"io"
"path"
"strconv"
@ -29,7 +27,6 @@ import (
"github.com/minio/minio/pkg/bpool"
"github.com/minio/minio/pkg/mimedb"
"github.com/minio/minio/pkg/objcache"
"github.com/minio/sha256-simd"
)
// list all errors which can be ignored in object operations.
@ -117,7 +114,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
}()
objInfo, err := xl.PutObject(dstBucket, dstObject, length, pipeReader, metadata, "")
objInfo, err := xl.PutObject(dstBucket, dstObject, NewHashReader(pipeReader, length, metadata["etag"], ""), metadata)
if err != nil {
return oi, toObjectErr(err, dstBucket, dstObject)
}
@ -432,18 +429,18 @@ func renameObject(disks []StorageAPI, srcBucket, srcObject, dstBucket, dstObject
// until EOF, erasure codes the data across all disks and additionally
// writes `xl.json` which carries the necessary metadata for future
// object operations.
func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error) {
// This is a special case with size '0' and the object name ending
// in a slash separator; we treat it as a valid operation and
// return success.
if isObjectDir(object, size) {
if isObjectDir(object, data.Size()) {
// Check if an object is present as one of the parent dir.
// -- FIXME. (needs a new kind of lock).
// -- FIXME (this also causes a performance issue when disks are down).
if xl.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
}
return dirObjectInfo(bucket, object, size, metadata), nil
return dirObjectInfo(bucket, object, data.Size(), metadata), nil
}
// Validate put object input args.
@ -466,54 +463,27 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
uniqueID := mustGetUUID()
tempObj := uniqueID
// Initialize md5 writer.
md5Writer := md5.New()
writers := []io.Writer{md5Writer}
var sha256Writer hash.Hash
if sha256sum != "" {
sha256Writer = sha256.New()
writers = append(writers, sha256Writer)
}
// Limit the reader to its provided size if specified.
var reader io.Reader = data
// Proceed to set the cache.
var newBuffer io.WriteCloser
// If caching is enabled, proceed to set the cache.
if size > 0 && xl.objCacheEnabled {
if data.Size() > 0 && xl.objCacheEnabled {
// PutObject invalidates any previously cached object in memory.
xl.objCache.Delete(path.Join(bucket, object))
// Create a new entry in memory of size.
newBuffer, err = xl.objCache.Create(path.Join(bucket, object), size)
if err == nil {
// Create a multi writer to write to both memory and client response.
writers = append(writers, newBuffer)
}
newBuffer, err = xl.objCache.Create(path.Join(bucket, object), data.Size())
// Ignore error if cache is full, proceed to write the object.
if err != nil && err != objcache.ErrCacheFull {
// For any other error return here.
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
}
reader = io.TeeReader(data, newBuffer)
}
mw := io.MultiWriter(writers...)
// Limit the reader to its provided size if specified.
var limitDataReader io.Reader
if size > 0 {
// This is done so that we can avoid erroneous clients sending
// more data than the set content size.
limitDataReader = io.LimitReader(data, size)
} else {
// else we read till EOF.
limitDataReader = data
}
// Tee reader combines the incoming data stream and md5; data read from the input stream is written to md5.
teeReader := io.TeeReader(limitDataReader, mw)
// Initialize parts metadata
partsMetadata := make([]xlMetaV1, len(xl.storageDisks))
@ -550,7 +520,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
// Calculate the size of the current part; if size is unknown, curPartSize will be unknown too.
// allowEmptyPart will always be true if this is the first part and false otherwise.
var curPartSize int64
curPartSize, err = getPartSizeFromIdx(size, globalPutPartSize, partIdx)
curPartSize, err = getPartSizeFromIdx(data.Size(), globalPutPartSize, partIdx)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
@ -564,7 +534,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
}
}
file, erasureErr := storage.CreateFile(io.LimitReader(teeReader, globalPutPartSize), minioMetaTmpBucket, tempErasureObj, buffer, DefaultBitrotAlgorithm, xl.writeQuorum)
file, erasureErr := storage.CreateFile(io.LimitReader(reader, globalPutPartSize), minioMetaTmpBucket, tempErasureObj, buffer, DefaultBitrotAlgorithm, xl.writeQuorum)
if erasureErr != nil {
return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
}
@ -596,7 +566,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
// Check part size for the next index.
var partSize int64
partSize, err = getPartSizeFromIdx(size, globalPutPartSize, partIdx+1)
partSize, err = getPartSizeFromIdx(data.Size(), globalPutPartSize, partIdx+1)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
@ -605,25 +575,17 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
}
}
// For size == -1, perhaps the client is sending chunked encoding;
// set the size to what was actually written.
if size == -1 {
size = sizeWritten
} else {
// Check if stored data satisfies what is asked
if sizeWritten < size {
return ObjectInfo{}, traceError(IncompleteBody{})
}
if size := data.Size(); size > 0 && sizeWritten < size {
return ObjectInfo{}, traceError(IncompleteBody{})
}
// Save additional erasureMetadata.
modTime := UTCNow()
newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
// Update the md5sum if not set with the newly calculated one.
if len(metadata["etag"]) == 0 {
metadata["etag"] = newMD5Hex
if err = data.Verify(); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
metadata["etag"] = hex.EncodeToString(data.MD5())
// Guess content-type from the extension if possible.
if metadata["content-type"] == "" {
@ -634,22 +596,6 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
}
}
// md5Hex representation.
md5Hex := metadata["etag"]
if md5Hex != "" {
if newMD5Hex != md5Hex {
// Returns md5 mismatch.
return ObjectInfo{}, traceError(BadDigest{md5Hex, newMD5Hex})
}
}
if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
return ObjectInfo{}, traceError(SHA256Mismatch{})
}
}
if xl.isObject(bucket, object) {
// Rename if an object already exists to temporary location.
newUniqueID := mustGetUUID()
@ -670,7 +616,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
// Update `xl.json` content on each disk.
for index := range partsMetadata {
partsMetadata[index].Meta = metadata
partsMetadata[index].Stat.Size = size
partsMetadata[index].Stat.Size = sizeWritten
partsMetadata[index].Stat.ModTime = modTime
}
@ -686,7 +632,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
// Once we have successfully renamed the object, close the buffer, which
// saves the object in the cache.
if size > 0 && xl.objCacheEnabled && newBuffer != nil {
if sizeWritten > 0 && xl.objCacheEnabled && newBuffer != nil {
newBuffer.Close()
}
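A side effect of moving the digest accounting into the reader: the io.MultiWriter fan-out above collapses to a single io.TeeReader whose only remaining sink is the object cache. The standard-library pattern in isolation, with a bytes.Buffer standing in for objcache:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// Every byte the consumer reads through tee is also written into
// cache; the cache fills as a side effect of the upload read path.
func main() {
	var cache bytes.Buffer
	tee := io.TeeReader(strings.NewReader("payload"), &cache)
	if _, err := io.Copy(ioutil.Discard, tee); err != nil {
		panic(err)
	}
	fmt.Println(cache.String()) // "payload"
}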

View file

@ -53,12 +53,12 @@ func TestRepeatPutObjectPart(t *testing.T) {
}
fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
md5Hex := getMD5Hash(fiveMBBytes)
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, 5*humanize.MiByte, bytes.NewReader(fiveMBBytes), md5Hex, "")
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, NewHashReader(bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
if err != nil {
t.Fatal(err)
}
// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, 5*humanize.MiByte, bytes.NewReader(fiveMBBytes), md5Hex, "")
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, NewHashReader(bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
if err != nil {
t.Fatal(err)
}
@ -92,7 +92,7 @@ func TestXLDeleteObjectBasic(t *testing.T) {
}
// Create object "obj" under bucket "bucket" for Test 7 to pass
_, err = xl.PutObject("bucket", "obj", int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
_, err = xl.PutObject("bucket", "obj", NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatalf("XL Object upload failed: <ERROR> %s", err)
}
@ -128,7 +128,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
bucket := "bucket"
object := "object"
// Create object "obj" under bucket "bucket".
_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatal(err)
}
@ -143,7 +143,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
}
// Create "obj" under "bucket".
_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatal(err)
}
@ -178,7 +178,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
bucket := "bucket"
object := "object"
// Create "object" under "bucket".
_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatal(err)
}
@ -230,7 +230,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
bucket := "bucket"
object := "object"
// Create "object" under "bucket".
_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatal(err)
}
@ -253,7 +253,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
}
}
// Upload new content to same object "object"
_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
err = errorCause(err)
if err != toObjectErr(errXLWriteQuorum, bucket, object) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
@ -294,7 +294,7 @@ func TestHealing(t *testing.T) {
t.Fatal(err)
}
_, err = obj.PutObject(bucket, object, length, bytes.NewReader(data), nil, "")
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader(data), length, "", ""), nil)
if err != nil {
t.Fatal(err)
}