Allow Compression + encryption (#11103)

Klaus Post authored on 2021-01-05 20:08:35 -08:00, committed by GitHub
parent 97a4c120e9
commit eb9172eecb
14 changed files with 399 additions and 234 deletions

View file

@@ -26,19 +26,22 @@ import (
 // Config represents the compression settings.
 type Config struct {
 	Enabled        bool     `json:"enabled"`
+	AllowEncrypted bool     `json:"allow_encryption"`
 	Extensions     []string `json:"extensions"`
 	MimeTypes      []string `json:"mime-types"`
 }

 // Compression environment variables
 const (
 	Extensions     = "extensions"
+	AllowEncrypted = "allow_encryption"
 	MimeTypes      = "mime_types"

 	EnvCompressState           = "MINIO_COMPRESS_ENABLE"
+	EnvCompressAllowEncryption = "MINIO_COMPRESS_ALLOW_ENCRYPTION"
 	EnvCompressExtensions      = "MINIO_COMPRESS_EXTENSIONS"
 	EnvCompressMimeTypes       = "MINIO_COMPRESS_MIME_TYPES"

 	// Include-list for compression.
 	DefaultExtensions = ".txt,.log,.csv,.json,.tar,.xml,.bin"
@@ -52,6 +55,10 @@ var (
 		config.KV{
 			Key:   config.Enable,
 			Value: config.EnableOff,
 		},
+		config.KV{
+			Key:   AllowEncrypted,
+			Value: config.EnableOff,
+		},
 		config.KV{
 			Key:   Extensions,
 			Value: DefaultExtensions,
@@ -101,6 +108,12 @@ func LookupConfig(kvs config.KVS) (Config, error) {
 		return cfg, nil
 	}

+	allowEnc := env.Get(EnvCompressAllowEncryption, kvs.Get(AllowEncrypted))
+	cfg.AllowEncrypted, err = config.ParseBool(allowEnc)
+	if err != nil {
+		return cfg, err
+	}
+
 	compressExtensions := env.Get(EnvCompressExtensions, kvs.Get(Extensions))
 	compressMimeTypes := env.Get(EnvCompressMimeTypes, kvs.Get(MimeTypes))
 	compressMimeTypesLegacy := env.Get(EnvCompressMimeTypesLegacy, kvs.Get(MimeTypes))
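Aside (not part of the commit): the lookup added above gives the MINIO_COMPRESS_ALLOW_ENCRYPTION environment variable precedence over the stored allow_encryption KV. A minimal standalone sketch of that precedence, with strconv.ParseBool standing in for MinIO's config.ParseBool (which also accepts values like "on"/"off"):

    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    // lookupAllowEncryption mirrors the env-over-config pattern used in
    // LookupConfig: the environment variable wins over the persisted value.
    func lookupAllowEncryption(storedKV string) (bool, error) {
        v := os.Getenv("MINIO_COMPRESS_ALLOW_ENCRYPTION")
        if v == "" {
            v = storedKV // fall back to the stored config KV
        }
        return strconv.ParseBool(v)
    }

    func main() {
        os.Setenv("MINIO_COMPRESS_ALLOW_ENCRYPTION", "true")
        on, err := lookupAllowEncryption("false")
        fmt.Println(on, err) // true <nil>: the env override wins
    }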

View file

@@ -353,9 +353,7 @@ func newDecryptReaderWithObjectKey(client io.Reader, objectEncryptionKey []byte,
 // DecryptBlocksRequestR - same as DecryptBlocksRequest but with a
 // reader
-func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, offset,
-	length int64, seqNumber uint32, partStart int, oi ObjectInfo, copySource bool) (
-	io.Reader, error) {
+func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, seqNumber uint32, partStart int, oi ObjectInfo, copySource bool) (io.Reader, error) {

 	bucket, object := oi.Bucket, oi.Name

 	// Single part case

View file

@@ -31,7 +31,7 @@ import (
 // Wrapper for calling GetObject tests for both Erasure multiple disks and single node setup.
 func TestGetObject(t *testing.T) {
-	ExecObjectLayerTest(t, testGetObject)
+	ExecExtendedObjectLayerTest(t, testGetObject)
 }

 // ObjectLayer.GetObject is called with series of cases for valid and erroneous inputs and the result is validated.

View file

@@ -157,7 +157,7 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE
 // Wrapper for calling PutObjectPart tests for both Erasure multiple disks and single node setup.
 func TestObjectAPIPutObjectPart(t *testing.T) {
-	ExecObjectLayerTest(t, testObjectAPIPutObjectPart)
+	ExecExtendedObjectLayerTest(t, testObjectAPIPutObjectPart)
 }

 // Tests validate correctness of PutObjectPart.
@@ -289,7 +289,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
 // Wrapper for calling TestListMultipartUploads tests for both Erasure multiple disks and single node setup.
 func TestListMultipartUploads(t *testing.T) {
-	ExecObjectLayerTest(t, testListMultipartUploads)
+	ExecExtendedObjectLayerTest(t, testListMultipartUploads)
 }

 // testListMultipartUploads - Tests validate listing of multipart uploads.
@@ -1643,7 +1643,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
 // Test for validating complete Multipart upload.
 func TestObjectCompleteMultipartUpload(t *testing.T) {
-	ExecObjectLayerTest(t, testObjectCompleteMultipartUpload)
+	ExecExtendedObjectLayerTest(t, testObjectCompleteMultipartUpload)
 }

 // Tests validate CompleteMultipart functionality.

View file

@@ -37,7 +37,7 @@ func md5Header(data []byte) map[string]string {
 // Wrapper for calling PutObject tests for both Erasure multiple disks and single node setup.
 func TestObjectAPIPutObjectSingle(t *testing.T) {
-	ExecObjectLayerTest(t, testObjectAPIPutObject)
+	ExecExtendedObjectLayerTest(t, testObjectAPIPutObject)
 }

 // Tests validate correctness of PutObject.

View file

@@ -394,9 +394,6 @@ func (o ObjectInfo) IsCompressedOK() (bool, error) {
 	if !ok {
 		return false, nil
 	}
-	if crypto.IsEncrypted(o.UserDefined) {
-		return true, fmt.Errorf("compression %q and encryption enabled on same object", scheme)
-	}
 	switch scheme {
 	case compressionAlgorithmV1, compressionAlgorithmV2:
 		return true, nil
@@ -415,9 +412,6 @@ func (o ObjectInfo) GetActualETag(h http.Header) string {
 // GetActualSize - returns the actual size of the stored object
 func (o ObjectInfo) GetActualSize() (int64, error) {
-	if crypto.IsEncrypted(o.UserDefined) {
-		return o.DecryptedSize()
-	}
 	if o.IsCompressed() {
 		sizeStr, ok := o.UserDefined[ReservedMetadataPrefix+"actual-size"]
 		if !ok {
@@ -429,6 +423,10 @@ func (o ObjectInfo) GetActualSize() (int64, error) {
 		}
 		return size, nil
 	}
+	if crypto.IsEncrypted(o.UserDefined) {
+		return o.DecryptedSize()
+	}
+
 	return o.Size, nil
 }
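The reordering above matters for objects that are both compressed and encrypted: the pre-compression length recorded in the reserved actual-size metadata is the authoritative answer, whereas DecryptedSize() would only strip the encryption overhead and still report the compressed length. A standalone sketch of the resulting precedence (values invented):

    package main

    import (
        "fmt"
        "strconv"
    )

    // actualSize mirrors the reordered GetActualSize: the compression
    // metadata, when present, wins over any size derived from decryption.
    func actualSize(meta map[string]string, storedSize int64) (int64, error) {
        if s, ok := meta["X-Minio-Internal-actual-size"]; ok {
            return strconv.ParseInt(s, 10, 64)
        }
        return storedSize, nil
    }

    func main() {
        meta := map[string]string{"X-Minio-Internal-actual-size": "1048576"}
        got, _ := actualSize(meta, 524320) // stored = compressed+encrypted size
        fmt.Println(got)                   // 1048576, not the stored 524320
    }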
@@ -441,7 +439,7 @@ func isCompressible(header http.Header, object string) bool {
 	globalCompressConfigMu.Unlock()

 	_, ok := crypto.IsRequested(header)
-	if !cfg.Enabled || ok || excludeForCompression(header, object, cfg) {
+	if !cfg.Enabled || (ok && !cfg.AllowEncrypted) || excludeForCompression(header, object, cfg) {
 		return false
 	}
 	return true
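In other words, an SSE request now blocks compression only when allow_encryption is off. A tiny truth-table sketch of the new gate (standalone illustration; the exclude-filter check is omitted):

    package main

    import "fmt"

    // wouldCompress mirrors the rewritten condition in isCompressible.
    func wouldCompress(enabled, allowEncrypted, sseRequested bool) bool {
        return enabled && !(sseRequested && !allowEncrypted)
    }

    func main() {
        fmt.Println(wouldCompress(true, false, true)) // false: the old behaviour
        fmt.Println(wouldCompress(true, true, true))  // true: compression + encryption
    }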
@@ -461,16 +459,15 @@ func excludeForCompression(header http.Header, object string, cfg compress.Confi
 	}

 	// Filter compression includes.
-	if len(cfg.Extensions) == 0 || len(cfg.MimeTypes) == 0 {
-		return false
+	exclude := len(cfg.Extensions) > 0 || len(cfg.MimeTypes) > 0
+	if len(cfg.Extensions) > 0 && hasStringSuffixInSlice(objStr, cfg.Extensions) {
+		exclude = false
 	}
-
-	extensions := cfg.Extensions
-	mimeTypes := cfg.MimeTypes
-	if hasStringSuffixInSlice(objStr, extensions) || hasPattern(mimeTypes, contentType) {
-		return false
+	if len(cfg.MimeTypes) > 0 && hasPattern(cfg.MimeTypes, contentType) {
+		exclude = false
 	}
-	return true
+	return exclude
 }
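Note the semantics change here: previously the include-filters only took effect when both extensions and MIME types were configured; now either list alone is enough, an object is excluded unless it matches one of them, and nothing is excluded when no filters are set. A standalone sketch, using a prefix match as a stand-in for MinIO's hasPattern wildcard matching:

    package main

    import (
        "fmt"
        "strings"
    )

    func exclude(name, contentType string, exts, mimes []string) bool {
        excl := len(exts) > 0 || len(mimes) > 0 // filters configured => exclude by default
        for _, e := range exts {
            if strings.HasSuffix(name, e) {
                excl = false
            }
        }
        for _, m := range mimes {
            if strings.HasPrefix(contentType, strings.TrimSuffix(m, "*")) {
                excl = false
            }
        }
        return excl
    }

    func main() {
        exts, mimes := []string{".csv"}, []string{"text/*"}
        fmt.Println(exclude("data.csv", "application/octet-stream", exts, mimes)) // false: extension matches
        fmt.Println(exclude("img.png", "image/png", exts, mimes))                 // true: no filter matches
        fmt.Println(exclude("img.png", "image/png", nil, nil))                    // false: no filters configured
    }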
// Utility which returns if a string is present in the list. // Utility which returns if a string is present in the list.
@@ -520,21 +517,29 @@ func partNumberToRangeSpec(oi ObjectInfo, partNumber int) *HTTPRangeSpec {
 }

 // Returns the compressed offset which should be skipped.
-func getCompressedOffsets(objectInfo ObjectInfo, offset int64) (int64, int64) {
-	var compressedOffset int64
+// If encrypted offsets are adjusted for encrypted block headers/trailers.
+// Since de-compression is after decryption encryption overhead is only added to compressedOffset.
+func getCompressedOffsets(objectInfo ObjectInfo, offset int64) (compressedOffset int64, partSkip int64) {
 	var skipLength int64
 	var cumulativeActualSize int64
+	var firstPartIdx int
 	if len(objectInfo.Parts) > 0 {
-		for _, part := range objectInfo.Parts {
+		for i, part := range objectInfo.Parts {
 			cumulativeActualSize += part.ActualSize
 			if cumulativeActualSize <= offset {
 				compressedOffset += part.Size
 			} else {
+				firstPartIdx = i
 				skipLength = cumulativeActualSize - part.ActualSize
 				break
 			}
 		}
 	}
+	if isEncryptedMultipart(objectInfo) && firstPartIdx > 0 {
+		off, _, _, _, _, err := objectInfo.GetDecryptedRange(partNumberToRangeSpec(objectInfo, firstPartIdx))
+		logger.LogIf(context.Background(), err)
+		compressedOffset += off
+	}

 	return compressedOffset, offset - skipLength
 }
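A worked example of the offset math (sizes invented): three parts of 10 MiB uncompressed, each stored as 4 MiB compressed, and a request starting at uncompressed offset 25 MiB. The reader seeks past two whole compressed parts and then skips 5 MiB of decompressed data; for encrypted multipart objects, GetDecryptedRange additionally grows compressedOffset by the encryption header/trailer overhead of the skipped parts, since decryption runs before decompression.

    package main

    import "fmt"

    type part struct{ size, actualSize int64 } // compressed vs. uncompressed bytes

    // offsets mirrors the loop in getCompressedOffsets (encryption adjustment omitted).
    func offsets(parts []part, offset int64) (compressedOffset, partSkip int64) {
        var skipLength, cumulativeActualSize int64
        for _, p := range parts {
            cumulativeActualSize += p.actualSize
            if cumulativeActualSize <= offset {
                compressedOffset += p.size
            } else {
                skipLength = cumulativeActualSize - p.actualSize
                break
            }
        }
        return compressedOffset, offset - skipLength
    }

    func main() {
        const MiB = 1 << 20
        parts := []part{{4 * MiB, 10 * MiB}, {4 * MiB, 10 * MiB}, {4 * MiB, 10 * MiB}}
        co, skip := offsets(parts, 25*MiB)
        fmt.Println(co/MiB, skip/MiB) // 8 5: seek 8 MiB compressed, then skip 5 MiB decompressed
    }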
@@ -604,9 +609,92 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
 		isEncrypted = false
 	}

 	var skipLen int64
-	// Calculate range to read (different for
-	// e.g. encrypted/compressed objects)
+	// Calculate range to read (different for encrypted/compressed objects)
 	switch {
+	case isCompressed:
+		// If compressed, we start from the beginning of the part.
+		// Read the decompressed size from the meta.json.
+		actualSize, err := oi.GetActualSize()
+		if err != nil {
+			return nil, 0, 0, err
+		}
+		off, length = int64(0), oi.Size
+		decOff, decLength := int64(0), actualSize
+		if rs != nil {
+			off, length, err = rs.GetOffsetLength(actualSize)
+			if err != nil {
+				return nil, 0, 0, err
+			}
+			// In case of range based queries on multiparts, the offset and length are reduced.
+			off, decOff = getCompressedOffsets(oi, off)
+			decLength = length
+			length = oi.Size - off
+			// For negative length we read everything.
+			if decLength < 0 {
+				decLength = actualSize - decOff
+			}
+
+			// Reply back invalid range if the input offset and length fall out of range.
+			if decOff > actualSize || decOff+decLength > actualSize {
+				return nil, 0, 0, errInvalidRange
+			}
+		}
+		fn = func(inputReader io.Reader, h http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
+			cFns = append(cleanUpFns, cFns...)
+			if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
+				// Call the cleanup funcs
+				for i := len(cFns) - 1; i >= 0; i-- {
+					cFns[i]()
+				}
+				return nil, PreConditionFailed{}
+			}
+			if isEncrypted {
+				copySource := h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != ""
+				// Attach decrypter on inputReader
+				inputReader, err = DecryptBlocksRequestR(inputReader, h, 0, opts.PartNumber, oi, copySource)
+				if err != nil {
+					// Call the cleanup funcs
+					for i := len(cFns) - 1; i >= 0; i-- {
+						cFns[i]()
+					}
+					return nil, err
+				}
+			}
+
+			// Decompression reader.
+			s2Reader := s2.NewReader(inputReader)
+			// Apply the skipLen and limit on the decompressed stream.
+			err = s2Reader.Skip(decOff)
+			if err != nil {
+				// Call the cleanup funcs
+				for i := len(cFns) - 1; i >= 0; i-- {
+					cFns[i]()
+				}
+				return nil, err
+			}
+
+			decReader := io.LimitReader(s2Reader, decLength)
+			if decLength > compReadAheadSize {
+				rah, err := readahead.NewReaderSize(decReader, compReadAheadBuffers, compReadAheadBufSize)
+				if err == nil {
+					decReader = rah
+					cFns = append(cFns, func() {
+						rah.Close()
+					})
+				}
+			}
+			oi.Size = decLength
+
+			// Assemble the GetObjectReader
+			r = &GetObjectReader{
+				ObjInfo:    oi,
+				pReader:    decReader,
+				cleanUpFns: cFns,
+				opts:       opts,
+			}
+			return r, nil
+		}
+
 	case isEncrypted:
 		var seqNumber uint32
 		var partStart int
@@ -635,8 +723,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
 			cFns = append(cleanUpFns, cFns...)
 			// Attach decrypter on inputReader
 			var decReader io.Reader
-			decReader, err = DecryptBlocksRequestR(inputReader, h,
-				off, length, seqNumber, partStart, oi, copySource)
+			decReader, err = DecryptBlocksRequestR(inputReader, h, seqNumber, partStart, oi, copySource)
 			if err != nil {
 				// Call the cleanup funcs
 				for i := len(cFns) - 1; i >= 0; i-- {
@@ -659,76 +746,6 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
 			// decrypted stream
 			decReader = io.LimitReader(ioutil.NewSkipReader(decReader, skipLen), decRangeLength)

-			// Assemble the GetObjectReader
-			r = &GetObjectReader{
-				ObjInfo:    oi,
-				pReader:    decReader,
-				cleanUpFns: cFns,
-				opts:       opts,
-			}
-			return r, nil
-		}
-
-	case isCompressed:
-		// Read the decompressed size from the meta.json.
-		actualSize, err := oi.GetActualSize()
-		if err != nil {
-			return nil, 0, 0, err
-		}
-		off, length = int64(0), oi.Size
-		decOff, decLength := int64(0), actualSize
-		if rs != nil {
-			off, length, err = rs.GetOffsetLength(actualSize)
-			if err != nil {
-				return nil, 0, 0, err
-			}
-			// In case of range based queries on multiparts, the offset and length are reduced.
-			off, decOff = getCompressedOffsets(oi, off)
-			decLength = length
-			length = oi.Size - off
-			// For negative length we read everything.
-			if decLength < 0 {
-				decLength = actualSize - decOff
-			}
-
-			// Reply back invalid range if the input offset and length fall out of range.
-			if decOff > actualSize || decOff+decLength > actualSize {
-				return nil, 0, 0, errInvalidRange
-			}
-		}
-		fn = func(inputReader io.Reader, _ http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
-			cFns = append(cleanUpFns, cFns...)
-			if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
-				// Call the cleanup funcs
-				for i := len(cFns) - 1; i >= 0; i-- {
-					cFns[i]()
-				}
-				return nil, PreConditionFailed{}
-			}
-			// Decompression reader.
-			s2Reader := s2.NewReader(inputReader)
-			// Apply the skipLen and limit on the decompressed stream.
-			err = s2Reader.Skip(decOff)
-			if err != nil {
-				// Call the cleanup funcs
-				for i := len(cFns) - 1; i >= 0; i-- {
-					cFns[i]()
-				}
-				return nil, err
-			}
-
-			decReader := io.LimitReader(s2Reader, decLength)
-			if decLength > compReadAheadSize {
-				rah, err := readahead.NewReaderSize(decReader, compReadAheadBuffers, compReadAheadBufSize)
-				if err == nil {
-					decReader = rah
-					cFns = append(cFns, func() {
-						rah.Close()
-					})
-				}
-			}
-			oi.Size = decLength
-
 			// Assemble the GetObjectReader
 			r = &GetObjectReader{
 				ObjInfo: oi,
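The key point of relocating the compressed case ahead of the encrypted one is reader ordering: the decrypter is attached to the raw stream first, the s2 decompressor sits on top of the plaintext, and Skip/LimitReader then carve the requested range out of the decompressed stream. A runnable sketch of the decompress-skip-limit tail of that chain (encryption omitted; assumes github.com/klauspost/compress is available):

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "github.com/klauspost/compress/s2"
    )

    func main() {
        // Compress 1000 bytes, then read back a 10-byte sub-range the way the
        // compressed case does: decompress, Skip to decOff, limit to decLength.
        var buf bytes.Buffer
        w := s2.NewWriter(&buf)
        if _, err := w.Write(bytes.Repeat([]byte("0123456789"), 100)); err != nil {
            panic(err)
        }
        if err := w.Close(); err != nil {
            panic(err)
        }

        r := s2.NewReader(&buf)
        if err := r.Skip(990); err != nil { // decOff
            panic(err)
        }
        out, err := io.ReadAll(io.LimitReader(r, 10)) // decLength
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", out) // "0123456789"
    }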

View file

@@ -317,7 +317,7 @@ func TestIsCompressed(t *testing.T) {
 		result bool
 		err    bool
 	}{
-		{
+		0: {
 			objInfo: ObjectInfo{
 				UserDefined: map[string]string{"X-Minio-Internal-compression": compressionAlgorithmV1,
 					"content-type": "application/octet-stream",
@@ -325,7 +325,7 @@ func TestIsCompressed(t *testing.T) {
 			},
 			result: true,
 		},
-		{
+		1: {
 			objInfo: ObjectInfo{
 				UserDefined: map[string]string{"X-Minio-Internal-compression": compressionAlgorithmV2,
 					"content-type": "application/octet-stream",
@@ -333,7 +333,7 @@ func TestIsCompressed(t *testing.T) {
 			},
 			result: true,
 		},
-		{
+		2: {
 			objInfo: ObjectInfo{
 				UserDefined: map[string]string{"X-Minio-Internal-compression": "unknown/compression/type",
 					"content-type": "application/octet-stream",
@@ -342,7 +342,7 @@ func TestIsCompressed(t *testing.T) {
 			result: true,
 			err:    true,
 		},
-		{
+		3: {
 			objInfo: ObjectInfo{
 				UserDefined: map[string]string{"X-Minio-Internal-compression": compressionAlgorithmV2,
 					"content-type": "application/octet-stream",
@@ -351,9 +351,9 @@ func TestIsCompressed(t *testing.T) {
 				},
 			},
 			result: true,
-			err:    true,
+			err:    false,
 		},
-		{
+		4: {
 			objInfo: ObjectInfo{
 				UserDefined: map[string]string{"X-Minio-Internal-XYZ": "klauspost/compress/s2",
 					"content-type": "application/octet-stream",
@@ -361,7 +361,7 @@ func TestIsCompressed(t *testing.T) {
 			},
 			result: false,
 		},
-		{
+		5: {
 			objInfo: ObjectInfo{
 				UserDefined: map[string]string{"content-type": "application/octet-stream",
 					"etag": "b3ff3ef3789147152fbfbc50efba4bfd-2"},

View file

@@ -977,18 +977,14 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	}

 	var reader io.Reader
-	var length = srcInfo.Size

-	// Set the actual size to the decrypted size if encrypted.
-	actualSize := srcInfo.Size
-	if crypto.IsEncrypted(srcInfo.UserDefined) {
-		actualSize, err = srcInfo.DecryptedSize()
-		if err != nil {
-			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-			return
-		}
-		length = actualSize
+	// Set the actual size to the compressed/decrypted size if encrypted.
+	actualSize, err := srcInfo.GetActualSize()
+	if err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+		return
 	}
+	length := actualSize

 	if !cpSrcDstSame {
 		if err := enforceBucketQuota(ctx, dstBucket, actualSize); err != nil {
@@ -1000,8 +996,10 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	var compressMetadata map[string]string
 	// No need to compress for remote etcd calls
 	// Pass the decompressed stream to such calls.
-	isCompressed := objectAPI.IsCompressionSupported() && isCompressible(r.Header, srcObject) && !isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI)
-	if isCompressed {
+	isDstCompressed := objectAPI.IsCompressionSupported() &&
+		isCompressible(r.Header, srcObject) &&
+		!isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI)
+	if isDstCompressed {
 		compressMetadata = make(map[string]string, 2)
 		// Preserving the compression metadata.
 		compressMetadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
@@ -1034,7 +1032,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	_, objectEncryption := crypto.IsRequested(r.Header)
 	objectEncryption = objectEncryption || crypto.IsSourceEncrypted(srcInfo.UserDefined)
 	var encMetadata = make(map[string]string)
-	if objectAPI.IsEncryptionSupported() && !isCompressed {
+	if objectAPI.IsEncryptionSupported() {
 		// Encryption parameters not applicable for this object.
 		if !crypto.IsEncrypted(srcInfo.UserDefined) && crypto.SSECopy.IsRequested(r.Header) {
 			writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r))
@@ -1105,8 +1103,10 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	var targetSize int64

 	switch {
+	case isDstCompressed:
+		targetSize = -1
 	case !isSourceEncrypted && !isTargetEncrypted:
-		targetSize = srcInfo.Size
+		targetSize, _ = srcInfo.GetActualSize()
 	case isSourceEncrypted && isTargetEncrypted:
 		objInfo := ObjectInfo{Size: actualSize}
 		targetSize = objInfo.EncryptedSize()
@@ -1131,7 +1131,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	}

 	// do not try to verify encrypted content
-	srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", targetSize, globalCLIContext.StrictS3Compat)
+	srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", actualSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -1541,10 +1541,15 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
 		}
-		info := ObjectInfo{Size: size}
+		wantSize := int64(-1)
+		if size >= 0 {
+			info := ObjectInfo{Size: size}
+			wantSize = info.EncryptedSize()
+		}

 		// do not try to verify encrypted content
-		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size, globalCLIContext.StrictS3Compat)
+		hashReader, err = hash.NewReader(reader, wantSize, "", "", actualSize, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -1564,10 +1569,6 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 	}

 	switch {
-	case objInfo.IsCompressed():
-		if !strings.HasSuffix(objInfo.ETag, "-1") {
-			objInfo.ETag = objInfo.ETag + "-1"
-		}
 	case crypto.IsEncrypted(objInfo.UserDefined):
 		switch {
 		case crypto.S3.IsEncrypted(objInfo.UserDefined):
@@ -1581,6 +1582,10 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 				objInfo.ETag = objInfo.ETag[len(objInfo.ETag)-32:]
 			}
 		}
+	case objInfo.IsCompressed():
+		if !strings.HasSuffix(objInfo.ETag, "-1") {
+			objInfo.ETag = objInfo.ETag + "-1"
+		}
 	}
 	if mustReplicate(ctx, r, bucket, object, metadata, "") {
 		globalReplicationState.queueReplicaTask(objInfo)
@@ -1892,7 +1897,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 	actualPartSize := srcInfo.Size
 	if crypto.IsEncrypted(srcInfo.UserDefined) {
-		actualPartSize, err = srcInfo.DecryptedSize()
+		actualPartSize, err = srcInfo.GetActualSize()
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -1991,7 +1996,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 	isEncrypted := crypto.IsEncrypted(mi.UserDefined)
 	var objectEncryptionKey crypto.ObjectKey
-	if objectAPI.IsEncryptionSupported() && !isCompressed && isEncrypted {
+	if objectAPI.IsEncryptionSupported() && isEncrypted {
 		if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
 			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL, guessIsBrowserReq(r))
 			return
@@ -2022,8 +2027,13 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 			return
 		}
-		info := ObjectInfo{Size: length}
-		srcInfo.Reader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", length, globalCLIContext.StrictS3Compat)
+		wantSize := int64(-1)
+		if length >= 0 {
+			info := ObjectInfo{Size: length}
+			wantSize = info.EncryptedSize()
+		}
+
+		srcInfo.Reader, err = hash.NewReader(reader, wantSize, "", "", actualPartSize, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -2226,7 +2236,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 	isEncrypted := crypto.IsEncrypted(mi.UserDefined)
 	var objectEncryptionKey crypto.ObjectKey
-	if objectAPI.IsEncryptionSupported() && !isCompressed && isEncrypted {
+	if objectAPI.IsEncryptionSupported() && isEncrypted {
 		if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
 			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL, guessIsBrowserReq(r))
 			return
@@ -2267,9 +2277,13 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
 		}
-		info := ObjectInfo{Size: size}
+		wantSize := int64(-1)
+		if size >= 0 {
+			info := ObjectInfo{Size: size}
+			wantSize = info.EncryptedSize()
+		}

 		// do not try to verify encrypted content
-		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size, globalCLIContext.StrictS3Compat)
+		hashReader, err = hash.NewReader(reader, wantSize, "", "", actualSize, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
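All three hash.NewReader call sites changed above follow the same pattern: the on-wire ("want") size is only computable when the plaintext size is known, and a compressed stream has no predictable length, hence the -1 sentinel. A standalone sketch of the arithmetic (the 64 KiB block and 32-byte per-block overhead mirror DARE, but treat them as illustrative):

    package main

    import "fmt"

    // encryptedSize mimics ObjectInfo.EncryptedSize conceptually: SSE adds a
    // fixed per-block overhead, so the result needs a known input size.
    func encryptedSize(size int64) int64 {
        const block, overhead = 64 << 10, 32
        blocks := (size + block - 1) / block
        return size + blocks*overhead
    }

    func main() {
        for _, size := range []int64{-1, 1 << 20} {
            wantSize := int64(-1) // unknown: e.g. the stream is being compressed
            if size >= 0 {
                wantSize = encryptedSize(size)
            }
            fmt.Println(wantSize) // -1, then 1049088 (1 MiB + 16 blocks * 32 bytes)
        }
    }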

View file

@@ -327,7 +327,7 @@ func TestAPIGetObjectHandler(t *testing.T) {
 	defer func() { globalPolicySys = nil }()
 	defer DetectTestLeak(t)()
-	ExecObjectLayerAPITest(t, testAPIGetObjectHandler, []string{"GetObject"})
+	ExecExtendedObjectLayerAPITest(t, testAPIGetObjectHandler, []string{"GetObject"})
 }

 func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -651,7 +651,7 @@ func TestAPIGetObjectWithMPHandler(t *testing.T) {
 	defer func() { globalPolicySys = nil }()
 	defer DetectTestLeak(t)()
-	ExecObjectLayerAPITest(t, testAPIGetObjectWithMPHandler, []string{"NewMultipart", "PutObjectPart", "CompleteMultipart", "GetObject", "PutObject"})
+	ExecExtendedObjectLayerAPITest(t, testAPIGetObjectWithMPHandler, []string{"NewMultipart", "PutObjectPart", "CompleteMultipart", "GetObject", "PutObject"})
 }

 func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -849,7 +849,7 @@ func TestAPIGetObjectWithPartNumberHandler(t *testing.T) {
 	defer func() { globalPolicySys = nil }()
 	defer DetectTestLeak(t)()
-	ExecObjectLayerAPITest(t, testAPIGetObjectWithPartNumberHandler, []string{"NewMultipart", "PutObjectPart", "CompleteMultipart", "GetObject", "PutObject"})
+	ExecExtendedObjectLayerAPITest(t, testAPIGetObjectWithPartNumberHandler, []string{"NewMultipart", "PutObjectPart", "CompleteMultipart", "GetObject", "PutObject"})
 }

 func testAPIGetObjectWithPartNumberHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -971,7 +971,7 @@ func testAPIGetObjectWithPartNumberHandler(obj ObjectLayer, instanceType, bucket
 // Wrapper for calling PutObject API handler tests using streaming signature v4 for both Erasure multiple disks and FS single drive setup.
 func TestAPIPutObjectStreamSigV4Handler(t *testing.T) {
 	defer DetectTestLeak(t)()
-	ExecObjectLayerAPITest(t, testAPIPutObjectStreamSigV4Handler, []string{"PutObject"})
+	ExecExtendedObjectLayerAPITest(t, testAPIPutObjectStreamSigV4Handler, []string{"PutObject"})
 }

 func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -1289,7 +1289,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
 // Wrapper for calling PutObject API handler tests for both Erasure multiple disks and FS single drive setup.
 func TestAPIPutObjectHandler(t *testing.T) {
 	defer DetectTestLeak(t)()
-	ExecObjectLayerAPITest(t, testAPIPutObjectHandler, []string{"PutObject"})
+	ExecExtendedObjectLayerAPITest(t, testAPIPutObjectHandler, []string{"PutObject"})
 }

 func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -1538,7 +1538,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
 // expected.
 func TestAPICopyObjectPartHandlerSanity(t *testing.T) {
 	defer DetectTestLeak(t)()
-	ExecObjectLayerAPITest(t, testAPICopyObjectPartHandlerSanity, []string{"CopyObjectPart"})
+	ExecExtendedObjectLayerAPITest(t, testAPICopyObjectPartHandlerSanity, []string{"CopyObjectPart"})
 }

 func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -1649,7 +1649,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
 // Wrapper for calling Copy Object Part API handler tests for both Erasure multiple disks and single node setup.
 func TestAPICopyObjectPartHandler(t *testing.T) {
 	defer DetectTestLeak(t)()
-	ExecObjectLayerAPITest(t, testAPICopyObjectPartHandler, []string{"CopyObjectPart"})
+	ExecExtendedObjectLayerAPITest(t, testAPICopyObjectPartHandler, []string{"CopyObjectPart"})
 }

 func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -1965,7 +1965,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 // Wrapper for calling Copy Object API handler tests for both Erasure multiple disks and single node setup.
 func TestAPICopyObjectHandler(t *testing.T) {
 	defer DetectTestLeak(t)()
-	ExecObjectLayerAPITest(t, testAPICopyObjectHandler, []string{"CopyObject"})
+	ExecExtendedObjectLayerAPITest(t, testAPICopyObjectHandler, []string{"CopyObject"})
 }

 func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -2044,8 +2044,11 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		// expected output.
 		expectedRespStatus int
 	}{
+		0: {
+			expectedRespStatus: http.StatusMethodNotAllowed,
+		},
 		// Test case - 1, copy metadata from newObject1, ignore request headers.
-		{
+		1: {
 			bucketName:       bucketName,
 			newObjectName:    "newObject1",
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
@@ -2059,7 +2062,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		// Test case - 2.
 		// Test case with invalid source object.
-		{
+		2: {
 			bucketName:       bucketName,
 			newObjectName:    "newObject1",
 			copySourceHeader: url.QueryEscape(SlashSeparator),
@@ -2071,7 +2074,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		// Test case - 3.
 		// Test case with new object name is same as object to be copied.
-		{
+		3: {
 			bucketName:       bucketName,
 			newObjectName:    objectName,
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
@@ -2084,7 +2087,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		// Test case - 4.
 		// Test case with new object name is same as object to be copied.
 		// But source copy is without leading slash
-		{
+		4: {
 			bucketName:       bucketName,
 			newObjectName:    objectName,
 			copySourceHeader: url.QueryEscape(bucketName + SlashSeparator + objectName),
@@ -2097,7 +2100,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		// Test case - 5.
 		// Test case with new object name is same as object to be copied
 		// but metadata is updated.
-		{
+		5: {
 			bucketName:       bucketName,
 			newObjectName:    objectName,
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
@@ -2113,7 +2116,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		// Test case - 6.
 		// Test case with invalid metadata-directive.
-		{
+		6: {
 			bucketName:       bucketName,
 			newObjectName:    "newObject1",
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
@@ -2130,7 +2133,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		// Test case - 7.
 		// Test case with new object name is same as object to be copied
 		// fail with BadRequest.
-		{
+		7: {
 			bucketName:       bucketName,
 			newObjectName:    objectName,
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
@@ -2148,7 +2151,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		// Test case with non-existent source file.
 		// Case for the purpose of failing `api.ObjectAPI.GetObjectInfo`.
 		// Expecting the response status code to http.StatusNotFound (404).
-		{
+		8: {
 			bucketName:       bucketName,
 			newObjectName:    objectName,
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + "non-existent-object"),
@@ -2162,7 +2165,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		// Test case with non-existent source file.
 		// Case for the purpose of failing `api.ObjectAPI.PutObject`.
 		// Expecting the response status code to http.StatusNotFound (404).
-		{
+		9: {
 			bucketName:       "non-existent-destination-bucket",
 			newObjectName:    objectName,
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
@@ -2174,7 +2177,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		// Test case - 10.
 		// Case with invalid AccessKey.
-		{
+		10: {
 			bucketName:       bucketName,
 			newObjectName:    objectName,
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
@@ -2184,7 +2187,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 			expectedRespStatus: http.StatusForbidden,
 		},
 		// Test case - 11, copy metadata from newObject1 with satisfying modified header.
-		{
+		11: {
 			bucketName:       bucketName,
 			newObjectName:    "newObject1",
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
@@ -2194,7 +2197,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 			expectedRespStatus: http.StatusOK,
 		},
 		// Test case - 12, copy metadata from newObject1 with unsatisfying modified header.
-		{
+		12: {
 			bucketName:       bucketName,
 			newObjectName:    "newObject1",
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
@@ -2204,7 +2207,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 			expectedRespStatus: http.StatusPreconditionFailed,
 		},
 		// Test case - 13, copy metadata from newObject1 with wrong modified header format
-		{
+		13: {
 			bucketName:       bucketName,
 			newObjectName:    "newObject1",
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
@@ -2214,7 +2217,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 			expectedRespStatus: http.StatusOK,
 		},
 		// Test case - 14, copy metadata from newObject1 with satisfying unmodified header.
-		{
+		14: {
 			bucketName:       bucketName,
 			newObjectName:    "newObject1",
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
@@ -2224,7 +2227,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 			expectedRespStatus: http.StatusOK,
 		},
 		// Test case - 15, copy metadata from newObject1 with unsatisfying unmodified header.
-		{
+		15: {
 			bucketName:       bucketName,
 			newObjectName:    "newObject1",
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
@@ -2234,7 +2237,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 			expectedRespStatus: http.StatusPreconditionFailed,
 		},
 		// Test case - 16, copy metadata from newObject1 with incorrect unmodified header format.
-		{
+		16: {
 			bucketName:       bucketName,
 			newObjectName:    "newObject1",
 			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
@@ -2244,7 +2247,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 			expectedRespStatus: http.StatusOK,
 		},
 		// Test case - 17, copy metadata from newObject1 with null versionId
-		{
+		17: {
 			bucketName:       bucketName,
 			newObjectName:    "newObject1",
 			copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=null",
@@ -2253,7 +2256,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 			expectedRespStatus: http.StatusOK,
 		},
 		// Test case - 18, copy metadata from newObject1 with non null versionId
-		{
+		18: {
 			bucketName:       bucketName,
 			newObjectName:    "newObject1",
 			copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=17",
@@ -2273,7 +2276,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 			0, nil, testCase.accessKey, testCase.secretKey, nil)
 		if err != nil {
-			t.Fatalf("Test %d: Failed to create HTTP request for copy Object: <ERROR> %v", i+1, err)
+			t.Fatalf("Test %d: Failed to create HTTP request for copy Object: <ERROR> %v", i, err)
 		}
 		// "X-Amz-Copy-Source" header contains the information about the source bucket and the object to copied.
 		if testCase.copySourceHeader != "" {
@@ -2303,25 +2306,35 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		apiRouter.ServeHTTP(rec, req)
 		// Assert the response code with the expected status.
 		if rec.Code != testCase.expectedRespStatus {
-			t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
+			t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i, instanceType, testCase.expectedRespStatus, rec.Code)
 			continue
 		}
 		if rec.Code == http.StatusOK {
 			var cpObjResp CopyObjectResponse
 			if err = xml.Unmarshal(rec.Body.Bytes(), &cpObjResp); err != nil {
-				t.Fatalf("Test %d: %s: Failed to parse the CopyObjectResult response: <ERROR> %s", i+1, instanceType, err)
+				t.Fatalf("Test %d: %s: Failed to parse the CopyObjectResult response: <ERROR> %s", i, instanceType, err)
 			}

 			// See if the new object is formed.
 			// testing whether the copy was successful.
-			err = obj.GetObject(context.Background(), testCase.bucketName, testCase.newObjectName, 0, int64(len(bytesData[0].byteData)), buffers[0], "", opts)
-			if err != nil {
-				t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
-			}
-			if !bytes.Equal(bytesData[0].byteData, buffers[0].Bytes()) {
-				t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the copied object doesn't match the original one.", i+1, instanceType)
-			}
-			buffers[0].Reset()
+			// Note that this goes directly to the file system,
+			// so encryption/compression may interfere at some point.
+			globalCompressConfigMu.Lock()
+			cfg := globalCompressConfig
+			globalCompressConfigMu.Unlock()
+			if !cfg.Enabled {
+				err = obj.GetObject(context.Background(), testCase.bucketName, testCase.newObjectName, 0, int64(len(bytesData[0].byteData)), buffers[0], "", opts)
+				if err != nil {
+					t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i, instanceType, err)
+				}
+				if !bytes.Equal(bytesData[0].byteData, buffers[0].Bytes()) {
+					t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the copied object doesn't match the original one.", i, instanceType)
+				}
+				buffers[0].Reset()
+			} else {
+				t.Log("object not validated due to compression")
+			}
 		}

 		// Verify response of the V2 signed HTTP request.
@@ -2330,7 +2343,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		reqV2, err = newTestRequest(http.MethodPut, getCopyObjectURL("", testCase.bucketName, testCase.newObjectName), 0, nil)
 		if err != nil {
-			t.Fatalf("Test %d: Failed to create HTTP request for copy Object: <ERROR> %v", i+1, err)
+			t.Fatalf("Test %d: Failed to create HTTP request for copy Object: <ERROR> %v", i, err)
 		}
 		// "X-Amz-Copy-Source" header contains the information about the source bucket and the object to copied.
 		if testCase.copySourceHeader != "" {
@@ -2366,7 +2379,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		// Call the ServeHTTP to execute the handler.
 		apiRouter.ServeHTTP(recV2, reqV2)
 		if recV2.Code != testCase.expectedRespStatus {
-			t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
+			t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i, instanceType, testCase.expectedRespStatus, recV2.Code)
 		}
 	}
@@ -3304,7 +3317,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
 // when the request signature type is `streaming signature`.
 func TestAPIPutObjectPartHandlerStreaming(t *testing.T) {
 	defer DetectTestLeak(t)()
-	ExecObjectLayerAPITest(t, testAPIPutObjectPartHandlerStreaming, []string{"NewMultipart", "PutObjectPart"})
+	ExecExtendedObjectLayerAPITest(t, testAPIPutObjectPartHandlerStreaming, []string{"NewMultipart", "PutObjectPart"})
 }

 func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -3392,7 +3405,7 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN
 // for variety of inputs.
 func TestAPIPutObjectPartHandler(t *testing.T) {
 	defer DetectTestLeak(t)()
-	ExecObjectLayerAPITest(t, testAPIPutObjectPartHandler, []string{"PutObjectPart"})
+	ExecExtendedObjectLayerAPITest(t, testAPIPutObjectPartHandler, []string{"PutObjectPart"})
 }

 func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -3797,7 +3810,7 @@ func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketN
 // for variety of success/failure cases.
 func TestAPIListObjectPartsHandler(t *testing.T) {
 	defer DetectTestLeak(t)()
-	ExecObjectLayerAPITest(t, testAPIListObjectPartsHandler, []string{"ListObjectParts"})
+	ExecExtendedObjectLayerAPITest(t, testAPIListObjectPartsHandler, []string{"ListObjectParts"})
 }

 func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,

View file

@@ -21,10 +21,12 @@ import (
 	"context"
 	"io"
 	"math/rand"
+	"os"
 	"strconv"
 	"testing"

-	humanize "github.com/dustin/go-humanize"
+	"github.com/dustin/go-humanize"
+	"github.com/minio/minio/cmd/crypto"
 )

 // Return pointer to testOneByteReadEOF{}
@@ -68,10 +70,8 @@ func (r *testOneByteReadNoEOF) Read(p []byte) (n int, err error) {
 	return n, nil
 }

-type ObjectLayerAPISuite struct{}
-
 // Wrapper for calling testMakeBucket for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestMakeBucket(t *testing.T) {
+func TestMakeBucket(t *testing.T) {
 	ExecObjectLayerTest(t, testMakeBucket)
 }
@@ -84,8 +84,8 @@ func testMakeBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
 }

 // Wrapper for calling testMultipartObjectCreation for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestMultipartObjectCreation(t *testing.T) {
-	ExecObjectLayerTest(t, testMultipartObjectCreation)
+func TestMultipartObjectCreation(t *testing.T) {
+	ExecExtendedObjectLayerTest(t, testMultipartObjectCreation)
 }

 // Tests validate creation of part files during Multipart operation.
@@ -128,7 +128,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErr
 }

 // Wrapper for calling testMultipartObjectAbort for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestMultipartObjectAbort(t *testing.T) {
+func TestMultipartObjectAbort(t *testing.T) {
 	ExecObjectLayerTest(t, testMultipartObjectAbort)
 }
@@ -173,8 +173,8 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan
 }

 // Wrapper for calling testMultipleObjectCreation for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestMultipleObjectCreation(t *testing.T) {
-	ExecObjectLayerTest(t, testMultipleObjectCreation)
+func TestMultipleObjectCreation(t *testing.T) {
+	ExecExtendedObjectLayerTest(t, testMultipleObjectCreation)
 }

 // Tests validate object creation.
@@ -230,7 +230,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH
 }

 // Wrapper for calling TestPaging for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestPaging(t *testing.T) {
+func TestPaging(t *testing.T) {
 	ExecObjectLayerTest(t, testPaging)
 }
@@ -434,7 +434,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
 }

 // Wrapper for calling testObjectOverwriteWorks for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestObjectOverwriteWorks(t *testing.T) {
+func TestObjectOverwriteWorks(t *testing.T) {
 	ExecObjectLayerTest(t, testObjectOverwriteWorks)
 }
@@ -471,7 +471,7 @@ func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHan
 }

 // Wrapper for calling testNonExistantBucketOperations for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestNonExistantBucketOperations(t *testing.T) {
+func TestNonExistantBucketOperations(t *testing.T) {
 	ExecObjectLayerTest(t, testNonExistantBucketOperations)
 }
@@ -488,7 +488,7 @@ func testNonExistantBucketOperations(obj ObjectLayer, instanceType string, t Tes
 }

 // Wrapper for calling testBucketRecreateFails for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestBucketRecreateFails(t *testing.T) {
+func TestBucketRecreateFails(t *testing.T) {
 	ExecObjectLayerTest(t, testBucketRecreateFails)
 }
@ -508,9 +508,68 @@ func testBucketRecreateFails(obj ObjectLayer, instanceType string, t TestErrHand
} }
} }
func execExtended(t *testing.T, fn func(t *testing.T)) {
// Exec with default settings...
globalCompressConfigMu.Lock()
globalCompressConfig.Enabled = false
globalCompressConfigMu.Unlock()
t.Run("default", func(t *testing.T) {
fn(t)
})
if testing.Short() {
return
}
// Enable compression and exec...
globalCompressConfigMu.Lock()
globalCompressConfig.Enabled = true
globalCompressConfig.MimeTypes = nil
globalCompressConfig.Extensions = nil
globalCompressConfigMu.Unlock()
t.Run("compressed", func(t *testing.T) {
fn(t)
})
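	// Enable encryption (auto-encryption with a static test KMS key) and exec...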
globalAutoEncryption = true
os.Setenv("MINIO_KMS_MASTER_KEY", "my-minio-key:6368616e676520746869732070617373776f726420746f206120736563726574")
defer os.Setenv("MINIO_KMS_MASTER_KEY", "")
var err error
GlobalKMS, err = crypto.NewKMS(crypto.KMSConfig{})
if err != nil {
t.Fatal(err)
}
t.Run("encrypted", func(t *testing.T) {
fn(t)
})
// Enable compression of encrypted and exec...
globalCompressConfigMu.Lock()
globalCompressConfig.AllowEncrypted = true
globalCompressConfigMu.Unlock()
t.Run("compressed+encrypted", func(t *testing.T) {
fn(t)
})
// Reset...
globalCompressConfigMu.Lock()
globalCompressConfig.Enabled = false
globalCompressConfig.AllowEncrypted = false
globalCompressConfigMu.Unlock()
globalAutoEncryption = false
}
// ExecExtendedObjectLayerTest will execute the tests with combinations of encrypted & compressed.
// This can be used to test functionality when reading and writing data.
func ExecExtendedObjectLayerTest(t *testing.T, objTest objTestType) {
execExtended(t, func(t *testing.T) {
ExecObjectLayerTest(t, objTest)
})
}
// Wrapper for calling testPutObject for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestPutObject(t *testing.T) {
func TestPutObject(t *testing.T) {
-	ExecObjectLayerTest(t, testPutObject)
	ExecExtendedObjectLayerTest(t, testPutObject)
}
// Tests validate PutObject without prefix.
@@ -553,8 +612,8 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
}
// Wrapper for calling testPutObjectInSubdir for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestPutObjectInSubdir(t *testing.T) {
func TestPutObjectInSubdir(t *testing.T) {
-	ExecObjectLayerTest(t, testPutObjectInSubdir)
	ExecExtendedObjectLayerTest(t, testPutObjectInSubdir)
}
// Tests validate PutObject with subdirectory prefix.
@@ -585,7 +644,7 @@ func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandle
}
// Wrapper for calling testListBuckets for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestListBuckets(t *testing.T) {
func TestListBuckets(t *testing.T) {
	ExecObjectLayerTest(t, testListBuckets)
}
@@ -644,7 +703,7 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
}
// Wrapper for calling testListBucketsOrder for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestListBucketsOrder(t *testing.T) {
func TestListBucketsOrder(t *testing.T) {
	ExecObjectLayerTest(t, testListBucketsOrder)
}
@@ -678,7 +737,7 @@ func testListBucketsOrder(obj ObjectLayer, instanceType string, t TestErrHandler
}
// Wrapper for calling testListObjectsTestsForNonExistantBucket for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestListObjectsTestsForNonExistantBucket(t *testing.T) {
func TestListObjectsTestsForNonExistantBucket(t *testing.T) {
	ExecObjectLayerTest(t, testListObjectsTestsForNonExistantBucket)
}
@@ -700,7 +759,7 @@ func testListObjectsTestsForNonExistantBucket(obj ObjectLayer, instanceType stri
}
// Wrapper for calling testNonExistantObjectInBucket for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestNonExistantObjectInBucket(t *testing.T) {
func TestNonExistantObjectInBucket(t *testing.T) {
	ExecObjectLayerTest(t, testNonExistantObjectInBucket)
}
@@ -716,8 +775,8 @@ func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, t TestE
	t.Fatalf("%s: Expected error but found nil", instanceType)
}
if isErrObjectNotFound(err) {
-	if err.Error() != "Object not found: bucket#dir1" {
	if err.Error() != "Object not found: bucket/dir1" {
-		t.Errorf("%s: Expected the Error message to be `%s`, but instead found `%s`", instanceType, "Object not found: bucket#dir1", err.Error())
		t.Errorf("%s: Expected the Error message to be `%s`, but instead found `%s`", instanceType, "Object not found: bucket/dir1", err.Error())
	}
} else {
	if err.Error() != "fails" {
@@ -727,7 +786,7 @@ func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, t TestE
}
// Wrapper for calling testGetDirectoryReturnsObjectNotFound for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestGetDirectoryReturnsObjectNotFound(t *testing.T) {
func TestGetDirectoryReturnsObjectNotFound(t *testing.T) {
	ExecObjectLayerTest(t, testGetDirectoryReturnsObjectNotFound)
}
@@ -770,7 +829,7 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string,
}
// Wrapper for calling testContentType for both Erasure and FS.
-func (s *ObjectLayerAPISuite) TestContentType(t *testing.T) {
func TestContentType(t *testing.T) {
	ExecObjectLayerTest(t, testContentType)
}

View file

@@ -1866,6 +1866,14 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints [
	removeRoots(append(erasureDisks, fsDir))
}
// ExecExtendedObjectLayerAPITest will execute the tests with combinations of encrypted & compressed.
// This can be used to test functionality when reading and writing data.
func ExecExtendedObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints []string) {
execExtended(t, func(t *testing.T) {
ExecObjectLayerAPITest(t, objAPITest, endpoints)
})
}
// function to be passed to ExecObjectLayerAPITest, for executing object layer API handler tests.
type objAPITestType func(obj ObjectLayer, instanceType string, bucketName string,
	apiRouter http.Handler, credentials auth.Credentials, t *testing.T)

View file

@@ -26,7 +26,7 @@ Once the header is validated, we proceed to the actual data structure of the `xl
- LegacyObjectType (preserves existing deployments and older xl.json format)
- DeleteMarker (a versionId to capture the DELETE sequences implemented primarily for AWS spec compatibility)
-A sample msgpack-JSON `xl.meta`, you can debug the content inside `xl.meta` using [xl-meta-to-json.go](https://github.com/minio/minio/blob/master/docs/bucket/versioning/xl-meta-to-json.go) program.
A sample msgpack-JSON `xl.meta` is shown below; you can debug the content inside `xl.meta` using the [xl-meta.go](https://github.com/minio/minio/blob/master/docs/bucket/versioning/xl-meta.go) program.
```json
{
"Versions": [

View file

@@ -56,11 +56,12 @@ GLOBAL FLAGS:
	}
	app.Action = func(c *cli.Context) error {
-		if !c.Args().Present() {
-			cli.ShowAppHelp(c)
-			return nil
		files := c.Args()
		if len(files) == 0 {
			// If no args, assume xl.meta
			files = []string{"xl.meta"}
		}
-		for _, file := range c.Args() {
		for _, file := range files {
			var r io.Reader
			switch file {
			case "-":

View file

@@ -1,10 +1,19 @@
# Compression Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io)
-MinIO server allows streaming compression to ensure efficient disk space usage. Compression happens inflight, i.e objects are compressed before being written to disk(s). MinIO uses [`klauspost/compress/s2`](https://github.com/klauspost/compress/tree/master/s2) streaming compression due to its stability and performance.
MinIO server allows streaming compression to ensure efficient disk space usage.
Compression happens in-flight, i.e. objects are compressed before being written to disk(s).
MinIO uses [`klauspost/compress/s2`](https://github.com/klauspost/compress/tree/master/s2)
streaming compression due to its stability and performance.
-This algorithm is specifically optimized for machine generated content. Write throughput is typically at least 300MB/s per CPU core. Decompression speed is typically at least 1GB/s.
-This means that in cases where raw IO is below these numbers compression will not only reduce disk usage but also help increase system throughput.
-Typically enabling compression on spinning disk systems will increase speed when the content can be compressed.
This algorithm is specifically optimized for machine-generated content.
Write throughput is typically at least 500MB/s per CPU core,
and scales with the number of available CPU cores.
Decompression speed is typically at least 1GB/s.
This means that in cases where raw IO is below these numbers,
compression will not only reduce disk usage but also help increase system throughput.
Typically, enabling compression on spinning disk systems
will increase speed when the content can be compressed.
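For a feel of the data path, here is a minimal, self-contained Go sketch (an editorial example, not MinIO server code) that round-trips a machine-generated payload through the same `s2` package:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/klauspost/compress/s2"
)

func main() {
	// Machine-generated content is highly repetitive and compresses well.
	input := strings.Repeat("level=info msg=\"request served\" status=200\n", 1024)

	// Compress the stream; s2.NewWriter compresses as data is written.
	var buf bytes.Buffer
	enc := s2.NewWriter(&buf)
	if _, err := io.Copy(enc, strings.NewReader(input)); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil { // flush any buffered blocks
		panic(err)
	}
	fmt.Printf("raw: %d bytes, compressed: %d bytes\n", len(input), buf.Len())

	// Decompress and verify the round trip.
	out, err := io.ReadAll(s2.NewReader(&buf))
	if err != nil {
		panic(err)
	}
	fmt.Println("round-trip ok:", bytes.Equal(out, []byte(input)))
}
```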
## Get Started
@@ -14,40 +23,71 @@ Install MinIO - [MinIO Quickstart Guide](https://docs.min.io/docs/minio-quicksta
### 2. Run MinIO with compression
-Compression can be enabled by updating the `compress` config settings for MinIO server config. Config `compress` settings take extensions and mime-types to be compressed.
Compression can be enabled by updating the `compress` config settings for MinIO server config.
Config `compress` settings take extensions and mime-types to be compressed.
```bash
~ mc admin config get myminio compression
compression extensions=".txt,.log,.csv,.json,.tar,.xml,.bin" mime_types="text/*,application/json,application/xml"
```
Default config includes most common highly compressible content extensions and mime-types.
```bash
~ mc admin config set myminio compression extensions=".pdf" mime_types="application/pdf"
```
To show help on setting compression config values.
```bash
~ mc admin config set myminio compression
```
-To enable compression for all content, with default extensions and mime-types.
-```
-~ mc admin config set myminio compression enable="on"
-```
To enable compression for all content, no matter the extension and content type
(except for the default excluded types), set BOTH extensions and mime types to empty.
```bash
~ mc admin config set myminio compression enable="on" extensions="" mime_types=""
```
-The compression settings may also be set through environment variables. When set, environment variables override the defined `compress` config settings in the server config.
The compression settings may also be set through environment variables.
When set, environment variables override the defined `compress` config settings in the server config.
```bash
export MINIO_COMPRESS="on"
-export MINIO_COMPRESS_EXTENSIONS=".pdf,.doc"
export MINIO_COMPRESS_EXTENSIONS=".txt,.log,.csv,.json,.tar,.xml,.bin"
-export MINIO_COMPRESS_MIME_TYPES="application/pdf"
export MINIO_COMPRESS_MIME_TYPES="text/*,application/json,application/xml"
```
-### 3. Note
### 3. Compression + Encryption
Combining encryption and compression is not safe in all setups.
This is particularly so if the compression ratio of your content reveals information about it.
See [CRIME TLS](https://en.wikipedia.org/wiki/CRIME) as an example of this.
Therefore, compression is disabled when encrypting by default, and must be enabled separately.
Consult our security experts on [SUBNET](https://min.io/pricing) to help you evaluate if
your setup can use this feature combination safely.
To enable compression+encryption use:
```bash
~ mc admin config set myminio compression allow_encryption=on
```
Or alternatively through the environment variable `MINIO_COMPRESS_ALLOW_ENCRYPTION=on`.
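To see why the compression ratio can act as a side channel, consider this small editorial sketch (payloads are illustrative, not MinIO code): two plaintexts of identical length compress to very different sizes, so ciphertext lengths alone can distinguish them even under strong encryption.

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	// Two payloads of identical length: one redundant, one random.
	redundant := bytes.Repeat([]byte("secret-token=abc123;"), 256)
	random := make([]byte, len(redundant))
	if _, err := rand.Read(random); err != nil {
		panic(err)
	}

	// Encryption preserves length, so an observer of ciphertext sizes
	// can still tell these apart once compression is applied first.
	fmt.Println("redundant compresses to:", len(s2.Encode(nil, redundant)), "bytes")
	fmt.Println("random compresses to:   ", len(s2.Encode(nil, random)), "bytes")
}
```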
### 4. Excluded Types
- Already compressed objects are not fit for compression since they do not have compressible patterns.
Such objects do not produce efficient [`LZ compression`](https://en.wikipedia.org/wiki/LZ77_and_LZ78),
which is a fitness factor for lossless data compression.
Pre-compressed input typically compresses in excess of 2GiB/s per core,
so the performance impact should be minimal even if pre-compressed data is re-compressed.
Decompressing incompressible data has no significant performance impact.
Below is a list of common files and content-types which are typically not suitable for compression.
- Extensions
@@ -72,15 +112,17 @@ export MINIO_COMPRESS_MIME_TYPES="application/pdf"
| `application/x-compress` |
| `application/x-xz` |
All files with these extensions and mime types are excluded from compression,
even if compression is enabled for all types.
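For intuition, a hypothetical helper like the one below captures that exclusion rule; the function name and the (abbreviated) lists are illustrative, not MinIO's actual implementation.

```go
package main

import (
	"fmt"
	"strings"
)

// isExcludedFromCompression sketches the rule above: objects matching an
// excluded extension or mime-type are never compressed, even when
// compression is enabled for all types. Lists here are abbreviated.
func isExcludedFromCompression(objectName, contentType string) bool {
	excludedExtensions := []string{".gz", ".zip", ".xz"}
	excludedMimeTypes := []string{"application/x-gzip", "application/zip", "application/x-compress", "application/x-xz"}
	name := strings.ToLower(objectName)
	for _, ext := range excludedExtensions {
		if strings.HasSuffix(name, ext) {
			return true
		}
	}
	ct := strings.ToLower(contentType)
	for _, mt := range excludedMimeTypes {
		if strings.HasPrefix(ct, mt) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isExcludedFromCompression("backup.tar.gz", "application/x-gzip")) // true
	fmt.Println(isExcludedFromCompression("server.log", "text/plain"))            // false
}
```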
-- MinIO does not support encryption with compression because compression and encryption together potentially enables room for side channel attacks like [`CRIME and BREACH`](https://blog.minio.io/c-e-compression-encryption-cb6b7f04a369)
### 5. Notes
- MinIO does not support compression for Gateway (Azure/GCS/NAS) implementations.
## To test the setup
To test this setup, practice put calls to the server using `mc` and use `mc ls` on
the data directory to view the size of the object.
## Explore Further