Add double encryption at S3 gateway. (#6423)

This PR adds pass-through, single encryption at gateway and double
encryption support (gateway encryption with pass through of SSE
headers to backend).

If KMS is set up (either with Vault as KMS or using
MINIO_SSE_MASTER_KEY), the gateway will automatically perform
single encryption. If MINIO_GATEWAY_SSE is set up in addition to
Vault KMS, double encryption is performed. When neither KMS nor
MINIO_GATEWAY_SSE is set, do a pass through to backend.

When double encryption is specified, MINIO_GATEWAY_SSE can be set to
"C" for SSE-C encryption at gateway and backend, "S3" for SSE-S3
encryption at gateway/backend or both to support more than one option.

Fixes #6323, #6696
This commit is contained in:
poornas 2019-01-05 14:16:43 -08:00 committed by kannappanr
parent 2d19011a1d
commit 5a80cbec2a
59 changed files with 1902 additions and 196 deletions

View file

@ -133,7 +133,7 @@ func (ahs *allHealState) periodicHealSeqsClean() {
}
}
ahs.Unlock()
case <-globalServiceDoneCh:
case <-GlobalServiceDoneCh:
// server could be restarting - need
// to exit immediately
return

View file

@ -26,14 +26,19 @@ import (
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
// Returns true if handlers should interpret encryption.
EncryptionEnabled func() bool
}
// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router) {
func registerAPIRouter(router *mux.Router, encryptionEnabled bool) {
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCacheObjectsFn,
EncryptionEnabled: func() bool {
return encryptionEnabled
},
}
// API Router

View file

@ -486,7 +486,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
if !objectAPI.IsEncryptionSupported() && hasServerSideEncryptionHeader(r.Header) {
if !api.EncryptionEnabled() && hasServerSideEncryptionHeader(r.Header) {
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
@ -627,9 +627,17 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
pReader := NewPutObjReader(rawReader, nil, nil)
var objectEncryptionKey []byte
// This request header needs to be set prior to setting ObjectOptions
if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
// get gateway encryption options
var opts ObjectOptions
opts, err = putEncryptionOpts(ctx, r, bucket, object, nil)
if err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
return
}
if objectAPI.IsEncryptionSupported() {
if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C and SSE-S3 requests
var reader io.Reader
@ -656,7 +664,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
}
objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, metadata, ObjectOptions{})
objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, metadata, opts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return

View file

@ -40,6 +40,14 @@ func RemoveSensitiveEntries(metadata map[string]string) { // The functions is te
delete(metadata, SSECopyKey)
}
// RemoveSSEHeaders removes all crypto-specific SSE
// header entries from the metadata map.
func RemoveSSEHeaders(metadata map[string]string) {
	for _, header := range []string{SSEHeader, SSECKeyMD5, SSECAlgorithm} {
		delete(metadata, header)
	}
}
// RemoveInternalEntries removes all crypto-specific internal
// metadata entries from the metadata map.
func RemoveInternalEntries(metadata map[string]string) {

View file

@ -91,7 +91,7 @@ func newCacheFSObjects(dir string, expiry int, maxDiskUsagePct int) (*cacheFSObj
appendFileMap: make(map[string]*fsAppendFile),
}
go fsObjects.cleanupStaleMultipartUploads(context.Background(), globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
go fsObjects.cleanupStaleMultipartUploads(context.Background(), GlobalMultipartCleanupInterval, GlobalMultipartExpiry, GlobalServiceDoneCh)
cacheFS := cacheFSObjects{
FSObjects: fsObjects,
@ -158,7 +158,7 @@ func (cfs *cacheFSObjects) purgeTrash() {
for {
select {
case <-globalServiceDoneCh:
case <-GlobalServiceDoneCh:
return
case <-ticker.C:
trashPath := path.Join(cfs.fsPath, minioMetaBucket, cacheTrashDir)

View file

@ -134,7 +134,7 @@ func TestCacheExclusion(t *testing.T) {
t.Fatal(err)
}
cobj := cobjects.(*cacheObjects)
globalServiceDoneCh <- struct{}{}
GlobalServiceDoneCh <- struct{}{}
testCases := []struct {
bucketName string
objectName string

View file

@ -99,7 +99,7 @@ func (api *DummyObjectLayer) PutObjectPart(ctx context.Context, bucket, object,
return
}
func (api *DummyObjectLayer) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
func (api *DummyObjectLayer) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) {
return
}

View file

@ -29,6 +29,7 @@ import (
"path"
"strconv"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/ioutil"
@ -55,11 +56,11 @@ const (
// SSEIVSize is the size of the IV data
SSEIVSize = 32 // 32 bytes
// SSE dare package block size.
sseDAREPackageBlockSize = 64 * 1024 // 64KiB bytes
// SSEDAREPackageBlockSize - SSE dare package block size.
SSEDAREPackageBlockSize = 64 * 1024 // 64KiB bytes
// SSE dare package meta padding bytes.
sseDAREPackageMetaSize = 32 // 32 bytes
// SSEDAREPackageMetaSize - SSE dare package meta padding bytes.
SSEDAREPackageMetaSize = 32 // 32 bytes
)
@ -158,14 +159,14 @@ func rotateKey(oldKey []byte, newKey []byte, bucket, object string, metadata map
crypto.SSEC.CreateMetadata(metadata, sealedKey)
return nil
case crypto.S3.IsEncrypted(metadata):
if globalKMS == nil {
if GlobalKMS == nil {
return errKMSNotConfigured
}
keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(metadata)
if err != nil {
return err
}
oldKey, err := globalKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
oldKey, err := GlobalKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return err
}
@ -174,7 +175,7 @@ func rotateKey(oldKey []byte, newKey []byte, bucket, object string, metadata map
return err
}
newKey, encKey, err := globalKMS.GenerateKey(globalKMSKeyID, crypto.Context{bucket: path.Join(bucket, object)})
newKey, encKey, err := GlobalKMS.GenerateKey(globalKMSKeyID, crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return err
}
@ -187,10 +188,10 @@ func rotateKey(oldKey []byte, newKey []byte, bucket, object string, metadata map
func newEncryptMetadata(key []byte, bucket, object string, metadata map[string]string, sseS3 bool) ([]byte, error) {
var sealedKey crypto.SealedKey
if sseS3 {
if globalKMS == nil {
if GlobalKMS == nil {
return nil, errKMSNotConfigured
}
key, encKey, err := globalKMS.GenerateKey(globalKMSKeyID, crypto.Context{bucket: path.Join(bucket, object)})
key, encKey, err := GlobalKMS.GenerateKey(globalKMSKeyID, crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return nil, err
}
@ -279,7 +280,7 @@ func decryptObjectInfo(key []byte, bucket, object string, metadata map[string]st
default:
return nil, errObjectTampered
case crypto.S3.IsEncrypted(metadata):
if globalKMS == nil {
if GlobalKMS == nil {
return nil, errKMSNotConfigured
}
keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(metadata)
@ -287,7 +288,7 @@ func decryptObjectInfo(key []byte, bucket, object string, metadata map[string]st
if err != nil {
return nil, err
}
extKey, err := globalKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
extKey, err := GlobalKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return nil, err
}
@ -395,7 +396,6 @@ func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, offset,
io.Reader, error) {
bucket, object := oi.Bucket, oi.Name
// Single part case
if !isEncryptedMultipart(oi) {
var reader io.Reader
@ -411,8 +411,8 @@ func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, offset,
return reader, nil
}
partDecRelOffset := int64(seqNumber) * sseDAREPackageBlockSize
partEncRelOffset := int64(seqNumber) * (sseDAREPackageBlockSize + sseDAREPackageMetaSize)
partDecRelOffset := int64(seqNumber) * SSEDAREPackageBlockSize
partEncRelOffset := int64(seqNumber) * (SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)
w := &DecryptBlocksReader{
reader: inputReader,
@ -477,7 +477,7 @@ type DecryptBlocksReader struct {
// Current part index
partIndex int
// Parts information
parts []objectPartInfo
parts []ObjectPartInfo
header http.Header
bucket, object string
metadata map[string]string
@ -592,7 +592,7 @@ type DecryptBlocksWriter struct {
// Current part index
partIndex int
// Parts information
parts []objectPartInfo
parts []ObjectPartInfo
req *http.Request
bucket, object string
metadata map[string]string
@ -743,6 +743,7 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, bucket, object stri
}
seqNumber, encStartOffset, encLength = getEncryptedMultipartsOffsetLength(startOffset, length, objInfo)
var partStartIndex int
var partStartOffset = startOffset
// Skip parts until final offset maps to a particular part offset.
@ -765,8 +766,8 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, bucket, object stri
partStartOffset -= int64(decryptedSize)
}
startSeqNum := partStartOffset / sseDAREPackageBlockSize
partEncRelOffset := int64(startSeqNum) * (sseDAREPackageBlockSize + sseDAREPackageMetaSize)
startSeqNum := partStartOffset / SSEDAREPackageBlockSize
partEncRelOffset := int64(startSeqNum) * (SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)
w := &DecryptBlocksWriter{
writer: client,
@ -857,9 +858,9 @@ func getEncryptedMultipartsOffsetLength(offset, length int64, obj ObjectInfo) (u
// getEncryptedSinglePartOffsetLength - fetch sequence number, encrypted start offset and encrypted length.
func getEncryptedSinglePartOffsetLength(offset, length int64, objInfo ObjectInfo) (seqNumber uint32, encOffset int64, encLength int64) {
onePkgSize := int64(sseDAREPackageBlockSize + sseDAREPackageMetaSize)
onePkgSize := int64(SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)
seqNumber = uint32(offset / sseDAREPackageBlockSize)
seqNumber = uint32(offset / SSEDAREPackageBlockSize)
encOffset = int64(seqNumber) * onePkgSize
// The math to compute the encrypted length is always
// originalLength i.e (offset+length-1) to be divided under
@ -867,10 +868,10 @@ func getEncryptedSinglePartOffsetLength(offset, length int64, objInfo ObjectInfo
// block. This is then multiplied by final package size which
// is basically 64KiB + 32. Finally negate the encrypted offset
// to get the final encrypted length on disk.
encLength = ((offset+length)/sseDAREPackageBlockSize)*onePkgSize - encOffset
encLength = ((offset+length)/SSEDAREPackageBlockSize)*onePkgSize - encOffset
// Check for the remainder, to figure if we need one extract package to read from.
if (offset+length)%sseDAREPackageBlockSize > 0 {
if (offset+length)%SSEDAREPackageBlockSize > 0 {
encLength += onePkgSize
}
@ -1041,11 +1042,11 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
// partStart is always found in the loop above,
// because off is validated.
sseDAREEncPackageBlockSize := int64(sseDAREPackageBlockSize + sseDAREPackageMetaSize)
startPkgNum := (off - cumulativeSum) / sseDAREPackageBlockSize
sseDAREEncPackageBlockSize := int64(SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)
startPkgNum := (off - cumulativeSum) / SSEDAREPackageBlockSize
// Now we can calculate the number of bytes to skip
skipLen = (off - cumulativeSum) % sseDAREPackageBlockSize
skipLen = (off - cumulativeSum) % SSEDAREPackageBlockSize
encOff = encCumulativeSum + startPkgNum*sseDAREEncPackageBlockSize
// Locate the part containing the end of the required range
@ -1062,7 +1063,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
}
// partEnd is always found in the loop above, because off and
// length are validated.
endPkgNum := (endOffset - cumulativeSum) / sseDAREPackageBlockSize
endPkgNum := (endOffset - cumulativeSum) / SSEDAREPackageBlockSize
// Compute endEncOffset with one additional DARE package (so
// we read the package containing the last desired byte).
endEncOffset := encCumulativeSum + (endPkgNum+1)*sseDAREEncPackageBlockSize
@ -1157,3 +1158,113 @@ func DecryptObjectInfo(info *ObjectInfo, headers http.Header) (encrypted bool, e
}
return
}
// deriveClientKey derives a fresh client key from the customer-provided key.
// In s3 gateway double-encryption mode the customer key from the header is
// used by the gateway itself; the derived key (HMAC-SHA256 over the SSE-C
// domain string and the bucket/object path) is what gets sent to the s3
// backend for encryption at the backend.
func deriveClientKey(clientKey [32]byte, bucket, object string) [32]byte {
	mac := hmac.New(sha256.New, clientKey[:])
	mac.Write([]byte(crypto.SSEC.String()))
	mac.Write([]byte(path.Join(bucket, object)))
	var derived [32]byte
	mac.Sum(derived[:0])
	return derived
}
// extractEncryptionOption builds the ObjectOptions describing the encryption
// headers that should be passed through to the backend in gateway mode.
// When copySource is true only SSE-C copy-source headers are considered.
func extractEncryptionOption(header http.Header, copySource bool, metadata map[string]string) (opts ObjectOptions, err error) {
	if copySource {
		if !crypto.SSECopy.IsRequested(header) {
			return
		}
		var clientKey [32]byte
		if clientKey, err = crypto.SSECopy.ParseHTTP(header); err != nil {
			return
		}
		var sse encrypt.ServerSide
		if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil {
			return
		}
		return ObjectOptions{ServerSideEncryption: encrypt.SSECopy(sse)}, nil
	}
	switch {
	case crypto.SSEC.IsRequested(header):
		var clientKey [32]byte
		if clientKey, err = crypto.SSEC.ParseHTTP(header); err != nil {
			return
		}
		var sse encrypt.ServerSide
		if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil {
			return
		}
		return ObjectOptions{ServerSideEncryption: sse}, nil
	case crypto.S3.IsRequested(header), metadata != nil && crypto.S3.IsEncrypted(metadata):
		return ObjectOptions{ServerSideEncryption: encrypt.NewSSE()}, nil
	}
	return opts, nil
}
// getEncryptionOpts returns the ObjectOptions for GET calls derived from the
// encryption headers of the request. With gateway SSE-C enabled, the backend
// key is derived from the client key; otherwise headers pass through as-is.
func getEncryptionOpts(ctx context.Context, r *http.Request, bucket, object string) (ObjectOptions, error) {
	if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) {
		clientKey, err := crypto.SSEC.ParseHTTP(r.Header)
		if err != nil {
			return ObjectOptions{}, err
		}
		backendKey := deriveClientKey(clientKey, bucket, object)
		sse, err := encrypt.NewSSEC(backendKey[:])
		// a derived 32-byte key can never be rejected here
		logger.CriticalIf(ctx, err)
		return ObjectOptions{ServerSideEncryption: sse}, nil
	}
	// default case of passing encryption headers to backend
	return extractEncryptionOption(r.Header, false, nil)
}
// putEncryptionOpts returns the ObjectOptions for PUT calls derived from the
// encryption headers (and, for multipart parts, the stored metadata).
func putEncryptionOpts(ctx context.Context, r *http.Request, bucket, object string, metadata map[string]string) (opts ObjectOptions, err error) {
	switch {
	// In the multipart custom format the metadata must be checked in addition
	// to the header to see if the object is SSE-S3 encrypted, primarily because
	// the S3 protocol does not require SSE-S3 headers in PutObjectPart calls.
	case GlobalGatewaySSE.SSES3() && (crypto.S3.IsRequested(r.Header) || crypto.S3.IsEncrypted(metadata)):
		return ObjectOptions{ServerSideEncryption: encrypt.NewSSE()}, nil
	case GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header):
		return getEncryptionOpts(ctx, r, bucket, object)
	}
	// default case of passing encryption headers to backend
	return extractEncryptionOption(r.Header, false, metadata)
}
// copyDstEncryptionOpts returns the ObjectOptions for the target side of a
// Copy call; destination-side encryption semantics are identical to a PUT.
func copyDstEncryptionOpts(ctx context.Context, r *http.Request, bucket, object string, metadata map[string]string) (opts ObjectOptions, err error) {
	opts, err = putEncryptionOpts(ctx, r, bucket, object, metadata)
	return
}
// copySrcEncryptionOpts returns the ObjectOptions for the source side of a
// Copy call derived from the SSE-C copy-source headers. With gateway SSE-C
// enabled, the backend key is derived from the client key.
func copySrcEncryptionOpts(ctx context.Context, r *http.Request, bucket, object string) (ObjectOptions, error) {
	if GlobalGatewaySSE.SSEC() && crypto.SSECopy.IsRequested(r.Header) {
		clientKey, err := crypto.SSECopy.ParseHTTP(r.Header)
		if err != nil {
			return ObjectOptions{}, err
		}
		backendKey := deriveClientKey(clientKey, bucket, object)
		sse, err := encrypt.NewSSEC(backendKey[:])
		if err != nil {
			return ObjectOptions{}, err
		}
		return ObjectOptions{ServerSideEncryption: encrypt.SSECopy(sse)}, nil
	}
	// default case of passing encryption headers to backend
	return extractEncryptionOption(r.Header, true, nil)
}

View file

@ -18,10 +18,12 @@ package cmd
import (
"bytes"
"encoding/base64"
"net/http"
"testing"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/sio"
)
@ -355,7 +357,7 @@ func TestGetDecryptedRange_Issue50(t *testing.T) {
"content-type": "application/octet-stream",
"etag": "166b1545b4c1535294ee0686678bea8c-2",
},
Parts: []objectPartInfo{
Parts: []ObjectPartInfo{
{
Number: 1,
Name: "part.1",
@ -503,7 +505,7 @@ func TestGetDecryptedRange(t *testing.T) {
var (
// make a multipart object-info given part sizes
mkMPObj = func(sizes []int64) ObjectInfo {
r := make([]objectPartInfo, len(sizes))
r := make([]ObjectPartInfo, len(sizes))
sum := int64(0)
for i, s := range sizes {
r[i].Number = i
@ -675,3 +677,84 @@ func TestGetDecryptedRange(t *testing.T) {
}
}
// extractEncryptionOptionTests enumerates header/metadata inputs together with
// the SSE encryption type extractEncryptionOption is expected to produce.
// The trailing // N comment on each case is its index, echoed in test failures.
var extractEncryptionOptionTests = []struct {
	headers        http.Header
	copySource     bool
	metadata       map[string]string
	encryptionType encrypt.Type
	err            error
}{
	// SSE-C headers on a non-copy request -> SSE-C options.
	{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
		crypto.SSECKey:    []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
		crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
		copySource:     false,
		metadata:       nil,
		encryptionType: encrypt.SSEC,
		err:            nil}, // 0
	// SSE-C headers are ignored on the copy-source path (no SSE-copy headers present).
	{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
		crypto.SSECKey:    []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
		crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
		copySource:     true,
		metadata:       nil,
		encryptionType: "",
		err:            nil}, // 1
	// Malformed (too short) SSE-C key must be rejected.
	{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
		crypto.SSECKey:    []string{"Mz"},
		crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
		copySource:     false,
		metadata:       nil,
		encryptionType: "",
		err:            crypto.ErrInvalidCustomerKey}, // 2
	// SSE-S3 header alone yields SSE-S3 options.
	{headers: http.Header{crypto.SSEHeader: []string{"AES256"}},
		copySource:     false,
		metadata:       nil,
		encryptionType: encrypt.S3,
		err:            nil}, // 3
	// SSE-S3 detected from stored metadata (no headers) on a non-copy request.
	{headers: http.Header{},
		copySource: false,
		metadata: map[string]string{crypto.S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)),
			crypto.S3KMSKeyID:     "kms-key",
			crypto.S3KMSSealedKey: "m-key"},
		encryptionType: encrypt.S3,
		err:            nil}, // 4
	// Metadata is not consulted on the copy-source path.
	{headers: http.Header{},
		copySource: true,
		metadata: map[string]string{crypto.S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)),
			crypto.S3KMSKeyID:     "kms-key",
			crypto.S3KMSSealedKey: "m-key"},
		encryptionType: "",
		err:            nil}, // 5
	// SSE-C copy-source headers on the copy-source path -> SSE-C options.
	{headers: http.Header{crypto.SSECopyAlgorithm: []string{"AES256"},
		crypto.SSECopyKey:    []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
		crypto.SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
		copySource:     true,
		metadata:       nil,
		encryptionType: encrypt.SSEC,
		err:            nil}, // 6
	// SSE-C copy-source headers are ignored on a non-copy request.
	{headers: http.Header{crypto.SSECopyAlgorithm: []string{"AES256"},
		crypto.SSECopyKey:    []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
		crypto.SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
		copySource:     false,
		metadata:       nil,
		encryptionType: "",
		err:            nil}, // 7
}
// TestExtractEncryptionOptions verifies that extractEncryptionOption maps each
// header/metadata combination to the expected backend SSE configuration.
func TestExtractEncryptionOptions(t *testing.T) {
	for i, test := range extractEncryptionOptionTests {
		opts, err := extractEncryptionOption(test.headers, test.copySource, test.metadata)
		if test.err != err {
			t.Errorf("Case %d: expected err: %v , actual err: %v", i, test.err, err)
		}
		if err != nil {
			continue
		}
		sse := opts.ServerSideEncryption
		if sse == nil && test.encryptionType != "" {
			t.Errorf("Case %d: expected opts to be of %v encryption type", i, test.encryptionType)
		}
		if sse != nil && test.encryptionType != sse.Type() {
			t.Errorf("Case %d: expected opts to have encryption type %v but was %v ", i, test.encryptionType, sse.Type())
		}
	}
}

View file

@ -136,13 +136,13 @@ func (env environment) LookupKMSConfig(config crypto.KMSConfig) (err error) {
if !config.Vault.IsEmpty() { // Vault and KMS master key provided
return errors.New("Ambiguous KMS configuration: vault configuration and a master key are provided at the same time")
}
globalKMSKeyID, globalKMS, err = parseKMSMasterKey(masterKey)
globalKMSKeyID, GlobalKMS, err = parseKMSMasterKey(masterKey)
if err != nil {
return err
}
}
if !config.Vault.IsEmpty() {
globalKMS, err = crypto.NewVault(config.Vault)
GlobalKMS, err = crypto.NewVault(config.Vault)
if err != nil {
return err
}
@ -154,7 +154,7 @@ func (env environment) LookupKMSConfig(config crypto.KMSConfig) (err error) {
return err
}
globalAutoEncryption = bool(autoEncryption)
if globalAutoEncryption && globalKMS == nil { // auto-encryption enabled but no KMS
if globalAutoEncryption && GlobalKMS == nil { // auto-encryption enabled but no KMS
return errors.New("Invalid KMS configuration: auto-encryption is enabled but no valid KMS configuration is present")
}
return nil

View file

@ -113,7 +113,7 @@ type fsMetaV1 struct {
// Metadata map for current object.
Meta map[string]string `json:"meta,omitempty"`
// parts info for current object - used in encryption.
Parts []objectPartInfo `json:"parts,omitempty"`
Parts []ObjectPartInfo `json:"parts,omitempty"`
}
// IsValid - tells if the format is sane by validating the version
@ -207,9 +207,9 @@ func parseFSMetaMap(fsMetaBuf []byte) map[string]string {
return metaMap
}
func parseFSPartsArray(fsMetaBuf []byte) []objectPartInfo {
func parseFSPartsArray(fsMetaBuf []byte) []ObjectPartInfo {
// Get xlMetaV1.Parts array
var partsArray []objectPartInfo
var partsArray []ObjectPartInfo
partsArrayResult := gjson.GetBytes(fsMetaBuf, "parts")
partsArrayResult.ForEach(func(key, part gjson.Result) bool {
@ -219,7 +219,7 @@ func parseFSPartsArray(fsMetaBuf []byte) []objectPartInfo {
etag := gjson.Get(partJSON, "etag").String()
size := gjson.Get(partJSON, "size").Int()
actualSize := gjson.Get(partJSON, "actualSize").Int()
partsArray = append(partsArray, objectPartInfo{
partsArray = append(partsArray, ObjectPartInfo{
Number: int(number),
Name: name,
ETag: etag,

View file

@ -353,7 +353,7 @@ func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is unmarshalled directly into XML and
// replied back to the client.
func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) {
func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) {
if err := checkListPartsArgs(ctx, bucket, object, fs); err != nil {
return result, toObjectErr(err)
}
@ -515,7 +515,7 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
fsMeta := fsMetaV1{}
// Allocate parts similar to incoming slice.
fsMeta.Parts = make([]objectPartInfo, len(parts))
fsMeta.Parts = make([]ObjectPartInfo, len(parts))
entries, err := readDir(uploadIDDir)
if err != nil {
@ -560,7 +560,7 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
partSize = actualSize
}
fsMeta.Parts[i] = objectPartInfo{
fsMeta.Parts[i] = ObjectPartInfo{
Number: part.PartNumber,
ETag: part.ETag,
Size: fi.Size(),

View file

@ -36,7 +36,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
// Close the go-routine, we are going to
// manually start it and test in this test case.
globalServiceDoneCh <- struct{}{}
GlobalServiceDoneCh <- struct{}{}
bucketName := "bucket"
objectName := "object"
@ -47,14 +47,14 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
t.Fatal("Unexpected err: ", err)
}
go fs.cleanupStaleMultipartUploads(context.Background(), 20*time.Millisecond, 0, globalServiceDoneCh)
go fs.cleanupStaleMultipartUploads(context.Background(), 20*time.Millisecond, 0, GlobalServiceDoneCh)
// Wait for 40ms such that - we have given enough time for
// cleanup routine to kick in.
time.Sleep(40 * time.Millisecond)
// Close the routine we do not need it anymore.
globalServiceDoneCh <- struct{}{}
GlobalServiceDoneCh <- struct{}{}
// Check if upload id was already purged.
if err = obj.AbortMultipartUpload(context.Background(), bucketName, objectName, uploadID); err != nil {

View file

@ -152,10 +152,10 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
fs.fsFormatRlk = rlk
if !fs.diskMount {
go fs.diskUsage(globalServiceDoneCh)
go fs.diskUsage(GlobalServiceDoneCh)
}
go fs.cleanupStaleMultipartUploads(ctx, globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
go fs.cleanupStaleMultipartUploads(ctx, GlobalMultipartCleanupInterval, GlobalMultipartExpiry, GlobalServiceDoneCh)
// Return successfully initialized object layer.
return fs, nil
@ -1317,7 +1317,7 @@ func (fs *FSObjects) IsListenBucketSupported() bool {
return true
}
// IsEncryptionSupported returns whether server side encryption is applicable for this layer.
// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (fs *FSObjects) IsEncryptionSupported() bool {
return true
}

View file

@ -354,7 +354,7 @@ func TestFSListBuckets(t *testing.T) {
t.Fatal("Unexpected error: ", err)
}
globalServiceDoneCh <- struct{}{}
GlobalServiceDoneCh <- struct{}{}
// Create a bucket with invalid name
if err := os.MkdirAll(pathJoin(fs.fsPath, "vo^"), 0777); err != nil {

View file

@ -18,7 +18,10 @@ package cmd
import (
"net/http"
"os"
"strings"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio-go"
@ -30,8 +33,27 @@ var (
// MustGetUUID function alias.
MustGetUUID = mustGetUUID
// IsMinAllowedPartSize function alias.
IsMinAllowedPartSize = isMinAllowedPartSize
// GetCompleteMultipartMD5 functon alias.
GetCompleteMultipartMD5 = getCompleteMultipartMD5
// Contains function alias.
Contains = contains
// ExtractETag provides extractETag function alias.
ExtractETag = extractETag
// CleanMetadataKeys provides cleanMetadataKeys function alias.
CleanMetadataKeys = cleanMetadataKeys
)
// StatInfo - alias for statInfo
type StatInfo struct {
statInfo
}
// AnonErrToObjectErr - converts standard http codes into meaningful object layer errors.
func AnonErrToObjectErr(statusCode int, params ...string) error {
bucket := ""
@ -321,3 +343,30 @@ func ErrorRespToObjectError(err error, params ...string) error {
return err
}
// parseGatewaySSE parses the MINIO_GATEWAY_SSE environment value: a
// ";"-separated, case-insensitive list of SSE modes ("C" and/or "S3").
func parseGatewaySSE(s string) (gatewaySSE, error) {
	modes := make([]string, 0)
	for _, tok := range strings.Split(s, ";") {
		switch mode := strings.ToUpper(tok); mode {
		case gatewaySSES3, gatewaySSEC:
			modes = append(modes, mode)
		default:
			return nil, uiErrInvalidGWSSEValue(nil).Msg("gateway SSE cannot be (%s) ", mode)
		}
	}
	return gatewaySSE(modes), nil
}
// handleGatewayEnvVars reads gateway-specific environment variables and
// updates the corresponding global state. An unparsable MINIO_GATEWAY_SSE
// value is fatal.
func handleGatewayEnvVars() {
	gwsseVal, ok := os.LookupEnv("MINIO_GATEWAY_SSE")
	if !ok {
		return
	}
	sse, err := parseGatewaySSE(gwsseVal)
	if err != nil {
		logger.Fatal(err, "Unable to parse MINIO_GATEWAY_SSE value (`%s`)", gwsseVal)
	}
	GlobalGatewaySSE = sse
}

View file

@ -0,0 +1,52 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"reflect"
"testing"
)
// Tests parsing of the MINIO_GATEWAY_SSE value into gateway SSE modes.
func TestParseGatewaySSE(t *testing.T) {
	testCases := []struct {
		gwSSEStr string
		expected gatewaySSE
		success  bool
	}{
		// valid input (values are case-insensitive)
		{"c;S3", []string{"C", "S3"}, true},
		{"S3", []string{"S3"}, true},
		// "," is not a valid separator; only ";" is accepted
		{"c,S3", []string{}, false},
		// "KMS" is not a recognized gateway SSE mode
		{"c;S3;KMS", []string{}, false},
		{"C;s3", []string{"C", "S3"}, true},
	}
	for i, testCase := range testCases {
		gwSSE, err := parseGatewaySSE(testCase.gwSSEStr)
		if err != nil && testCase.success {
			t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
		}
		if err == nil && !testCase.success {
			t.Errorf("Test %d: Expected failure but passed instead", i+1)
		}
		if err == nil {
			if !reflect.DeepEqual(gwSSE, testCase.expected) {
				t.Errorf("Test %d: Expected %v, got %v", i+1, testCase.expected, gwSSE)
			}
		}
	}
}

48
cmd/gateway-env.go Normal file
View file

@ -0,0 +1,48 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
// gatewaySSE is the list of SSE modes enabled for the gateway,
// as parsed from the MINIO_GATEWAY_SSE environment variable.
type gatewaySSE []string

const (
	// gatewaySSES3 is set when SSE-S3 encryption is needed on both gateway and backend.
	gatewaySSES3 = "S3"
	// gatewaySSEC is set when SSE-C encryption is needed on both gateway and backend.
	gatewaySSEC = "C"
)

// SSES3 reports whether SSE-S3 gateway encryption is enabled.
func (sse gatewaySSE) SSES3() bool {
	return sse.contains(gatewaySSES3)
}

// SSEC reports whether SSE-C gateway encryption is enabled.
func (sse gatewaySSE) SSEC() bool {
	return sse.contains(gatewaySSEC)
}

// IsSet reports whether any gateway SSE mode is enabled.
func (sse gatewaySSE) IsSet() bool {
	return sse.SSES3() || sse.SSEC()
}

// contains reports whether mode is present in the configured SSE modes.
func (sse gatewaySSE) contains(mode string) bool {
	for _, v := range sse {
		if v == mode {
			return true
		}
	}
	return false
}

View file

@ -18,7 +18,6 @@ package cmd
import (
"context"
"errors"
"fmt"
"net/url"
"os"
@ -136,6 +135,9 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Handle common env vars.
handleCommonEnvVars()
// Handle gateway specific env
handleGatewayEnvVars()
// Validate if we have access, secret set through environment.
if !globalIsEnvCreds {
logger.Fatal(uiErrEnvCredentialsMissingGateway(nil), "Unable to start gateway")
@ -168,8 +170,11 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
logger.FatalIf(registerWebRouter(router), "Unable to configure web browser")
}
// Currently only NAS and S3 gateway support encryption headers.
encryptionEnabled := gatewayName == "s3" || gatewayName == "nas"
// Add API router.
registerAPIRouter(router)
registerAPIRouter(router, encryptionEnabled)
// Dummy endpoint representing gateway instance.
globalEndpoints = []Endpoint{{
@ -226,6 +231,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Load globalServerConfig from etcd
_ = globalConfigSys.Init(newObject)
}
// Load logger subsystem
loadLoggers()
@ -262,8 +268,18 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
_ = globalNotificationSys.Init(newObject)
}
if globalAutoEncryption && !newObject.IsEncryptionSupported() {
logger.Fatal(errors.New("Invalid KMS configuration"), "auto-encryption is enabled but gateway does not support encryption")
// Encryption support checks in gateway mode.
{
if (globalAutoEncryption || GlobalKMS != nil) && !newObject.IsEncryptionSupported() {
logger.Fatal(errInvalidArgument,
"Encryption support is requested but (%s) gateway does not support encryption", gw.Name())
}
if GlobalGatewaySSE.IsSet() && GlobalKMS == nil {
logger.Fatal(uiErrInvalidGWSSEEnvValue(nil).Msg("MINIO_GATEWAY_SSE set but KMS is not configured"),
"Unable to start gateway with SSE")
}
}
// Once endpoints are finalized, initialize the new object api.

View file

@ -49,7 +49,7 @@ func (a GatewayUnsupported) PutObjectPart(ctx context.Context, bucket string, ob
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (a GatewayUnsupported) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) {
func (a GatewayUnsupported) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (lpi ListPartsInfo, err error) {
logger.LogIf(ctx, NotImplemented{})
return lpi, NotImplemented{}
}
@ -137,7 +137,7 @@ func (a GatewayUnsupported) IsListenBucketSupported() bool {
return false
}
// IsEncryptionSupported returns whether server side encryption is applicable for this layer.
// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (a GatewayUnsupported) IsEncryptionSupported() bool {
return false
}

View file

@ -981,7 +981,7 @@ func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, upload
}
// ListObjectParts - Use Azure equivalent GetBlockList.
func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result minio.ListPartsInfo, err error) {
func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (result minio.ListPartsInfo, err error) {
if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return result, err
}

View file

@ -684,7 +684,7 @@ func (l *b2Objects) PutObjectPart(ctx context.Context, bucket string, object str
}
// ListObjectParts returns all object parts for specified object in specified bucket, uses B2's LargeFile upload API.
func (l *b2Objects) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi minio.ListPartsInfo, err error) {
func (l *b2Objects) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (lpi minio.ListPartsInfo, err error) {
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
return lpi, err

View file

@ -1121,7 +1121,7 @@ func gcsGetPartInfo(ctx context.Context, attrs *storage.ObjectAttrs) (minio.Part
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (l *gcsGateway) ListObjectParts(ctx context.Context, bucket string, key string, uploadID string, partNumberMarker int, maxParts int) (minio.ListPartsInfo, error) {
func (l *gcsGateway) ListObjectParts(ctx context.Context, bucket string, key string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (minio.ListPartsInfo, error) {
it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
Prefix: path.Join(gcsMinioMultipartPathV1, uploadID),
})

View file

@ -886,7 +886,7 @@ func (l *ossObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (l *ossObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi minio.ListPartsInfo, err error) {
func (l *ossObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts minio.ObjectOptions) (lpi minio.ListPartsInfo, err error) {
lupr, err := ossListObjectParts(l.Client, bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil {
logger.LogIf(ctx, err)

View file

@ -0,0 +1,238 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3
import (
"bytes"
"context"
"encoding/json"
"errors"
"time"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash"
"github.com/tidwall/gjson"
)
var (
errGWMetaNotFound = errors.New("dare.meta file not found")
errGWMetaInvalidFormat = errors.New("dare.meta format is invalid")
)
// A gwMetaV1 represents the gateway metadata header persisted alongside a
// gateway-encrypted object (stored on the backend as `dare.meta`; the
// comments' `gw.json` name refers to the same document).
type gwMetaV1 struct {
	Version string         `json:"version"` // Version of the metadata format (see gwMetaVersion).
	Format  string         `json:"format"`  // Format identifier (see gwMetaFormat).
	Stat    minio.StatInfo `json:"stat"`    // Size and modification time of the stored object.
	ETag    string         `json:"etag"`    // ETag of the current object
	// Metadata map for current object `gw.json`.
	Meta map[string]string `json:"meta,omitempty"`
	// Parts of a multipart object, recorded in part order.
	Parts []minio.ObjectPartInfo `json:"parts,omitempty"`
}
// Gateway metadata constants.
const (
	// Current gateway meta version.
	gwMetaVersion = "1.0.0"
	// Oldest still-readable gateway meta version (identical to the current
	// one today; kept separate so old objects stay readable after a bump).
	gwMetaVersion100 = "1.0.0"
	// Gateway meta format string.
	gwMetaFormat = "gw"
	// Add new constants here.
)
// newGWMetaV1 returns a gwMetaV1 pre-populated with the current
// version and format identifiers.
func newGWMetaV1() (gwMeta gwMetaV1) {
	return gwMetaV1{
		Version: gwMetaVersion,
		Format:  gwMetaFormat,
	}
}
// IsValid reports whether the metadata carries a known format string and a
// supported version.
func (m gwMetaV1) IsValid() bool {
	if m.Format != gwMetaFormat {
		return false
	}
	return m.Version == gwMetaVersion || m.Version == gwMetaVersion100
}
// ToObjectInfo converts the gateway metadata into a minio.ObjectInfo for the
// given bucket/object pair, canonicalizing the ETag and stripping internal
// header keys from the user-defined metadata.
func (m gwMetaV1) ToObjectInfo(bucket, object string) minio.ObjectInfo {
	// Standard headers that must not leak into UserDefined metadata.
	stripped := append([]string{
		"ETag",
		"Content-Length",
		"Last-Modified",
		"Content-Type",
	}, defaultFilterKeys...)
	oi := minio.ObjectInfo{
		IsDir:  false,
		Bucket: bucket,
		Name:   object,
	}
	oi.Size = m.Stat.Size
	oi.ModTime = m.Stat.ModTime
	oi.ContentType = m.Meta["content-type"]
	oi.ContentEncoding = m.Meta["content-encoding"]
	oi.ETag = minio.CanonicalizeETag(m.ETag)
	oi.UserDefined = minio.CleanMetadataKeys(m.Meta, stripped...)
	oi.Parts = m.Parts
	// Surface storage class when the backend recorded one.
	if sc, ok := m.Meta["x-amz-storage-class"]; ok {
		oi.StorageClass = sc
	}
	return oi
}
// ObjectToPartOffset translates an absolute object offset into the index of
// the part containing it plus the offset within that part. Offsets at or past
// the end of the object yield minio.InvalidRange.
func (m gwMetaV1) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) {
	// Offset zero always maps to the start of the first part.
	if offset == 0 {
		return 0, 0, nil
	}
	remaining := offset
	// Walk the parts, consuming the offset until it lands inside one.
	for idx := range m.Parts {
		if remaining < m.Parts[idx].Size {
			return idx, remaining, nil
		}
		remaining -= m.Parts[idx].Size
	}
	// Offset beyond the size of the object: return InvalidRange.
	logger.LogIf(ctx, minio.InvalidRange{})
	return 0, 0, minio.InvalidRange{}
}
// parseGWStat extracts the stat section (size, modTime) from raw gateway
// metadata JSON. Only a malformed modTime produces an error.
func parseGWStat(gwMetaBuf []byte) (si minio.StatInfo, e error) {
	modTime, err := time.Parse(time.RFC3339, gjson.GetBytes(gwMetaBuf, "stat.modTime").String())
	if err != nil {
		return si, err
	}
	return minio.StatInfo{
		ModTime: modTime,
		Size:    gjson.GetBytes(gwMetaBuf, "stat.size").Int(),
	}, nil
}
// parseGWVersion returns the metadata format version field from raw gateway
// metadata JSON.
func parseGWVersion(gwMetaBuf []byte) string {
	return gjson.GetBytes(gwMetaBuf, "version").String()
}

// parseGWETag returns the stored object ETag from raw gateway metadata JSON.
func parseGWETag(gwMetaBuf []byte) string {
	return gjson.GetBytes(gwMetaBuf, "etag").String()
}

// parseGWFormat returns the metadata format identifier from raw gateway
// metadata JSON.
func parseGWFormat(gwMetaBuf []byte) string {
	return gjson.GetBytes(gwMetaBuf, "format").String()
}
// parseGWParts decodes the "parts" array from raw gateway metadata JSON into
// a slice of ObjectPartInfo, preserving array order.
func parseGWParts(gwMetaBuf []byte) []minio.ObjectPartInfo {
	results := gjson.GetBytes(gwMetaBuf, "parts").Array()
	parts := make([]minio.ObjectPartInfo, len(results))
	for i, r := range results {
		parts[i] = minio.ObjectPartInfo{
			Number: int(r.Get("number").Int()),
			Name:   r.Get("name").String(),
			ETag:   r.Get("etag").String(),
			Size:   r.Get("size").Int(),
		}
	}
	return parts
}
// parseGWMetaMap decodes the "meta" object from raw gateway metadata JSON
// into a plain map of user-defined metadata.
func parseGWMetaMap(gwMetaBuf []byte) map[string]string {
	// Get gwMetaV1.Meta map.
	metaMapResult := gjson.GetBytes(gwMetaBuf, "meta").Map()
	// Pre-size the map to avoid incremental rehashing while copying.
	metaMap := make(map[string]string, len(metaMapResult))
	for key, valResult := range metaMapResult {
		metaMap[key] = valResult.String()
	}
	return metaMap
}
// gwMetaUnmarshalJSON assembles a gwMetaV1 from raw metadata JSON via the
// per-field gjson parsers. Only a bad stat.modTime produces an error; other
// missing sections simply yield zero values.
func gwMetaUnmarshalJSON(ctx context.Context, gwMetaBuf []byte) (gwMeta gwMetaV1, e error) {
	gwMeta.Version = parseGWVersion(gwMetaBuf)
	gwMeta.Format = parseGWFormat(gwMetaBuf)
	statInfo, statErr := parseGWStat(gwMetaBuf)
	if statErr != nil {
		logger.LogIf(ctx, statErr)
		return gwMeta, statErr
	}
	gwMeta.ETag = parseGWETag(gwMetaBuf)
	gwMeta.Stat = statInfo
	gwMeta.Parts = parseGWParts(gwMetaBuf)
	gwMeta.Meta = parseGWMetaMap(gwMetaBuf)
	return gwMeta, nil
}
// readGWMetadata parses the contents of a `dare.meta` object into a validated
// gwMetaV1. An empty buffer means the metadata object does not exist; an
// unknown version/format combination is rejected as invalid.
func readGWMetadata(ctx context.Context, buf bytes.Buffer) (gwMeta gwMetaV1, err error) {
	if buf.Len() == 0 {
		return gwMetaV1{}, errGWMetaNotFound
	}
	meta, parseErr := gwMetaUnmarshalJSON(ctx, buf.Bytes())
	switch {
	case parseErr != nil:
		return gwMetaV1{}, parseErr
	case !meta.IsValid():
		return gwMetaV1{}, errGWMetaInvalidFormat
	}
	// Return structured `dare.meta`.
	return meta, nil
}
// getGWMetadata - marshals the given gateway metadata (the dare.meta
// contents) to JSON and wraps it in a *minio.PutObjReader suitable for
// storing on the backend.
// NOTE(review): the bucket and prefix arguments are unused here — confirm
// whether they are kept for interface symmetry with callers.
func getGWMetadata(ctx context.Context, bucket, prefix string, gwMeta gwMetaV1) (*minio.PutObjReader, error) {
	// Marshal json.
	metadataBytes, err := json.Marshal(&gwMeta)
	if err != nil {
		logger.LogIf(ctx, err)
		return nil, err
	}
	// No expected MD5/SHA256: the metadata object is written unchecksummed.
	hashReader, err := hash.NewReader(bytes.NewReader(metadataBytes), int64(len(metadataBytes)), "", "", int64(len(metadataBytes)))
	if err != nil {
		return nil, err
	}
	return minio.NewPutObjReader(hashReader, nil, nil), nil
}

View file

@ -0,0 +1,74 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3
import (
"bytes"
"context"
"testing"
)
// Tests for GW metadata format validity: IsValid must accept only known
// version/format combinations.
func TestGWMetaFormatValid(t *testing.T) {
	tests := []struct {
		name    int
		version string
		format  string
		want    bool
	}{
		{1, "123", "fs", false},
		{2, "123", gwMetaFormat, false},
		{3, gwMetaVersion, "test", false},
		{4, gwMetaVersion100, "hello", false},
		{5, gwMetaVersion, gwMetaFormat, true},
		{6, gwMetaVersion100, gwMetaFormat, true},
	}
	for _, tt := range tests {
		m := newGWMetaV1()
		m.Version = tt.version
		m.Format = tt.format
		// Fix: report expected (tt.want) and received (got) in the right
		// order; the original printed them swapped.
		if got := m.IsValid(); got != tt.want {
			t.Errorf("Test %d: Expected %v but received %v", tt.name, tt.want, got)
		}
	}
}
// Tests for reading GW metadata info: readGWMetadata must accept well-formed
// metadata and reject documents with an unparseable stat.modTime.
func TestReadGWMetadata(t *testing.T) {
	tests := []struct {
		metaStr string
		pass    bool
	}{
		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", {"stat": {"size": "132", "modTime": "2018-08-31T22:25:39.23626461Z" }}}`, true},
		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", {"stat": {"size": "132", "modTime": "0000-00-00T00:00:00.00000000Z" }}}`, false},
		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", {"stat": {"size": "5242880", "modTime": "2018-08-31T22:25:39.23626461Z" }},"meta":{"content-type":"application/octet-stream","etag":"57c743902b2fc8eea6ba3bb4fc58c8e8"},"parts":[{"number":1,"name":"part.1","etag":"","size":5242880}]}}`, true},
		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", {"stat": {"size": "68190720", "modTime": "2018-08-31T22:25:39.23626461Z" }},"meta":{"X-Minio-Internal-Encrypted-Multipart":"","X-Minio-Internal-Server-Side-Encryption-Iv":"kdbOcKdXD3Sew8tOiHe5eI9xkX1oQ2W9JURz0oslCZA=","X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm":"DAREv2-HMAC-SHA256","X-Minio-Internal-Server-Side-Encryption-Sealed-Key":"IAAfAMfqKrxMXC9LuiI7ENP+p0xArepzAiIeB/MftFp7Xmq2OzDkKlmNbj5RKI89RrjiAbOVLSSEMvqQsrIrTQ==","content-type":"text/plain; charset=utf-8","etag":"2b137fa4ab80126af54623b010c98de6-2"},"parts":[{"number":1,"name":"part.1","etag":"c5cac075eefdab801a5198812f51b36e","size":67141632},{"number":2,"name":"part.2","etag":"ccdf4b774bc3be8eef9a8987309e8171","size":1049088}]}`, true},
		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", {"stat": {"size": "68190720", "modTime": "2018-08-31T22:25:39.23626461Z" }},"meta":{"X-Minio-Internal-Encrypted-Multipart":"","X-Minio-Internal-Server-Side-Encryption-Iv":"kdbOcKdXD3Sew8tOiHe5eI9xkX1oQ2W9JURz0oslCZA=","X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm":"DAREv2-HMAC-SHA256","X-Minio-Internal-Server-Side-Encryption-Sealed-Key":"IAAfAMfqKrxMXC9LuiI7ENP+p0xArepzAiIeB/MftFp7Xmq2OzDkKlmNbj5RKI89RrjiAbOVLSSEMvqQsrIrTQ==","content-type":"text/plain; charset=utf-8","etag":"2b137fa4ab80126af54623b010c98de6-2"},"parts":"123"}`, true},
	}
	for i, tt := range tests {
		buf := bytes.NewBufferString(tt.metaStr)
		m, err := readGWMetadata(context.Background(), *buf)
		t.Log(m)
		if err != nil && tt.pass {
			t.Errorf("Test %d: Expected parse gw metadata to succeed, but failed", i)
		}
		if err == nil && !tt.pass {
			// Fix: this branch means parsing succeeded when it should have
			// failed; the original reused the "expected to succeed" message.
			t.Errorf("Test %d: Expected parse gw metadata to fail, but succeeded", i)
		}
	}
}

View file

@ -0,0 +1,778 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3
import (
"bytes"
"context"
"io"
"net/http"
"path"
"strconv"
"strings"
"time"
"github.com/minio/minio-go/pkg/encrypt"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/cmd/logger"
)
const (
	// gwdareMetaJSON is the name of the custom multipart/encryption metadata
	// file stored next to each gateway-encrypted object on the s3 backend.
	gwdareMetaJSON string = "dare.meta"
	// gwpartMetaJSON is the name of the temporary per-part metadata file
	// written while a multipart upload is in progress.
	gwpartMetaJSON string = "part.meta"
	// Custom multipart files are stored under the defaultMinioGWPrefix.
	defaultMinioGWPrefix = ".minio"
	// defaultGWContentFileName is the name of the encrypted payload object.
	defaultGWContentFileName = "data"
	// slashSeparator joins backend object path components.
	slashSeparator = "/"
)
// s3EncObjects is a wrapper around s3Objects and implements gateway calls for
// custom large objects encrypted at the gateway.
type s3EncObjects struct {
	s3Objects
}

/*
 NOTE:
 Custom gateway encrypted objects are stored on backend as follows:
	obj/.minio/data      <= encrypted content
	obj/.minio/dare.meta <= metadata

 When a multipart upload operation is in progress, the metadata set during
 NewMultipartUpload is stored in obj/.minio/uploadID/dare.meta and each
 UploadPart operation saves additional state of the part's encrypted ETag and
 encrypted size in obj/.minio/uploadID/part1/part.meta

 All the part metadata and temp dare.meta are cleaned up when upload completes
*/
// ListObjects lists all blobs in S3 bucket filtered by prefix, implemented in
// terms of ListObjectsV2 so gateway-encrypted entries are resolved.
// NOTE(review): the incoming `marker` argument is never forwarded as the
// continuation token, so marker-based pagination appears to restart from the
// beginning — confirm against callers.
func (l *s3EncObjects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) {
	var continuationToken, startAfter string
	res, err := l.ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, maxKeys, false, startAfter)
	if err != nil {
		return loi, err
	}
	// Translate the V2 result shape back into the V1 response.
	loi.IsTruncated = res.IsTruncated
	loi.NextMarker = res.NextContinuationToken
	loi.Objects = res.Objects
	loi.Prefixes = res.Prefixes
	return loi, nil
}
// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix. Backend
// entries under a .minio prefix are internal: dare.meta files are translated
// back into the logical object they describe, temporary part/upload state is
// hidden, and prefixes that are really encrypted objects are surfaced as
// objects instead.
func (l *s3EncObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
	var objects []minio.ObjectInfo
	var prefixes []string
	var isTruncated bool
	// filter out objects that contain a .minio prefix, but is not a dare.meta metadata file.
	for {
		// Page through the backend in fixed batches of 1000, independent of
		// the caller's maxKeys, until enough visible entries are collected.
		loi, e = l.s3Objects.ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, 1000, fetchOwner, startAfter)
		if e != nil {
			return loi, minio.ErrorRespToObjectError(e, bucket)
		}
		for _, obj := range loi.Objects {
			// Track resume position as we consume entries.
			startAfter = obj.Name
			continuationToken = loi.NextContinuationToken
			isTruncated = loi.IsTruncated
			if !isGWObject(obj.Name) {
				continue
			}
			// get objectname and ObjectInfo from the custom metadata file
			if strings.HasSuffix(obj.Name, gwdareMetaJSON) {
				objSlice := strings.Split(obj.Name, slashSeparator+defaultMinioGWPrefix)
				gwMeta, e := l.getGWMetadata(ctx, bucket, getDareMetaPath(objSlice[0]))
				if e != nil {
					// Unreadable metadata: skip the entry entirely.
					continue
				}
				oInfo := gwMeta.ToObjectInfo(bucket, objSlice[0])
				objects = append(objects, oInfo)
			} else {
				objects = append(objects, obj)
			}
			if len(objects) > maxKeys {
				break
			}
		}
		for _, p := range loi.Prefixes {
			objName := strings.TrimSuffix(p, slashSeparator)
			gm, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(objName))
			// if prefix is actually a custom multi-part object, append it to objects
			if err == nil {
				objects = append(objects, gm.ToObjectInfo(bucket, objName))
				continue
			}
			isPrefix := l.isPrefix(ctx, bucket, p, fetchOwner, startAfter)
			if isPrefix {
				prefixes = append(prefixes, p)
			}
		}
		if (len(objects) > maxKeys) || !loi.IsTruncated {
			break
		}
	}
	// Rebuild the response from the filtered entries; NextContinuationToken
	// ends up as the name of the last visible object.
	loi.IsTruncated = isTruncated
	loi.ContinuationToken = continuationToken
	loi.Objects = make([]minio.ObjectInfo, 0)
	loi.Prefixes = make([]string, 0)
	for _, obj := range objects {
		loi.NextContinuationToken = obj.Name
		loi.Objects = append(loi.Objects, obj)
	}
	for _, pfx := range prefixes {
		if pfx != prefix {
			loi.Prefixes = append(loi.Prefixes, pfx)
		}
	}
	return loi, nil
}
// isGWObject returns true if the named backend entry should be surfaced to
// clients: either a plain (unencrypted) object, or the dare.meta file that
// represents a completed gateway-encrypted object. Temporary part.meta files
// and in-progress upload entries are filtered out.
func isGWObject(objName string) bool {
	// Names without the .minio prefix are ordinary unencrypted objects.
	if !strings.Contains(objName, defaultMinioGWPrefix) {
		return true
	}
	// ignore temp part.meta files
	if strings.Contains(objName, gwpartMetaJSON) {
		return false
	}
	// Locate (from the right) the .minio and dare.meta path components; for
	// a completed object they are adjacent: obj/.minio/dare.meta.
	segments := strings.Split(objName, slashSeparator)
	var prefixIdx, metaIdx int
	for i := len(segments) - 1; i >= 0; i-- {
		switch segments[i] {
		case defaultMinioGWPrefix:
			prefixIdx = i
		case gwdareMetaJSON:
			metaIdx = i
		}
		if prefixIdx > 0 && metaIdx > 0 {
			break
		}
	}
	// Incomplete uploads would have a uploadID between defaultMinioGWPrefix
	// and gwdareMetaJSON, so require the two components to be adjacent.
	return metaIdx > 0 && prefixIdx > 0 && metaIdx-prefixIdx == 1
}
// isPrefix returns true if the prefix exists on the backend and contains at
// least one client-visible object (i.e. it is not merely an incomplete
// multipart upload entry).
func (l *s3EncObjects) isPrefix(ctx context.Context, bucket, prefix string, fetchOwner bool, startAfter string) bool {
	var token, delimiter string
	for {
		loi, listErr := l.s3Objects.ListObjectsV2(ctx, bucket, prefix, token, delimiter, 1000, fetchOwner, startAfter)
		if listErr != nil {
			return false
		}
		for i := range loi.Objects {
			if isGWObject(loi.Objects[i].Name) {
				return true
			}
		}
		if !loi.IsTruncated {
			return false
		}
		token = loi.NextContinuationToken
	}
}
// GetObject reads an object from S3, supporting Range-style startOffset and
// length parameters. It delegates to the shared read path also used by
// GetObjectNInfo.
func (l *s3EncObjects) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
	return l.getObject(ctx, bucket, key, startOffset, length, writer, etag, opts)
}
// isGWEncrypted reports whether a dare.meta entry exists for the object,
// i.e. whether the gateway encrypted it.
func (l *s3EncObjects) isGWEncrypted(ctx context.Context, bucket, object string) bool {
	_, statErr := l.s3Objects.GetObjectInfo(ctx, bucket, getDareMetaPath(object), minio.ObjectOptions{})
	return statErr == nil
}
// getGWMetadata fetches the named metadata object (dare.meta) from the s3
// backend and unmarshals it into a structured gwMetaV1.
func (l *s3EncObjects) getGWMetadata(ctx context.Context, bucket, metaFileName string) (m gwMetaV1, err error) {
	oi, statErr := l.s3Objects.GetObjectInfo(ctx, bucket, metaFileName, minio.ObjectOptions{})
	if statErr != nil {
		return m, statErr
	}
	var buf bytes.Buffer
	if err = l.s3Objects.GetObject(ctx, bucket, metaFileName, 0, oi.Size, &buf, oi.ETag, minio.ObjectOptions{}); err != nil {
		return m, err
	}
	return readGWMetadata(ctx, buf)
}
// writeGWMetadata serializes the given gateway metadata and stores it as an
// object on the s3 backend.
func (l *s3EncObjects) writeGWMetadata(ctx context.Context, bucket, metaFileName string, m gwMetaV1, o minio.ObjectOptions) error {
	reader, err := getGWMetadata(ctx, bucket, metaFileName, m)
	if err != nil {
		logger.LogIf(ctx, err)
		return err
	}
	if _, err = l.s3Objects.PutObject(ctx, bucket, metaFileName, reader, map[string]string{}, o); err != nil {
		return err
	}
	return nil
}
// getTmpDareMetaPath returns the path of the temporary metadata json file
// for an in-progress multipart upload (obj/.minio/uploadID/dare.meta).
func getTmpDareMetaPath(object, uploadID string) string {
	return path.Join(getGWMetaPath(object), uploadID, gwdareMetaJSON)
}

// getDareMetaPath returns the path of the metadata json file for a completed
// encrypted object (obj/.minio/dare.meta).
func getDareMetaPath(object string) string {
	return path.Join(getGWMetaPath(object), gwdareMetaJSON)
}

// getPartMetaPath returns the path of the temporary per-part metadata file
// for a multipart upload (obj/.minio/uploadID/partID/part.meta).
func getPartMetaPath(object, uploadID string, partID int) string {
	return path.Join(object, defaultMinioGWPrefix, uploadID, strconv.Itoa(partID), gwpartMetaJSON)
}
// deleteGWMetadata deletes the custom dare metadata file saved at the
// backend for the given bucket.
func (l *s3EncObjects) deleteGWMetadata(ctx context.Context, bucket, metaFileName string) error {
	return l.s3Objects.DeleteObject(ctx, bucket, metaFileName)
}
// getObject is the common read path for GetObject and GetObjectNInfo. When no
// dare.meta exists for the key the request is passed through to the backend
// unchanged; otherwise the requested range is validated against the sizes
// recorded in the gateway metadata before reading.
func (l *s3EncObjects) getObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
	// Forward client SSE options to the backend only when gateway SSE-C is
	// configured; otherwise the backend must not see encryption headers.
	var o minio.ObjectOptions
	if minio.GlobalGatewaySSE.SSEC() {
		o = opts
	}
	dmeta, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(key))
	if err != nil {
		// unencrypted content
		return l.s3Objects.GetObject(ctx, bucket, key, startOffset, length, writer, etag, o)
	}
	if startOffset < 0 {
		// NOTE(review): this logs an invalid range but does not return, so a
		// negative offset falls through to the reads below — confirm intended.
		logger.LogIf(ctx, minio.InvalidRange{})
	}
	// For negative length read everything.
	if length < 0 {
		length = dmeta.Stat.Size - startOffset
	}
	// Reply back invalid range if the input offset and length fall out of range.
	if startOffset > dmeta.Stat.Size || startOffset+length > dmeta.Stat.Size {
		logger.LogIf(ctx, minio.InvalidRange{OffsetBegin: startOffset, OffsetEnd: length, ResourceSize: dmeta.Stat.Size})
		return minio.InvalidRange{OffsetBegin: startOffset, OffsetEnd: length, ResourceSize: dmeta.Stat.Size}
	}
	// Get start part index and offset.
	_, partOffset, err := dmeta.ObjectToPartOffset(ctx, startOffset)
	if err != nil {
		return minio.InvalidRange{OffsetBegin: startOffset, OffsetEnd: length, ResourceSize: dmeta.Stat.Size}
	}
	// Calculate endOffset according to length
	endOffset := startOffset
	if length > 0 {
		endOffset += length - 1
	}
	// Get last part index to read given length.
	if _, _, err := dmeta.ObjectToPartOffset(ctx, endOffset); err != nil {
		return minio.InvalidRange{OffsetBegin: startOffset, OffsetEnd: length, ResourceSize: dmeta.Stat.Size}
	}
	// NOTE(review): endOffset is passed in the `length` position of the
	// backend GetObject call — verify this matches the backend's contract.
	return l.s3Objects.GetObject(ctx, bucket, key, partOffset, endOffset, writer, dmeta.ETag, o)
}
// GetObjectNInfo returns object info together with a ReadCloser streaming the
// object content for the requested range.
func (l *s3EncObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, o minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {
	// Forward client SSE options only when gateway SSE-C is configured.
	var opts minio.ObjectOptions
	if minio.GlobalGatewaySSE.SSEC() {
		opts = o
	}
	objInfo, err := l.GetObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		// No gateway metadata available: pass the request straight through.
		return l.s3Objects.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
	}
	objInfo.UserDefined = minio.CleanMinioInternalMetadataKeys(objInfo.UserDefined)
	fn, off, length, err := minio.NewGetObjectReader(rs, objInfo)
	if err != nil {
		return nil, minio.ErrorRespToObjectError(err)
	}
	// Gateway-encrypted content lives under the hidden .minio content path.
	if l.isGWEncrypted(ctx, bucket, object) {
		object = getGWContentPath(object)
	}
	// Stream the object through a pipe so the caller can consume it lazily.
	pr, pw := io.Pipe()
	go func() {
		err := l.getObject(ctx, bucket, object, off, length, pw, objInfo.ETag, opts)
		pw.CloseWithError(err)
	}()
	// Setup cleanup function to cause the above go-routine to
	// exit in case of partial read
	pipeCloser := func() { pr.Close() }
	return fn(pr, h, pipeCloser)
}
// GetObjectInfo reads object info and replies back ObjectInfo. For gateway
// encrypted objects the info is derived from the dare.meta file; otherwise
// the call is passed through to the backend.
func (l *s3EncObjects) GetObjectInfo(ctx context.Context, bucket string, object string, o minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
	// Forward client SSE options only when gateway SSE-C is configured.
	var opts minio.ObjectOptions
	if minio.GlobalGatewaySSE.SSEC() {
		opts = o
	}
	gwMeta, metaErr := l.getGWMetadata(ctx, bucket, getDareMetaPath(object))
	if metaErr != nil {
		// No gateway metadata: plain pass-through object.
		return l.s3Objects.GetObjectInfo(ctx, bucket, object, opts)
	}
	return gwMeta.ToObjectInfo(bucket, object), nil
}
// CopyObject copies an object from source bucket to a destination bucket.
// A same-name copy with matching SSE types on source and destination is a
// metadata-only update (only dare.meta is rewritten); anything else falls
// back to a full re-upload via PutObject.
func (l *s3EncObjects) CopyObject(ctx context.Context, srcBucket string, srcObject string, dstBucket string, dstObject string, srcInfo minio.ObjectInfo, s, d minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
	cpSrcDstSame := strings.EqualFold(path.Join(srcBucket, srcObject), path.Join(dstBucket, dstObject))
	if cpSrcDstSame {
		var gwMeta gwMetaV1
		if s.ServerSideEncryption != nil && d.ServerSideEncryption != nil &&
			((s.ServerSideEncryption.Type() == encrypt.SSEC && d.ServerSideEncryption.Type() == encrypt.SSEC) ||
				(s.ServerSideEncryption.Type() == encrypt.S3 && d.ServerSideEncryption.Type() == encrypt.S3)) {
			gwMeta, err = l.getGWMetadata(ctx, srcBucket, getDareMetaPath(srcObject))
			if err != nil {
				return
			}
			// Record the destination's SSE headers into the stored
			// user-defined metadata before rewriting dare.meta.
			header := make(http.Header)
			if d.ServerSideEncryption != nil {
				d.ServerSideEncryption.Marshal(header)
			}
			for k, v := range header {
				srcInfo.UserDefined[k] = v[0]
			}
			gwMeta.Meta = srcInfo.UserDefined
			if err = l.writeGWMetadata(ctx, dstBucket, getDareMetaPath(dstObject), gwMeta, minio.ObjectOptions{}); err != nil {
				return objInfo, minio.ErrorRespToObjectError(err)
			}
			return gwMeta.ToObjectInfo(dstBucket, dstObject), nil
		}
	}
	// Full copy: re-upload the source content under destination options.
	return l.PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, srcInfo.UserDefined, d)
}
// DeleteObject deletes a blob in bucket. For gateway-encrypted objects both
// the encrypted content and its metadata file are removed from the backend.
func (l *s3EncObjects) DeleteObject(ctx context.Context, bucket string, object string) error {
	// Get dare meta json
	if _, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(object)); err != nil {
		// No gateway metadata: plain pass-through delete.
		return l.s3Objects.DeleteObject(ctx, bucket, object)
	}
	// Best-effort delete of the encrypted payload, then remove dare.meta.
	l.s3Objects.DeleteObject(ctx, bucket, getGWContentPath(object))
	return l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object))
}
// ListMultipartUploads lists all multipart uploads, stripping the hidden
// gateway content suffix from markers and object names before returning.
func (l *s3EncObjects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, e error) {
	if lmi, e = l.s3Objects.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads); e != nil {
		return
	}
	suffix := getGWContentPath("/")
	lmi.KeyMarker = strings.TrimSuffix(lmi.KeyMarker, suffix)
	lmi.NextKeyMarker = strings.TrimSuffix(lmi.NextKeyMarker, suffix)
	for i := range lmi.Uploads {
		lmi.Uploads[i].Object = strings.TrimSuffix(lmi.Uploads[i].Object, suffix)
	}
	return
}
// NewMultipartUpload starts a multipart upload. When the request carries an
// SSE option matching a mode enabled at the gateway, the upload targets the
// hidden encrypted-content path and the caller's metadata is stashed in a
// temporary dare.meta until the upload completes.
func (l *s3EncObjects) NewMultipartUpload(ctx context.Context, bucket string, object string, metadata map[string]string, o minio.ObjectOptions) (uploadID string, err error) {
	// Forward SSE options to the backend only for modes the gateway enables.
	var opts minio.ObjectOptions
	if o.ServerSideEncryption != nil &&
		((minio.GlobalGatewaySSE.SSEC() && o.ServerSideEncryption.Type() == encrypt.SSEC) ||
			(minio.GlobalGatewaySSE.SSES3() && o.ServerSideEncryption.Type() == encrypt.S3)) {
		opts = o
	}
	// No encryption requested: plain pass-through upload.
	if o.ServerSideEncryption == nil {
		return l.s3Objects.NewMultipartUpload(ctx, bucket, object, metadata, opts)
	}
	if uploadID, err = l.s3Objects.NewMultipartUpload(ctx, bucket, getGWContentPath(object), map[string]string{}, opts); err != nil {
		return
	}
	// Create uploadID and write a temporary dare.meta object under
	// object/uploadID prefix holding the caller's metadata.
	gwmeta := newGWMetaV1()
	gwmeta.Meta = metadata
	gwmeta.Stat.ModTime = time.Now().UTC()
	if err = l.writeGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID), gwmeta, minio.ObjectOptions{}); err != nil {
		return uploadID, minio.ErrorRespToObjectError(err)
	}
	return uploadID, nil
}
// PutObject creates a new object with the incoming data. Encrypted puts go to
// the hidden content path with metadata captured in dare.meta; unencrypted
// puts pass through and clean up any stale encrypted version of the name.
func (l *s3EncObjects) PutObject(ctx context.Context, bucket string, object string, data *minio.PutObjReader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
	// Forward SSE options to the backend only for modes the gateway enables.
	var s3Opts minio.ObjectOptions
	if opts.ServerSideEncryption != nil &&
		((minio.GlobalGatewaySSE.SSEC() && opts.ServerSideEncryption.Type() == encrypt.SSEC) ||
			(minio.GlobalGatewaySSE.SSES3() && opts.ServerSideEncryption.Type() == encrypt.S3)) {
		s3Opts = opts
	}
	if opts.ServerSideEncryption == nil {
		// Unencrypted put: the deferred calls run after the pass-through
		// upload returns, removing any gateway-encrypted version of the same
		// name left from a previous encrypted upload.
		defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object))
		defer l.DeleteObject(ctx, bucket, getGWContentPath(object))
		return l.s3Objects.PutObject(ctx, bucket, object, data, metadata, s3Opts)
	}
	// Encrypted put: store content under the hidden data path, with no
	// user metadata on the backend object itself.
	oi, err := l.s3Objects.PutObject(ctx, bucket, getGWContentPath(object), data, map[string]string{}, s3Opts)
	if err != nil {
		return objInfo, minio.ErrorRespToObjectError(err)
	}
	// Combine backend-reported and caller metadata into dare.meta;
	// caller-supplied keys win on conflict.
	gwMeta := newGWMetaV1()
	gwMeta.Meta = make(map[string]string)
	for k, v := range oi.UserDefined {
		gwMeta.Meta[k] = v
	}
	for k, v := range metadata {
		gwMeta.Meta[k] = v
	}
	encMD5 := data.MD5CurrentHexString()
	gwMeta.ETag = encMD5
	gwMeta.Stat.Size = oi.Size
	gwMeta.Stat.ModTime = time.Now().UTC()
	if err = l.writeGWMetadata(ctx, bucket, getDareMetaPath(object), gwMeta, minio.ObjectOptions{}); err != nil {
		return objInfo, minio.ErrorRespToObjectError(err)
	}
	objInfo = gwMeta.ToObjectInfo(bucket, object)
	// delete any unencrypted content of the same name created previously
	l.s3Objects.DeleteObject(ctx, bucket, object)
	return objInfo, nil
}
// PutObjectPart puts a part of object in bucket. For gateway-encrypted
// uploads the part data goes to the hidden content path and the part's
// encrypted ETag and size are recorded in a per-part part.meta file for
// CompleteMultipartUpload to consume.
func (l *s3EncObjects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, e error) {
	if opts.ServerSideEncryption == nil {
		// Unencrypted upload: plain pass-through.
		return l.s3Objects.PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
	}
	var s3Opts minio.ObjectOptions
	// for sse-s3 encryption options should not be passed to backend
	if opts.ServerSideEncryption != nil && opts.ServerSideEncryption.Type() == encrypt.SSEC && minio.GlobalGatewaySSE.SSEC() {
		s3Opts = opts
	}
	// Verify the upload exists by statting its temporary dare.meta.
	uploadPath := getTmpGWMetaPath(object, uploadID)
	tmpDareMeta := path.Join(uploadPath, gwdareMetaJSON)
	_, err := l.s3Objects.GetObjectInfo(ctx, bucket, tmpDareMeta, minio.ObjectOptions{})
	if err != nil {
		return pi, minio.InvalidUploadID{UploadID: uploadID}
	}
	pi, e = l.s3Objects.PutObjectPart(ctx, bucket, getGWContentPath(object), uploadID, partID, data, s3Opts)
	if e != nil {
		return
	}
	// Persist this part's encrypted state in its own part.meta file.
	gwMeta := newGWMetaV1()
	gwMeta.Parts = make([]minio.ObjectPartInfo, 1)
	// Add incoming part.
	gwMeta.Parts[0] = minio.ObjectPartInfo{
		Number: partID,
		ETag:   pi.ETag,
		Size:   pi.Size,
		Name:   strconv.Itoa(partID),
	}
	gwMeta.ETag = data.MD5CurrentHexString() // encrypted ETag
	gwMeta.Stat.Size = pi.Size
	gwMeta.Stat.ModTime = pi.LastModified
	if err = l.writeGWMetadata(ctx, bucket, getPartMetaPath(object, uploadID, partID), gwMeta, minio.ObjectOptions{}); err != nil {
		return pi, minio.ErrorRespToObjectError(err)
	}
	return minio.PartInfo{
		Size:         gwMeta.Stat.Size,
		ETag:         minio.CanonicalizeETag(gwMeta.ETag),
		LastModified: gwMeta.Stat.ModTime,
		PartNumber:   partID,
	}, nil
}
// CopyObjectPart creates a part in a multipart upload by copying
// existing object or a part of it.
//
// It delegates to PutObjectPart using the reader carried in
// srcInfo.PutObjReader; startOffset/length/srcOpts are not consulted here —
// presumably the handler layer has already applied the range to that
// reader (NOTE(review): confirm against the calling handler).
func (l *s3EncObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
	partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) {
	return l.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, srcInfo.PutObjReader, dstOpts)
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (l *s3EncObjects) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (lpi minio.ListPartsInfo, e error) {
	// We do not store parts uploaded so far in the dare.meta. Only
	// CompleteMultipartUpload finalizes the parts under the upload prefix.
	// Otherwise, there could be situations of dare.meta getting corrupted
	// by competing upload parts.
	//
	// Absence of the temporary dare.meta means this is a plain
	// (non-gateway-encrypted) upload; delegate to the backend unchanged.
	dm, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID))
	if err != nil {
		return l.s3Objects.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
	}
	// List the backend parts stored under the internal gateway content path.
	lpi, err = l.s3Objects.ListObjectParts(ctx, bucket, getGWContentPath(object), uploadID, partNumberMarker, maxParts, opts)
	if err != nil {
		return lpi, err
	}
	// Replace each backend ETag with the gateway (encrypted-content) ETag
	// recorded by PutObjectPart in the per-part metadata file.
	for i, part := range lpi.Parts {
		partMeta, err := l.getGWMetadata(ctx, bucket, getPartMetaPath(object, uploadID, part.PartNumber))
		if err != nil || len(partMeta.Parts) == 0 {
			return lpi, minio.InvalidPart{}
		}
		lpi.Parts[i].ETag = partMeta.ETag
	}
	lpi.UserDefined = dm.Meta
	// Report the logical object name, not the internal content path.
	lpi.Object = object
	return lpi, nil
}
// AbortMultipartUpload aborts an ongoing multipart upload.
func (l *s3EncObjects) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error {
	// Without the temporary dare.meta marker this is a plain backend
	// upload; abort it directly.
	if _, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID)); err != nil {
		return l.s3Objects.AbortMultipartUpload(ctx, bucket, object, uploadID)
	}
	// Abort the backend upload that holds the encrypted part data.
	if err := l.s3Objects.AbortMultipartUpload(ctx, bucket, getGWContentPath(object), uploadID); err != nil {
		return err
	}
	// Delete all gateway bookkeeping objects (temporary dare.meta and
	// part.meta files) stored under the upload prefix, page by page.
	uploadPrefix := getTmpGWMetaPath(object, uploadID)
	var continuationToken, startAfter, delimiter string
	for {
		loi, err := l.s3Objects.ListObjectsV2(ctx, bucket, uploadPrefix, continuationToken, delimiter, 1000, false, startAfter)
		if err != nil {
			return minio.InvalidUploadID{UploadID: uploadID}
		}
		for _, obj := range loi.Objects {
			if err := l.s3Objects.DeleteObject(ctx, bucket, obj.Name); err != nil {
				return minio.ErrorRespToObjectError(err)
			}
			// Track the last deleted key so a retried listing resumes
			// past it.
			startAfter = obj.Name
		}
		continuationToken = loi.NextContinuationToken
		if !loi.IsTruncated {
			break
		}
	}
	return nil
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, e error) {
	// Absence of the temporary dare.meta marker means the upload was a
	// plain pass-through (not gateway encrypted).
	tmpMeta, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID))
	if err != nil {
		oi, e = l.s3Objects.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
		if e == nil {
			// delete any encrypted version of object that might exist
			defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object))
			defer l.DeleteObject(ctx, bucket, getGWContentPath(object))
		}
		return oi, e
	}
	gwMeta := newGWMetaV1()
	gwMeta.Meta = make(map[string]string)
	// Carry over the user metadata captured when the upload was created.
	for k, v := range tmpMeta.Meta {
		gwMeta.Meta[k] = v
	}
	// Allocate parts similar to incoming slice.
	gwMeta.Parts = make([]minio.ObjectPartInfo, len(uploadedParts))
	bkUploadedParts := make([]minio.CompletePart, len(uploadedParts))
	// Calculate full object size.
	var objectSize int64
	// Validate each part and then commit to disk.
	for i, part := range uploadedParts {
		// Map the client-visible part number to the backend ETag recorded
		// in the per-part metadata file by PutObjectPart.
		partMeta, err := l.getGWMetadata(ctx, bucket, getPartMetaPath(object, uploadID, part.PartNumber))
		if err != nil || len(partMeta.Parts) == 0 {
			return oi, minio.InvalidPart{}
		}
		bkUploadedParts[i] = minio.CompletePart{PartNumber: part.PartNumber, ETag: partMeta.Parts[0].ETag}
		gwMeta.Parts[i] = partMeta.Parts[0]
		objectSize += partMeta.Parts[0].Size
	}
	// Complete the upload on the backend under the internal content path.
	oi, e = l.s3Objects.CompleteMultipartUpload(ctx, bucket, getGWContentPath(object), uploadID, bkUploadedParts, opts)
	if e != nil {
		return oi, e
	}
	// delete any unencrypted version of object that might be on the backend
	defer l.s3Objects.DeleteObject(ctx, bucket, object)
	// Save the final object size and modtime.
	gwMeta.Stat.Size = objectSize
	gwMeta.Stat.ModTime = time.Now().UTC()
	gwMeta.ETag = oi.ETag
	// Persisting the final dare.meta is what makes the object visible as a
	// gateway-encrypted object.
	if err = l.writeGWMetadata(ctx, bucket, getDareMetaPath(object), gwMeta, minio.ObjectOptions{}); err != nil {
		return oi, minio.ErrorRespToObjectError(err)
	}
	// Clean up any uploaded parts that are not being committed by this CompleteMultipart operation
	var continuationToken, startAfter, delimiter string
	uploadPrefix := getTmpGWMetaPath(object, uploadID)
	done := false
	for {
		loi, lerr := l.s3Objects.ListObjectsV2(ctx, bucket, uploadPrefix, continuationToken, delimiter, 1000, false, startAfter)
		if lerr != nil {
			done = true
			break
		}
		for _, obj := range loi.Objects {
			// Listing may return keys beyond the upload prefix; stop there.
			if !strings.HasPrefix(obj.Name, uploadPrefix) {
				done = true
				break
			}
			startAfter = obj.Name
			// Best-effort delete; leftovers are reaped later by the stale
			// multipart cleanup routine.
			l.s3Objects.DeleteObject(ctx, bucket, obj.Name)
		}
		continuationToken = loi.NextContinuationToken
		if !loi.IsTruncated || done {
			break
		}
	}
	return gwMeta.ToObjectInfo(bucket, object), nil
}
// getTmpGWMetaPath returns the backend prefix under which the bookkeeping
// objects of an in-progress upload are kept.
func getTmpGWMetaPath(object, uploadID string) string {
	gwPrefix := path.Join(object, defaultMinioGWPrefix)
	return path.Join(gwPrefix, uploadID)
}
// getGWMetaPath returns the backend prefix holding an object's custom
// metadata and content once its upload has completed.
func getGWMetaPath(object string) string {
	metaPrefix := defaultMinioGWPrefix
	return path.Join(object, metaPrefix)
}
// getGWContentPath returns the backend key at which an object's content is
// stored once its upload has completed.
func getGWContentPath(object string) string {
	metaDir := path.Join(object, defaultMinioGWPrefix)
	return path.Join(metaDir, defaultGWContentFileName)
}
// cleanupStaleEncMultipartUploads periodically sweeps stale incomplete
// encrypted multipart uploads until doneCh is closed. It blocks, so run it
// in its own goroutine.
func (l *s3EncObjects) cleanupStaleEncMultipartUploads(ctx context.Context, cleanupInterval, expiry time.Duration, doneCh chan struct{}) {
	t := time.NewTicker(cleanupInterval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			l.cleanupStaleEncMultipartUploadsOnGW(ctx, expiry)
		case <-doneCh:
			return
		}
	}
}
// cleanupStaleEncMultipartUploadsOnGW removes expired gateway-encryption
// multipart bookkeeping objects (temporary part.meta/dare.meta files) from
// every bucket on the backend. It performs a single sweep; the caller
// (cleanupStaleEncMultipartUploads) invokes it on each ticker interval.
//
// Bug fix: the original wrapped this body in `for { ... }` that only broke
// when ListBuckets failed — on success a single invocation spun forever,
// re-listing buckets continuously and never returning to the ticker loop.
func (l *s3EncObjects) cleanupStaleEncMultipartUploadsOnGW(ctx context.Context, expiry time.Duration) {
	buckets, err := l.s3Objects.ListBuckets(ctx)
	if err != nil {
		// Best effort: skip this sweep; it will be retried on the next tick.
		return
	}
	for _, b := range buckets {
		expParts := l.getStalePartsForBucket(ctx, b.Name, expiry)
		for k := range expParts {
			// Deletion errors are ignored; stale entries are retried on
			// the next sweep.
			l.s3Objects.DeleteObject(ctx, b.Name, k)
		}
	}
}
// getStalePartsForBucket returns the names of temporary gateway metadata
// objects (part.meta / dare.meta of incomplete uploads) in bucket whose
// last modification is older than expiry. The returned map is used as a
// set; values are always "".
func (l *s3EncObjects) getStalePartsForBucket(ctx context.Context, bucket string, expiry time.Duration) (expParts map[string]string) {
	var prefix, continuationToken, delimiter, startAfter string
	expParts = make(map[string]string)
	now := time.Now()
	for {
		// Paginate through the whole bucket; a listing error ends the scan
		// with whatever was collected so far (best effort).
		loi, err := l.s3Objects.ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, 1000, false, startAfter)
		if err != nil {
			break
		}
		for _, obj := range loi.Objects {
			startAfter = obj.Name
			// Only gateway-internal keys are candidates.
			if !strings.Contains(obj.Name, defaultMinioGWPrefix) {
				continue
			}
			// Skip finalized gateway objects.
			if isGWObject(obj.Name) {
				continue
			}
			// delete temporary part.meta or dare.meta files for incomplete uploads that are past expiry
			if (strings.HasSuffix(obj.Name, gwpartMetaJSON) || strings.HasSuffix(obj.Name, gwdareMetaJSON)) &&
				now.Sub(obj.ModTime) > expiry {
				expParts[obj.Name] = ""
			}
		}
		continuationToken = loi.NextContinuationToken
		if !loi.IsTruncated {
			break
		}
	}
	return
}
// DeleteBucket removes the bucket from the backend. The bucket is treated
// as empty only when it holds nothing but temporary gateway bookkeeping
// files (part.meta/dare.meta of incomplete uploads), which are deleted
// first. Any other key — including a finalized gateway-encrypted object —
// yields BucketNotEmpty.
func (l *s3EncObjects) DeleteBucket(ctx context.Context, bucket string) error {
	var prefix, continuationToken, delimiter, startAfter string
	expParts := make(map[string]string)
	for {
		// A listing error silently ends the scan; RemoveBucket below will
		// surface the failure if the bucket is actually non-empty.
		loi, err := l.s3Objects.ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, 1000, false, startAfter)
		if err != nil {
			break
		}
		for _, obj := range loi.Objects {
			startAfter = obj.Name
			// Any non-gateway key means the bucket has user data.
			if !strings.Contains(obj.Name, defaultMinioGWPrefix) {
				return minio.BucketNotEmpty{}
			}
			// A finalized gateway object also counts as user data.
			if isGWObject(obj.Name) {
				return minio.BucketNotEmpty{}
			}
			// delete temporary part.meta or dare.meta files for incomplete uploads
			if strings.HasSuffix(obj.Name, gwpartMetaJSON) || strings.HasSuffix(obj.Name, gwdareMetaJSON) {
				expParts[obj.Name] = ""
			}
		}
		continuationToken = loi.NextContinuationToken
		if !loi.IsTruncated {
			break
		}
	}
	// Remove the collected bookkeeping files, then the bucket itself.
	for k := range expParts {
		l.s3Objects.DeleteObject(ctx, bucket, k)
	}
	err := l.Client.RemoveBucket(bucket)
	if err != nil {
		return minio.ErrorRespToObjectError(err, bucket)
	}
	return nil
}

View file

@ -0,0 +1,47 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3
import minio "github.com/minio/minio/cmd"
// List of header keys to be filtered, usually
// from all S3 API http responses. These look like hop-by-hop and
// backend-identifying headers — presumably stripped so the gateway does
// not leak backend details; confirm at the usage sites of this variable.
var defaultFilterKeys = []string{
	"Connection",
	"Transfer-Encoding",
	"Accept-Ranges",
	"Date",
	"Server",
	"Vary",
	"x-amz-bucket-region",
	"x-amz-request-id",
	"x-amz-id-2",
	"Content-Security-Policy",
	"X-Xss-Protection",
	// Add new headers to be ignored.
}
// FromGatewayObjectPart converts the ObjectInfo of a part that was stored
// as a standalone backend object into the equivalent PartInfo.
func FromGatewayObjectPart(partID int, oi minio.ObjectInfo) (pi minio.PartInfo) {
	pi = minio.PartInfo{
		PartNumber:   partID,
		Size:         oi.Size,
		ETag:         minio.CanonicalizeETag(oi.ETag),
		LastModified: oi.ModTime,
	}
	return pi
}

View file

@ -28,12 +28,13 @@ import (
"github.com/minio/cli"
miniogo "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/credentials"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/policy"
minio "github.com/minio/minio/cmd"
)
const (
@ -223,9 +224,20 @@ func (g *S3) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error)
return nil, err
}
return &s3Objects{
s := s3Objects{
Client: clnt,
}, nil
}
// Enables single encryption if KMS is configured.
if minio.GlobalKMS != nil {
encS := s3EncObjects{s}
// Start stale enc multipart uploads cleanup routine.
go encS.cleanupStaleEncMultipartUploads(context.Background(),
minio.GlobalMultipartCleanupInterval, minio.GlobalMultipartExpiry, minio.GlobalServiceDoneCh)
return &encS, nil
}
return &s, nil
}
// Production - s3 gateway is production ready.
@ -330,6 +342,7 @@ func (l *s3Objects) ListObjects(ctx context.Context, bucket string, prefix strin
// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix
func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys, startAfter)
if err != nil {
return loi, minio.ErrorRespToObjectError(err, bucket)
@ -387,7 +400,6 @@ func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, st
return minio.ErrorRespToObjectError(err, bucket, key)
}
defer object.Close()
if _, err := io.Copy(writer, object); err != nil {
return minio.ErrorRespToObjectError(err, bucket, key)
}
@ -396,7 +408,11 @@ func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, st
// GetObjectInfo reads object info and replies back ObjectInfo
func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
oi, err := l.Client.StatObject(bucket, object, miniogo.StatObjectOptions{GetObjectOptions: miniogo.GetObjectOptions{ServerSideEncryption: opts.ServerSideEncryption}})
oi, err := l.Client.StatObject(bucket, object, miniogo.StatObjectOptions{
GetObjectOptions: miniogo.GetObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
},
})
if err != nil {
return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object)
}
@ -426,6 +442,18 @@ func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject
// So preserve it by adding "REPLACE" directive to save all the metadata set by CopyObject API.
srcInfo.UserDefined["x-amz-metadata-directive"] = "REPLACE"
srcInfo.UserDefined["x-amz-copy-source-if-match"] = srcInfo.ETag
header := make(http.Header)
if srcOpts.ServerSideEncryption != nil {
encrypt.SSECopy(srcOpts.ServerSideEncryption).Marshal(header)
}
if dstOpts.ServerSideEncryption != nil {
dstOpts.ServerSideEncryption.Marshal(header)
}
for k, v := range header {
srcInfo.UserDefined[k] = v[0]
}
if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, srcInfo.UserDefined); err != nil {
return objInfo, minio.ErrorRespToObjectError(err, srcBucket, srcObject)
}
@ -478,10 +506,21 @@ func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object str
// existing object or a part of it.
func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) {
srcInfo.UserDefined = map[string]string{
"x-amz-copy-source-if-match": srcInfo.ETag,
}
header := make(http.Header)
if srcOpts.ServerSideEncryption != nil {
encrypt.SSECopy(srcOpts.ServerSideEncryption).Marshal(header)
}
if dstOpts.ServerSideEncryption != nil {
dstOpts.ServerSideEncryption.Marshal(header)
}
for k, v := range header {
srcInfo.UserDefined[k] = v[0]
}
completePart, err := l.Client.CopyObjectPart(srcBucket, srcObject, destBucket, destObject,
uploadID, partID, startOffset, length, srcInfo.UserDefined)
if err != nil {
@ -493,7 +532,7 @@ func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, de
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (l *s3Objects) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi minio.ListPartsInfo, e error) {
func (l *s3Objects) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (lpi minio.ListPartsInfo, e error) {
result, err := l.Client.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil {
return lpi, minio.ErrorRespToObjectError(err, bucket, object)
@ -557,3 +596,8 @@ func (l *s3Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error
func (l *s3Objects) IsCompressionSupported() bool {
return false
}
// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (l *s3Objects) IsEncryptionSupported() bool {
return minio.GlobalKMS != nil || len(minio.GlobalGatewaySSE) > 0
}

View file

@ -110,8 +110,8 @@ func TestS3ToObjectError(t *testing.T) {
// Special test case for error that is not of type
// miniogo.ErrorResponse
{
inputErr: fmt.Errorf("not a minio.ErrorResponse"),
expectedErr: fmt.Errorf("not a minio.ErrorResponse"),
inputErr: fmt.Errorf("not a ErrorResponse"),
expectedErr: fmt.Errorf("not a ErrorResponse"),
},
}

View file

@ -79,10 +79,11 @@ const (
// date and server date during signature verification.
globalMaxSkewTime = 15 * time.Minute // 15 minutes skew allowed.
// Expiry duration after which the multipart uploads are deemed stale.
globalMultipartExpiry = time.Hour * 24 * 14 // 2 weeks.
// Cleanup interval when the stale multipart cleanup is initiated.
globalMultipartCleanupInterval = time.Hour * 24 // 24 hrs.
// GlobalMultipartExpiry - Expiry duration after which the multipart uploads are deemed stale.
GlobalMultipartExpiry = time.Hour * 24 * 14 // 2 weeks.
// GlobalMultipartCleanupInterval - Cleanup interval when the stale multipart cleanup is initiated.
GlobalMultipartCleanupInterval = time.Hour * 24 // 24 hrs.
// Refresh interval to update in-memory bucket policy cache.
globalRefreshBucketPolicyInterval = 5 * time.Minute
// Refresh interval to update in-memory iam config cache.
@ -236,8 +237,9 @@ var (
// KMS key id
globalKMSKeyID string
// Allocated KMS
globalKMS crypto.KMS
// GlobalKMS initialized KMS configuration
GlobalKMS crypto.KMS
// Auto-Encryption, if enabled, turns any non-SSE-C request
// into an SSE-S3 request. If enabled a valid, non-empty KMS
@ -269,6 +271,9 @@ var (
// Deployment ID - unique per deployment
globalDeploymentID string
// GlobalGatewaySSE sse options
GlobalGatewaySSE gatewaySSE
// Add new variable global values here.
)

View file

@ -78,7 +78,7 @@ func (sys *IAMSys) Init(objAPI ObjectLayer) error {
defer ticker.Stop()
for {
select {
case <-globalServiceDoneCh:
case <-GlobalServiceDoneCh:
return
case <-ticker.C:
sys.refresh(objAPI)

View file

@ -166,7 +166,7 @@ func startLockMaintenance(lkSrv *lockRPCReceiver) {
for {
// Verifies every minute for locks held more than 2minutes.
select {
case <-globalServiceDoneCh:
case <-GlobalServiceDoneCh:
return
case <-ticker.C:
lkSrv.lockMaintenance(lockValidityCheckInterval)

View file

@ -103,7 +103,7 @@ type ObjectInfo struct {
UserDefined map[string]string
// List of individual parts, maximum size of upto 10,000
Parts []objectPartInfo `json:"-"`
Parts []ObjectPartInfo `json:"-"`
// Implements writer and reader used by CopyObject API
Writer io.WriteCloser `json:"-"`

View file

@ -75,7 +75,7 @@ type ObjectLayer interface {
CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (info PartInfo, err error)
PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error)
ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error)
AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)

View file

@ -1429,7 +1429,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
}
for i, testCase := range testCases {
actualResult, actualErr := obj.ListObjectParts(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts)
actualResult, actualErr := obj.ListObjectParts(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts, ObjectOptions{})
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error())
}
@ -1667,7 +1667,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
}
for i, testCase := range testCases {
actualResult, actualErr := obj.ListObjectParts(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts)
actualResult, actualErr := obj.ListObjectParts(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts, ObjectOptions{})
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error())
}

View file

@ -487,7 +487,6 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
// encrypted bytes. The header parameter is used to
// provide encryption parameters.
fn = func(inputReader io.Reader, h http.Header, cFns ...func()) (r *GetObjectReader, err error) {
copySource := h.Get(crypto.SSECopyAlgorithm) != ""
cFns = append(cleanUpFns, cFns...)
@ -577,7 +576,6 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func())
return r, nil
}
}
return fn, off, length, nil
}
@ -663,3 +661,17 @@ func sealETagFn(key crypto.ObjectKey) SealMD5CurrFn {
}
return fn
}
// CleanMinioInternalMetadataKeys removes X-Amz-Meta- prefix from minio internal
// encryption metadata that was sent by minio gateway
func CleanMinioInternalMetadataKeys(metadata map[string]string) map[string]string {
var newMeta = make(map[string]string, len(metadata))
for k, v := range metadata {
if strings.HasPrefix(k, "X-Amz-Meta-X-Minio-Internal-") {
newMeta[strings.TrimPrefix(k, "X-Amz-Meta-")] = v
} else {
newMeta[k] = v
}
}
return newMeta
}

View file

@ -431,7 +431,7 @@ func TestGetActualSize(t *testing.T) {
"X-Minio-Internal-actual-size": "100000001",
"content-type": "application/octet-stream",
"etag": "b3ff3ef3789147152fbfbc50efba4bfd-2"},
Parts: []objectPartInfo{
Parts: []ObjectPartInfo{
{
Size: 39235668,
ActualSize: 67108864,
@ -450,7 +450,7 @@ func TestGetActualSize(t *testing.T) {
"X-Minio-Internal-actual-size": "841",
"content-type": "application/octet-stream",
"etag": "b3ff3ef3789147152fbfbc50efba4bfd-2"},
Parts: []objectPartInfo{},
Parts: []ObjectPartInfo{},
},
result: 841,
},
@ -459,7 +459,7 @@ func TestGetActualSize(t *testing.T) {
UserDefined: map[string]string{"X-Minio-Internal-compression": "golang/snappy/LZ77",
"content-type": "application/octet-stream",
"etag": "b3ff3ef3789147152fbfbc50efba4bfd-2"},
Parts: []objectPartInfo{},
Parts: []ObjectPartInfo{},
},
result: -1,
},
@ -482,7 +482,7 @@ func TestGetCompressedOffsets(t *testing.T) {
}{
{
objInfo: ObjectInfo{
Parts: []objectPartInfo{
Parts: []ObjectPartInfo{
{
Size: 39235668,
ActualSize: 67108864,
@ -499,7 +499,7 @@ func TestGetCompressedOffsets(t *testing.T) {
},
{
objInfo: ObjectInfo{
Parts: []objectPartInfo{
Parts: []ObjectPartInfo{
{
Size: 39235668,
ActualSize: 67108864,
@ -516,7 +516,7 @@ func TestGetCompressedOffsets(t *testing.T) {
},
{
objInfo: ObjectInfo{
Parts: []objectPartInfo{
Parts: []ObjectPartInfo{
{
Size: 39235668,
ActualSize: 67108864,

View file

@ -35,6 +35,7 @@ import (
"github.com/gorilla/mux"
"github.com/klauspost/readahead"
miniogo "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/dns"
@ -91,7 +92,7 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
if !objectAPI.IsEncryptionSupported() && hasServerSideEncryptionHeader(r.Header) {
if !api.EncryptionEnabled() && hasServerSideEncryptionHeader(r.Header) {
writeErrorResponse(w, ErrBadRequest, r.URL, guessIsBrowserReq(r))
return
}
@ -99,6 +100,12 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
bucket := vars["bucket"]
object := vars["object"]
// get gateway encryption options
opts, err := getEncryptionOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
return
}
// Check for auth type to return S3 compatible error.
// type to return the correct error (NoSuchKey vs AccessDenied)
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
@ -127,7 +134,7 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
getObjectInfo = api.CacheAPI().GetObjectInfo
}
_, err := getObjectInfo(ctx, bucket, object, ObjectOptions{})
_, err = getObjectInfo(ctx, bucket, object, opts)
if toAPIErrorCode(ctx, err) == ErrNoSuchKey {
s3Error = ErrNoSuchKey
}
@ -148,6 +155,7 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
writeErrorResponse(w, ErrEmptyRequestBody, r.URL, guessIsBrowserReq(r))
return
}
var selectReq s3select.ObjectSelectRequest
if err := xmlDecoder(r.Body, &selectReq, r.ContentLength); err != nil {
writeErrorResponse(w, ErrMalformedXML, r.URL, guessIsBrowserReq(r))
@ -168,7 +176,6 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
getObjectNInfo = api.CacheAPI().GetObjectNInfo
}
var opts ObjectOptions
gr, err := getObjectNInfo(ctx, bucket, object, nil, r.Header, readLock, opts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
@ -246,6 +253,7 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
// Set encryption response headers
if objectAPI.IsEncryptionSupported() {
objInfo.UserDefined = CleanMinioInternalMetadataKeys(objInfo.UserDefined)
if crypto.IsEncrypted(objInfo.UserDefined) {
switch {
case crypto.S3.IsEncrypted(objInfo.UserDefined):
@ -322,7 +330,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(w, ErrBadRequest, r.URL, guessIsBrowserReq(r))
return
}
if !objectAPI.IsEncryptionSupported() && hasServerSideEncryptionHeader(r.Header) {
if !api.EncryptionEnabled() && hasServerSideEncryptionHeader(r.Header) {
writeErrorResponse(w, ErrBadRequest, r.URL, guessIsBrowserReq(r))
return
}
@ -335,10 +343,13 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
return
}
var (
opts ObjectOptions
err error
)
// get gateway encryption options
opts, err := getEncryptionOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
return
}
// Check for auth type to return S3 compatible error.
// type to return the correct error (NoSuchKey vs AccessDenied)
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
@ -409,6 +420,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
objInfo := gr.ObjInfo
if objectAPI.IsEncryptionSupported() {
objInfo.UserDefined = CleanMinioInternalMetadataKeys(objInfo.UserDefined)
if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
@ -498,7 +510,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
writeErrorResponseHeadersOnly(w, ErrBadRequest)
return
}
if !objectAPI.IsEncryptionSupported() && hasServerSideEncryptionHeader(r.Header) {
if !api.EncryptionEnabled() && hasServerSideEncryptionHeader(r.Header) {
writeErrorResponse(w, ErrBadRequest, r.URL, guessIsBrowserReq(r))
return
}
@ -516,10 +528,11 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
getObjectInfo = api.CacheAPI().GetObjectInfo
}
var (
opts ObjectOptions
err error
)
opts, err := getEncryptionOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
if getRequestAuthType(r) == authTypeAnonymous {
@ -574,12 +587,12 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
return
}
if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
return
}
objInfo.UserDefined = CleanMinioInternalMetadataKeys(objInfo.UserDefined)
}
// Set encryption response headers
@ -626,7 +639,6 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
if err != nil {
host, port = "", ""
}
// Notify object accessed via a HEAD request.
sendEvent(eventArgs{
EventName: event.ObjectAccessedHead,
@ -650,6 +662,9 @@ func getCpObjMetadataFromHeader(ctx context.Context, r *http.Request, userMeta m
defaultMeta[k] = v
}
// remove SSE Headers from source info
crypto.RemoveSSEHeaders(defaultMeta)
// if x-amz-metadata-directive says REPLACE then
// we extract metadata from the input headers.
if isMetadataReplace(r.Header) {
@ -712,7 +727,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r)) // SSE-KMS is not supported
return
}
if !objectAPI.IsEncryptionSupported() && (hasServerSideEncryptionHeader(r.Header) || crypto.SSECopy.IsRequested(r.Header)) {
if !api.EncryptionEnabled() && (hasServerSideEncryptionHeader(r.Header) || crypto.SSECopy.IsRequested(r.Header)) {
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
@ -767,8 +782,30 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
writeErrorResponse(w, ErrInvalidMetadataDirective, r.URL, guessIsBrowserReq(r))
return
}
// This request header needs to be set prior to setting ObjectOptions
if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
var srcOpts, dstOpts ObjectOptions
srcOpts, err = copySrcEncryptionOpts(ctx, r, srcBucket, srcObject)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// convert copy src encryption options for GET calls
var getOpts = ObjectOptions{}
getSSE := encrypt.SSE(srcOpts.ServerSideEncryption)
if getSSE != srcOpts.ServerSideEncryption {
getOpts.ServerSideEncryption = getSSE
}
dstOpts, err = copyDstEncryptionOpts(ctx, r, dstBucket, dstObject, nil)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
@ -791,7 +828,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
}
var rs *HTTPRangeSpec
gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, lock, srcOpts)
gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, lock, getOpts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
@ -890,9 +927,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
rawReader := srcInfo.Reader
pReader := NewPutObjReader(srcInfo.Reader, nil, nil)
if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
var encMetadata = make(map[string]string)
if objectAPI.IsEncryptionSupported() && !isCompressed {
// Encryption parameters not applicable for this object.
@ -927,6 +961,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// - the object is encrypted using SSE-C and two different SSE-C keys are present
// - the object is encrypted using SSE-S3 and the SSE-S3 header is present
// than execute a key rotation.
var keyRotation bool
if cpSrcDstSame && ((sseCopyC && sseC) || (sseS3 && sseCopyS3)) {
if sseCopyC && sseC {
oldKey, err = ParseSSECopyCustomerRequest(r.Header, srcInfo.UserDefined)
@ -950,12 +985,15 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// Since we are rotating the keys, make sure to update the metadata.
srcInfo.metadataOnly = true
keyRotation = true
} else {
if isSourceEncrypted || isTargetEncrypted {
// We are not only copying just metadata instead
// we are creating a new object at this point, even
// if source and destination are same objects.
srcInfo.metadataOnly = false
if !keyRotation {
srcInfo.metadataOnly = false
}
}
// Calculate the size of the target object
@ -1017,7 +1055,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// Ensure that metadata does not contain sensitive information
crypto.RemoveSensitiveEntries(srcInfo.UserDefined)
// Check if x-amz-metadata-directive was not set to REPLACE and source,
// desination are same objects. Apply this restriction also when
// metadataOnly is true indicating that we are not overwriting the object.
@ -1115,7 +1152,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r)) // SSE-KMS is not supported
return
}
if !objectAPI.IsEncryptionSupported() && hasServerSideEncryptionHeader(r.Header) {
if !api.EncryptionEnabled() && hasServerSideEncryptionHeader(r.Header) {
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
@ -1276,7 +1313,17 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
// This request header needs to be set prior to setting ObjectOptions
if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
// get gateway encryption options
var opts ObjectOptions
opts, err = putEncryptionOpts(ctx, r, bucket, object, nil)
if err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
@ -1285,9 +1332,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
return
}
}
if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
var objectEncryptionKey []byte
if objectAPI.IsEncryptionSupported() {
if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, slashSeparator) { // handle SSE requests
@ -1384,7 +1429,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r)) // SSE-KMS is not supported
return
}
if !objectAPI.IsEncryptionSupported() && hasServerSideEncryptionHeader(r.Header) {
if !api.EncryptionEnabled() && hasServerSideEncryptionHeader(r.Header) {
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
@ -1392,10 +1437,21 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
bucket := vars["bucket"]
object := vars["object"]
var (
opts ObjectOptions
err error
)
// This request header needs to be set prior to setting ObjectOptions
if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
// get gateway encryption options
var opts ObjectOptions
var err error
opts, err = putEncryptionOpts(ctx, r, bucket, object, nil)
if err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL, guessIsBrowserReq(r))
return
@ -1419,9 +1475,6 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
var encMetadata = map[string]string{}
if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
if objectAPI.IsEncryptionSupported() {
if hasServerSideEncryptionHeader(r.Header) {
if err = setEncryptionMetadata(r, bucket, object, encMetadata); err != nil {
@ -1487,7 +1540,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r)) // SSE-KMS is not supported
return
}
if !objectAPI.IsEncryptionSupported() && (hasServerSideEncryptionHeader(r.Header) || crypto.SSECopy.IsRequested(r.Header)) {
if !api.EncryptionEnabled() && (hasServerSideEncryptionHeader(r.Header) || crypto.SSECopy.IsRequested(r.Header)) {
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
@ -1552,6 +1605,21 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
}
var srcOpts, dstOpts ObjectOptions
srcOpts, err = copySrcEncryptionOpts(ctx, r, srcBucket, srcObject)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// convert copy src and dst encryption options for GET/PUT calls
var getOpts = ObjectOptions{}
if srcOpts.ServerSideEncryption != nil {
getOpts.ServerSideEncryption = encrypt.SSE(srcOpts.ServerSideEncryption)
}
dstOpts, err = copyDstEncryptionOpts(ctx, r, dstBucket, dstObject, nil)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
@ -1582,7 +1650,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
}
}
gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, readLock, srcOpts)
gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, readLock, getOpts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
@ -1627,7 +1695,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
var reader io.Reader
var li ListPartsInfo
li, err = objectAPI.ListObjectParts(ctx, dstBucket, dstObject, uploadID, 0, 1)
li, err = objectAPI.ListObjectParts(ctx, dstBucket, dstObject, uploadID, 0, 1, dstOpts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
@ -1669,7 +1737,19 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
isEncrypted := false
var objectEncryptionKey []byte
if objectAPI.IsEncryptionSupported() && !isCompressed {
li, lerr := objectAPI.ListObjectParts(ctx, dstBucket, dstObject, uploadID, 0, 1, dstOpts)
if lerr != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, lerr), r.URL, guessIsBrowserReq(r))
return
}
li.UserDefined = CleanMinioInternalMetadataKeys(li.UserDefined)
dstOpts, err = copyDstEncryptionOpts(ctx, r, dstBucket, dstObject, li.UserDefined)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if crypto.IsEncrypted(li.UserDefined) {
isEncrypted = true
if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(li.UserDefined) {
writeErrorResponse(w, ErrSSEMultipartEncrypted, r.URL, guessIsBrowserReq(r))
return
@ -1714,7 +1794,6 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
pReader = NewPutObjReader(rawReader, srcInfo.Reader, objectEncryptionKey)
}
}
srcInfo.PutObjReader = pReader
// Copy source object to destination, if source and destination
// object is same then only metadata is updated.
@ -1724,6 +1803,9 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if isEncrypted {
partInfo.ETag = tryDecryptETag(objectEncryptionKey, partInfo.ETag, crypto.SSEC.IsRequested(r.Header))
}
if isEncrypted {
partInfo.ETag = tryDecryptETag(objectEncryptionKey, partInfo.ETag, crypto.SSEC.IsRequested(r.Header))
@ -1751,7 +1833,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r)) // SSE-KMS is not supported
return
}
if !objectAPI.IsEncryptionSupported() && hasServerSideEncryptionHeader(r.Header) {
if !api.EncryptionEnabled() && hasServerSideEncryptionHeader(r.Header) {
writeErrorResponse(w, ErrNotImplemented, r.URL, guessIsBrowserReq(r))
return
}
@ -1856,8 +1938,17 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
var pipeReader *io.PipeReader
var pipeWriter *io.PipeWriter
// get encryption options
var opts ObjectOptions
if crypto.SSEC.IsRequested(r.Header) {
opts, err = putEncryptionOpts(ctx, r, bucket, object, nil)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
var li ListPartsInfo
li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1)
li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1, opts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
@ -1897,10 +1988,8 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
var opts ObjectOptions
// Deny if WORM is enabled
if globalWORMEnabled {
@ -1914,17 +2003,25 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
var objectEncryptionKey []byte
if objectAPI.IsEncryptionSupported() && !isCompressed {
var li ListPartsInfo
li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1)
li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1, ObjectOptions{})
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
li.UserDefined = CleanMinioInternalMetadataKeys(li.UserDefined)
if crypto.IsEncrypted(li.UserDefined) {
if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(li.UserDefined) {
writeErrorResponse(w, ErrSSEMultipartEncrypted, r.URL, guessIsBrowserReq(r))
return
}
isEncrypted = true // to detect SSE-S3 encryption
opts, err = putEncryptionOpts(ctx, r, bucket, object, li.UserDefined)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
var key []byte
if crypto.SSEC.IsRequested(r.Header) {
key, err = ParseSSECustomerRequest(r)
@ -1940,7 +2037,6 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
var partIDbin [4]byte
binary.LittleEndian.PutUint32(partIDbin[:], uint32(partID)) // marshal part ID
@ -2067,16 +2163,16 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht
writeErrorResponse(w, ErrInvalidMaxParts, r.URL, guessIsBrowserReq(r))
return
}
listPartsInfo, err := objectAPI.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
var opts ObjectOptions
listPartsInfo, err := objectAPI.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
var ssec bool
if objectAPI.IsEncryptionSupported() {
var li ListPartsInfo
li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1)
li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1, opts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
@ -2169,10 +2265,9 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
var objectEncryptionKey []byte
var opts ObjectOptions
var isEncrypted, ssec bool
if objectAPI.IsEncryptionSupported() {
var li ListPartsInfo
li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1)
li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1, opts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
@ -2181,6 +2276,8 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
isEncrypted = true
ssec = crypto.SSEC.IsEncrypted(li.UserDefined)
var key []byte
isEncrypted = true
ssec = crypto.SSEC.IsEncrypted(li.UserDefined)
if crypto.S3.IsEncrypted(li.UserDefined) {
// Calculating object encryption key
objectEncryptionKey, err = decryptObjectInfo(key, bucket, object, li.UserDefined)
@ -2193,13 +2290,11 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
}
partsMap := make(map[string]PartInfo)
var listPartsInfo ListPartsInfo
if isEncrypted {
var partNumberMarker int
maxParts := 1000
for {
listPartsInfo, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
listPartsInfo, err := objectAPI.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL, guessIsBrowserReq(r))
return
@ -2213,6 +2308,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
}
}
}
// Complete parts.
var completeParts []CompletePart
for _, part := range complMultipartUpload.Parts {

View file

@ -1731,7 +1731,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
// See if the new part has been uploaded.
// testing whether the copy was successful.
var results ListPartsInfo
results, err = obj.ListObjectParts(context.Background(), testCase.bucketName, testObject, testCase.uploadID, 0, 1)
results, err = obj.ListObjectParts(context.Background(), testCase.bucketName, testObject, testCase.uploadID, 0, 1, ObjectOptions{})
if err != nil {
t.Fatalf("Test %d: %s: Failed to look for copied object part: <ERROR> %s", i+1, instanceType, err)
}
@ -2245,7 +2245,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
t.Fatalf("Error decoding the recorded response Body")
}
// verify the uploadID my making an attempt to list parts.
_, err = obj.ListObjectParts(context.Background(), bucketName, objectName, multipartResponse.UploadID, 0, 1)
_, err = obj.ListObjectParts(context.Background(), bucketName, objectName, multipartResponse.UploadID, 0, 1, ObjectOptions{})
if err != nil {
t.Fatalf("Invalid UploadID: <ERROR> %s", err)
}
@ -2297,7 +2297,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
t.Fatalf("Error decoding the recorded response Body")
}
// verify the uploadID my making an attempt to list parts.
_, err = obj.ListObjectParts(context.Background(), bucketName, objectName, multipartResponse.UploadID, 0, 1)
_, err = obj.ListObjectParts(context.Background(), bucketName, objectName, multipartResponse.UploadID, 0, 1, ObjectOptions{})
if err != nil {
t.Fatalf("Invalid UploadID: <ERROR> %s", err)
}
@ -2412,7 +2412,7 @@ func testAPINewMultipartHandlerParallel(obj ObjectLayer, instanceType, bucketNam
wg.Wait()
// Validate the upload ID by an attempt to list parts using it.
for _, uploadID := range testUploads.uploads {
_, err := obj.ListObjectParts(context.Background(), bucketName, objectName, uploadID, 0, 1)
_, err := obj.ListObjectParts(context.Background(), bucketName, objectName, uploadID, 0, 1, ObjectOptions{})
if err != nil {
t.Fatalf("Invalid UploadID: <ERROR> %s", err)
}

View file

@ -141,7 +141,7 @@ func (sys *PolicySys) Init(objAPI ObjectLayer) error {
defer ticker.Stop()
for {
select {
case <-globalServiceDoneCh:
case <-GlobalServiceDoneCh:
return
case <-ticker.C:
sys.refresh(objAPI)

View file

@ -202,7 +202,7 @@ func newPosix(path string) (*posix, error) {
}
if !p.diskMount {
go p.diskUsage(globalServiceDoneCh)
go p.diskUsage(GlobalServiceDoneCh)
}
// Success.

View file

@ -123,8 +123,8 @@ func configureServerHandler(endpoints EndpointList) (http.Handler, error) {
}
}
// Add API router.
registerAPIRouter(router)
// Add API router, additionally all server mode support encryption.
registerAPIRouter(router, true)
// Register rest of the handlers.
return registerHandlers(router, globalHandlers...), nil

View file

@ -34,12 +34,12 @@ const (
// Global service signal channel.
var globalServiceSignalCh chan serviceSignal
// Global service done channel.
var globalServiceDoneCh chan struct{}
// GlobalServiceDoneCh - Global service done channel.
var GlobalServiceDoneCh chan struct{}
// init initializes the global service channels used to coordinate
// server shutdown and service signals (restart/stop).
func init() {
// Buffered by one so a single shutdown notification never blocks the sender.
GlobalServiceDoneCh = make(chan struct{}, 1)
// Unbuffered channel carrying service signals to the service handler.
globalServiceSignalCh = make(chan serviceSignal)
}

View file

@ -2115,7 +2115,7 @@ func registerBucketLevelFunc(bucket *mux.Router, api objectAPIHandlers, apiFunct
func registerAPIFunctions(muxRouter *mux.Router, objLayer ObjectLayer, apiFunctions ...string) {
if len(apiFunctions) == 0 {
// Register all api endpoints by default.
registerAPIRouter(muxRouter)
registerAPIRouter(muxRouter, true)
return
}
// API Router.
@ -2133,8 +2133,9 @@ func registerAPIFunctions(muxRouter *mux.Router, objLayer ObjectLayer, apiFuncti
// to underlying cache layer to manage object layer operation and disk caching
// operation
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCacheObjectsFn,
ObjectAPI: newObjectLayerFn,
CacheAPI: newCacheObjectsFn,
EncryptionEnabled: func() bool { return true },
}
// Register ListBuckets handler.
@ -2155,7 +2156,7 @@ func initTestAPIEndPoints(objLayer ObjectLayer, apiFunctions []string) http.Hand
registerAPIFunctions(muxRouter, objLayer, apiFunctions...)
return muxRouter
}
registerAPIRouter(muxRouter)
registerAPIRouter(muxRouter, true)
return muxRouter
}

View file

@ -203,4 +203,16 @@ Example 1:
"Please check the passed value",
"Compress extensions/mime-types are delimited by `,`. For eg, MINIO_COMPRESS_ATTR=\"A,B,C\"",
)
uiErrInvalidGWSSEValue = newUIErrFn(
"Invalid gateway SSE value",
"Please check the passed value",
"MINIO_GATEWAY_SSE: Gateway SSE accepts only C and S3 as valid values. Delimit by `;` to set more than one value",
)
uiErrInvalidGWSSEEnvValue = newUIErrFn(
"Invalid gateway SSE configuration",
"",
"Refer to https://docs.minio.io/docs/minio-kms-quickstart-guide.html for setting up SSE",
)
)

View file

@ -931,7 +931,13 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
}
}
pReader = NewPutObjReader(hashReader, nil, nil)
// get gateway encryption options
var opts ObjectOptions
opts, err = putEncryptionOpts(ctx, r, bucket, object, nil)
if err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
return
}
if objectAPI.IsEncryptionSupported() {
if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, slashSeparator) { // handle SSE requests
rawReader := hashReader
@ -954,7 +960,6 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
// Ensure that metadata does not contain sensitive information
crypto.RemoveSensitiveEntries(metadata)
var opts ObjectOptions
// Deny if WORM is enabled
if globalWORMEnabled {
if _, err = objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {

View file

@ -234,7 +234,7 @@ func (s *xlSets) monitorAndConnectEndpoints(monitorInterval time.Duration) {
for {
select {
case <-globalServiceDoneCh:
case <-GlobalServiceDoneCh:
return
case <-s.disksConnectDoneCh:
return
@ -288,7 +288,7 @@ func newXLSets(endpoints EndpointList, format *formatXLV3, setCount int, drivesP
nsMutex: mutex,
bp: bp,
}
go s.sets[i].cleanupStaleMultipartUploads(context.Background(), globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
go s.sets[i].cleanupStaleMultipartUploads(context.Background(), GlobalMultipartCleanupInterval, GlobalMultipartExpiry, GlobalServiceDoneCh)
}
// Connect disks right away, but wait until we have `format.json` quorum.
@ -521,7 +521,7 @@ func (s *xlSets) IsListenBucketSupported() bool {
return true
}
// IsEncryptionSupported returns whether server side encryption is applicable for this layer.
// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (s *xlSets) IsEncryptionSupported() bool {
return s.getHashedSet("").IsEncryptionSupported()
}
@ -840,8 +840,8 @@ func (s *xlSets) PutObjectPart(ctx context.Context, bucket, object, uploadID str
}
// ListObjectParts - lists all uploaded parts to an object in hashedSet.
func (s *xlSets) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
return s.getHashedSet(object).ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
func (s *xlSets) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) {
return s.getHashedSet(object).ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
}
// Aborts an in-progress multipart operation on hashedSet based on the object name.

View file

@ -299,7 +299,7 @@ func (xl xlObjects) IsListenBucketSupported() bool {
return true
}
// IsEncryptionSupported returns whether server side encryption is applicable for this layer.
// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (xl xlObjects) IsEncryptionSupported() bool {
return true
}

View file

@ -97,7 +97,7 @@ func partsMetaFromModTimes(modTimes []time.Time, algorithm BitrotAlgorithm, chec
Stat: statInfo{
ModTime: modTime,
},
Parts: []objectPartInfo{
Parts: []ObjectPartInfo{
{
Name: "part.1",
},

View file

@ -31,9 +31,9 @@ import (
const erasureAlgorithmKlauspost = "klauspost/reedsolomon/vandermonde"
// objectPartInfo Info of each part kept in the multipart metadata
// ObjectPartInfo Info of each part kept in the multipart metadata
// file after CompleteMultipartUpload() is called.
type objectPartInfo struct {
type ObjectPartInfo struct {
Number int `json:"number"`
Name string `json:"name"`
ETag string `json:"etag"`
@ -42,7 +42,7 @@ type objectPartInfo struct {
}
// byObjectPartNumber is a collection satisfying sort.Interface.
type byObjectPartNumber []objectPartInfo
type byObjectPartNumber []ObjectPartInfo
// Len returns the number of parts in the collection (part of sort.Interface).
func (t byObjectPartNumber) Len() int { return len(t) }
// Swap exchanges the parts at indexes i and j (part of sort.Interface).
func (t byObjectPartNumber) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
@ -153,7 +153,7 @@ type xlMetaV1 struct {
// Metadata map for current object `xl.json`.
Meta map[string]string `json:"meta,omitempty"`
// Captures all the individual object `xl.json`.
Parts []objectPartInfo `json:"parts,omitempty"`
Parts []ObjectPartInfo `json:"parts,omitempty"`
}
// XL metadata constants.
@ -243,7 +243,7 @@ func (m xlMetaV1) ToObjectInfo(bucket, object string) ObjectInfo {
}
// objectPartIndex - returns the index of matching object part number.
func objectPartIndex(parts []objectPartInfo, partNumber int) int {
func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
for i, part := range parts {
if partNumber == part.Number {
return i
@ -254,7 +254,7 @@ func objectPartIndex(parts []objectPartInfo, partNumber int) int {
// AddObjectPart - add a new object part in order.
func (m *xlMetaV1) AddObjectPart(partNumber int, partName string, partETag string, partSize int64, actualSize int64) {
partInfo := objectPartInfo{
partInfo := ObjectPartInfo{
Number: partNumber,
Name: partName,
ETag: partETag,
@ -351,7 +351,7 @@ func pickValidXLMeta(ctx context.Context, metaArr []xlMetaV1, modTime time.Time,
var objMetadataOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errVolumeNotFound, errFileNotFound, errFileAccessDenied, errCorruptedFormat)
// readXLMetaParts - returns the XL Metadata Parts from xl.json of one of the disks picked at random.
func (xl xlObjects) readXLMetaParts(ctx context.Context, bucket, object string) (xlMetaParts []objectPartInfo, xlMeta map[string]string, err error) {
func (xl xlObjects) readXLMetaParts(ctx context.Context, bucket, object string) (xlMetaParts []ObjectPartInfo, xlMeta map[string]string, err error) {
var ignoredErrs []error
for _, disk := range xl.getLoadBalancedDisks() {
if disk == nil {

View file

@ -493,7 +493,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is marshaled directly into XML and
// replied back to the client.
func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) {
func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) {
if err := checkListPartsArgs(ctx, bucket, object, xl); err != nil {
return result, err
}
@ -656,7 +656,7 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
var currentXLMeta = xlMeta
// Allocate parts similar to incoming slice.
xlMeta.Parts = make([]objectPartInfo, len(parts))
xlMeta.Parts = make([]ObjectPartInfo, len(parts))
// Validate each part and then commit to disk.
for i, part := range parts {
@ -695,7 +695,7 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
objectActualSize += currentXLMeta.Parts[partIdx].ActualSize
// Add incoming parts.
xlMeta.Parts[i] = objectPartInfo{
xlMeta.Parts[i] = ObjectPartInfo{
Number: part.PartNumber,
ETag: part.ETag,
Size: currentXLMeta.Parts[partIdx].Size,

View file

@ -36,7 +36,7 @@ func TestXLCleanupStaleMultipartUploads(t *testing.T) {
// Close the go-routine, we are going to
// manually start it and test in this test case.
globalServiceDoneCh <- struct{}{}
GlobalServiceDoneCh <- struct{}{}
bucketName := "bucket"
objectName := "object"
@ -48,14 +48,14 @@ func TestXLCleanupStaleMultipartUploads(t *testing.T) {
t.Fatal("Unexpected err: ", err)
}
go xl.cleanupStaleMultipartUploads(context.Background(), 20*time.Millisecond, 0, globalServiceDoneCh)
go xl.cleanupStaleMultipartUploads(context.Background(), 20*time.Millisecond, 0, GlobalServiceDoneCh)
// Wait for 40ms such that - we have given enough time for
// cleanup routine to kick in.
time.Sleep(40 * time.Millisecond)
// Close the routine we do not need it anymore.
globalServiceDoneCh <- struct{}{}
GlobalServiceDoneCh <- struct{}{}
// Check if upload id was already purged.
if err = obj.AbortMultipartUpload(context.Background(), bucketName, objectName, uploadID); err != nil {

View file

@ -526,7 +526,7 @@ func (xl xlObjects) renameCorruptedObject(ctx context.Context, bucket, object st
},
}
validMeta.Parts = []objectPartInfo{
validMeta.Parts = []ObjectPartInfo{
{
Number: 1,
Name: "part.1",

View file

@ -192,12 +192,12 @@ func parseXLErasureInfo(ctx context.Context, xlMetaBuf []byte) (ErasureInfo, err
return erasure, nil
}
func parseXLParts(xlMetaBuf []byte) []objectPartInfo {
func parseXLParts(xlMetaBuf []byte) []ObjectPartInfo {
// Parse the XL Parts.
partsResult := gjson.GetBytes(xlMetaBuf, "parts").Array()
partInfo := make([]objectPartInfo, len(partsResult))
partInfo := make([]ObjectPartInfo, len(partsResult))
for i, p := range partsResult {
info := objectPartInfo{}
info := ObjectPartInfo{}
info.Number = int(p.Get("number").Int())
info.Name = p.Get("name").String()
info.ETag = p.Get("etag").String()
@ -249,7 +249,7 @@ func xlMetaV1UnmarshalJSON(ctx context.Context, xlMetaBuf []byte) (xlMeta xlMeta
}
// read xl.json from the given disk, parse and return xlV1MetaV1.Parts.
func readXLMetaParts(ctx context.Context, disk StorageAPI, bucket string, object string) ([]objectPartInfo, map[string]string, error) {
func readXLMetaParts(ctx context.Context, disk StorageAPI, bucket string, object string) ([]ObjectPartInfo, map[string]string, error) {
// Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil {

View file

@ -174,7 +174,7 @@ func (m *xlMetaV1) AddTestObjectCheckSum(checkSumNum int, name string, algorithm
// AddTestObjectPart - add a new object part in order.
func (m *xlMetaV1) AddTestObjectPart(partNumber int, partName string, partETag string, partSize int64) {
partInfo := objectPartInfo{
partInfo := ObjectPartInfo{
Number: partNumber,
Name: partName,
ETag: partETag,
@ -201,7 +201,7 @@ func getSampleXLMeta(totalParts int) xlMetaV1 {
// Number of checksum info == total parts.
xlMeta.Erasure.Checksums = make([]ChecksumInfo, totalParts)
// total number of parts.
xlMeta.Parts = make([]objectPartInfo, totalParts)
xlMeta.Parts = make([]ObjectPartInfo, totalParts)
for i := 0; i < totalParts; i++ {
partName := "part." + strconv.Itoa(i+1)
// hard coding hash and algo value for the checksum, Since we are benchmarking the parsing of xl.json the magnitude doesn't affect the test,

View file

@ -108,7 +108,22 @@ export MINIO_SSE_VAULT_NAMESPACE=ns1
Note: If [Vault Namespaces](https://learn.hashicorp.com/vault/operations/namespaces) are in use, the MINIO_SSE_VAULT_NAMESPACE variable needs to be set before configuring the approle and transit secrets engines.
Minio gateway to S3 supports encryption. Three encryption modes are possible - encryption can be set to ``pass-through`` to backend, ``single encryption`` (at the gateway) or ``double encryption`` (single encryption at gateway and pass-through to backend). The mode is selected by setting the MINIO_GATEWAY_SSE and KMS environment variables described in Step 2.1.2.
If MINIO_GATEWAY_SSE and KMS are not set up, all encryption headers are passed through to the backend. If the KMS environment variables are set up, ``single encryption`` is automatically performed at the gateway and the encrypted object is saved at the backend.
To specify ``double encryption``, MINIO_GATEWAY_SSE environment variable needs to be set to "s3" for sse-s3
and "c" for sse-c encryption. More than one encryption option can be set, delimited by ";". Objects are encrypted at the gateway and the gateway also does a pass-through to backend. Note that in the case of SSE-C encryption, gateway derives a unique SSE-C key for pass through from the SSE-C client key using a KDF.
```sh
export MINIO_GATEWAY_SSE="s3;c"
export MINIO_SSE_VAULT_APPROLE_ID=9b56cc08-8258-45d5-24a3-679876769126
export MINIO_SSE_VAULT_APPROLE_SECRET=4e30c52f-13e4-a6f5-0763-d50e8cb4321f
export MINIO_SSE_VAULT_ENDPOINT=https://vault-endpoint-ip:8200
export MINIO_SSE_VAULT_KEY_NAME=my-minio-key
export MINIO_SSE_VAULT_AUTH_TYPE=approle
minio gateway s3
```
#### 2.2 Specify a master key
@ -126,7 +141,6 @@ head -c 32 /dev/urandom | xxd -c 32 -ps
```
### 3. Test your setup
To test this setup, start the minio server with the environment variables set in the previous steps; the server is then ready to handle SSE-S3 requests.
### Auto-Encryption

View file

@ -1,9 +1,9 @@
### Backend format `fs.json`
```go
// objectPartInfo Info of each part kept in the multipart metadata
// ObjectPartInfo Info of each part kept in the multipart metadata
// file after CompleteMultipartUpload() is called.
type objectPartInfo struct {
type ObjectPartInfo struct {
Number int `json:"number"`
Name string `json:"name"`
ETag string `json:"etag"`
@ -19,6 +19,6 @@ type fsMetaV1 struct {
} `json:"minio"`
// Metadata map for current object `fs.json`.
Meta map[string]string `json:"meta,omitempty"`
Parts []objectPartInfo `json:"parts,omitempty"`
Parts []ObjectPartInfo `json:"parts,omitempty"`
}
```

View file

@ -1,9 +1,9 @@
### Backend format `xl.json`
```go
// objectPartInfo Info of each part kept in the multipart metadata
// ObjectPartInfo Info of each part kept in the multipart metadata
// file after CompleteMultipartUpload() is called.
type objectPartInfo struct {
type ObjectPartInfo struct {
Number int `json:"number"`
Name string `json:"name"`
ETag string `json:"etag"`
@ -49,6 +49,6 @@ type xlMetaV1 struct {
// Metadata map for current object `xl.json`.
Meta map[string]string `json:"meta,omitempty"`
// Captures all the individual object `xl.json`.
Parts []objectPartInfo `json:"parts,omitempty"`
Parts []ObjectPartInfo `json:"parts,omitempty"`
}
```