Implement presigned policy

Harshavardhana 2015-10-01 23:51:17 -07:00
parent 09dc360e06
commit c8de5bad2f
18 changed files with 560 additions and 240 deletions


@ -35,6 +35,7 @@ import (
"github.com/minio/minio/pkg/crypto/sha512"
"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
const (
@ -235,7 +236,7 @@ func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64,
}
// WriteObject - write a new object into bucket
func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) {
func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
b.lock.Lock()
defer b.lock.Unlock()
if objectName == "" || objectData == nil {
@ -306,7 +307,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
//
// Signature mismatch occurred; all temp files are removed and all data is purged.
CleanupWritersOnError(writers)
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum)
@ -429,27 +430,25 @@ func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err *probe
}
// writeObjectData -
func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, size int64, writer io.Writer) (int, int, *probe.Error) {
func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, size int64, hashWriter io.Writer) (int, int, *probe.Error) {
encoder, err := newEncoder(k, m, "Cauchy")
chunkSize := int64(10 * 1024 * 1024)
if err != nil {
return 0, 0, err.Trace()
}
chunkSize := int64(10 * 1024 * 1024)
chunkCount := 0
totalLength := 0
remaining := size
for remaining > 0 {
readSize := chunkSize
if remaining < chunkSize {
readSize = remaining
}
remaining = remaining - readSize
totalLength = totalLength + int(readSize)
encodedBlocks, inputData, err := encoder.EncodeStream(objectData, readSize)
var e error
for e == nil {
var length int
inputData := make([]byte, chunkSize)
length, e = objectData.Read(inputData)
encodedBlocks, err := encoder.Encode(inputData)
if err != nil {
return 0, 0, err.Trace()
}
if _, err := writer.Write(inputData); err != nil {
if _, err := hashWriter.Write(inputData[0:length]); err != nil {
return 0, 0, probe.NewError(err)
}
for blockIndex, block := range encodedBlocks {
@ -464,8 +463,12 @@ func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData
return 0, 0, probe.NewError(err)
}
}
totalLength += length
chunkCount = chunkCount + 1
}
if e != io.EOF {
return 0, 0, probe.NewError(e)
}
return chunkCount, totalLength, nil
}
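
The rewritten loop above no longer sizes each read from the declared object length; it reads fixed 10 MiB chunks until io.EOF, which also covers uploads whose size is not known up front. A minimal standalone sketch of that read-until-EOF pattern, with a hypothetical processChunk callback standing in for the encode-and-write step:

package main

import (
	"fmt"
	"io"
	"strings"
)

// streamChunks reads fixed-size chunks from r and hands each one to
// processChunk until the reader is exhausted. Names are hypothetical.
func streamChunks(r io.Reader, chunkSize int64, processChunk func([]byte) error) (int, error) {
	total := 0
	var e error
	for e == nil {
		buf := make([]byte, chunkSize)
		var n int
		n, e = r.Read(buf)
		if n > 0 {
			// hand only the bytes actually read to the caller
			if err := processChunk(buf[:n]); err != nil {
				return total, err
			}
			total += n
		}
	}
	if e != io.EOF {
		return total, e
	}
	return total, nil
}

func main() {
	total, err := streamChunks(strings.NewReader("hello, world"), 4, func(chunk []byte) error {
		fmt.Printf("chunk: %q\n", chunk)
		return nil
	})
	fmt.Println(total, err)
}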


@ -36,6 +36,7 @@ import (
"github.com/minio/minio/pkg/crypto/sha512"
"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
// config files used inside Donut
@ -127,7 +128,7 @@ func (donut API) listObjects(bucket, prefix, marker, delimiter string, maxkeys i
}
// putObject - put object
func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
@ -159,7 +160,7 @@ func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Read
}
// putObjectPart - put object part
func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (PartMetadata, *probe.Error) {
func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *signv4.Signature) (PartMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return PartMetadata{}, probe.NewError(InvalidArgument{})
}
@ -336,7 +337,7 @@ func (donut API) listObjectParts(bucket, object string, resources ObjectResource
}
// completeMultipartUpload complete an incomplete multipart upload
func (donut API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
@ -374,7 +375,7 @@ func (donut API) completeMultipartUpload(bucket, object, uploadID string, data i
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}


@ -36,6 +36,7 @@ import (
"github.com/minio/minio/pkg/donut/cache/metadata"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/quick"
signv4 "github.com/minio/minio/pkg/signature"
)
// total Number of buckets allowed
@ -204,7 +205,7 @@ func (donut API) GetObject(w io.Writer, bucket string, object string, start, len
}
// GetBucketMetadata -
func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, *probe.Error) {
func (donut API) GetBucketMetadata(bucket string, signature *signv4.Signature) (BucketMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@ -214,7 +215,7 @@ func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketM
return BucketMetadata{}, err.Trace()
}
if !ok {
return BucketMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return BucketMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
@ -237,7 +238,7 @@ func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketM
}
// SetBucketMetadata -
func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) *probe.Error {
func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, signature *signv4.Signature) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()
@ -247,7 +248,7 @@ func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, si
return err.Trace()
}
if !ok {
return probe.NewError(SignatureDoesNotMatch{})
return probe.NewError(signv4.DoesNotMatch{})
}
}
@ -288,7 +289,7 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error {
}
// CreateObject - create an object
func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@ -301,7 +302,7 @@ func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, da
}
// createObject - PUT object to cache buffer
func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
if len(donut.config.NodeDiskMap) == 0 {
if size > int64(donut.config.MaxSize) {
generic := GenericObjectError{Bucket: bucket, Object: key}
@ -381,10 +382,12 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
totalLength += int64(length)
go debug.FreeOSMemory()
}
if totalLength != size {
// Delete perhaps the object is already saved, due to the nature of append()
donut.objects.Delete(objectKey)
return ObjectMetadata{}, probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
if size != 0 {
if totalLength != size {
// Delete perhaps the object is already saved, due to the nature of append()
donut.objects.Delete(objectKey)
return ObjectMetadata{}, probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
}
}
if err != io.EOF {
return ObjectMetadata{}, probe.NewError(err)
@ -403,7 +406,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
@ -425,7 +428,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
}
// MakeBucket - create bucket in cache
func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signature *Signature) *probe.Error {
func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signature *signv4.Signature) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()
@ -445,7 +448,7 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur
return err.Trace()
}
if !ok {
return probe.NewError(SignatureDoesNotMatch{})
return probe.NewError(signv4.DoesNotMatch{})
}
}
@ -484,7 +487,7 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur
}
// ListObjects - list objects from cache
func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, signature *Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, signature *signv4.Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@ -494,7 +497,7 @@ func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, s
return nil, BucketResourcesMetadata{}, err.Trace()
}
if !ok {
return nil, BucketResourcesMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return nil, BucketResourcesMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
@ -587,7 +590,7 @@ func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// ListBuckets - List buckets from cache
func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Error) {
func (donut API) ListBuckets(signature *signv4.Signature) ([]BucketMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@ -597,7 +600,7 @@ func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Err
return nil, err.Trace()
}
if !ok {
return nil, probe.NewError(SignatureDoesNotMatch{})
return nil, probe.NewError(signv4.DoesNotMatch{})
}
}
@ -621,7 +624,7 @@ func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Err
}
// GetObjectMetadata - get object metadata from cache
func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) GetObjectMetadata(bucket, key string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@ -632,7 +635,7 @@ func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (Ob
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
} else {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
@ -640,7 +643,7 @@ func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (Ob
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
}


@ -17,8 +17,6 @@
package donut
import (
"io"
encoding "github.com/minio/minio/pkg/erasure"
"github.com/minio/minio/pkg/probe"
)
@ -83,14 +81,6 @@ func (e encoder) Encode(data []byte) ([][]byte, *probe.Error) {
return encodedData, nil
}
func (e encoder) EncodeStream(data io.Reader, size int64) ([][]byte, []byte, *probe.Error) {
encodedData, inputData, err := e.encoder.EncodeStream(data, size)
if err != nil {
return nil, nil, probe.NewError(err)
}
return encodedData, inputData, nil
}
// Decode - erasure decode input encoded bytes
func (e encoder) Decode(encodedData [][]byte, dataLength int) ([]byte, *probe.Error) {
decodedData, err := e.encoder.Decode(encodedData, dataLength)


@ -125,6 +125,13 @@ func (e ChecksumMismatch) Error() string {
return "Checksum mismatch"
}
// MissingPOSTPolicy missing post policy
type MissingPOSTPolicy struct{}
func (e MissingPOSTPolicy) Error() string {
return "Missing POST policy in multipart form"
}
// MissingErasureTechnique missing erasure technique
type MissingErasureTechnique struct{}
@ -318,37 +325,6 @@ func (e InvalidUploadID) Error() string {
return "Invalid upload id " + e.UploadID
}
// SignatureDoesNotMatch invalid signature
type SignatureDoesNotMatch struct {
SignatureSent string
SignatureCalculated string
}
func (e SignatureDoesNotMatch) Error() string {
return "The request signature we calculated does not match the signature you provided"
}
// ExpiredPresignedRequest request already expired
type ExpiredPresignedRequest struct{}
func (e ExpiredPresignedRequest) Error() string {
return "Presigned request already expired"
}
// MissingExpiresQuery expires query string missing
type MissingExpiresQuery struct{}
func (e MissingExpiresQuery) Error() string {
return "Missing expires query string"
}
// MissingDateHeader date header missing
type MissingDateHeader struct{}
func (e MissingDateHeader) Error() string {
return "Missing date header"
}
// InvalidPart One or more of the specified parts could not be found
type InvalidPart struct{}


@ -20,6 +20,7 @@ import (
"io"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
// Collection of Donut specification interfaces
@ -33,31 +34,31 @@ type Interface interface {
// CloudStorage is a donut cloud storage interface
type CloudStorage interface {
// Storage service operations
GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, *probe.Error)
SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) *probe.Error
ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Error)
MakeBucket(bucket string, ACL string, location io.Reader, signature *Signature) *probe.Error
GetBucketMetadata(bucket string, signature *signv4.Signature) (BucketMetadata, *probe.Error)
SetBucketMetadata(bucket string, metadata map[string]string, signature *signv4.Signature) *probe.Error
ListBuckets(signature *signv4.Signature) ([]BucketMetadata, *probe.Error)
MakeBucket(bucket string, ACL string, location io.Reader, signature *signv4.Signature) *probe.Error
// Bucket operations
ListObjects(string, BucketResourcesMetadata, *Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error)
ListObjects(string, BucketResourcesMetadata, *signv4.Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error)
// Object operations
GetObject(w io.Writer, bucket, object string, start, length int64) (int64, *probe.Error)
GetObjectMetadata(bucket, object string, signature *Signature) (ObjectMetadata, *probe.Error)
GetObjectMetadata(bucket, object string, signature *signv4.Signature) (ObjectMetadata, *probe.Error)
// bucket, object, expectedMD5Sum, size, reader, metadata, signature
CreateObject(string, string, string, int64, io.Reader, map[string]string, *Signature) (ObjectMetadata, *probe.Error)
CreateObject(string, string, string, int64, io.Reader, map[string]string, *signv4.Signature) (ObjectMetadata, *probe.Error)
Multipart
}
// Multipart API
type Multipart interface {
NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, *probe.Error)
AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) *probe.Error
CreateObjectPart(string, string, string, int, string, string, int64, io.Reader, *Signature) (string, *probe.Error)
CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error)
ListMultipartUploads(string, BucketMultipartResourcesMetadata, *Signature) (BucketMultipartResourcesMetadata, *probe.Error)
ListObjectParts(string, string, ObjectResourcesMetadata, *Signature) (ObjectResourcesMetadata, *probe.Error)
NewMultipartUpload(bucket, key, contentType string, signature *signv4.Signature) (string, *probe.Error)
AbortMultipartUpload(bucket, key, uploadID string, signature *signv4.Signature) *probe.Error
CreateObjectPart(string, string, string, int, string, string, int64, io.Reader, *signv4.Signature) (string, *probe.Error)
CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error)
ListMultipartUploads(string, BucketMultipartResourcesMetadata, *signv4.Signature) (BucketMultipartResourcesMetadata, *probe.Error)
ListObjectParts(string, string, ObjectResourcesMetadata, *signv4.Signature) (ObjectResourcesMetadata, *probe.Error)
}
// Management is a donut management system interface


@ -35,12 +35,13 @@ import (
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/donut/cache/data"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
/// V2 API functions
// NewMultipartUpload - initiate a new multipart session
func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, *probe.Error) {
func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *signv4.Signature) (string, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@ -56,7 +57,7 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *
return "", err.Trace()
}
if !ok {
return "", probe.NewError(SignatureDoesNotMatch{})
return "", probe.NewError(signv4.DoesNotMatch{})
}
}
// if len(donut.config.NodeDiskMap) > 0 {
@ -88,7 +89,7 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *
}
// AbortMultipartUpload - abort an incomplete multipart session
func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) *probe.Error {
func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *signv4.Signature) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()
@ -104,7 +105,7 @@ func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *S
return err.Trace()
}
if !ok {
return probe.NewError(SignatureDoesNotMatch{})
return probe.NewError(signv4.DoesNotMatch{})
}
}
// TODO: multipart support for donut is broken, since we haven't finalized the format in which
@ -125,7 +126,7 @@ func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *S
}
// CreateObjectPart - create a part in a multipart session
func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (string, *probe.Error) {
donut.lock.Lock()
etag, err := donut.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data, signature)
donut.lock.Unlock()
@ -136,7 +137,7 @@ func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, cont
}
// createObjectPart - internal wrapper function called by CreateObjectPart
func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (string, *probe.Error) {
if !IsValidBucket(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
}
@ -240,7 +241,7 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
return "", err.Trace()
}
if !ok {
return "", probe.NewError(SignatureDoesNotMatch{})
return "", probe.NewError(signv4.DoesNotMatch{})
}
}
}
@ -303,7 +304,7 @@ func (donut API) mergeMultipart(parts *CompleteMultipartUpload, uploadID string,
}
// CompleteMultipartUpload - complete a multipart upload and persist the data
func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
size := int64(donut.multiPartObjects[uploadID].Stats().Bytes)
@ -321,7 +322,7 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R
return objectMetadata, nil
}
func (donut API) completeMultipartUploadV2(bucket, key, uploadID string, data io.Reader, signature *Signature) (io.Reader, *probe.Error) {
func (donut API) completeMultipartUploadV2(bucket, key, uploadID string, data io.Reader, signature *signv4.Signature) (io.Reader, *probe.Error) {
if !IsValidBucket(bucket) {
return nil, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
@ -355,7 +356,7 @@ func (donut API) completeMultipartUploadV2(bucket, key, uploadID string, data io
return nil, err.Trace()
}
if !ok {
return nil, probe.NewError(SignatureDoesNotMatch{})
return nil, probe.NewError(signv4.DoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}
@ -380,7 +381,7 @@ func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
// ListMultipartUploads - list incomplete multipart sessions for a given bucket
func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata, signature *Signature) (BucketMultipartResourcesMetadata, *probe.Error) {
func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata, signature *signv4.Signature) (BucketMultipartResourcesMetadata, *probe.Error) {
// TODO handle delimiter, low priority
donut.lock.Lock()
defer donut.lock.Unlock()
@ -391,7 +392,7 @@ func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartRe
return BucketMultipartResourcesMetadata{}, err.Trace()
}
if !ok {
return BucketMultipartResourcesMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return BucketMultipartResourcesMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
@ -465,7 +466,7 @@ func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
// ListObjectParts - list parts from incomplete multipart session for a given object
func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata, signature *Signature) (ObjectResourcesMetadata, *probe.Error) {
func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata, signature *signv4.Signature) (ObjectResourcesMetadata, *probe.Error) {
// Verify upload id
donut.lock.Lock()
defer donut.lock.Unlock()
@ -476,7 +477,7 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
return ObjectResourcesMetadata{}, err.Trace()
}
if !ok {
return ObjectResourcesMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectResourcesMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}


@ -22,7 +22,6 @@ package erasure
import "C"
import (
"errors"
"io"
"unsafe"
)
@ -195,53 +194,3 @@ func (e *Erasure) Encode(inputData []byte) (encodedBlocks [][]byte, err error) {
return encodedBlocks, nil
}
// EncodeStream erasure codes a block of data in "k" data blocks and "m" parity blocks.
// Output is [k+m][]blocks of data and parity slices.
func (e *Erasure) EncodeStream(data io.Reader, size int64) ([][]byte, []byte, error) {
k := int(e.params.K) // "k" data blocks
m := int(e.params.M) // "m" parity blocks
n := k + m // "n" total encoded blocks
// Length of a single encoded chunk.
// Total number of encoded chunks = "k" data + "m" parity blocks
encodedBlockLen := GetEncodedBlockLen(int(size), uint8(k))
// Length of total number of "n" data chunks
encodedDataBlocksLen := encodedBlockLen * n
// allocate byte array for encodedBlock length
inputData := make([]byte, size, encodedDataBlocksLen)
_, err := io.ReadFull(data, inputData)
if err != nil {
// do not check for io.ErrUnexpectedEOF; we know the exact amount of data
// to be read, so a short read means the reader was prematurely closed and
// we need to return an error.
if err != io.EOF {
return nil, nil, err
}
}
// Allocate memory to the "encoded blocks" return buffer
encodedBlocks := make([][]byte, n) // Return buffer
// Necessary to bridge Go to the C world. C requires a 2D array of pointers to
// byte array. "encodedBlocks" is a 2D slice.
pointersToEncodedBlock := make([]*byte, n) // Pointers to encoded blocks.
// Copy data block slices to encoded block buffer
for i := 0; i < n; i++ {
encodedBlocks[i] = inputData[i*encodedBlockLen : (i+1)*encodedBlockLen]
pointersToEncodedBlock[i] = &encodedBlocks[i][0]
}
// Erasure code the data into K data blocks and M parity
// blocks. Only the parity blocks are filled. Data blocks remain
// intact.
C.ec_encode_data(C.int(encodedBlockLen), C.int(k), C.int(m), e.encodeTbls,
(**C.uchar)(unsafe.Pointer(&pointersToEncodedBlock[:k][0])), // Pointers to data blocks
(**C.uchar)(unsafe.Pointer(&pointersToEncodedBlock[k:][0]))) // Pointers to parity blocks
return encodedBlocks, inputData[0:size], nil
}

pkg/signature/errors.go (new file, 32 lines)

@ -0,0 +1,32 @@
package signature
// MissingDateHeader date header missing
type MissingDateHeader struct{}
func (e MissingDateHeader) Error() string {
return "Missing date header"
}
// MissingExpiresQuery expires query string missing
type MissingExpiresQuery struct{}
func (e MissingExpiresQuery) Error() string {
return "Missing expires query string"
}
// ExpiredPresignedRequest request already expired
type ExpiredPresignedRequest struct{}
func (e ExpiredPresignedRequest) Error() string {
return "Presigned request already expired"
}
// DoesNotMatch invalid signature
type DoesNotMatch struct {
SignatureSent string
SignatureCalculated string
}
func (e DoesNotMatch) Error() string {
return "The request signature we calculated does not match the signature you provided"
}


@ -0,0 +1,141 @@
package signature
import (
"encoding/json"
"fmt"
"reflect"
"time"
"github.com/minio/minio/pkg/probe"
)
// toString - Safely convert interface to string without causing panic.
func toString(val interface{}) string {
switch v := val.(type) {
case string:
return v
}
return ""
}
// toInteger - Safely convert interface to integer without causing panic.
func toInteger(val interface{}) int {
switch v := val.(type) {
case int:
return v
}
return 0
}
// isString - Safely check if val is of type string without causing panic.
func isString(val interface{}) bool {
switch val.(type) {
case string:
return true
}
return false
}
// PostPolicyForm provides strict static type conversion and validation for Amazon S3's POST policy JSON string.
type PostPolicyForm struct {
Expiration time.Time // Expiration date and time of the POST policy.
Conditions struct { // Conditional policy structure.
Policies map[string]struct {
Operator string
Value string
}
ContentLengthRange struct {
Min int
Max int
}
}
}
// ParsePostPolicyForm - Parse JSON policy string into typed PostPolicyForm structure.
func ParsePostPolicyForm(policy string) (PostPolicyForm, *probe.Error) {
// Convert policy into interfaces and
// perform strict type conversion using reflection.
var rawPolicy struct {
Expiration string `json:"expiration"`
Conditions []interface{} `json:"conditions"`
}
e := json.Unmarshal([]byte(policy), &rawPolicy)
if e != nil {
return PostPolicyForm{}, probe.NewError(e)
}
parsedPolicy := PostPolicyForm{}
// Parse expiry time.
parsedPolicy.Expiration, e = time.Parse(time.RFC3339Nano, rawPolicy.Expiration)
if e != nil {
return PostPolicyForm{}, probe.NewError(e)
}
parsedPolicy.Conditions.Policies = make(map[string]struct {
Operator string
Value string
})
// Parse conditions.
for _, val := range rawPolicy.Conditions {
switch condt := val.(type) {
case map[string]interface{}: // Handle key:value map types.
for k, v := range condt {
if !isString(v) { // Pre-check value type.
// All values must be of type string.
return parsedPolicy, probe.NewError(fmt.Errorf("Unknown type ‘%s’ of conditional field value ‘%s’ found in POST policy form.",
reflect.TypeOf(condt).String(), condt))
}
// {"acl": "public-read" } is an alternate way to indicate - [ "eq", "$acl", "public-read" ]
// In this case we will just collapse this into "eq" for all use cases.
parsedPolicy.Conditions.Policies["$"+k] = struct {
Operator string
Value string
}{
Operator: "eq",
Value: toString(v),
}
}
case []interface{}: // Handle array types.
if len(condt) != 3 { // Return error if we have insufficient elements.
return parsedPolicy, probe.NewError(fmt.Errorf("Malformed conditional fields ‘%s’ of type ‘%s’ found in POST policy form.",
condt, reflect.TypeOf(condt).String()))
}
switch toString(condt[0]) {
case "eq", "starts-with":
for _, v := range condt { // Pre-check all values for type.
if !isString(v) {
// All values must be of type string.
return parsedPolicy, probe.NewError(fmt.Errorf("Unknown type ‘%s’ of conditional field value ‘%s’ found in POST policy form.",
reflect.TypeOf(condt).String(), condt))
}
}
operator, matchType, value := toString(condt[0]), toString(condt[1]), toString(condt[2])
parsedPolicy.Conditions.Policies[matchType] = struct {
Operator string
Value string
}{
Operator: operator,
Value: value,
}
case "content-length-range":
parsedPolicy.Conditions.ContentLengthRange = struct {
Min int
Max int
}{
Min: toInteger(condt[1]),
Max: toInteger(condt[2]),
}
default:
// Condition should be valid.
return parsedPolicy, probe.NewError(fmt.Errorf("Unknown type ‘%s’ of conditional field value ‘%s’ found in POST policy form.",
reflect.TypeOf(condt).String(), condt))
}
default:
return parsedPolicy, probe.NewError(fmt.Errorf("Unknown field ‘%s’ of type ‘%s’ found in POST policy form.",
condt, reflect.TypeOf(condt).String()))
}
}
return parsedPolicy, nil
}
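
A usage sketch for ParsePostPolicyForm, using the signv4 import alias seen elsewhere in this commit; the policy JSON, bucket and key values are invented for illustration:

package main

import (
	"fmt"

	signv4 "github.com/minio/minio/pkg/signature"
)

func main() {
	// Hypothetical policy document resembling what a browser-based POST upload sends.
	policyJSON := `{
	  "expiration": "2015-12-01T12:00:00.000Z",
	  "conditions": [
	    {"bucket": "testbucket"},
	    ["starts-with", "$key", "uploads/"],
	    {"acl": "public-read"}
	  ]
	}`
	form, err := signv4.ParsePostPolicyForm(policyJSON)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Map-style conditions collapse to "eq"; array-style conditions keep their operator.
	fmt.Println(form.Conditions.Policies["$bucket"]) // {eq testbucket}
	fmt.Println(form.Conditions.Policies["$key"])    // {starts-with uploads/}
	fmt.Println(form.Conditions.Policies["$acl"])    // {eq public-read}
}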


@ -14,7 +14,7 @@
* limitations under the License.
*/
package donut
package signature
import (
"bytes"
@ -38,7 +38,7 @@ type Signature struct {
AccessKeyID string
SecretAccessKey string
Presigned bool
PresignedPolicy bool
PresignedPolicy []byte
SignedHeaders []string
Signature string
Request *http.Request
@ -57,18 +57,18 @@ func sumHMAC(key []byte, data []byte) []byte {
return hash.Sum(nil)
}
// urlEncodedName encode the strings from UTF-8 byte representations to HTML hex escape sequences
// getURLEncodedName encode the strings from UTF-8 byte representations to HTML hex escape sequences
//
// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8;
// non-English characters cannot be parsed due to the way url.Encode() is written.
//
// This function on the other hand is a direct replacement for url.Encode() technique to support
// pretty much every UTF-8 character.
func urlEncodeName(name string) (string, *probe.Error) {
func getURLEncodedName(name string) string {
// if object matches reserved string, no need to encode them
reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
if reservedNames.MatchString(name) {
return name, nil
return name
}
var encodedName string
for _, s := range name {
@ -83,7 +83,7 @@ func urlEncodeName(name string) (string, *probe.Error) {
default:
len := utf8.RuneLen(s)
if len < 0 {
return "", probe.NewError(InvalidArgument{})
return name
}
u := make([]byte, len)
utf8.EncodeRune(u, s)
@ -93,7 +93,7 @@ func urlEncodeName(name string) (string, *probe.Error) {
}
}
}
return encodedName, nil
return encodedName
}
// getCanonicalHeaders generate a list of request headers with their values
@ -166,7 +166,7 @@ func (r Signature) extractSignedHeaders() map[string][]string {
func (r *Signature) getCanonicalRequest() string {
payload := r.Request.Header.Get(http.CanonicalHeaderKey("x-amz-content-sha256"))
r.Request.URL.RawQuery = strings.Replace(r.Request.URL.Query().Encode(), "+", "%20", -1)
encodedPath, _ := urlEncodeName(r.Request.URL.Path)
encodedPath := getURLEncodedName(r.Request.URL.Path)
// convert any space strings back to "+"
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
@ -192,7 +192,7 @@ func (r *Signature) getCanonicalRequest() string {
//
func (r *Signature) getPresignedCanonicalRequest(presignedQuery string) string {
rawQuery := strings.Replace(presignedQuery, "+", "%20", -1)
encodedPath, _ := urlEncodeName(r.Request.URL.Path)
encodedPath := getURLEncodedName(r.Request.URL.Path)
// convert any space strings back to "+"
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
@ -243,8 +243,17 @@ func (r *Signature) getSignature(signingKey []byte, stringToSign string) string
// DoesPolicySignatureMatch - Verify query headers with post policy
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (r *Signature) DoesPolicySignatureMatch() (bool, *probe.Error) {
// FIXME: Implement this
func (r *Signature) DoesPolicySignatureMatch(date string) (bool, *probe.Error) {
t, err := time.Parse(iso8601Format, date)
if err != nil {
return false, probe.NewError(err)
}
signingKey := r.getSigningKey(t)
stringToSign := string(r.PresignedPolicy)
newSignature := r.getSignature(signingKey, stringToSign)
if newSignature != r.Signature {
return false, nil
}
return true, nil
}
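
For POST uploads the SigV4 string-to-sign is just the base64-encoded policy, signed with the usual derived key. The standalone sketch below follows the documented AWS key-derivation chain; the helper names and all input values are placeholders, not part of this package:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func sumHMAC(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

// postPolicySignature derives the SigV4 signing key (date -> region -> service
// -> "aws4_request") and signs the base64-encoded policy with it.
func postPolicySignature(secretKey, dateStamp, region, base64Policy string) string {
	dateKey := sumHMAC([]byte("AWS4"+secretKey), []byte(dateStamp)) // dateStamp like "20151002"
	regionKey := sumHMAC(dateKey, []byte(region))
	serviceKey := sumHMAC(regionKey, []byte("s3"))
	signingKey := sumHMAC(serviceKey, []byte("aws4_request"))
	return hex.EncodeToString(sumHMAC(signingKey, []byte(base64Policy)))
}

func main() {
	// Placeholder inputs for illustration only.
	fmt.Println(postPolicySignature("SECRETKEY", "20151002", "milkyway", "eyJleHBpcmF0aW9uIjoi..."))
}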


@ -22,9 +22,10 @@ import (
"github.com/gorilla/mux"
"github.com/minio/minio/pkg/donut"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
func (api MinioAPI) isValidOp(w http.ResponseWriter, req *http.Request, acceptsContentType contentType) bool {
func (api API) isValidOp(w http.ResponseWriter, req *http.Request, acceptsContentType contentType) bool {
vars := mux.Vars(req)
bucket := vars["bucket"]
@ -67,7 +68,7 @@ func (api MinioAPI) isValidOp(w http.ResponseWriter, req *http.Request, acceptsC
// using the Initiate Multipart Upload request, but has not yet been completed or aborted.
// This operation returns at most 1,000 multipart uploads in the response.
//
func (api MinioAPI) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) {
func (api API) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
@ -94,7 +95,7 @@ func (api MinioAPI) ListMultipartUploadsHandler(w http.ResponseWriter, req *http
vars := mux.Vars(req)
bucket := vars["bucket"]
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -110,7 +111,7 @@ func (api MinioAPI) ListMultipartUploadsHandler(w http.ResponseWriter, req *http
if err != nil {
errorIf(err.Trace(), "ListMultipartUploads failed.", nil)
switch err.ToGoError().(type) {
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
case donut.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path)
@ -134,7 +135,7 @@ func (api MinioAPI) ListMultipartUploadsHandler(w http.ResponseWriter, req *http
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
func (api MinioAPI) ListObjectsHandler(w http.ResponseWriter, req *http.Request) {
func (api API) ListObjectsHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
@ -166,7 +167,7 @@ func (api MinioAPI) ListObjectsHandler(w http.ResponseWriter, req *http.Request)
vars := mux.Vars(req)
bucket := vars["bucket"]
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -190,7 +191,7 @@ func (api MinioAPI) ListObjectsHandler(w http.ResponseWriter, req *http.Request)
return
}
switch err.ToGoError().(type) {
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
case donut.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path)
@ -210,7 +211,7 @@ func (api MinioAPI) ListObjectsHandler(w http.ResponseWriter, req *http.Request)
// -----------
// This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request.
func (api MinioAPI) ListBucketsHandler(w http.ResponseWriter, req *http.Request) {
func (api API) ListBucketsHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
@ -228,7 +229,7 @@ func (api MinioAPI) ListBucketsHandler(w http.ResponseWriter, req *http.Request)
// return
// }
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -252,7 +253,7 @@ func (api MinioAPI) ListBucketsHandler(w http.ResponseWriter, req *http.Request)
return
}
switch err.ToGoError().(type) {
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
default:
errorIf(err.Trace(), "ListBuckets failed.", nil)
@ -263,7 +264,7 @@ func (api MinioAPI) ListBucketsHandler(w http.ResponseWriter, req *http.Request)
// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for authenticated request
func (api MinioAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
func (api API) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
@ -295,7 +296,7 @@ func (api MinioAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
vars := mux.Vars(req)
bucket := vars["bucket"]
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -321,7 +322,7 @@ func (api MinioAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
if err != nil {
errorIf(err.Trace(), "MakeBucket failed.", nil)
switch err.ToGoError().(type) {
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
case donut.TooManyBuckets:
writeErrorResponse(w, req, TooManyBuckets, acceptsContentType, req.URL.Path)
@ -339,10 +340,95 @@ func (api MinioAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
writeSuccessResponse(w, acceptsContentType)
}
// PostPolicyBucketHandler - POST policy
// ----------
// This implementation of the POST operation handles object creation with a specified
// signature policy in multipart/form-data
func (api API) PostPolicyBucketHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
// Here the parameter is the size of the form data that should
// be loaded in memory, the remaining being put in temporary
// files
reader, err := req.MultipartReader()
if err != nil {
errorIf(probe.NewError(err), "Unable to initialize multipart reader.", nil)
writeErrorResponse(w, req, MalformedPOSTRequest, 1, req.URL.Path)
return
}
fileBody, formValues, perr := extractHTTPFormValues(reader)
if perr != nil {
errorIf(perr.Trace(), "Unable to parse form values.", nil)
writeErrorResponse(w, req, MalformedPOSTRequest, 1, req.URL.Path)
return
}
bucket := mux.Vars(req)["bucket"]
formValues["Bucket"] = bucket
object := formValues["key"]
signature, perr := initPostPresignedPolicyV4(formValues)
if perr != nil {
errorIf(perr.Trace(), "Unable to initialize post policy presigned.", nil)
writeErrorResponse(w, req, MalformedPOSTRequest, 1, req.URL.Path)
return
}
if perr = applyPolicy(formValues, signature.PresignedPolicy); perr != nil {
errorIf(perr.Trace(), "Invalid request, policy doesn't match with the endpoint.", nil)
writeErrorResponse(w, req, MalformedPOSTRequest, 1, req.URL.Path)
return
}
var ok bool
if ok, perr = signature.DoesPolicySignatureMatch(formValues["X-Amz-Date"]); perr != nil {
errorIf(perr.Trace(), "Unable to verify signature.", nil)
writeErrorResponse(w, req, SignatureDoesNotMatch, 1, req.URL.Path)
return
}
if ok == false {
writeErrorResponse(w, req, SignatureDoesNotMatch, 1, req.URL.Path)
return
}
metadata, perr := api.Donut.CreateObject(bucket, object, "", 0, fileBody, nil, nil)
if perr != nil {
errorIf(perr.Trace(), "CreateObject failed.", nil)
switch perr.ToGoError().(type) {
case donut.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, 1, req.URL.Path)
case donut.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, 1, req.URL.Path)
case donut.ObjectExists:
writeErrorResponse(w, req, MethodNotAllowed, 1, req.URL.Path)
case donut.BadDigest:
writeErrorResponse(w, req, BadDigest, 1, req.URL.Path)
case signv4.MissingDateHeader:
writeErrorResponse(w, req, RequestTimeTooSkewed, 1, req.URL.Path)
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, 1, req.URL.Path)
case donut.IncompleteBody:
writeErrorResponse(w, req, IncompleteBody, 1, req.URL.Path)
case donut.EntityTooLarge:
writeErrorResponse(w, req, EntityTooLarge, 1, req.URL.Path)
case donut.InvalidDigest:
writeErrorResponse(w, req, InvalidDigest, 1, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, 1, req.URL.Path)
}
return
}
w.Header().Set("ETag", metadata.MD5Sum)
writeSuccessResponse(w, 1)
}
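
For reference, a client-side sketch of the multipart/form-data request this handler expects. The field names follow the standard S3 POST-upload convention; the endpoint, credential, policy and signature values are placeholders rather than anything produced by this code base:

package main

import (
	"bytes"
	"mime/multipart"
	"net/http"
	"strings"
)

// postPolicyUpload builds the kind of browser-style POST upload this handler
// accepts. The endpoint, credential, policy and signature values are placeholders.
func postPolicyUpload(endpoint, bucket string) error {
	body := &bytes.Buffer{}
	w := multipart.NewWriter(body)
	fields := map[string]string{
		"key":              "uploads/hello.txt",
		"X-Amz-Algorithm":  "AWS4-HMAC-SHA256",
		"X-Amz-Credential": "ACCESSKEY/20151002/milkyway/s3/aws4_request",
		"X-Amz-Date":       "20151002T000000Z",
		"Policy":           "<base64-encoded policy>",
		"X-Amz-Signature":  "<hex signature computed over the Policy field>",
	}
	for name, value := range fields {
		if err := w.WriteField(name, value); err != nil {
			return err
		}
	}
	// The file part conventionally comes last in an S3 POST upload form.
	fw, err := w.CreateFormFile("file", "hello.txt")
	if err != nil {
		return err
	}
	if _, err := fw.Write([]byte("hello, world\n")); err != nil {
		return err
	}
	if err := w.Close(); err != nil {
		return err
	}
	req, err := http.NewRequest("POST", strings.TrimSuffix(endpoint, "/")+"/"+bucket, body)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	_ = postPolicyUpload("http://localhost:9000", "testbucket")
}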
// PutBucketACLHandler - PUT Bucket ACL
// ----------
// This implementation of the PUT operation modifies the bucketACL for authenticated request
func (api MinioAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) {
func (api API) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
@ -364,7 +450,7 @@ func (api MinioAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.Request
vars := mux.Vars(req)
bucket := vars["bucket"]
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -380,7 +466,7 @@ func (api MinioAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.Request
if err != nil {
errorIf(err.Trace(), "PutBucketACL failed.", nil)
switch err.ToGoError().(type) {
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
case donut.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path)
@ -400,7 +486,7 @@ func (api MinioAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.Request
// The operation returns a 200 OK if the bucket exists and you
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
func (api MinioAPI) HeadBucketHandler(w http.ResponseWriter, req *http.Request) {
func (api API) HeadBucketHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
@ -415,7 +501,7 @@ func (api MinioAPI) HeadBucketHandler(w http.ResponseWriter, req *http.Request)
vars := mux.Vars(req)
bucket := vars["bucket"]
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -431,7 +517,7 @@ func (api MinioAPI) HeadBucketHandler(w http.ResponseWriter, req *http.Request)
if err != nil {
errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
switch err.ToGoError().(type) {
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
case donut.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path)


@ -70,11 +70,12 @@ const (
InvalidPart
InvalidPartOrder
AuthorizationHeaderMalformed
MalformedPOSTRequest
)
// Error codes, non exhaustive list - standard HTTP errors
const (
NotAcceptable = iota + 30
NotAcceptable = iota + 31
)
// APIError code to Error structure map
@ -229,6 +230,11 @@ var errorCodeResponse = map[int]APIError{
Description: "The authorization header is malformed; the region is wrong; expecting 'milkyway'.",
HTTPStatusCode: http.StatusBadRequest,
},
MalformedPOSTRequest: {
Code: "MalformedPOSTRequest",
Description: "The body of your POST request is not well-formed multipart/form-data.",
HTTPStatusCode: http.StatusBadRequest,
},
}
// errorCodeError provides errorCode to Error. It returns empty if the code provided is unknown


@ -23,6 +23,7 @@ import (
"github.com/gorilla/mux"
"github.com/minio/minio/pkg/donut"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
const (
@ -33,7 +34,7 @@ const (
// ----------
// This implementation of the GET operation retrieves object. To use GET,
// you must have READ access to the object.
func (api MinioAPI) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
func (api API) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
// ticket master block
{
op := APIOperation{}
@ -53,7 +54,7 @@ func (api MinioAPI) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
bucket = vars["bucket"]
object = vars["object"]
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -85,7 +86,7 @@ func (api MinioAPI) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
if err != nil {
errorIf(err.Trace(), "GetObject failed.", nil)
switch err.ToGoError().(type) {
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
case donut.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path)
@ -116,7 +117,7 @@ func (api MinioAPI) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
// HeadObjectHandler - HEAD Object
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
func (api MinioAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Request) {
func (api API) HeadObjectHandler(w http.ResponseWriter, req *http.Request) {
// ticket master block
{
op := APIOperation{}
@ -136,7 +137,7 @@ func (api MinioAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Request)
bucket = vars["bucket"]
object = vars["object"]
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -152,7 +153,7 @@ func (api MinioAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Request)
if err != nil {
errorIf(err.Trace(), "GetObjectMetadata failed.", nil)
switch err.ToGoError().(type) {
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
case donut.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path)
@ -174,7 +175,7 @@ func (api MinioAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Request)
// PutObjectHandler - PUT Object
// ----------
// This implementation of the PUT operation adds an object to a bucket.
func (api MinioAPI) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
func (api API) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
@ -231,7 +232,7 @@ func (api MinioAPI) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
}
}
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -255,9 +256,9 @@ func (api MinioAPI) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path)
case donut.BadDigest:
writeErrorResponse(w, req, BadDigest, acceptsContentType, req.URL.Path)
case donut.MissingDateHeader:
case signv4.MissingDateHeader:
writeErrorResponse(w, req, RequestTimeTooSkewed, acceptsContentType, req.URL.Path)
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
case donut.IncompleteBody:
writeErrorResponse(w, req, IncompleteBody, acceptsContentType, req.URL.Path)
@ -277,7 +278,7 @@ func (api MinioAPI) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
/// Multipart API
// NewMultipartUploadHandler - New multipart upload
func (api MinioAPI) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
func (api API) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
@ -302,7 +303,7 @@ func (api MinioAPI) NewMultipartUploadHandler(w http.ResponseWriter, req *http.R
bucket = vars["bucket"]
object = vars["object"]
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -318,7 +319,7 @@ func (api MinioAPI) NewMultipartUploadHandler(w http.ResponseWriter, req *http.R
if err != nil {
errorIf(err.Trace(), "NewMultipartUpload failed.", nil)
switch err.ToGoError().(type) {
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
case donut.ObjectExists:
writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path)
@ -337,7 +338,7 @@ func (api MinioAPI) NewMultipartUploadHandler(w http.ResponseWriter, req *http.R
}
// PutObjectPartHandler - Upload part
func (api MinioAPI) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) {
func (api API) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
@ -399,7 +400,7 @@ func (api MinioAPI) PutObjectPartHandler(w http.ResponseWriter, req *http.Reques
}
}
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -421,7 +422,7 @@ func (api MinioAPI) PutObjectPartHandler(w http.ResponseWriter, req *http.Reques
writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path)
case donut.BadDigest:
writeErrorResponse(w, req, BadDigest, acceptsContentType, req.URL.Path)
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
case donut.IncompleteBody:
writeErrorResponse(w, req, IncompleteBody, acceptsContentType, req.URL.Path)
@ -439,7 +440,7 @@ func (api MinioAPI) PutObjectPartHandler(w http.ResponseWriter, req *http.Reques
}
// AbortMultipartUploadHandler - Abort multipart upload
func (api MinioAPI) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
func (api API) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
@ -460,7 +461,7 @@ func (api MinioAPI) AbortMultipartUploadHandler(w http.ResponseWriter, req *http
objectResourcesMetadata := getObjectResources(req.URL.Query())
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -476,7 +477,7 @@ func (api MinioAPI) AbortMultipartUploadHandler(w http.ResponseWriter, req *http
if err != nil {
errorIf(err.Trace(), "AbortMutlipartUpload failed.", nil)
switch err.ToGoError().(type) {
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
case donut.InvalidUploadID:
writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path)
@ -490,7 +491,7 @@ func (api MinioAPI) AbortMultipartUploadHandler(w http.ResponseWriter, req *http
}
// ListObjectPartsHandler - List object parts
func (api MinioAPI) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request) {
func (api API) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
@ -522,7 +523,7 @@ func (api MinioAPI) ListObjectPartsHandler(w http.ResponseWriter, req *http.Requ
bucket := vars["bucket"]
object := vars["object"]
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -538,7 +539,7 @@ func (api MinioAPI) ListObjectPartsHandler(w http.ResponseWriter, req *http.Requ
if err != nil {
errorIf(err.Trace(), "ListObjectParts failed.", nil)
switch err.ToGoError().(type) {
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
case donut.InvalidUploadID:
writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path)
@ -556,7 +557,7 @@ func (api MinioAPI) ListObjectPartsHandler(w http.ResponseWriter, req *http.Requ
}
// CompleteMultipartUploadHandler - Complete multipart upload
func (api MinioAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
func (api API) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
@ -577,7 +578,7 @@ func (api MinioAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, req *h
objectResourcesMetadata := getObjectResources(req.URL.Query())
var signature *donut.Signature
var signature *signv4.Signature
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
var err *probe.Error
@ -598,9 +599,9 @@ func (api MinioAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, req *h
writeErrorResponse(w, req, InvalidPart, acceptsContentType, req.URL.Path)
case donut.InvalidPartOrder:
writeErrorResponse(w, req, InvalidPartOrder, acceptsContentType, req.URL.Path)
case donut.MissingDateHeader:
case signv4.MissingDateHeader:
writeErrorResponse(w, req, RequestTimeTooSkewed, acceptsContentType, req.URL.Path)
case donut.SignatureDoesNotMatch:
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
case donut.IncompleteBody:
writeErrorResponse(w, req, IncompleteBody, acceptsContentType, req.URL.Path)
@ -622,13 +623,13 @@ func (api MinioAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, req *h
/// Delete API
// DeleteBucketHandler - Delete bucket
func (api MinioAPI) DeleteBucketHandler(w http.ResponseWriter, req *http.Request) {
func (api API) DeleteBucketHandler(w http.ResponseWriter, req *http.Request) {
error := getErrorCode(MethodNotAllowed)
w.WriteHeader(error.HTTPStatusCode)
}
// DeleteObjectHandler - Delete object
func (api MinioAPI) DeleteObjectHandler(w http.ResponseWriter, req *http.Request) {
func (api API) DeleteObjectHandler(w http.ResponseWriter, req *http.Request) {
error := getErrorCode(MethodNotAllowed)
w.WriteHeader(error.HTTPStatusCode)
}


@ -17,13 +17,19 @@
package main
import (
"bytes"
"encoding/base64"
"errors"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"strings"
"time"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/donut"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
const (
@ -95,7 +101,7 @@ func stripAccessKeyID(authHeaderValue string) (string, *probe.Error) {
}
// initSignatureV4 initializing signature verification
func initSignatureV4(req *http.Request) (*donut.Signature, *probe.Error) {
func initSignatureV4(req *http.Request) (*signv4.Signature, *probe.Error) {
// strip auth from authorization header
authHeaderValue := req.Header.Get("Authorization")
accessKeyID, err := stripAccessKeyID(authHeaderValue)
@ -111,7 +117,7 @@ func initSignatureV4(req *http.Request) (*donut.Signature, *probe.Error) {
signature := strings.Split(strings.TrimSpace(authFields[2]), "=")[1]
for _, user := range authConfig.Users {
if user.AccessKeyID == accessKeyID {
signature := &donut.Signature{
signature := &signv4.Signature{
AccessKeyID: user.AccessKeyID,
SecretAccessKey: user.SecretAccessKey,
Signature: signature,
@ -124,8 +130,112 @@ func initSignatureV4(req *http.Request) (*donut.Signature, *probe.Error) {
return nil, probe.NewError(errors.New("AccessKeyID not found"))
}
func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, *probe.Error) {
/// HTML Form values
formValues := make(map[string]string)
filePart := new(bytes.Buffer)
var err error
for err == nil {
var part *multipart.Part
part, err = reader.NextPart()
if part != nil {
if part.FileName() == "" {
buffer, err := ioutil.ReadAll(part)
if err != nil {
return nil, nil, probe.NewError(err)
}
formValues[part.FormName()] = string(buffer)
} else {
// FIXME: this will hog memory
_, err := io.Copy(filePart, part)
if err != nil {
return nil, nil, probe.NewError(err)
}
}
}
}
if err != io.EOF {
return nil, nil, probe.NewError(err)
}
return filePart, formValues, nil
}
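
extractHTTPFormValues buffers the file part in memory and flattens every other multipart field into a string map. A minimal sketch of a call site, assuming it sits in the same package as the code above; the handler name and the plain http.Error responses are illustrative, not part of this commit:

// exampleFormExtraction is a hypothetical call site for extractHTTPFormValues;
// the handler name and the error responses are illustrative only.
func exampleFormExtraction(w http.ResponseWriter, req *http.Request) {
	reader, e := req.MultipartReader() // browser POST uploads arrive as multipart/form-data
	if e != nil {
		http.Error(w, "expected a multipart form", http.StatusBadRequest)
		return
	}
	filePart, formValues, err := extractHTTPFormValues(reader)
	if err != nil {
		http.Error(w, "malformed form", http.StatusBadRequest)
		return
	}
	_ = filePart                      // io.Reader over the uploaded object bytes
	_ = formValues["X-Amz-Signature"] // every non-file field is available as a string
}
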
func applyPolicy(formValues map[string]string, policy []byte) *probe.Error {
if formValues["X-Amz-Algorithm"] != "AWS4-HMAC-SHA256" {
return probe.NewError(errUnsupportedAlgorithm)
}
postPolicyForm, perr := signv4.ParsePostPolicyForm(string(policy))
if perr != nil {
return perr.Trace()
}
if !postPolicyForm.Expiration.After(time.Now().UTC()) {
return probe.NewError(errPolicyAlreadyExpired)
}
if postPolicyForm.Conditions.Policies["$bucket"].Operator == "eq" {
if formValues["Bucket"] != postPolicyForm.Conditions.Policies["$bucket"].Value {
return probe.NewError(errPolicyMissingFields)
}
}
if postPolicyForm.Conditions.Policies["$x-amz-date"].Operator == "eq" {
if formValues["X-Amz-Date"] != postPolicyForm.Conditions.Policies["$x-amz-date"].Value {
return probe.NewError(errPolicyMissingFields)
}
}
if postPolicyForm.Conditions.Policies["$Content-Type"].Operator == "starts-with" {
if !strings.HasPrefix(formValues["Content-Type"], postPolicyForm.Conditions.Policies["$Content-Type"].Value) {
return probe.NewError(errPolicyMissingFields)
}
}
if postPolicyForm.Conditions.Policies["$Content-Type"].Operator == "eq" {
if formValues["Content-Type"] != postPolicyForm.Conditions.Policies["$Content-Type"].Value {
return probe.NewError(errPolicyMissingFields)
}
}
if postPolicyForm.Conditions.Policies["$key"].Operator == "starts-with" {
if !strings.HasPrefix(formValues["key"], postPolicyForm.Conditions.Policies["$key"].Value) {
return probe.NewError(errPolicyMissingFields)
}
}
if postPolicyForm.Conditions.Policies["$key"].Operator == "eq" {
if formValues["key"] != postPolicyForm.Conditions.Policies["$key"].Value {
return probe.NewError(errPolicyMissingFields)
}
}
return nil
}
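
applyPolicy cross-checks the submitted form fields against the conditions of the decoded policy document. The sketch below shows the kind of inputs it expects; it assumes ParsePostPolicyForm accepts the standard S3 POST-policy JSON layout, and every literal value (bucket, key, dates) is a placeholder:

// examplePolicyCheck feeds hypothetical form values and a matching policy
// document to applyPolicy; all literal values are examples only.
func examplePolicyCheck() *probe.Error {
	policyJSON := []byte(`{
	  "expiration": "2038-01-01T00:00:00.000Z",
	  "conditions": [
	    {"bucket": "testbucket"},
	    ["starts-with", "$key", "uploads/"],
	    {"x-amz-date": "20151002T000000Z"}
	  ]
	}`)
	formValues := map[string]string{
		"X-Amz-Algorithm": "AWS4-HMAC-SHA256",
		"Bucket":          "testbucket",
		"key":             "uploads/photo.png",
		"X-Amz-Date":      "20151002T000000Z",
	}
	// Returns errPolicyAlreadyExpired, errPolicyMissingFields or a parse error
	// when the form and policy disagree; nil when every condition holds.
	return applyPolicy(formValues, policyJSON)
}
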
// initPostPresignedPolicyV4 initializing post policy signature verification
func initPostPresignedPolicyV4(formValues map[string]string) (*signv4.Signature, *probe.Error) {
/// Decoding policy
policyBytes, err := base64.StdEncoding.DecodeString(formValues["Policy"])
if err != nil {
return nil, probe.NewError(err)
}
credentialElements := strings.Split(strings.TrimSpace(formValues["X-Amz-Credential"]), "/")
if len(credentialElements) != 5 {
return nil, probe.NewError(errCredentialTagMalformed)
}
accessKeyID := credentialElements[0]
if !auth.IsValidAccessKey(accessKeyID) {
return nil, probe.NewError(errAccessKeyIDInvalid)
}
authConfig, perr := auth.LoadConfig()
if perr != nil {
return nil, perr.Trace()
}
for _, user := range authConfig.Users {
if user.AccessKeyID == accessKeyID {
signature := &signv4.Signature{
AccessKeyID: user.AccessKeyID,
SecretAccessKey: user.SecretAccessKey,
Signature: formValues["X-Amz-Signature"],
PresignedPolicy: policyBytes,
}
return signature, nil
}
}
return nil, probe.NewError(errAccessKeyIDInvalid)
}
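
Taken together, the three helpers above supply the inputs for the new POST-policy path. One plausible ordering inside PostPolicyBucketHandler (the route is registered further down, but the handler body is outside these hunks, so treat this as an assumption):

// examplePostPolicyFlow sketches one plausible ordering of the helpers above;
// the final object write is elided because it is not shown in these hunks.
func examplePostPolicyFlow(req *http.Request) (*signv4.Signature, io.Reader, *probe.Error) {
	reader, e := req.MultipartReader()
	if e != nil {
		return nil, nil, probe.NewError(e)
	}
	filePart, formValues, err := extractHTTPFormValues(reader)
	if err != nil {
		return nil, nil, err.Trace()
	}
	signature, err := initPostPresignedPolicyV4(formValues)
	if err != nil {
		return nil, nil, err.Trace()
	}
	// The decoded policy travels on the signature, so the same bytes can be
	// checked against the submitted form fields.
	if err := applyPolicy(formValues, signature.PresignedPolicy); err != nil {
		return nil, nil, err.Trace()
	}
	return signature, filePart, nil
}
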
// initPresignedSignatureV4 initializing presigned signature verification
func initPresignedSignatureV4(req *http.Request) (*donut.Signature, *probe.Error) {
func initPresignedSignatureV4(req *http.Request) (*signv4.Signature, *probe.Error) {
credentialElements := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-Credential")), "/")
if len(credentialElements) != 5 {
return nil, probe.NewError(errCredentialTagMalformed)
@ -142,7 +252,7 @@ func initPresignedSignatureV4(req *http.Request) (*donut.Signature, *probe.Error
signature := strings.TrimSpace(req.URL.Query().Get("X-Amz-Signature"))
for _, user := range authConfig.Users {
if user.AccessKeyID == accessKeyID {
signature := &donut.Signature{
signature := &signv4.Signature{
AccessKeyID: user.AccessKeyID,
SecretAccessKey: user.SecretAccessKey,
Signature: signature,
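
Both this presigned-URL path and the form-based path above split the X-Amz-Credential tag on "/" and expect the five-element SigV4 credential scope. An illustrative value; the access key, date and region are placeholders:

// exampleCredentialScope shows the layout behind the len(...) != 5 check;
// the access key, date and region below are placeholders.
func exampleCredentialScope() string {
	credential := "AKIAIOSFODNN7EXAMPLE/20151002/us-east-1/s3/aws4_request"
	elements := strings.Split(credential, "/") // access key, date, region, service, "aws4_request" terminator
	return elements[0]                         // the element looked up in the auth config
}
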

View file

@ -19,42 +19,52 @@ package main
import "errors"
// errMissingAuthHeaderValue means that Authorization header
// has missing value or it is empty
// has a missing or empty value.
var errMissingAuthHeaderValue = errors.New("Missing auth header value")
// errInvalidAuthHeaderValue means that Authorization
// header is available but is malformed and not in
// accordance with signature v4
// accordance with signature v4.
var errInvalidAuthHeaderValue = errors.New("Invalid auth header value")
// errInvalidAuthHeaderPrefix means that Authorization header
// has a wrong prefix only supported value should be "AWS4-HMAC-SHA256"
// has a wrong prefix; the only supported value is "AWS4-HMAC-SHA256".
var errInvalidAuthHeaderPrefix = errors.New("Invalid auth header prefix")
// errMissingFieldsAuthHeader means that Authorization
// header is available but has some missing fields
// header is available but has some missing fields.
var errMissingFieldsAuthHeader = errors.New("Missing fields in auth header")
// errMissingFieldsCredentialTag means that Authorization
// header credentials tag has some missing fields
// header credentials tag has some missing fields.
var errMissingFieldsCredentialTag = errors.New("Missing fields in credential tag")
// errMissingFieldsSignedHeadersTag means that Authorization
// header signed headers tag has some missing fields
// header signed headers tag has some missing fields.
var errMissingFieldsSignedHeadersTag = errors.New("Missing fields in signed headers tag")
// errMissingFieldsSignatureTag means that Authorization
// header signature tag has missing fields
// header signature tag has missing fields.
var errMissingFieldsSignatureTag = errors.New("Missing fields in signature tag")
// errCredentialTagMalformed means that Authorization header
// credential tag is malformed
// credential tag is malformed.
var errCredentialTagMalformed = errors.New("Malformed credential tag")
// errInvalidRegion means that the region element from credential tag
// in Authorization header is invalid
// in Authorization header is invalid.
var errInvalidRegion = errors.New("Invalid region")
// errAccessKeyIDInvalid means that the accessKeyID element from
// credential tag in Authorization header is invalid
// credential tag in Authorization header is invalid.
var errAccessKeyIDInvalid = errors.New("AccessKeyID invalid")
// errUnsupportedAlgorithm means that the provided X-Amz-Algorithm is unsupported.
var errUnsupportedAlgorithm = errors.New("Unsupported Algorithm")
// errPolicyAlreadyExpired means that the client request carries a post policy
// header which is already expired.
var errPolicyAlreadyExpired = errors.New("Policy already expired")
// errPolicyMissingFields means that one or more form values are missing or do not match the post policy.
var errPolicyMissingFields = errors.New("Some fields are missing or do not match in policy")

View file

@ -121,9 +121,9 @@ func configureServerRPC(conf minioConfig, rpcHandler http.Handler) (*http.Server
}
// Start ticket master
func startTM(a MinioAPI) {
func startTM(api API) {
for {
for op := range a.OP {
for op := range api.OP {
op.ProceedCh <- struct{}{}
}
}
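
startTM is the consumer side of the ticket master: it drains the OP channel and signals each queued operation. The producer side is the "Ticket master block" that opens every handler, whose body is truncated by the hunks above; the sketch below is therefore an assumption about its shape, not a copy of it:

// exampleTicketMasterBlock sketches the assumed handler-side handshake; the
// method name is hypothetical.
func (api API) exampleTicketMasterBlock() {
	op := APIOperation{ProceedCh: make(chan struct{})}
	api.OP <- op   // queue the operation for the ticket master loop
	<-op.ProceedCh // block until startTM signals that the request may proceed
}
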

View file

@ -26,11 +26,12 @@ import (
)
// registerAPI - register all the object API handlers to their respective paths
func registerAPI(mux *router.Router, a MinioAPI) {
func registerAPI(mux *router.Router, a API) {
mux.HandleFunc("/", a.ListBucketsHandler).Methods("GET")
mux.HandleFunc("/{bucket}", a.ListObjectsHandler).Methods("GET")
mux.HandleFunc("/{bucket}", a.PutBucketHandler).Methods("PUT")
mux.HandleFunc("/{bucket}", a.HeadBucketHandler).Methods("HEAD")
mux.HandleFunc("/{bucket}", a.PostPolicyBucketHandler).Methods("POST")
mux.HandleFunc("/{bucket}/{object:.*}", a.HeadObjectHandler).Methods("HEAD")
mux.HandleFunc("/{bucket}/{object:.*}", a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}").Methods("PUT")
mux.HandleFunc("/{bucket}/{object:.*}", a.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}").Methods("GET")
@ -61,26 +62,26 @@ type APIOperation struct {
ProceedCh chan struct{}
}
// MinioAPI container for API and also carries OP (operation) channel
type MinioAPI struct {
// API is a container for the object API handlers and also carries the OP (operation) channel
type API struct {
OP chan APIOperation
Donut donut.Interface
}
// getNewAPI instantiates a new minio API
func getNewAPI() MinioAPI {
func getNewAPI() API {
// fatal if donut cannot be instantiated
d, err := donut.New()
fatalIf(err.Trace(), "Instantiating donut failed.", nil)
return MinioAPI{
return API{
OP: make(chan APIOperation),
Donut: d,
}
}
// getAPIHandler api handler
func getAPIHandler(minioAPI MinioAPI) http.Handler {
func getAPIHandler(api API) http.Handler {
var mwHandlers = []MiddlewareHandler{
ValidContentTypeHandler,
TimeValidityHandler,
@ -90,7 +91,7 @@ func getAPIHandler(minioAPI MinioAPI) http.Handler {
CorsHandler,
}
mux := router.NewRouter()
registerAPI(mux, minioAPI)
registerAPI(mux, api)
apiHandler := registerCustomMiddleware(mux, mwHandlers...)
return apiHandler
}
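
A minimal sketch of how these constructors are assumed to fit together at startup; the address and the bare http.ListenAndServe call are placeholders, since the real server setup (configureServerRPC and friends) lives elsewhere in this commit:

// runAPIExample wires the pieces above; illustrative only.
func runAPIExample() error {
	api := getNewAPI()            // donut-backed API container
	go startTM(api)               // keep draining api.OP so handlers can proceed
	handler := getAPIHandler(api) // router plus the middleware chain above
	return http.ListenAndServe(":9000", handler)
}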