accessPolicy: Implement Put, Get, Delete access policy.

This patch implements Get, Put, and Delete bucket policies.

Supporting - http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html

Currently supports following actions.

   "*":                             true,
   "s3:*":                          true,
   "s3:GetObject":                  true,
   "s3:ListBucket":                 true,
   "s3:PutObject":                  true,
   "s3:CreateBucket":               true,
   "s3:GetBucketLocation":          true,
   "s3:DeleteBucket":               true,
   "s3:DeleteObject":               true,
   "s3:AbortMultipartUpload":       true,
   "s3:ListBucketMultipartUploads": true,
   "s3:ListMultipartUploadParts":   true,

The following condition keys are supported for "StringEquals" and "StringNotEquals":

   "s3:prefix", "s3:max-keys"
This commit is contained in:
Harshavardhana 2016-02-03 16:46:56 -08:00
parent 846410c563
commit d5057b3c51
24 changed files with 1107 additions and 755 deletions

View file

@ -42,7 +42,8 @@ type APIErrorResponse struct {
// Error codes, non exhaustive list - http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
const (
AccessDenied = iota
None = iota
AccessDenied
BadDigest
BucketAlreadyExists
EntityTooSmall
@ -60,11 +61,13 @@ const (
InvalidRequestBody
InvalidCopySource
InvalidCopyDest
InvalidPolicyDocument
MalformedXML
MissingContentLength
MissingContentMD5
MissingRequestBodyError
NoSuchBucket
NoSuchBucketPolicy
NoSuchKey
NoSuchUpload
NotImplemented
@ -80,6 +83,7 @@ const (
RootPathFull
ObjectExistsAsPrefix
AllAccessDisabled
MalformedPolicy
)
// APIError code to Error structure map
@ -119,6 +123,11 @@ var errorCodeResponse = map[int]APIError{
Description: "Argument partNumberMarker must be an integer.",
HTTPStatusCode: http.StatusBadRequest,
},
InvalidPolicyDocument: {
Code: "InvalidPolicyDocument",
Description: "The content of the form does not meet the conditions specified in the policy document.",
HTTPStatusCode: http.StatusBadRequest,
},
AccessDenied: {
Code: "AccessDenied",
Description: "Access Denied.",
@ -199,6 +208,11 @@ var errorCodeResponse = map[int]APIError{
Description: "The specified bucket does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
NoSuchBucketPolicy: {
Code: "NoSuchBucketPolicy",
Description: "The specified bucket does not have a bucket policy.",
HTTPStatusCode: http.StatusNotFound,
},
NoSuchKey: {
Code: "NoSuchKey",
Description: "The specified key does not exist.",
@ -274,6 +288,11 @@ var errorCodeResponse = map[int]APIError{
Description: "All access to this bucket has been disabled.",
HTTPStatusCode: http.StatusForbidden,
},
MalformedPolicy: {
Code: "MalformedPolicy",
Description: "Policy has invalid resource",
HTTPStatusCode: http.StatusBadRequest,
},
}
// errorCodeError provides errorCode to Error. It returns empty if the code provided is unknown

View file

@ -268,43 +268,6 @@ func generateListBucketsResponse(buckets []fs.BucketMetadata) ListBucketsRespons
return data
}
// generateAccessControlPolicyResponse builds the AccessControlPolicy
// response for the given bucket ACL. The "minio" owner always receives
// FULL_CONTROL; public-read adds a READ grant for AllUsers and
// public-read-write adds both READ and WRITE grants for AllUsers.
func generateAccessControlPolicyResponse(acl fs.BucketACL) AccessControlPolicyResponse {
	// newGrant builds a grant for the "minio" grantee with the given
	// group URI (may be empty) and permission.
	newGrant := func(uri, permission string) Grant {
		grant := Grant{}
		grant.Grantee.ID = "minio"
		grant.Grantee.DisplayName = "minio"
		grant.Grantee.URI = uri
		grant.Permission = permission
		return grant
	}
	response := AccessControlPolicyResponse{}
	response.Owner = Owner{
		ID:          "minio",
		DisplayName: "minio",
	}
	// Owner always retains full control.
	response.AccessControlList.Grants = append(response.AccessControlList.Grants, newGrant("", "FULL_CONTROL"))
	allUsersURI := "http://acs.amazonaws.com/groups/global/AllUsers"
	switch {
	case acl.IsPublicRead():
		response.AccessControlList.Grants = append(response.AccessControlList.Grants, newGrant(allUsersURI, "READ"))
	case acl.IsPublicReadWrite():
		response.AccessControlList.Grants = append(response.AccessControlList.Grants, newGrant(allUsersURI, "READ"))
		response.AccessControlList.Grants = append(response.AccessControlList.Grants, newGrant(allUsersURI, "WRITE"))
	}
	return response
}
// generates an ListObjects response for the said bucket with other enumerated options.
func generateListObjectsResponse(bucket, prefix, marker, delimiter string, maxKeys int, resp fs.ListObjectsResult) ListObjectsResponse {
var contents []Object

View file

@ -61,41 +61,77 @@ func isRequestPresignedSignatureV4(r *http.Request) bool {
// Verify if request has AWS Post policy Signature Version '4'.
func isRequestPostPolicySignatureV4(r *http.Request) bool {
if _, ok := r.Header["Content-Type"]; ok {
if strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") {
if strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") && r.Method == "POST" {
return true
}
}
return false
}
// Verify if request requires ACL check.
func isRequestRequiresACLCheck(r *http.Request) bool {
if isRequestSignatureV4(r) || isRequestPresignedSignatureV4(r) || isRequestPostPolicySignatureV4(r) {
// isRequestAnonymous reports whether the incoming request carries no
// recognized authentication: no JWT, no V4 signed or presigned
// signature, and no POST policy signature.
func isRequestAnonymous(r *http.Request) bool {
	authenticated := isRequestJWT(r) ||
		isRequestSignatureV4(r) ||
		isRequestPresignedSignatureV4(r) ||
		isRequestPostPolicySignatureV4(r)
	return !authenticated
}
// Authorization type.
type authType int

// List of all supported auth types.
// Values are assigned by getRequestAuthType based on the request's
// Authorization header and signature shape.
const (
// authTypeUnknown - Authorization header present but of an unrecognized scheme.
authTypeUnknown authType = iota
// authTypeAnonymous - no Authorization header present.
authTypeAnonymous
// authTypePresigned - classified by isRequestPresignedSignatureV4.
authTypePresigned
// authTypePostPolicy - classified by isRequestPostPolicySignatureV4.
authTypePostPolicy
// authTypeSigned - classified by isRequestSignatureV4.
authTypeSigned
// authTypeJWT - classified by isRequestJWT.
authTypeJWT
)
// getRequestAuthType classifies the incoming request by the kind of
// authentication it carries.
func getRequestAuthType(r *http.Request) authType {
	// Requests without an Authorization header are anonymous.
	if _, ok := r.Header["Authorization"]; !ok {
		return authTypeAnonymous
	}
	switch {
	case isRequestSignatureV4(r):
		return authTypeSigned
	case isRequestPresignedSignatureV4(r):
		return authTypePresigned
	case isRequestJWT(r):
		return authTypeJWT
	case isRequestPostPolicySignatureV4(r):
		return authTypePostPolicy
	}
	// Authorization header present but not a recognized scheme.
	return authTypeUnknown
}
// Verify if request has valid AWS Signature Version '4'.
func isSignV4ReqAuthenticated(sign *signature4.Sign, r *http.Request) bool {
func isSignV4ReqAuthenticated(sign *signature4.Sign, r *http.Request) (match bool, s3Error int) {
auth := sign.SetHTTPRequestToVerify(r)
if isRequestSignatureV4(r) {
dummyPayload := sha256.Sum256([]byte(""))
ok, err := auth.DoesSignatureMatch(hex.EncodeToString(dummyPayload[:]))
if err != nil {
errorIf(err.Trace(), "Signature verification failed.", nil)
return false
return false, InternalError
}
return ok
if !ok {
return false, SignatureDoesNotMatch
}
return ok, None
} else if isRequestPresignedSignatureV4(r) {
ok, err := auth.DoesPresignedSignatureMatch()
if err != nil {
errorIf(err.Trace(), "Presigned signature verification failed.", nil)
return false
return false, InternalError
}
return ok
if !ok {
return false, SignatureDoesNotMatch
}
return ok, None
}
return false
return false, AccessDenied
}
// authHandler - handles all the incoming authorization headers and
@ -111,41 +147,21 @@ func setAuthHandler(h http.Handler) http.Handler {
// handler for validating incoming authorization headers.
func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Verify if request is presigned, validate signature inside each handlers.
if isRequestPresignedSignatureV4(r) {
switch getRequestAuthType(r) {
case authTypeAnonymous, authTypePresigned, authTypeSigned, authTypePostPolicy:
// Let top level caller validate for anonymous and known
// signed requests.
a.handler.ServeHTTP(w, r)
return
}
// Verify if request has post policy signature, validate signature
// inside POST policy handler.
if isRequestPostPolicySignatureV4(r) && r.Method == "POST" {
a.handler.ServeHTTP(w, r)
return
}
// No authorization found, let the top level caller validate if
// public request is allowed.
if _, ok := r.Header["Authorization"]; !ok {
a.handler.ServeHTTP(w, r)
return
}
// Verify if the signature algorithms are known.
if !isRequestSignatureV4(r) && !isRequestJWT(r) {
writeErrorResponse(w, r, SignatureVersionNotSupported, r.URL.Path)
return
}
// Verify JWT authorization header is present.
if isRequestJWT(r) {
// Validate Authorization header if its valid.
case authTypeJWT:
// Validate Authorization header if its valid for JWT request.
if !isJWTReqAuthenticated(r) {
w.WriteHeader(http.StatusUnauthorized)
return
}
a.handler.ServeHTTP(w, r)
default:
writeErrorResponse(w, r, SignatureVersionNotSupported, r.URL.Path)
return
}
// For all other signed requests, let top level caller verify.
a.handler.ServeHTTP(w, r)
}

View file

@ -1,71 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import "net/http"
// Please read for more information - http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
//
// Here We are only supporting 'acl's through request headers not through their request body
// http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#setting-acls
// Minio only supports three types for now i.e 'private, public-read, public-read-write'
// ACLType - different acl types
type ACLType int

// Canned ACL types; values correspond to the 'x-amz-acl' header values
// handled by getACLType/getACLTypeString.
const (
unsupportedACLType ACLType = iota // any unrecognized 'x-amz-acl' value
privateACLType // "private" (also the default when the header is absent)
publicReadACLType // "public-read"
publicReadWriteACLType // "public-read-write"
)
// getACLType maps the request's 'x-amz-acl' header to an ACLType.
// A missing header defaults to private; any unrecognized value maps to
// unsupportedACLType.
func getACLType(req *http.Request) ACLType {
	switch req.Header.Get("x-amz-acl") {
	case "", "private":
		// Absent header defaults to private.
		return privateACLType
	case "public-read":
		return publicReadACLType
	case "public-read-write":
		return publicReadWriteACLType
	}
	return unsupportedACLType
}
// getACLTypeString converts an ACLType to its canonical string form.
// unsupportedACLType yields the empty string; any unknown value is
// treated as "private".
func getACLTypeString(acl ACLType) string {
	aclStrings := map[ACLType]string{
		privateACLType:         "private",
		publicReadACLType:      "public-read",
		publicReadWriteACLType: "public-read-write",
		unsupportedACLType:     "",
	}
	if s, ok := aclStrings[acl]; ok {
		return s
	}
	// Any unlisted ACL type defaults to private.
	return "private"
}

View file

@ -26,14 +26,56 @@ import (
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"strings"
mux "github.com/gorilla/mux"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/fs"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/s3/access"
"github.com/minio/minio/pkg/s3/signature4"
)
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
// enforceBucketPolicy - validate an anonymous request's action against
// the saved bucket access policy. Returns whether the action is allowed
// and the S3 error code to report when it is not.
func enforceBucketPolicy(action string, bucket string, reqURL *url.URL) (isAllowed bool, s3Error int) {
	// Read saved bucket policy.
	policy, err := readBucketPolicy(bucket)
	if err != nil {
		errorIf(err.Trace(bucket), "GetBucketPolicy failed.", nil)
		switch err.ToGoError().(type) {
		case fs.BucketNotFound:
			return false, NoSuchBucket
		case fs.BucketNameInvalid:
			return false, InvalidBucketName
		default:
			// For any other error just return AccessDenied.
			return false, AccessDenied
		}
	}
	// Parse the saved policy.
	accessPolicy, e := accesspolicy.Validate(policy)
	if e != nil {
		errorIf(probe.NewError(e), "Parse policy failed.", nil)
		return false, AccessDenied
	}
	// Construct resource in 'arn:aws:s3:::examplebucket' format.
	resource := accesspolicy.AWSResourcePrefix + strings.TrimPrefix(reqURL.Path, "/")
	// Gather conditions for policy verification from the query string.
	// BUGFIX: previously the lookup used the literal string "queryParam"
	// (reqURL.Query().Get("queryParam")) instead of the loop variable,
	// so every condition value was empty. Also hoist Query() out of the
	// loop since it re-parses the raw query on every call.
	queryValues := reqURL.Query()
	conditions := make(map[string]string, len(queryValues))
	for queryParam := range queryValues {
		conditions[queryParam] = queryValues.Get(queryParam)
	}
	// Validate action, resource and conditions with current policy statements.
	if !bucketPolicyEvalStatements(action, resource, conditions, accessPolicy.Statements) {
		return false, AccessDenied
	}
	return true, None
}
// GetBucketLocationHandler - GET Bucket location.
// -------------------------
// This operation returns bucket location.
@ -41,14 +83,23 @@ func (api storageAPI) GetBucketLocationHandler(w http.ResponseWriter, r *http.Re
vars := mux.Vars(r)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(r) {
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:GetBucketLocation", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
case authTypeSigned, authTypePresigned:
match, s3Error := isSignV4ReqAuthenticated(api.Signature, r)
if !match {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
_, err := api.Filesystem.GetBucketMetadata(bucket)
@ -88,14 +139,23 @@ func (api storageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, r *http
vars := mux.Vars(r)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(r) {
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:ListBucketMultipartUploads", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
case authTypePresigned, authTypeSigned:
match, s3Error := isSignV4ReqAuthenticated(api.Signature, r)
if !match {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
resources := getBucketMultipartResources(r.URL.Query())
@ -137,16 +197,23 @@ func (api storageAPI) ListObjectsHandler(w http.ResponseWriter, r *http.Request)
vars := mux.Vars(r)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:ListBucket", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
case authTypeSigned, authTypePresigned:
match, s3Error := isSignV4ReqAuthenticated(api.Signature, r)
if !match {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
// TODO handle encoding type.
@ -190,14 +257,17 @@ func (api storageAPI) ListObjectsHandler(w http.ResponseWriter, r *http.Request)
// This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request.
func (api storageAPI) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
if isRequestRequiresACLCheck(r) {
// List buckets does not support bucket policies.
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
case authTypeSigned, authTypePresigned:
if match, s3Error := isSignV4ReqAuthenticated(api.Signature, r); !match {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
buckets, err := api.Filesystem.ListBuckets()
@ -220,11 +290,6 @@ func (api storageAPI) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *htt
vars := mux.Vars(r)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(r) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
// Content-Length is required and should be non-zero
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if r.ContentLength <= 0 {
@ -253,8 +318,19 @@ func (api storageAPI) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *htt
return
}
// Check if request is presigned.
if isRequestPresignedSignatureV4(r) {
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:DeleteObject", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
case authTypePresigned:
// Check if request is presigned.
ok, err := auth.DoesPresignedSignatureMatch()
if err != nil {
errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
@ -265,7 +341,7 @@ func (api storageAPI) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *htt
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
} else if isRequestSignatureV4(r) {
case authTypeSigned:
// Check if request is signed.
sha := sha256.New()
mdSh := md5.New()
@ -356,31 +432,20 @@ func (api storageAPI) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(r) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
// read from 'x-amz-acl'
aclType := getACLType(r)
if aclType == unsupportedACLType {
writeErrorResponse(w, r, NotImplemented, r.URL.Path)
return
}
// if body of request is non-nil then check for validity of Content-Length
if r.Body != nil {
/// if Content-Length is unknown/missing, deny the request
if r.ContentLength == -1 && !contains(r.TransferEncoding, "chunked") {
writeErrorResponse(w, r, MissingContentLength, r.URL.Path)
return
}
}
// Set http request for signature.
auth := api.Signature.SetHTTPRequestToVerify(r)
if isRequestPresignedSignatureV4(r) {
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:CreateBucket", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
case authTypePresigned:
ok, err := auth.DoesPresignedSignatureMatch()
if err != nil {
errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
@ -391,7 +456,7 @@ func (api storageAPI) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
} else if isRequestSignatureV4(r) {
case authTypeSigned:
// Verify signature for the incoming body if any.
locationBytes, e := ioutil.ReadAll(r.Body)
if e != nil {
@ -414,7 +479,7 @@ func (api storageAPI) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
}
// Make bucket.
err := api.Filesystem.MakeBucket(bucket, getACLTypeString(aclType))
err := api.Filesystem.MakeBucket(bucket)
if err != nil {
errorIf(err.Trace(), "MakeBucket failed.", nil)
switch err.ToGoError().(type) {
@ -538,87 +603,6 @@ func (api storageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Req
writeSuccessResponse(w, nil)
}
// PutBucketACLHandler - PUT Bucket ACL
// ----------
// This implementation of the PUT operation modifies the bucketACL for authenticated request
func (api storageAPI) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(r) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
// read from 'x-amz-acl'
aclType := getACLType(r)
if aclType == unsupportedACLType {
writeErrorResponse(w, r, NotImplemented, r.URL.Path)
return
}
err := api.Filesystem.SetBucketMetadata(bucket, map[string]string{"acl": getACLTypeString(aclType)})
if err != nil {
errorIf(err.Trace(), "PutBucketACL failed.", nil)
switch err.ToGoError().(type) {
case fs.BucketNameInvalid:
writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
case fs.BucketNotFound:
writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
default:
writeErrorResponse(w, r, InternalError, r.URL.Path)
}
return
}
writeSuccessResponse(w, nil)
}
// GetBucketACLHandler - GET ACL on a Bucket
// ----------
// This operation uses acl subresource to the return the ``acl``
// of a bucket. One must have permission to access the bucket to
// know its ``acl``. This operation willl return response of 404
// if bucket not found and 403 for invalid credentials.
func (api storageAPI) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(r) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
bucketMetadata, err := api.Filesystem.GetBucketMetadata(bucket)
if err != nil {
errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
switch err.ToGoError().(type) {
case fs.BucketNotFound:
writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.BucketNameInvalid:
writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
default:
writeErrorResponse(w, r, InternalError, r.URL.Path)
}
return
}
// Generate response
response := generateAccessControlPolicyResponse(bucketMetadata.ACL)
encodedSuccessResponse := encodeResponse(response)
// Write headers
setCommonHeaders(w)
// Write success response.
writeSuccessResponse(w, encodedSuccessResponse)
}
// HeadBucketHandler - HEAD Bucket
// ----------
// This operation is useful to determine if a bucket exists.
@ -629,18 +613,18 @@ func (api storageAPI) HeadBucketHandler(w http.ResponseWriter, r *http.Request)
vars := mux.Vars(r)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
case authTypePresigned, authTypeSigned:
if match, s3Error := isSignV4ReqAuthenticated(api.Signature, r); !match {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
_, err := api.Filesystem.GetBucketMetadata(bucket)
if err != nil {
errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
@ -662,14 +646,22 @@ func (api storageAPI) DeleteBucketHandler(w http.ResponseWriter, r *http.Request
vars := mux.Vars(r)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(r) {
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:DeleteBucket", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
case authTypePresigned, authTypeSigned:
if match, s3Error := isSignV4ReqAuthenticated(api.Signature, r); !match {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
err := api.Filesystem.DeleteBucket(bucket)
@ -685,5 +677,10 @@ func (api storageAPI) DeleteBucketHandler(w http.ResponseWriter, r *http.Request
}
return
}
// Delete bucket access policy, if present - ignore any errors.
removeBucketPolicy(bucket)
// Write success response.
writeSuccessNoContent(w)
}

283
bucket-policy-handlers.go Normal file
View file

@ -0,0 +1,283 @@
/*
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"io"
"io/ioutil"
"net/http"
"regexp"
"strings"
mux "github.com/gorilla/mux"
"github.com/minio/minio/pkg/fs"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/s3/access"
)
// maxAccessPolicySize - maximum supported access policy size.
// BUGFIX: the value was 20 * 1024 * 1024 (20MiB), contradicting both
// its own trailing comment and the 20KB AWS limit referenced where the
// policy body is read (access-policy-language-overview.html).
const maxAccessPolicySize = 20 * 1024 // 20KiB.
// bucketPolicyEvalStatements - evaluate action/resource/conditions
// against every policy statement. The first statement that matches
// decides the outcome: Effect "Allow" grants access, any other effect
// denies it. When no statement matches, access is denied.
func bucketPolicyEvalStatements(action string, resource string, conditions map[string]string, statements []accesspolicy.Statement) bool {
	for _, statement := range statements {
		if !bucketPolicyMatchStatement(action, resource, conditions, statement) {
			continue
		}
		// First matching statement wins.
		return statement.Effect == "Allow"
	}
	// No statement matched - deny by default.
	return false
}
// bucketPolicyMatchStatement - report whether the given action,
// resource and conditions all satisfy the policy statement. The three
// checks short-circuit in the same order as before: action, then
// resource, then conditions.
func bucketPolicyMatchStatement(action string, resource string, conditions map[string]string, statement accesspolicy.Statement) bool {
	return bucketPolicyActionMatch(action, statement) &&
		bucketPolicyResourceMatch(resource, statement) &&
		bucketPolicyConditionMatch(conditions, statement)
}
// bucketPolicyActionMatch - verify whether the requested action matches
// any action pattern in the policy statement.
func bucketPolicyActionMatch(action string, statement accesspolicy.Statement) bool {
	for _, policyAction := range statement.Actions {
		// Wildcard actions grant everything. Handled explicitly because
		// "*" on its own is NOT a valid regular expression: the
		// MatchString call below would return a compile error for it.
		if policyAction == "*" || policyAction == "s3:*" {
			return true
		}
		// Policy action can be a regex, validate the action with matching string.
		matched, e := regexp.MatchString(policyAction, action)
		if e != nil {
			// BUGFIX: previously a malformed pattern in a stored policy
			// hit fatalIf and took the whole server down. A bad pattern
			// in user-supplied data must only fail this match.
			errorIf(probe.NewError(e), "Invalid pattern, please verify the pattern string.", nil)
			continue
		}
		if matched {
			return true
		}
	}
	return false
}
// bucketPolicyResourceMatch - verify whether the requested resource
// matches any resource pattern in the policy statement.
func bucketPolicyResourceMatch(resource string, statement accesspolicy.Statement) bool {
	for _, presource := range statement.Resources {
		// Policy resource can be a regex, validate with matching string.
		matched, e := regexp.MatchString(presource, strings.TrimPrefix(resource, "/"))
		if e != nil {
			// BUGFIX: previously a malformed pattern in a stored policy
			// hit fatalIf and took the whole server down. A bad pattern
			// in user-supplied data must only fail this match.
			errorIf(probe.NewError(e), "Invalid pattern, please verify the pattern string.", nil)
			continue
		}
		// For any path matches, we return quickly and let the caller continue.
		if matched {
			return true
		}
	}
	return false
}
// bucketPolicyConditionMatch - verify the request conditions against
// the statement's condition clauses.
//
// Supported conditions:
//   - StringEquals
//   - StringNotEquals
//
// Supported condition keys for each:
//   - s3:prefix
//   - s3:max-keys
func bucketPolicyConditionMatch(conditions map[string]string, statement accesspolicy.Statement) bool {
	for condition, conditionKeys := range statement.Conditions {
		switch condition {
		case "StringEquals":
			// Both supported keys must equal the request's values.
			if conditionKeys["s3:prefix"] != conditions["prefix"] ||
				conditionKeys["s3:max-keys"] != conditions["max-keys"] {
				return false
			}
		case "StringNotEquals":
			// Both supported keys must differ from the request's values.
			if conditionKeys["s3:prefix"] == conditions["prefix"] ||
				conditionKeys["s3:max-keys"] == conditions["max-keys"] {
				return false
			}
		}
	}
	return true
}
// PutBucketPolicyHandler - PUT Bucket policy
// -----------------
// This implementation of the PUT operation uses the policy
// subresource to add to or replace a policy on a bucket.
func (api storageAPI) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	bucket := vars["bucket"]

	// If Content-Length is unknown or zero, deny the
	// request. PutBucketPolicy always needs a Content-Length if
	// incoming request is not chunked.
	if !contains(r.TransferEncoding, "chunked") {
		if r.ContentLength == -1 || r.ContentLength == 0 {
			writeErrorResponse(w, r, MissingContentLength, r.URL.Path)
			return
		}
		// If Content-Length is greater than maximum allowed policy size.
		if r.ContentLength > maxAccessPolicySize {
			writeErrorResponse(w, r, EntityTooLarge, r.URL.Path)
			return
		}
	}

	// Read access policy up to maxAccessPolicySize.
	// http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
	// bucket policies are limited to 20KB in size, using a limit reader.
	accessPolicyBytes, e := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize))
	if e != nil {
		errorIf(probe.NewError(e).Trace(bucket), "Reading policy failed.", nil)
		writeErrorResponse(w, r, InternalError, r.URL.Path)
		return
	}

	// Parse and validate the access policy document.
	accessPolicy, e := accesspolicy.Validate(accessPolicyBytes)
	if e != nil {
		writeErrorResponse(w, r, InvalidPolicyDocument, r.URL.Path)
		return
	}

	// If the policy resource has a different bucket name, reject it.
	for _, statement := range accessPolicy.Statements {
		for _, resource := range statement.Resources {
			resourcePrefix := strings.SplitAfter(resource, accesspolicy.AWSResourcePrefix)[1]
			if !strings.HasPrefix(resourcePrefix, bucket) {
				writeErrorResponse(w, r, MalformedPolicy, r.URL.Path)
				return
			}
		}
	}

	// Set http request for signature verification.
	auth := api.Signature.SetHTTPRequestToVerify(r)
	if isRequestPresignedSignatureV4(r) {
		ok, err := auth.DoesPresignedSignatureMatch()
		if err != nil {
			errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
			writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
			return
		}
		if !ok {
			writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
			return
		}
	} else if isRequestSignatureV4(r) {
		// Verify the signature over the sha256 of the policy payload.
		sh := sha256.New()
		sh.Write(accessPolicyBytes)
		// BUGFIX: use the request-bound verifier 'auth' here; previously
		// this branch called api.Signature.DoesSignatureMatch directly,
		// bypassing the SetHTTPRequestToVerify binding made above.
		ok, err := auth.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
		if err != nil {
			errorIf(err.Trace(string(accessPolicyBytes)), "SaveBucketPolicy failed.", nil)
			writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
			return
		}
		if !ok {
			writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
			return
		}
	} else {
		// BUGFIX: previously a request that was neither presigned nor
		// signed (e.g. fully anonymous) fell through both branches with
		// no verification at all and the policy was saved. Deny it,
		// consistent with the other handlers' unknown-auth handling.
		writeErrorResponse(w, r, AccessDenied, r.URL.Path)
		return
	}

	// Save bucket policy.
	err := writeBucketPolicy(bucket, accessPolicyBytes)
	if err != nil {
		errorIf(err.Trace(bucket), "SaveBucketPolicy failed.", nil)
		switch err.ToGoError().(type) {
		case fs.BucketNameInvalid:
			writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
		default:
			writeErrorResponse(w, r, InternalError, r.URL.Path)
		}
		return
	}
	writeSuccessNoContent(w)
}
// DeleteBucketPolicyHandler - DELETE Bucket policy
// -----------------
// This implementation of the DELETE operation uses the policy
// subresource to remove an existing policy from a bucket.
func (api storageAPI) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
	bucket := mux.Vars(r)["bucket"]
	// Validate incoming signature.
	if ok, s3Error := isSignV4ReqAuthenticated(api.Signature, r); !ok {
		writeErrorResponse(w, r, s3Error, r.URL.Path)
		return
	}
	// Delete bucket access policy.
	if err := removeBucketPolicy(bucket); err != nil {
		errorIf(err.Trace(bucket), "DeleteBucketPolicy failed.", nil)
		switch err.ToGoError().(type) {
		case fs.BucketNameInvalid:
			writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
		case fs.BucketPolicyNotFound:
			writeErrorResponse(w, r, NoSuchBucketPolicy, r.URL.Path)
		default:
			writeErrorResponse(w, r, InternalError, r.URL.Path)
		}
		return
	}
	writeSuccessNoContent(w)
}
// GetBucketPolicyHandler - GET Bucket policy
// -----------------
// This operation uses the policy
// subresource to return the policy of a specified bucket.
func (api storageAPI) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
	bucket := mux.Vars(r)["bucket"]

	// Validate incoming signature.
	if match, s3Error := isSignV4ReqAuthenticated(api.Signature, r); !match {
		writeErrorResponse(w, r, s3Error, r.URL.Path)
		return
	}

	// Fetch the saved bucket access policy.
	policyBytes, err := readBucketPolicy(bucket)
	if err != nil {
		errorIf(err.Trace(bucket), "GetBucketPolicy failed.", nil)
		// Map typed filesystem errors to their S3 error codes.
		switch err.ToGoError().(type) {
		case fs.BucketNameInvalid:
			writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
		case fs.BucketPolicyNotFound:
			writeErrorResponse(w, r, NoSuchBucketPolicy, r.URL.Path)
		default:
			writeErrorResponse(w, r, InternalError, r.URL.Path)
		}
		return
	}
	// Stream the policy document back to the client.
	io.Copy(w, bytes.NewReader(policyBytes))
}

147
bucket-policy.go Normal file
View file

@ -0,0 +1,147 @@
/*
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/minio/minio/pkg/fs"
"github.com/minio/minio/pkg/probe"
)
// getBucketsConfigPath - get buckets path.
// Buckets metadata lives under "<configPath>/buckets".
func getBucketsConfigPath() (string, *probe.Error) {
	configPath, err := getConfigPath()
	if err != nil {
		return "", err.Trace()
	}
	bucketsPath := filepath.Join(configPath, "buckets")
	return bucketsPath, nil
}
// createBucketsConfigPath - create buckets directory.
// MkdirAll is a no-op when the directory already exists.
func createBucketsConfigPath() *probe.Error {
	bucketsPath, err := getBucketsConfigPath()
	if err != nil {
		return err
	}
	// 0700: accessible by the server user only.
	if mkErr := os.MkdirAll(bucketsPath, 0700); mkErr != nil {
		return probe.NewError(mkErr)
	}
	return nil
}
// getBucketConfigPath - get bucket path.
// Per-bucket config lives under "<configPath>/buckets/<bucket>".
func getBucketConfigPath(bucket string) (string, *probe.Error) {
	bucketsPath, err := getBucketsConfigPath()
	if err != nil {
		return "", err.Trace()
	}
	bucketPath := filepath.Join(bucketsPath, bucket)
	return bucketPath, nil
}
// createBucketConfigPath - create bucket directory.
// MkdirAll is a no-op when the directory already exists.
func createBucketConfigPath(bucket string) *probe.Error {
	bucketPath, err := getBucketConfigPath(bucket)
	if err != nil {
		return err
	}
	// 0700: accessible by the server user only.
	if mkErr := os.MkdirAll(bucketPath, 0700); mkErr != nil {
		return probe.NewError(mkErr)
	}
	return nil
}
// readBucketPolicy - read bucket policy.
//
// Returns the raw policy JSON saved as "access-policy.json" under the
// bucket's config directory, or fs.BucketPolicyNotFound when no policy
// has been set for the bucket.
func readBucketPolicy(bucket string) ([]byte, *probe.Error) {
	// Verify bucket is valid.
	if !fs.IsValidBucketName(bucket) {
		return nil, probe.NewError(fs.BucketNameInvalid{Bucket: bucket})
	}
	bucketConfigPath, err := getBucketConfigPath(bucket)
	if err != nil {
		return nil, err.Trace()
	}
	// Read the policy file directly instead of Stat-then-ReadFile:
	// the two-step form raced with concurrent deletes (TOCTOU).
	bucketPolicyFile := filepath.Join(bucketConfigPath, "access-policy.json")
	accessPolicyBytes, e := ioutil.ReadFile(bucketPolicyFile)
	if e != nil {
		if os.IsNotExist(e) {
			return nil, probe.NewError(fs.BucketPolicyNotFound{Bucket: bucket})
		}
		return nil, probe.NewError(e)
	}
	return accessPolicyBytes, nil
}
// removeBucketPolicy - remove bucket policy.
//
// Deletes the saved "access-policy.json" for 'bucket'. Returns
// fs.BucketPolicyNotFound when no policy was ever set, so callers can
// reply with 404 NoSuchBucketPolicy.
func removeBucketPolicy(bucket string) *probe.Error {
	// Verify bucket is valid.
	if !fs.IsValidBucketName(bucket) {
		return probe.NewError(fs.BucketNameInvalid{Bucket: bucket})
	}
	bucketConfigPath, err := getBucketConfigPath(bucket)
	if err != nil {
		return err.Trace(bucket)
	}
	// BUG FIX: the previous implementation only Stat'ed the policy
	// file and returned nil without ever deleting it. Remove it and
	// map "does not exist" to the typed BucketPolicyNotFound error.
	bucketPolicyFile := filepath.Join(bucketConfigPath, "access-policy.json")
	if e := os.Remove(bucketPolicyFile); e != nil {
		if os.IsNotExist(e) {
			return probe.NewError(fs.BucketPolicyNotFound{Bucket: bucket})
		}
		return probe.NewError(e)
	}
	return nil
}
// writeBucketPolicy - save bucket policy.
//
// Persists 'accessPolicyBytes' as "access-policy.json" under the
// bucket's config directory, overwriting any previous policy.
// NOTE(review): assumes the bucket config directory already exists
// (see createBucketConfigPath) — confirm callers create it first.
func writeBucketPolicy(bucket string, accessPolicyBytes []byte) *probe.Error {
	// Verify if bucket path legal
	if !fs.IsValidBucketName(bucket) {
		return probe.NewError(fs.BucketNameInvalid{Bucket: bucket})
	}
	bucketConfigPath, err := getBucketConfigPath(bucket)
	if err != nil {
		return err.Trace()
	}
	// WriteFile creates or truncates the target itself, so the old
	// Stat pre-check was redundant and racy (TOCTOU); it is dropped.
	// 0600: policy readable/writable by the server user only.
	bucketPolicyFile := filepath.Join(bucketConfigPath, "access-policy.json")
	if e := ioutil.WriteFile(bucketPolicyFile, accessPolicyBytes, 0600); e != nil {
		return probe.NewError(e)
	}
	return nil
}

View file

@ -280,7 +280,7 @@ func ignoreNotImplementedObjectResources(req *http.Request) bool {
// List of not implemented bucket queries
var notimplementedBucketResourceNames = map[string]bool{
"policy": true,
"acl": true,
"cors": true,
"lifecycle": true,
"logging": true,

View file

@ -61,16 +61,22 @@ func (api storageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
bucket = vars["bucket"]
object = vars["object"]
if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:GetObject", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
case authTypePresigned, authTypeSigned:
if match, s3Error := isSignV4ReqAuthenticated(api.Signature, r); !match {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
metadata, err := api.Filesystem.GetObjectMetadata(bucket, object)
@ -225,15 +231,8 @@ func (api storageAPI) HeadObjectHandler(w http.ResponseWriter, r *http.Request)
bucket = vars["bucket"]
object = vars["object"]
if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
if match, s3Error := isSignV4ReqAuthenticated(api.Signature, r); !match {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
@ -286,14 +285,22 @@ func (api storageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Request)
bucket := vars["bucket"]
object := vars["object"]
if isRequestRequiresACLCheck(r) {
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:GetBucketLocation", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
case authTypePresigned, authTypeSigned:
if match, s3Error := isSignV4ReqAuthenticated(api.Signature, r); !match {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
// TODO: Reject requests where body/payload is present, for now we
@ -413,13 +420,6 @@ func (api storageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
bucket := vars["bucket"]
object := vars["object"]
if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
}
// get Content-Md5 sent by client and verify if valid
md5 := r.Header.Get("Content-Md5")
if !isValidMD5(md5) {
@ -440,11 +440,23 @@ func (api storageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
// Set http request for signature.
auth := api.Signature.SetHTTPRequestToVerify(r)
var metadata fs.ObjectMetadata
var err *probe.Error
// For presigned requests verify them right here.
if isRequestPresignedSignatureV4(r) {
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:PutObject", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
// Create anonymous object.
metadata, err = api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, nil)
case authTypePresigned:
// For presigned requests verify them right here.
var ok bool
ok, err = auth.DoesPresignedSignatureMatch()
if err != nil {
@ -458,7 +470,7 @@ func (api storageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
}
// Create presigned object.
metadata, err = api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, nil)
} else {
case authTypeSigned:
// Create object.
metadata, err = api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, &auth)
}
@ -501,16 +513,18 @@ func (api storageAPI) NewMultipartUploadHandler(w http.ResponseWriter, r *http.R
bucket = vars["bucket"]
object = vars["object"]
if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
switch getRequestAuthType(r) {
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:PutObject", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
default:
if match, s3Error := isSignV4ReqAuthenticated(api.Signature, r); !match {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
uploadID, err := api.Filesystem.NewMultipartUpload(bucket, object)
@ -547,13 +561,6 @@ func (api storageAPI) PutObjectPartHandler(w http.ResponseWriter, r *http.Reques
bucket := vars["bucket"]
object := vars["object"]
if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
}
// get Content-Md5 sent by client and verify if valid
md5 := r.Header.Get("Content-Md5")
if !isValidMD5(md5) {
@ -585,11 +592,20 @@ func (api storageAPI) PutObjectPartHandler(w http.ResponseWriter, r *http.Reques
// Set http request for signature.
auth := api.Signature.SetHTTPRequestToVerify(r)
var partMD5 string
var err *probe.Error
// For presigned requests verify right here.
if isRequestPresignedSignatureV4(r) {
switch getRequestAuthType(r) {
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:PutObject", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
// No need to verify signature, anonymous request access is
// already allowed.
partMD5, err = api.Filesystem.CreateObjectPart(bucket, object, uploadID, md5, partID, size, r.Body, nil)
case authTypePresigned:
// For presigned requests verify right here.
var ok bool
ok, err = auth.DoesPresignedSignatureMatch()
if err != nil {
@ -602,7 +618,7 @@ func (api storageAPI) PutObjectPartHandler(w http.ResponseWriter, r *http.Reques
return
}
partMD5, err = api.Filesystem.CreateObjectPart(bucket, object, uploadID, md5, partID, size, r.Body, nil)
} else {
default:
partMD5, err = api.Filesystem.CreateObjectPart(bucket, object, uploadID, md5, partID, size, r.Body, &auth)
}
if err != nil {
@ -637,16 +653,18 @@ func (api storageAPI) AbortMultipartUploadHandler(w http.ResponseWriter, r *http
bucket := vars["bucket"]
object := vars["object"]
if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
switch getRequestAuthType(r) {
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:AbortMultipartUpload", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
default:
if match, s3Error := isSignV4ReqAuthenticated(api.Signature, r); !match {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
objectResourcesMetadata := getObjectResources(r.URL.Query())
@ -678,16 +696,18 @@ func (api storageAPI) ListObjectPartsHandler(w http.ResponseWriter, r *http.Requ
bucket := vars["bucket"]
object := vars["object"]
if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
switch getRequestAuthType(r) {
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:ListMultipartUploadParts", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
default:
if match, s3Error := isSignV4ReqAuthenticated(api.Signature, r); !match {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
objectResourcesMetadata := getObjectResources(r.URL.Query())
@ -724,9 +744,9 @@ func (api storageAPI) ListObjectPartsHandler(w http.ResponseWriter, r *http.Requ
}
response := generateListPartsResponse(objectResourcesMetadata)
encodedSuccessResponse := encodeResponse(response)
// write headers.
// Write headers.
setCommonHeaders(w)
// write success response.
// Write success response.
writeSuccessResponse(w, encodedSuccessResponse)
}
@ -736,13 +756,6 @@ func (api storageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *h
bucket := vars["bucket"]
object := vars["object"]
if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
}
// Extract object resources.
objectResourcesMetadata := getObjectResources(r.URL.Query())
@ -751,8 +764,21 @@ func (api storageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *h
var metadata fs.ObjectMetadata
var err *probe.Error
// For presigned requests verify right here.
if isRequestPresignedSignatureV4(r) {
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:PutObject", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
// Complete multipart upload anonymous.
metadata, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, nil)
case authTypePresigned:
// For presigned requests verify right here.
var ok bool
ok, err = auth.DoesPresignedSignatureMatch()
if err != nil {
@ -766,7 +792,7 @@ func (api storageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *h
}
// Complete multipart upload presigned.
metadata, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, nil)
} else {
case authTypeSigned:
// Complete multipart upload.
metadata, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, &auth)
}
@ -817,18 +843,23 @@ func (api storageAPI) DeleteObjectHandler(w http.ResponseWriter, r *http.Request
bucket := vars["bucket"]
object := vars["object"]
if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if isAllowed, s3Error := enforceBucketPolicy("s3:DeleteObject", bucket, r.URL); !isAllowed {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
case authTypeSigned, authTypePresigned:
if match, s3Error := isSignV4ReqAuthenticated(api.Signature, r); !match {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
err := api.Filesystem.DeleteObject(bucket, object)
if err != nil {
errorIf(err.Trace(), "DeleteObject failed.", nil)

View file

@ -38,7 +38,6 @@ func APITestSuite(c *check.C, create func() Filesystem) {
testPaging(c, create)
testObjectOverwriteWorks(c, create)
testNonExistantBucketOperations(c, create)
testBucketMetadata(c, create)
testBucketRecreateFails(c, create)
testPutObjectInSubdir(c, create)
testListBuckets(c, create)
@ -53,13 +52,13 @@ func APITestSuite(c *check.C, create func() Filesystem) {
func testMakeBucket(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
}
func testMultipartObjectCreation(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
uploadID, err := fs.NewMultipartUpload("bucket", "key")
c.Assert(err, check.IsNil)
@ -94,7 +93,7 @@ func testMultipartObjectCreation(c *check.C, create func() Filesystem) {
func testMultipartObjectAbort(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
uploadID, err := fs.NewMultipartUpload("bucket", "key")
c.Assert(err, check.IsNil)
@ -126,7 +125,7 @@ func testMultipartObjectAbort(c *check.C, create func() Filesystem) {
func testMultipleObjectCreation(c *check.C, create func() Filesystem) {
objects := make(map[string][]byte)
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
for i := 0; i < 10; i++ {
randomPerm := rand.Perm(10)
@ -161,7 +160,7 @@ func testMultipleObjectCreation(c *check.C, create func() Filesystem) {
func testPaging(c *check.C, create func() Filesystem) {
fs := create()
fs.MakeBucket("bucket", "")
fs.MakeBucket("bucket")
result, err := fs.ListObjects("bucket", "", "", "", 0)
c.Assert(err, check.IsNil)
c.Assert(len(result.Objects), check.Equals, 0)
@ -262,7 +261,7 @@ func testPaging(c *check.C, create func() Filesystem) {
func testObjectOverwriteWorks(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
hasher1 := md5.New()
@ -292,27 +291,17 @@ func testNonExistantBucketOperations(c *check.C, create func() Filesystem) {
c.Assert(err, check.Not(check.IsNil))
}
func testBucketMetadata(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("string", "")
c.Assert(err, check.IsNil)
metadata, err := fs.GetBucketMetadata("string")
c.Assert(err, check.IsNil)
c.Assert(metadata.ACL, check.Equals, BucketACL("private"))
}
func testBucketRecreateFails(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("string", "")
err := fs.MakeBucket("string")
c.Assert(err, check.IsNil)
err = fs.MakeBucket("string", "")
err = fs.MakeBucket("string")
c.Assert(err, check.Not(check.IsNil))
}
func testPutObjectInSubdir(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
hasher := md5.New()
@ -339,7 +328,7 @@ func testListBuckets(c *check.C, create func() Filesystem) {
c.Assert(len(buckets), check.Equals, 0)
// add one and test exists
err = fs.MakeBucket("bucket1", "")
err = fs.MakeBucket("bucket1")
c.Assert(err, check.IsNil)
buckets, err = fs.ListBuckets()
@ -347,7 +336,7 @@ func testListBuckets(c *check.C, create func() Filesystem) {
c.Assert(err, check.IsNil)
// add two and test exists
err = fs.MakeBucket("bucket2", "")
err = fs.MakeBucket("bucket2")
c.Assert(err, check.IsNil)
buckets, err = fs.ListBuckets()
@ -355,7 +344,7 @@ func testListBuckets(c *check.C, create func() Filesystem) {
c.Assert(err, check.IsNil)
// add three and test exists + prefix
err = fs.MakeBucket("bucket22", "")
err = fs.MakeBucket("bucket22")
buckets, err = fs.ListBuckets()
c.Assert(len(buckets), check.Equals, 3)
@ -368,9 +357,9 @@ func testListBucketsOrder(c *check.C, create func() Filesystem) {
for i := 0; i < 10; i++ {
fs := create()
// add one and test exists
err := fs.MakeBucket("bucket1", "")
err := fs.MakeBucket("bucket1")
c.Assert(err, check.IsNil)
err = fs.MakeBucket("bucket2", "")
err = fs.MakeBucket("bucket2")
c.Assert(err, check.IsNil)
buckets, err := fs.ListBuckets()
c.Assert(err, check.IsNil)
@ -390,7 +379,7 @@ func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Filesyst
func testNonExistantObjectInBucket(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
var byteBuffer bytes.Buffer
@ -408,7 +397,7 @@ func testNonExistantObjectInBucket(c *check.C, create func() Filesystem) {
func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
_, err = fs.CreateObject("bucket", "dir1/dir2/object", "", int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
@ -443,7 +432,7 @@ func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Filesystem)
func testDefaultContentType(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
// test empty
@ -455,7 +444,7 @@ func testDefaultContentType(c *check.C, create func() Filesystem) {
func testContentMD5Set(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
// test md5 invalid

View file

@ -37,7 +37,6 @@ func APITestSuite(c *check.C, create func() Filesystem) {
testPaging(c, create)
testObjectOverwriteWorks(c, create)
testNonExistantBucketOperations(c, create)
testBucketMetadata(c, create)
testBucketRecreateFails(c, create)
testPutObjectInSubdir(c, create)
testListBuckets(c, create)
@ -52,13 +51,13 @@ func APITestSuite(c *check.C, create func() Filesystem) {
func testMakeBucket(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
}
func testMultipartObjectCreation(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
uploadID, err := fs.NewMultipartUpload("bucket", "key")
c.Assert(err, check.IsNil)
@ -93,7 +92,7 @@ func testMultipartObjectCreation(c *check.C, create func() Filesystem) {
func testMultipartObjectAbort(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
uploadID, err := fs.NewMultipartUpload("bucket", "key")
c.Assert(err, check.IsNil)
@ -125,7 +124,7 @@ func testMultipartObjectAbort(c *check.C, create func() Filesystem) {
func testMultipleObjectCreation(c *check.C, create func() Filesystem) {
objects := make(map[string][]byte)
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
for i := 0; i < 10; i++ {
randomPerm := rand.Perm(10)
@ -160,7 +159,7 @@ func testMultipleObjectCreation(c *check.C, create func() Filesystem) {
func testPaging(c *check.C, create func() Filesystem) {
fs := create()
fs.MakeBucket("bucket", "")
fs.MakeBucket("bucket")
result, err := fs.ListObjects("bucket", "", "", "", 0)
c.Assert(err, check.IsNil)
c.Assert(len(result.Objects), check.Equals, 0)
@ -260,7 +259,7 @@ func testPaging(c *check.C, create func() Filesystem) {
func testObjectOverwriteWorks(c *check.C, create func() Filesystem) {
fs := create()
fs.MakeBucket("bucket", "")
fs.MakeBucket("bucket")
hasher1 := md5.New()
hasher1.Write([]byte("one"))
@ -289,27 +288,17 @@ func testNonExistantBucketOperations(c *check.C, create func() Filesystem) {
c.Assert(err, check.Not(check.IsNil))
}
func testBucketMetadata(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("string", "private")
c.Assert(err, check.IsNil)
metadata, err := fs.GetBucketMetadata("string")
c.Assert(err, check.IsNil)
c.Assert(metadata.ACL, check.Equals, BucketACL("private"))
}
func testBucketRecreateFails(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("string", "private")
err := fs.MakeBucket("string")
c.Assert(err, check.IsNil)
err = fs.MakeBucket("string", "private")
err = fs.MakeBucket("string")
c.Assert(err, check.Not(check.IsNil))
}
func testPutObjectInSubdir(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "private")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
hasher := md5.New()
@ -336,7 +325,7 @@ func testListBuckets(c *check.C, create func() Filesystem) {
c.Assert(len(buckets), check.Equals, 0)
// add one and test exists
err = fs.MakeBucket("bucket1", "")
err = fs.MakeBucket("bucket1")
c.Assert(err, check.IsNil)
buckets, err = fs.ListBuckets()
@ -344,7 +333,7 @@ func testListBuckets(c *check.C, create func() Filesystem) {
c.Assert(err, check.IsNil)
// add two and test exists
err = fs.MakeBucket("bucket2", "")
err = fs.MakeBucket("bucket2")
c.Assert(err, check.IsNil)
buckets, err = fs.ListBuckets()
@ -352,7 +341,7 @@ func testListBuckets(c *check.C, create func() Filesystem) {
c.Assert(err, check.IsNil)
// add three and test exists + prefix
err = fs.MakeBucket("bucket22", "")
err = fs.MakeBucket("bucket22")
buckets, err = fs.ListBuckets()
c.Assert(len(buckets), check.Equals, 3)
@ -365,9 +354,9 @@ func testListBucketsOrder(c *check.C, create func() Filesystem) {
for i := 0; i < 10; i++ {
fs := create()
// add one and test exists
err := fs.MakeBucket("bucket1", "")
err := fs.MakeBucket("bucket1")
c.Assert(err, check.IsNil)
err = fs.MakeBucket("bucket2", "")
err = fs.MakeBucket("bucket2")
c.Assert(err, check.IsNil)
buckets, err := fs.ListBuckets()
c.Assert(err, check.IsNil)
@ -387,7 +376,7 @@ func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Filesyst
func testNonExistantObjectInBucket(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
var byteBuffer bytes.Buffer
@ -409,7 +398,7 @@ func testNonExistantObjectInBucket(c *check.C, create func() Filesystem) {
func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
_, err = fs.CreateObject("bucket", "dir1/dir2/object", "", int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
@ -444,7 +433,7 @@ func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Filesystem)
func testDefaultContentType(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
// test empty
@ -456,7 +445,7 @@ func testDefaultContentType(c *check.C, create func() Filesystem) {
func testContentMD5Set(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket", "")
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
// test md5 invalid

View file

@ -21,12 +21,7 @@ import (
"github.com/minio/minio/pkg/quick"
)
var multipartsMetadataPath, bucketsMetadataPath string
// setFSBucketsMetadataPath - set fs buckets metadata path.
func setFSBucketsMetadataPath(metadataPath string) {
bucketsMetadataPath = metadataPath
}
var multipartsMetadataPath string
// SetFSMultipartsMetadataPath - set custom multiparts session
// metadata path.
@ -46,18 +41,6 @@ func saveMultipartsSession(multiparts Multiparts) *probe.Error {
return nil
}
// saveBucketsMetadata - save metadata of all buckets
func saveBucketsMetadata(buckets Buckets) *probe.Error {
qc, err := quick.New(buckets)
if err != nil {
return err.Trace()
}
if err := qc.Save(bucketsMetadataPath); err != nil {
return err.Trace()
}
return nil
}
// loadMultipartsSession load multipart session file
func loadMultipartsSession() (*Multiparts, *probe.Error) {
multiparts := &Multiparts{}
@ -72,18 +55,3 @@ func loadMultipartsSession() (*Multiparts, *probe.Error) {
}
return qc.Data().(*Multiparts), nil
}
// loadBucketsMetadata load buckets metadata file
func loadBucketsMetadata() (*Buckets, *probe.Error) {
buckets := &Buckets{}
buckets.Version = "1"
buckets.Metadata = make(map[string]*BucketMetadata)
qc, err := quick.New(buckets)
if err != nil {
return nil, err.Trace()
}
if err := qc.Load(bucketsMetadataPath); err != nil {
return nil, err.Trace()
}
return qc.Data().(*Buckets), nil
}

View file

@ -1,96 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fs
// IsPrivateBucket - is private bucket
func (fs Filesystem) IsPrivateBucket(bucket string) bool {
fs.rwLock.RLock()
defer fs.rwLock.RUnlock()
bucketMetadata, ok := fs.buckets.Metadata[bucket]
if !ok {
return true
}
return bucketMetadata.ACL.IsPrivate()
}
// IsPublicBucket - is public bucket
func (fs Filesystem) IsPublicBucket(bucket string) bool {
fs.rwLock.RLock()
defer fs.rwLock.RUnlock()
bucketMetadata, ok := fs.buckets.Metadata[bucket]
if !ok {
return true
}
return bucketMetadata.ACL.IsPublicReadWrite()
}
// IsReadOnlyBucket - is read only bucket
func (fs Filesystem) IsReadOnlyBucket(bucket string) bool {
fs.rwLock.RLock()
defer fs.rwLock.RUnlock()
bucketMetadata, ok := fs.buckets.Metadata[bucket]
if !ok {
return true
}
return bucketMetadata.ACL.IsPublicRead()
}
// BucketACL - bucket level access control
type BucketACL string

// different types of ACL's currently supported for buckets
const (
	BucketPrivate         = BucketACL("private")
	BucketPublicRead      = BucketACL("public-read")
	BucketPublicReadWrite = BucketACL("public-read-write")
)

// String - returns the ACL in its canonical string form.
func (b BucketACL) String() string {
	return string(b)
}

// IsPrivate - is acl Private
func (b BucketACL) IsPrivate() bool {
	// Compare against the named constant rather than re-spelling the
	// literal, so the accepted values live in exactly one place.
	return b == BucketPrivate
}

// IsPublicRead - is acl PublicRead
func (b BucketACL) IsPublicRead() bool {
	return b == BucketPublicRead
}

// IsPublicReadWrite - is acl PublicReadWrite
func (b BucketACL) IsPublicReadWrite() bool {
	return b == BucketPublicReadWrite
}
// IsValidBucketACL - is provided acl string supported.
//
// The empty string is accepted too: an unspecified ACL defaults to
// "private".
func IsValidBucketACL(acl string) bool {
	// A multi-value case replaces the original fallthrough chain.
	switch acl {
	case "private", "public-read", "public-read-write":
		return true
	case "":
		// by default its "private"
		return true
	default:
		return false
	}
}

View file

@ -52,15 +52,6 @@ func (fs Filesystem) DeleteBucket(bucket string) *probe.Error {
}
return probe.NewError(e)
}
// Critical region hold write lock.
fs.rwLock.Lock()
defer fs.rwLock.Unlock()
delete(fs.buckets.Metadata, bucket)
if err := saveBucketsMetadata(*fs.buckets); err != nil {
return err.Trace(bucket)
}
return nil
}
@ -112,8 +103,8 @@ func removeDuplicateBuckets(buckets []BucketMetadata) []BucketMetadata {
return buckets
}
// MakeBucket - PUT Bucket.
func (fs Filesystem) MakeBucket(bucket, acl string) *probe.Error {
// MakeBucket - PUT Bucket
func (fs Filesystem) MakeBucket(bucket string) *probe.Error {
di, err := disk.GetInfo(fs.path)
if err != nil {
return probe.NewError(err)
@ -131,12 +122,6 @@ func (fs Filesystem) MakeBucket(bucket, acl string) *probe.Error {
return probe.NewError(BucketNameInvalid{Bucket: bucket})
}
// Verify if bucket acl is valid.
if !IsValidBucketACL(acl) {
return probe.NewError(InvalidACL{ACL: acl})
}
// Get bucket path.
bucket = fs.denormalizeBucket(bucket)
bucketDir := filepath.Join(fs.path, bucket)
if _, e := os.Stat(bucketDir); e == nil {
@ -147,33 +132,6 @@ func (fs Filesystem) MakeBucket(bucket, acl string) *probe.Error {
if e := os.Mkdir(bucketDir, 0700); e != nil {
return probe.NewError(err)
}
fi, e := os.Stat(bucketDir)
// Check if bucket exists.
if e != nil {
if os.IsNotExist(e) {
return probe.NewError(BucketNotFound{Bucket: bucket})
}
return probe.NewError(e)
}
if acl == "" {
acl = "private"
}
// Get a new bucket name metadata.
bucketMetadata := &BucketMetadata{}
bucketMetadata.Name = fi.Name()
bucketMetadata.Created = fi.ModTime()
bucketMetadata.ACL = BucketACL(acl)
// Critical region hold a write lock.
fs.rwLock.Lock()
defer fs.rwLock.Unlock()
fs.buckets.Metadata[bucket] = bucketMetadata
if err := saveBucketsMetadata(*fs.buckets); err != nil {
return err.Trace(bucket)
}
return nil
}
@ -200,7 +158,6 @@ func (fs Filesystem) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Er
if !IsValidBucketName(bucket) {
return BucketMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
bucket = fs.denormalizeBucket(bucket)
// Get bucket path.
bucketDir := filepath.Join(fs.path, bucket)
@ -212,44 +169,8 @@ func (fs Filesystem) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Er
}
return BucketMetadata{}, probe.NewError(e)
}
fs.rwLock.RLock()
bucketMetadata, ok := fs.buckets.Metadata[bucket]
fs.rwLock.RUnlock()
// If metadata value is not found, get it from disk.
if !ok {
bucketMetadata = &BucketMetadata{}
bucketMetadata.Name = fi.Name()
bucketMetadata.Created = fi.ModTime()
bucketMetadata.ACL = BucketACL("private")
}
return *bucketMetadata, nil
}
// SetBucketMetadata - set bucket metadata.
func (fs Filesystem) SetBucketMetadata(bucket string, metadata map[string]string) *probe.Error {
bucketMetadata, err := fs.GetBucketMetadata(bucket)
if err != nil {
return err
}
// Save the acl.
acl := metadata["acl"]
if !IsValidBucketACL(acl) {
return probe.NewError(InvalidACL{ACL: acl})
} else if acl == "" {
acl = "private"
}
bucketMetadata.ACL = BucketACL(acl)
bucket = fs.denormalizeBucket(bucket)
// Critical region handle write lock.
fs.rwLock.Lock()
defer fs.rwLock.Unlock()
fs.buckets.Metadata[bucket] = &bucketMetadata
if err := saveBucketsMetadata(*fs.buckets); err != nil {
return err.Trace(bucket)
}
return nil
bucketMetadata := BucketMetadata{}
bucketMetadata.Name = fi.Name()
bucketMetadata.Created = fi.ModTime()
return bucketMetadata, nil
}

View file

@ -65,7 +65,7 @@ func BenchmarkDeleteBucket(b *testing.B) {
b.StopTimer()
// Create and delete the bucket over and over.
err = filesystem.MakeBucket("bucket", "public-read-write")
err = filesystem.MakeBucket("bucket")
if err != nil {
b.Fatal(err)
}
@ -94,7 +94,7 @@ func BenchmarkGetBucketMetadata(b *testing.B) {
}
// Put up a bucket with some metadata.
err = filesystem.MakeBucket("bucket", "public-read-write")
err = filesystem.MakeBucket("bucket")
if err != nil {
b.Fatal(err)
}
@ -109,37 +109,3 @@ func BenchmarkGetBucketMetadata(b *testing.B) {
}
}
}
// BenchmarkSetBucketMetadata measures the cost of repeatedly saving
// bucket metadata (the "acl" entry) via Filesystem.SetBucketMetadata.
func BenchmarkSetBucketMetadata(b *testing.B) {
	// Make a temporary directory to use as the filesystem.
	directory, fserr := ioutil.TempDir("", "minio-benchmark")
	if fserr != nil {
		b.Fatal(fserr)
	}
	defer os.RemoveAll(directory)
	// Create the filesystem.
	filesystem, err := New(directory, 0)
	if err != nil {
		b.Fatal(err)
	}
	// Put up a bucket with some metadata.
	err = filesystem.MakeBucket("bucket", "public-read-write")
	if err != nil {
		b.Fatal(err)
	}
	metadata := make(map[string]string)
	metadata["acl"] = "public-read-write"
	// Exclude the setup above from the timed region.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Set all the metadata!
		err = filesystem.SetBucketMetadata("bucket", metadata)
		if err != nil {
			b.Fatal(err)
		}
	}
}

View file

@ -25,7 +25,6 @@ import (
type BucketMetadata struct {
Name string
Created time.Time
ACL BucketACL
}
// ObjectMetadata - object key and its relevant metadata

View file

@ -163,6 +163,13 @@ type GenericBucketError struct {
Bucket string
}
// BucketPolicyNotFound - no bucket policy found.
type BucketPolicyNotFound GenericBucketError

// Error - human readable message for a missing bucket policy.
func (e BucketPolicyNotFound) Error() string {
	const prefix = "No bucket policy found for bucket: "
	return prefix + e.Bucket
}
// GenericObjectError - generic object error
type GenericObjectError struct {
Bucket string
@ -183,17 +190,6 @@ type DigestError struct {
MD5 string
}
/// ACL related errors
// InvalidACL - acl invalid
type InvalidACL struct {
	ACL string
}

// Error - human readable message naming the rejected ACL string.
func (e InvalidACL) Error() string {
	msg := "Requested ACL is " + e.ACL + " invalid"
	return msg
}
/// Bucket related errors
// BucketNameInvalid - bucketname provided is invalid

View file

@ -31,17 +31,10 @@ type Filesystem struct {
minFreeDisk int64
rwLock *sync.RWMutex
multiparts *Multiparts
buckets *Buckets
listServiceReqCh chan<- listServiceReq
timeoutReqCh chan<- uint32
}
// Buckets holds acl information
// for every bucket; Metadata is keyed by bucket name.
type Buckets struct {
	Version  string `json:"version"`
	Metadata map[string]*BucketMetadata
}
// MultipartSession holds active session information
type MultipartSession struct {
TotalParts int
@ -59,7 +52,6 @@ type Multiparts struct {
// New instantiate a new donut
func New(rootPath string, minFreeDisk int64) (Filesystem, *probe.Error) {
setFSBucketsMetadataPath(filepath.Join(rootPath, "$buckets.json"))
setFSMultipartsMetadataPath(filepath.Join(rootPath, "$multiparts-session.json"))
var err *probe.Error
@ -80,27 +72,12 @@ func New(rootPath string, minFreeDisk int64) (Filesystem, *probe.Error) {
}
}
var buckets *Buckets
buckets, err = loadBucketsMetadata()
if err != nil {
if os.IsNotExist(err.ToGoError()) {
buckets = &Buckets{
Version: "1",
Metadata: make(map[string]*BucketMetadata),
}
if err = saveBucketsMetadata(*buckets); err != nil {
return Filesystem{}, err.Trace()
}
} else {
return Filesystem{}, err.Trace()
}
}
fs := Filesystem{
rwLock: &sync.RWMutex{},
}
fs.path = rootPath
fs.multiparts = multiparts
fs.buckets = buckets
/// Defaults
// minium free disk required for i/o operations to succeed.

33
pkg/s3/access/README.md Normal file
View file

@ -0,0 +1,33 @@
## Access Policy
This package implements parsing and validating bucket access policies based on the Access Policy Language specification - http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
### Supports the following effects.
Allow
Deny
### Supports the following set of operations.
*
s3:*
s3:GetObject
s3:ListBucket
s3:PutObject
s3:CreateBucket
s3:GetBucketLocation
s3:DeleteBucket
s3:DeleteObject
s3:AbortMultipartUpload
s3:ListBucketMultipartUploads
s3:ListMultipartUploadParts
### Supports the following conditions.
StringEquals
StringNotEquals
Supported condition keys for each of the conditions above.
s3:prefix
s3:max-keys

230
pkg/s3/access/policy.go Normal file
View file

@ -0,0 +1,230 @@
/*
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package accesspolicy implements AWS Access Policy Language parser in
// accordance with http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
package accesspolicy
import (
"encoding/json"
"errors"
"fmt"
"strings"
)
const (
	// AWSResourcePrefix - bucket policy resource prefix.
	// Every Resource entry in a statement must start with this prefix
	// (enforced by isValidResources).
	AWSResourcePrefix = "arn:aws:s3:::"
)

// supportedActionMap - lists all the actions supported by minio.
// Any action outside this set is rejected by isValidActions.
var supportedActionMap = map[string]struct{}{
	"*":                             {},
	"s3:*":                          {},
	"s3:GetObject":                  {},
	"s3:ListBucket":                 {},
	"s3:PutObject":                  {},
	"s3:CreateBucket":               {},
	"s3:GetBucketLocation":          {},
	"s3:DeleteBucket":               {},
	"s3:DeleteObject":               {},
	"s3:AbortMultipartUpload":       {},
	"s3:ListBucketMultipartUploads": {},
	"s3:ListMultipartUploadParts":   {},
}

// User - canonical users list.
// Minio does not implement IAM, so "*" is the only principal value
// accepted (see isValidPrincipals).
type User struct {
	AWS []string
}

// Statement - minio policy statement
type Statement struct {
	Sid        string // optional statement identifier
	Effect     string // "Allow" or "Deny" (see supportedEffectMap)
	Principal  User
	Actions    []string                     `json:"Action"`
	Resources  []string                     `json:"Resource"`
	Conditions map[string]map[string]string `json:"Condition"`
}

// BucketPolicy - minio policy collection
type BucketPolicy struct {
	Version    string // date in 0000-00-00 format
	Statements []Statement `json:"Statement"`
}

// supportedEffectMap - supported effects.
var supportedEffectMap = map[string]struct{}{
	"Allow": {},
	"Deny":  {},
}
// isValidActions - are actions valid.
func isValidActions(actions []string) error {
	// Statement actions cannot be empty.
	if len(actions) == 0 {
		return errors.New("Action list cannot be empty.")
	}
	// Each action must be one minio knows how to enforce.
	for _, action := range actions {
		if _, supported := supportedActionMap[action]; supported {
			continue
		}
		return errors.New("Unsupported action found: " + action + ", please validate your policy document.")
	}
	return nil
}
// isValidEffect - is effect valid.
func isValidEffect(effect string) error {
	// Statement effect cannot be empty.
	if effect == "" {
		return errors.New("Policy effect cannot be empty.")
	}
	// Only the effects listed in supportedEffectMap are recognized.
	if _, supported := supportedEffectMap[effect]; supported {
		return nil
	}
	return errors.New("Unsupported Effect found: " + effect + ", please validate your policy document.")
}
// isValidResources - are valid resources.
func isValidResources(resources []string) error {
	// Statement resources cannot be empty.
	if len(resources) == 0 {
		return errors.New("Resource list cannot be empty.")
	}
	for _, resource := range resources {
		// Every resource must be an "arn:aws:s3:::" style ARN.
		if !strings.HasPrefix(resource, AWSResourcePrefix) {
			return errors.New("Unsupported resource style found: " + resource + ", please validate your policy document.")
		}
		// The remainder must name a bucket, i.e. be non-empty and not
		// begin with a path separator.
		remainder := strings.TrimPrefix(resource, AWSResourcePrefix)
		if remainder == "" || strings.HasPrefix(remainder, "/") {
			return errors.New("Invalid resource style found: " + resource + ", please validate your policy document.")
		}
	}
	return nil
}
// isValidPrincipals - are valid principals.
//
// Minio does not support or implement IAM, so "*" is the only valid
// principal; every entry in the list must be "*".
//
// Bug fix: the original implementation overwrote a single ok flag on
// each iteration, so a list containing an unsupported principal was
// still accepted whenever its LAST entry happened to be "*"
// (e.g. ["arn:aws:iam::1:root", "*"]). Now any non-"*" entry rejects
// the whole list regardless of position.
func isValidPrincipals(principals []string) (err error) {
	// Statement principal should have a value.
	if len(principals) == 0 {
		err = errors.New("Principal cannot be empty.")
		return err
	}
	for _, principal := range principals {
		if principal != "*" {
			err = errors.New("Unsupported principal style found: " + strings.Join(principals, " ") + ", please validate your policy document.")
			return err
		}
	}
	return nil
}
// isValidConditions - are conditions valid.
// An absent/empty condition map is valid; otherwise only the
// "StringEquals"/"StringNotEquals" condition types with the
// "s3:prefix"/"s3:max-keys" keys are supported.
func isValidConditions(conditions map[string]map[string]string) error {
	// No conditions at all is perfectly fine.
	if len(conditions) == 0 {
		return nil
	}
	// At least one of the two supported condition types must appear.
	_, hasStringEquals := conditions["StringEquals"]
	_, hasStringNotEquals := conditions["StringNotEquals"]
	if !hasStringEquals && !hasStringNotEquals {
		return fmt.Errorf("Unsupported condition type found: %s, please validate your policy document.", conditions)
	}
	// Each supported condition type, when present and non-empty, must
	// carry at least one of the supported keys.
	for _, conditionType := range []string{"StringEquals", "StringNotEquals"} {
		keys := conditions[conditionType]
		if len(keys) == 0 {
			continue
		}
		_, hasPrefix := keys["s3:prefix"]
		_, hasMaxKeys := keys["s3:max-keys"]
		if !hasPrefix && !hasMaxKeys {
			return fmt.Errorf("Unsupported condition keys found: %s, please validate your policy document.", keys)
		}
	}
	return nil
}
// Validate - validate if request body is of proper JSON and in
// accordance with policy standards.
// Returns the parsed policy on success; the zero BucketPolicy and a
// descriptive error on the first violation found.
func Validate(bucketPolicyBuf []byte) (BucketPolicy, error) {
	var policy BucketPolicy
	if err := json.Unmarshal(bucketPolicyBuf, &policy); err != nil {
		return BucketPolicy{}, err
	}
	// Policy version cannot be empty.
	if policy.Version == "" {
		return BucketPolicy{}, errors.New("Policy version cannot be empty.")
	}
	// Policy statements cannot be empty.
	if len(policy.Statements) == 0 {
		return BucketPolicy{}, errors.New("Policy statement cannot be empty.")
	}
	// Validate each statement field-by-field; bail out on the first
	// violation encountered.
	for _, statement := range policy.Statements {
		// Statement effect should be valid.
		if err := isValidEffect(statement.Effect); err != nil {
			return BucketPolicy{}, err
		}
		// Statement principal should be supported format.
		if err := isValidPrincipals(statement.Principal.AWS); err != nil {
			return BucketPolicy{}, err
		}
		// Statement actions should be valid.
		if err := isValidActions(statement.Actions); err != nil {
			return BucketPolicy{}, err
		}
		// Statement resources should be valid.
		if err := isValidResources(statement.Resources); err != nil {
			return BucketPolicy{}, err
		}
		// Statement conditions should be valid.
		if err := isValidConditions(statement.Conditions); err != nil {
			return BucketPolicy{}, err
		}
	}
	// Return successfully parsed policy structure.
	return policy, nil
}

View file

@ -137,14 +137,14 @@ func registerAPIHandlers(mux *router.Router, a storageAPI, w *webAPI) {
// GetBucketLocation
bucket.Methods("GET").HandlerFunc(a.GetBucketLocationHandler).Queries("location", "")
// GetBucketACL
bucket.Methods("GET").HandlerFunc(a.GetBucketACLHandler).Queries("acl", "")
// GetBucketPolicy
bucket.Methods("GET").HandlerFunc(a.GetBucketPolicyHandler).Queries("policy", "")
// ListMultipartUploads
bucket.Methods("GET").HandlerFunc(a.ListMultipartUploadsHandler).Queries("uploads", "")
// ListObjects
bucket.Methods("GET").HandlerFunc(a.ListObjectsHandler)
// PutBucketACL
bucket.Methods("PUT").HandlerFunc(a.PutBucketACLHandler).Queries("acl", "")
// PutBucketPolicy
bucket.Methods("PUT").HandlerFunc(a.PutBucketPolicyHandler).Queries("policy", "")
// PutBucket
bucket.Methods("PUT").HandlerFunc(a.PutBucketHandler)
// HeadBucket
@ -152,7 +152,9 @@ func registerAPIHandlers(mux *router.Router, a storageAPI, w *webAPI) {
// DeleteMultipleObjects
bucket.Methods("POST").HandlerFunc(a.DeleteMultipleObjectsHandler)
// PostPolicy
bucket.Methods("POST").HandlerFunc(a.PostPolicyBucketHandler)
bucket.Methods("POST").Headers("Content-Type", "multipart/form-data").HandlerFunc(a.PostPolicyBucketHandler)
// DeleteBucketPolicy
bucket.Methods("DELETE").HandlerFunc(a.DeleteBucketPolicyHandler).Queries("policy", "")
// DeleteBucket
bucket.Methods("DELETE").HandlerFunc(a.DeleteBucketHandler)

View file

@ -154,8 +154,8 @@ func createConfigPath() *probe.Error {
if err != nil {
return err.Trace()
}
if err := os.MkdirAll(configPath, 0700); err != nil {
return probe.NewError(err)
if e := os.MkdirAll(configPath, 0700); e != nil {
return probe.NewError(e)
}
return nil
}

View file

@ -187,6 +187,9 @@ func getConfig() (*configV2, *probe.Error) {
if err := createConfigPath(); err != nil {
return nil, err.Trace()
}
if err := createBucketsConfigPath(); err != nil {
return nil, err.Trace()
}
config, err := loadConfigV2()
if err != nil {
if os.IsNotExist(err.ToGoError()) {

View file

@ -524,7 +524,6 @@ func (s *MyAPIFSCacheSuite) TestHeader(c *C) {
func (s *MyAPIFSCacheSuite) TestPutBucket(c *C) {
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/put-bucket", 0, nil)
c.Assert(err, IsNil)
request.Header.Add("x-amz-acl", "private")
client := http.Client{}
response, err := client.Do(request)
@ -533,7 +532,6 @@ func (s *MyAPIFSCacheSuite) TestPutBucket(c *C) {
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/put-bucket-slash/", 0, nil)
c.Assert(err, IsNil)
request.Header.Add("x-amz-acl", "private")
client = http.Client{}
response, err = client.Do(request)
@ -582,7 +580,6 @@ func (s *MyAPIFSCacheSuite) TestCopyObject(c *C) {
func (s *MyAPIFSCacheSuite) TestPutObject(c *C) {
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/put-object", 0, nil)
c.Assert(err, IsNil)
request.Header.Add("x-amz-acl", "private")
client := http.Client{}
response, err := client.Do(request)
@ -627,7 +624,6 @@ func (s *MyAPIFSCacheSuite) TestNotBeAbleToCreateObjectInNonexistantBucket(c *C)
func (s *MyAPIFSCacheSuite) TestHeadOnObject(c *C) {
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/headonobject", 0, nil)
c.Assert(err, IsNil)
request.Header.Add("x-amz-acl", "private")
client := http.Client{}
response, err := client.Do(request)
@ -671,7 +667,6 @@ func (s *MyAPIFSCacheSuite) TestHeadOnObject(c *C) {
func (s *MyAPIFSCacheSuite) TestHeadOnBucket(c *C) {
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/headonbucket", 0, nil)
c.Assert(err, IsNil)
request.Header.Add("x-amz-acl", "private")
client := http.Client{}
response, err := client.Do(request)
@ -837,7 +832,6 @@ func (s *MyAPIFSCacheSuite) TestListObjectsHandlerErrors(c *C) {
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/objecthandlererrors", 0, nil)
c.Assert(err, IsNil)
request.Header.Add("x-amz-acl", "private")
client = http.Client{}
response, err = client.Do(request)
@ -855,7 +849,6 @@ func (s *MyAPIFSCacheSuite) TestListObjectsHandlerErrors(c *C) {
func (s *MyAPIFSCacheSuite) TestPutBucketErrors(c *C) {
request, err := s.newRequest("PUT", testAPIFSCacheServer.URL+"/putbucket-.", 0, nil)
c.Assert(err, IsNil)
request.Header.Add("x-amz-acl", "private")
client := http.Client{}
response, err := client.Do(request)
@ -864,7 +857,6 @@ func (s *MyAPIFSCacheSuite) TestPutBucketErrors(c *C) {
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/putbucket", 0, nil)
c.Assert(err, IsNil)
request.Header.Add("x-amz-acl", "private")
client = http.Client{}
response, err = client.Do(request)
@ -873,7 +865,6 @@ func (s *MyAPIFSCacheSuite) TestPutBucketErrors(c *C) {
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/putbucket", 0, nil)
c.Assert(err, IsNil)
request.Header.Add("x-amz-acl", "private")
response, err = client.Do(request)
c.Assert(err, IsNil)
@ -881,7 +872,6 @@ func (s *MyAPIFSCacheSuite) TestPutBucketErrors(c *C) {
request, err = s.newRequest("PUT", testAPIFSCacheServer.URL+"/putbucket?acl", 0, nil)
c.Assert(err, IsNil)
request.Header.Add("x-amz-acl", "unknown")
response, err = client.Do(request)
c.Assert(err, IsNil)