From eb2894233c79e1a412c21995ddbf140e305aecba Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 5 Dec 2017 17:58:09 -0800 Subject: [PATCH] Convert gateways into respective packages (#5200) - Make azure gateway a package - Make b2 gateway a package - Make gcs gateway a package - Make s3 gateway a package - Make sia gateway a package --- Makefile | 6 +- appveyor.yml | 19 +- buildscripts/go-coverage.sh | 4 +- cmd/gateway-anonymous.go | 74 -- cmd/gateway-common.go | 325 +++++++++ cmd/gateway-main.go | 57 +- cmd/gateway-main_test.go | 14 +- cmd/gateway-router.go | 18 +- cmd/gateway-s3.go | 655 ------------------ cmd/gateway-unsupported.go | 69 +- .../azure}/gateway-azure-anonymous.go | 40 +- cmd/{ => gateway/azure}/gateway-azure.go | 212 +++--- cmd/{ => gateway/azure}/gateway-azure_test.go | 34 +- cmd/{ => gateway/b2}/gateway-b2-anonymous.go | 14 +- cmd/{ => gateway/b2}/gateway-b2.go | 184 +++-- cmd/{ => gateway/b2}/gateway-b2_test.go | 30 +- cmd/gateway/gateway.go | 26 + .../gcs}/gateway-gcs-anonymous.go | 30 +- cmd/{ => gateway/gcs}/gateway-gcs.go | 367 +++++----- cmd/{ => gateway/gcs}/gateway-gcs_test.go | 53 +- cmd/{ => gateway/s3}/gateway-s3-anonymous.go | 54 +- cmd/gateway/s3/gateway-s3.go | 416 +++++++++++ cmd/{ => gateway/s3}/gateway-s3_test.go | 63 +- cmd/{ => gateway/sia}/gateway-sia.go | 270 ++++---- cmd/{ => gateway/sia}/gateway-sia_test.go | 2 +- cmd/globals.go | 3 - cmd/storage-errors.go | 4 - cmd/test-utils_test.go | 9 - cmd/utils.go | 32 +- cmd/utils_test.go | 4 +- main.go | 3 + 31 files changed, 1586 insertions(+), 1505 deletions(-) delete mode 100644 cmd/gateway-anonymous.go create mode 100644 cmd/gateway-common.go delete mode 100644 cmd/gateway-s3.go rename cmd/{ => gateway/azure}/gateway-azure-anonymous.go (88%) rename cmd/{ => gateway/azure}/gateway-azure.go (87%) rename cmd/{ => gateway/azure}/gateway-azure_test.go (93%) rename cmd/{ => gateway/b2}/gateway-b2-anonymous.go (94%) rename cmd/{ => gateway/b2}/gateway-b2.go (84%) rename cmd/{ => 
gateway/b2}/gateway-b2_test.go (91%) create mode 100644 cmd/gateway/gateway.go rename cmd/{ => gateway/gcs}/gateway-gcs-anonymous.go (78%) rename cmd/{ => gateway/gcs}/gateway-gcs.go (77%) rename cmd/{ => gateway/gcs}/gateway-gcs_test.go (90%) rename cmd/{ => gateway/s3}/gateway-s3-anonymous.go (58%) create mode 100644 cmd/gateway/s3/gateway-s3.go rename cmd/{ => gateway/s3}/gateway-s3_test.go (60%) rename cmd/{ => gateway/sia}/gateway-sia.go (75%) rename cmd/{ => gateway/sia}/gateway-sia_test.go (98%) diff --git a/Makefile b/Makefile index 6891e7c4b..b24b3447f 100644 --- a/Makefile +++ b/Makefile @@ -58,15 +58,13 @@ spelling: check: test test: verifiers build @echo "Running unit tests" - @go test $(GOFLAGS) . - @go test $(GOFLAGS) github.com/minio/minio/cmd... - @go test $(GOFLAGS) github.com/minio/minio/pkg... + @go test $(GOFLAGS) ./... @echo "Verifying build" @(env bash $(PWD)/buildscripts/verify-build.sh) coverage: build @echo "Running all coverage for minio" - @./buildscripts/go-coverage.sh + @(env bash $(PWD)/buildscripts/go-coverage.sh) # Builds minio locally. build: diff --git a/appveyor.yml b/appveyor.yml index e88d6ed1c..7e8b0fb2a 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -38,21 +38,18 @@ test_script: # Unit tests - ps: Add-AppveyorTest "Unit Tests" -Outcome Running - mkdir build\coverage - - go test -v -timeout 17m -race github.com/minio/minio/cmd... - - go test -v -race github.com/minio/minio/pkg... - # FIXME(aead): enable codecov after issue https://github.com/golang/go/issues/18468 is solved. - # - go test -v -timeout 17m -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd + - for /f "" %%G in ('go list github.com/minio/minio/... 
^| find /i /v "browser/"') do ( go test -v -timeout 20m -race %%G ) + - go test -v -timeout 20m -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed after_test: - # FIXME(aead): enable codecov after issue https://github.com/golang/go/issues/18468 is solved. - # - go tool cover -html=build\coverage\coverage.txt -o build\coverage\coverage.html - # - ps: Push-AppveyorArtifact build\coverage\coverage.txt - # - ps: Push-AppveyorArtifact build\coverage\coverage.html + - go tool cover -html=build\coverage\coverage.txt -o build\coverage\coverage.html + - ps: Push-AppveyorArtifact build\coverage\coverage.txt + - ps: Push-AppveyorArtifact build\coverage\coverage.html # Upload coverage report. - # - "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%" - # - pip install codecov - # - codecov -X gcov -f "build\coverage\coverage.txt" + - "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%" + - pip install codecov + - codecov -X gcov -f "build\coverage\coverage.txt" # to disable deployment deploy: off diff --git a/buildscripts/go-coverage.sh b/buildscripts/go-coverage.sh index 494b176ba..be7e3e345 100755 --- a/buildscripts/go-coverage.sh +++ b/buildscripts/go-coverage.sh @@ -3,8 +3,8 @@ set -e echo "" > coverage.txt -for d in $(go list ./... | grep -v vendor); do - go test -coverprofile=profile.out -covermode=atomic $d +for d in $(go list ./... | grep -v browser); do + go test -coverprofile=profile.out -covermode=atomic "$d" if [ -f profile.out ]; then cat profile.out >> coverage.txt rm profile.out diff --git a/cmd/gateway-anonymous.go b/cmd/gateway-anonymous.go deleted file mode 100644 index 2c1a975b2..000000000 --- a/cmd/gateway-anonymous.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "crypto/tls" - "net" - "net/http" - "time" -) - -func anonErrToObjectErr(statusCode int, params ...string) error { - bucket := "" - object := "" - if len(params) >= 1 { - bucket = params[0] - } - if len(params) == 2 { - object = params[1] - } - - switch statusCode { - case http.StatusNotFound: - if object != "" { - return ObjectNotFound{bucket, object} - } - return BucketNotFound{Bucket: bucket} - case http.StatusBadRequest: - if object != "" { - return ObjectNameInvalid{bucket, object} - } - return BucketNameInvalid{Bucket: bucket} - case http.StatusForbidden: - fallthrough - case http.StatusUnauthorized: - return AllAccessDisabled{bucket, object} - } - - return errUnexpected -} - -// newCustomHTTPTransport returns a new http configuration -// used while communicating with the cloud backends. -// This sets the value for MaxIdleConns from 2 (go default) to -// 100. 
-func newCustomHTTPTransport() http.RoundTripper { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - TLSClientConfig: &tls.Config{RootCAs: globalRootCAs}, - DisableCompression: true, - } -} diff --git a/cmd/gateway-common.go b/cmd/gateway-common.go new file mode 100644 index 000000000..16cd0460e --- /dev/null +++ b/cmd/gateway-common.go @@ -0,0 +1,325 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "net/http" + + "github.com/minio/minio/pkg/errors" + "github.com/minio/minio/pkg/hash" + + minio "github.com/minio/minio-go" +) + +var ( + // CanonicalizeETag provides canonicalizeETag function alias. + CanonicalizeETag = canonicalizeETag + + // MustGetUUID function alias. + MustGetUUID = mustGetUUID + + // ErrorIf provides errorIf function alias. + ErrorIf = errorIf + + // FatalIf provides fatalIf function alias. + FatalIf = fatalIf +) + +// AnonErrToObjectErr - converts standard http codes into meaningful object layer errors. 
+func AnonErrToObjectErr(statusCode int, params ...string) error { + bucket := "" + object := "" + if len(params) >= 1 { + bucket = params[0] + } + if len(params) == 2 { + object = params[1] + } + + switch statusCode { + case http.StatusNotFound: + if object != "" { + return ObjectNotFound{bucket, object} + } + return BucketNotFound{Bucket: bucket} + case http.StatusBadRequest: + if object != "" { + return ObjectNameInvalid{bucket, object} + } + return BucketNameInvalid{Bucket: bucket} + case http.StatusForbidden: + fallthrough + case http.StatusUnauthorized: + return AllAccessDisabled{bucket, object} + } + + return errUnexpected +} + +// FromMinioClientMetadata converts minio metadata to map[string]string +func FromMinioClientMetadata(metadata map[string][]string) map[string]string { + mm := map[string]string{} + for k, v := range metadata { + mm[http.CanonicalHeaderKey(k)] = v[0] + } + return mm +} + +// FromMinioClientObjectPart converts minio ObjectPart to PartInfo +func FromMinioClientObjectPart(op minio.ObjectPart) PartInfo { + return PartInfo{ + Size: op.Size, + ETag: canonicalizeETag(op.ETag), + LastModified: op.LastModified, + PartNumber: op.PartNumber, + } +} + +// FromMinioClientListPartsInfo converts minio ListObjectPartsResult to ListPartsInfo +func FromMinioClientListPartsInfo(lopr minio.ListObjectPartsResult) ListPartsInfo { + // Convert minio ObjectPart to PartInfo + fromMinioClientObjectParts := func(parts []minio.ObjectPart) []PartInfo { + toParts := make([]PartInfo, len(parts)) + for i, part := range parts { + toParts[i] = FromMinioClientObjectPart(part) + } + return toParts + } + + return ListPartsInfo{ + UploadID: lopr.UploadID, + Bucket: lopr.Bucket, + Object: lopr.Key, + StorageClass: "", + PartNumberMarker: lopr.PartNumberMarker, + NextPartNumberMarker: lopr.NextPartNumberMarker, + MaxParts: lopr.MaxParts, + IsTruncated: lopr.IsTruncated, + EncodingType: lopr.EncodingType, + Parts: fromMinioClientObjectParts(lopr.ObjectParts), + } +} + +// 
FromMinioClientListMultipartsInfo converts minio ListMultipartUploadsResult to ListMultipartsInfo +func FromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) ListMultipartsInfo { + uploads := make([]MultipartInfo, len(lmur.Uploads)) + + for i, um := range lmur.Uploads { + uploads[i] = MultipartInfo{ + Object: um.Key, + UploadID: um.UploadID, + Initiated: um.Initiated, + } + } + + commonPrefixes := make([]string, len(lmur.CommonPrefixes)) + for i, cp := range lmur.CommonPrefixes { + commonPrefixes[i] = cp.Prefix + } + + return ListMultipartsInfo{ + KeyMarker: lmur.KeyMarker, + UploadIDMarker: lmur.UploadIDMarker, + NextKeyMarker: lmur.NextKeyMarker, + NextUploadIDMarker: lmur.NextUploadIDMarker, + MaxUploads: int(lmur.MaxUploads), + IsTruncated: lmur.IsTruncated, + Uploads: uploads, + Prefix: lmur.Prefix, + Delimiter: lmur.Delimiter, + CommonPrefixes: commonPrefixes, + EncodingType: lmur.EncodingType, + } + +} + +// FromMinioClientObjectInfo converts minio ObjectInfo to gateway ObjectInfo +func FromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo { + userDefined := FromMinioClientMetadata(oi.Metadata) + userDefined["Content-Type"] = oi.ContentType + + return ObjectInfo{ + Bucket: bucket, + Name: oi.Key, + ModTime: oi.LastModified, + Size: oi.Size, + ETag: canonicalizeETag(oi.ETag), + UserDefined: userDefined, + ContentType: oi.ContentType, + ContentEncoding: oi.Metadata.Get("Content-Encoding"), + } +} + +// FromMinioClientListBucketV2Result converts minio ListBucketV2Result to ListObjectsV2Info +func FromMinioClientListBucketV2Result(bucket string, result minio.ListBucketV2Result) ListObjectsV2Info { + objects := make([]ObjectInfo, len(result.Contents)) + + for i, oi := range result.Contents { + objects[i] = FromMinioClientObjectInfo(bucket, oi) + } + + prefixes := make([]string, len(result.CommonPrefixes)) + for i, p := range result.CommonPrefixes { + prefixes[i] = p.Prefix + } + + return ListObjectsV2Info{ + IsTruncated:
result.IsTruncated, + Prefixes: prefixes, + Objects: objects, + + ContinuationToken: result.ContinuationToken, + NextContinuationToken: result.NextContinuationToken, + } +} + +// FromMinioClientListBucketResult converts minio ListBucketResult to ListObjectsInfo +func FromMinioClientListBucketResult(bucket string, result minio.ListBucketResult) ListObjectsInfo { + objects := make([]ObjectInfo, len(result.Contents)) + + for i, oi := range result.Contents { + objects[i] = FromMinioClientObjectInfo(bucket, oi) + } + + prefixes := make([]string, len(result.CommonPrefixes)) + for i, p := range result.CommonPrefixes { + prefixes[i] = p.Prefix + } + + return ListObjectsInfo{ + IsTruncated: result.IsTruncated, + NextMarker: result.NextMarker, + Prefixes: prefixes, + Objects: objects, + } +} + +// FromMinioClientListBucketResultToV2Info converts minio ListBucketResult to ListObjectsV2Info +func FromMinioClientListBucketResultToV2Info(bucket string, result minio.ListBucketResult) ListObjectsV2Info { + objects := make([]ObjectInfo, len(result.Contents)) + + for i, oi := range result.Contents { + objects[i] = FromMinioClientObjectInfo(bucket, oi) + } + + prefixes := make([]string, len(result.CommonPrefixes)) + for i, p := range result.CommonPrefixes { + prefixes[i] = p.Prefix + } + + return ListObjectsV2Info{ + IsTruncated: result.IsTruncated, + Prefixes: prefixes, + Objects: objects, + ContinuationToken: result.Marker, + NextContinuationToken: result.NextMarker, + } +} + +// ToMinioClientMetadata converts metadata to map[string]string +func ToMinioClientMetadata(metadata map[string]string) map[string]string { + mm := make(map[string]string) + for k, v := range metadata { + mm[http.CanonicalHeaderKey(k)] = v + } + return mm +} + +// ToMinioClientCompletePart converts CompletePart to minio CompletePart +func ToMinioClientCompletePart(part CompletePart) minio.CompletePart { + return minio.CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + } +} + +// 
ToMinioClientCompleteParts converts []CompletePart to minio []CompletePart +func ToMinioClientCompleteParts(parts []CompletePart) []minio.CompletePart { + mparts := make([]minio.CompletePart, len(parts)) + for i, part := range parts { + mparts[i] = ToMinioClientCompletePart(part) + } + return mparts +} + +// ErrorRespToObjectError converts Minio errors to minio object layer errors. +func ErrorRespToObjectError(err error, params ...string) error { + if err == nil { + return nil + } + + e, ok := err.(*errors.Error) + if !ok { + // Code should be fixed if this function is called without doing traceError() + // Else handling different situations in this function makes this function complicated. + errorIf(err, "Expected type *Error") + return err + } + + err = e.Cause + + bucket := "" + object := "" + if len(params) >= 1 { + bucket = params[0] + } + if len(params) == 2 { + object = params[1] + } + + minioErr, ok := err.(minio.ErrorResponse) + if !ok { + // We don't interpret non Minio errors. As minio errors will + // have StatusCode to help to convert to object errors. 
+ return e + } + + switch minioErr.Code { + case "BucketAlreadyOwnedByYou": + err = BucketAlreadyOwnedByYou{} + case "BucketNotEmpty": + err = BucketNotEmpty{} + case "NoSuchBucketPolicy": + err = PolicyNotFound{} + case "InvalidBucketName": + err = BucketNameInvalid{Bucket: bucket} + case "NoSuchBucket": + err = BucketNotFound{Bucket: bucket} + case "NoSuchKey": + if object != "" { + err = ObjectNotFound{Bucket: bucket, Object: object} + } else { + err = BucketNotFound{Bucket: bucket} + } + case "XMinioInvalidObjectName": + err = ObjectNameInvalid{} + case "AccessDenied": + err = PrefixAccessDenied{ + Bucket: bucket, + Object: object, + } + case "XAmzContentSHA256Mismatch": + err = hash.SHA256Mismatch{} + case "NoSuchUpload": + err = InvalidUploadID{} + case "EntityTooSmall": + err = PartTooSmall{} + } + + e.Cause = err + return e +} diff --git a/cmd/gateway-main.go b/cmd/gateway-main.go index 2c280ceae..65c7f4672 100644 --- a/cmd/gateway-main.go +++ b/cmd/gateway-main.go @@ -40,36 +40,15 @@ var ( } ) -// Gateway represents a gateway backend. -type Gateway interface { - // Name returns the unique name of the gateway. - Name() string - // NewGatewayLayer returns a new gateway layer. - NewGatewayLayer() (GatewayLayer, error) -} - // RegisterGatewayCommand registers a new command for gateway. func RegisterGatewayCommand(cmd cli.Command) error { - // We should not have multiple subcommands with same name. - for _, c := range gatewayCmd.Subcommands { - if c.Name == cmd.Name { - return fmt.Errorf("duplicate gateway: %s", cmd.Name) - } - } - + cmd.Flags = append(append(cmd.Flags, append(cmd.Flags, serverFlags...)...), globalFlags...) gatewayCmd.Subcommands = append(gatewayCmd.Subcommands, cmd) return nil } -// MustRegisterGatewayCommand is like RegisterGatewayCommand but panics instead of returning error. -func MustRegisterGatewayCommand(cmd cli.Command) { - if err := RegisterGatewayCommand(cmd); err != nil { - panic(err) - } -} - -// Return endpoint. 
-func parseGatewayEndpoint(arg string) (endPoint string, secure bool, err error) { +// ParseGatewayEndpoint - Return endpoint. +func ParseGatewayEndpoint(arg string) (endPoint string, secure bool, err error) { schemeSpecified := len(strings.Split(arg, "://")) > 1 if !schemeSpecified { // Default connection will be "secure". @@ -91,8 +70,8 @@ func parseGatewayEndpoint(arg string) (endPoint string, secure bool, err error) } } -// Validate gateway arguments. -func validateGatewayArguments(serverAddr, endpointAddr string) error { +// ValidateGatewayArguments - Validate gateway arguments. +func ValidateGatewayArguments(serverAddr, endpointAddr string) error { if err := CheckLocalServerAddr(serverAddr); err != nil { return err } @@ -121,8 +100,18 @@ func validateGatewayArguments(serverAddr, endpointAddr string) error { return nil } -// Handler for 'minio gateway '. -func startGateway(ctx *cli.Context, gw Gateway) { +// StartGateway - handler for 'minio gateway '. +func StartGateway(ctx *cli.Context, gw Gateway) { + if gw == nil { + fatalIf(errUnexpected, "Gateway implementation not initialized, exiting.") + } + + // Validate if we have access, secret set through environment. + gatewayName := gw.Name() + if ctx.Args().First() == "help" { + cli.ShowCommandHelpAndExit(ctx, gatewayName, 1) + } + // Get quiet flag from command line argument. quietFlag := ctx.Bool("quiet") || ctx.GlobalBool("quiet") if quietFlag { @@ -142,7 +131,6 @@ func startGateway(ctx *cli.Context, gw Gateway) { handleCommonEnvVars() // Validate if we have access, secret set through environment. - gatewayName := gw.Name() if !globalIsEnvCreds { errorIf(fmt.Errorf("Access and secret keys not set"), "Access and Secret keys should be set through ENVs for backend [%s]", gatewayName) cli.ShowCommandHelpAndExit(ctx, gatewayName, 1) @@ -167,11 +155,7 @@ func startGateway(ctx *cli.Context, gw Gateway) { initNSLock(false) // Enable local namespace lock. 
- if ctx.Args().First() == "help" { - cli.ShowCommandHelpAndExit(ctx, gatewayName, 1) - } - - newObject, err := gw.NewGatewayLayer() + newObject, err := gw.NewGatewayLayer(globalServerConfig.GetCredential()) fatalIf(err, "Unable to initialize gateway layer") router := mux.NewRouter().SkipClean(true) @@ -230,6 +214,11 @@ func startGateway(ctx *cli.Context, gw Gateway) { // Check update mode. checkUpdate(mode) + // Print a warning message if gateway is not ready for production before the startup banner. + if !gw.Production() { + log.Println(colorYellow("\n *** Warning: Not Ready for Production ***")) + } + // Print gateway startup message. printGatewayStartupMessage(getAPIEndpoints(gatewayAddr), gatewayName) } diff --git a/cmd/gateway-main_test.go b/cmd/gateway-main_test.go index a13cbd913..92a134495 100644 --- a/cmd/gateway-main_test.go +++ b/cmd/gateway-main_test.go @@ -32,16 +32,6 @@ func TestRegisterGatewayCommand(t *testing.T) { if err != nil { t.Errorf("RegisterGatewayCommand got unexpected error: %s", err) } - - // Should returns 'duplicated' error - err = RegisterGatewayCommand(cmd) - if err == nil { - t.Errorf("RegisterGatewayCommand twice with same name should return error") - } else { - if err.Error() != "duplicate gateway: test" { - t.Errorf("RegisterGatewayCommand got unexpected error: %s", err) - } - } } // Test parseGatewayEndpoint @@ -62,7 +52,7 @@ func TestParseGatewayEndpoint(t *testing.T) { } for i, test := range testCases { - endPoint, secure, err := parseGatewayEndpoint(test.arg) + endPoint, secure, err := ParseGatewayEndpoint(test.arg) errReturned := err != nil if endPoint != test.endPoint || @@ -97,7 +87,7 @@ func TestValidateGatewayArguments(t *testing.T) { {":9000", nonLoopBackIP + ":9000", false}, } for i, test := range testCases { - err := validateGatewayArguments(test.serverAddr, test.endpointAddr) + err := ValidateGatewayArguments(test.serverAddr, test.endpointAddr) if test.valid && err != nil { t.Errorf("Test %d expected not to return 
error but got %s", i+1, err) } diff --git a/cmd/gateway-router.go b/cmd/gateway-router.go index 8c7356c7b..12a3aa3c1 100644 --- a/cmd/gateway-router.go +++ b/cmd/gateway-router.go @@ -22,10 +22,26 @@ import ( router "github.com/gorilla/mux" "github.com/minio/minio-go/pkg/policy" + "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/hash" ) -// GatewayLayer - Interface to implement gateway mode. +// GatewayMinioSysTmp prefix is used in Azure/GCS gateway for save metadata sent by Initialize Multipart Upload API. +const GatewayMinioSysTmp = "minio.sys.tmp/" + +// Gateway represents a gateway backend. +type Gateway interface { + // Name returns the unique name of the gateway. + Name() string + + // NewGatewayLayer returns a new gateway layer. + NewGatewayLayer(creds auth.Credentials) (GatewayLayer, error) + + // Returns true if gateway is ready for production. + Production() bool +} + +// GatewayLayer - interface to implement gateway mode. type GatewayLayer interface { ObjectLayer diff --git a/cmd/gateway-s3.go b/cmd/gateway-s3.go deleted file mode 100644 index 522fadb2e..000000000 --- a/cmd/gateway-s3.go +++ /dev/null @@ -1,655 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package cmd - -import ( - "io" - "net/http" - - "github.com/minio/cli" - minio "github.com/minio/minio-go" - "github.com/minio/minio-go/pkg/policy" - "github.com/minio/minio-go/pkg/s3utils" - "github.com/minio/minio/pkg/errors" - "github.com/minio/minio/pkg/hash" -) - -const ( - s3Backend = "s3" -) - -func init() { - const s3GatewayTemplate = `NAME: - {{.HelpName}} - {{.Usage}} - -USAGE: - {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [ENDPOINT] -{{if .VisibleFlags}} -FLAGS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -ENDPOINT: - S3 server endpoint. Default ENDPOINT is https://s3.amazonaws.com - -ENVIRONMENT VARIABLES: - ACCESS: - MINIO_ACCESS_KEY: Username or access key of S3 storage. - MINIO_SECRET_KEY: Password or secret key of S3 storage. - - BROWSER: - MINIO_BROWSER: To disable web browser access, set this value to "off". - -EXAMPLES: - 1. Start minio gateway server for AWS S3 backend. - $ export MINIO_ACCESS_KEY=accesskey - $ export MINIO_SECRET_KEY=secretkey - $ {{.HelpName}} - - 2. Start minio gateway server for S3 backend on custom endpoint. - $ export MINIO_ACCESS_KEY=Q3AM3UQ867SPQQA43P2F - $ export MINIO_SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG - $ {{.HelpName}} https://play.minio.io:9000 -` - - MustRegisterGatewayCommand(cli.Command{ - Name: s3Backend, - Usage: "Amazon Simple Storage Service (S3).", - Action: s3GatewayMain, - CustomHelpTemplate: s3GatewayTemplate, - Flags: append(serverFlags, globalFlags...), - HideHelpCommand: true, - }) -} - -// Handler for 'minio gateway s3' command line. -func s3GatewayMain(ctx *cli.Context) { - // Validate gateway arguments. - host := ctx.Args().First() - // Validate gateway arguments. - fatalIf(validateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument") - - startGateway(ctx, &S3Gateway{host}) -} - -// S3Gateway implements Gateway. -type S3Gateway struct { - host string -} - -// Name implements Gateway interface. 
-func (g *S3Gateway) Name() string { - return s3Backend -} - -// NewGatewayLayer returns s3 gatewaylayer. -func (g *S3Gateway) NewGatewayLayer() (GatewayLayer, error) { - return newS3GatewayLayer(g.host) -} - -// s3ToObjectError converts Minio errors to minio object layer errors. -func s3ToObjectError(err error, params ...string) error { - if err == nil { - return nil - } - - e, ok := err.(*errors.Error) - if !ok { - // Code should be fixed if this function is called without doing errors.Trace() - // Else handling different situations in this function makes this function complicated. - errorIf(err, "Expected type *Error") - return err - } - - err = e.Cause - - bucket := "" - object := "" - if len(params) >= 1 { - bucket = params[0] - } - if len(params) == 2 { - object = params[1] - } - - minioErr, ok := err.(minio.ErrorResponse) - if !ok { - // We don't interpret non Minio errors. As minio errors will - // have StatusCode to help to convert to object errors. - return e - } - - switch minioErr.Code { - case "BucketAlreadyOwnedByYou": - err = BucketAlreadyOwnedByYou{} - case "BucketNotEmpty": - err = BucketNotEmpty{} - case "NoSuchBucketPolicy": - err = PolicyNotFound{} - case "InvalidBucketName": - err = BucketNameInvalid{Bucket: bucket} - case "NoSuchBucket": - err = BucketNotFound{Bucket: bucket} - case "NoSuchKey": - if object != "" { - err = ObjectNotFound{Bucket: bucket, Object: object} - } else { - err = BucketNotFound{Bucket: bucket} - } - case "XMinioInvalidObjectName": - err = ObjectNameInvalid{} - case "AccessDenied": - err = PrefixAccessDenied{ - Bucket: bucket, - Object: object, - } - case "XAmzContentSHA256Mismatch": - err = hash.SHA256Mismatch{} - case "NoSuchUpload": - err = InvalidUploadID{} - case "EntityTooSmall": - err = PartTooSmall{} - } - - e.Cause = err - return e -} - -// s3Objects implements gateway for Minio and S3 compatible object storage servers. 
-type s3Objects struct { - gatewayUnsupported - Client *minio.Core - anonClient *minio.Core -} - -// newS3GatewayLayer returns s3 gatewaylayer -func newS3GatewayLayer(host string) (GatewayLayer, error) { - var err error - var endpoint string - var secure = true - - // Validate host parameters. - if host != "" { - // Override default params if the host is provided - endpoint, secure, err = parseGatewayEndpoint(host) - if err != nil { - return nil, err - } - } - - // Default endpoint parameters - if endpoint == "" { - endpoint = "s3.amazonaws.com" - } - - creds := globalServerConfig.GetCredential() - - // Initialize minio client object. - client, err := minio.NewCore(endpoint, creds.AccessKey, creds.SecretKey, secure) - if err != nil { - return nil, err - } - - anonClient, err := minio.NewCore(endpoint, "", "", secure) - if err != nil { - return nil, err - } - anonClient.SetCustomTransport(newCustomHTTPTransport()) - - return &s3Objects{ - Client: client, - anonClient: anonClient, - }, nil -} - -// Shutdown saves any gateway metadata to disk -// if necessary and reload upon next restart. -func (l *s3Objects) Shutdown() error { - // TODO - return nil -} - -// StorageInfo is not relevant to S3 backend. -func (l *s3Objects) StorageInfo() (si StorageInfo) { - return si -} - -// MakeBucket creates a new container on S3 backend. -func (l *s3Objects) MakeBucketWithLocation(bucket, location string) error { - err := l.Client.MakeBucket(bucket, location) - if err != nil { - return s3ToObjectError(errors.Trace(err), bucket) - } - return err -} - -// GetBucketInfo gets bucket metadata.. -func (l *s3Objects) GetBucketInfo(bucket string) (bi BucketInfo, e error) { - // Verify if bucket name is valid. 
- // We are using a separate helper function here to validate bucket - // names instead of IsValidBucketName() because there is a possibility - // that certains users might have buckets which are non-DNS compliant - // in us-east-1 and we might severely restrict them by not allowing - // access to these buckets. - // Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html - if s3utils.CheckValidBucketName(bucket) != nil { - return bi, errors.Trace(BucketNameInvalid{Bucket: bucket}) - } - - buckets, err := l.Client.ListBuckets() - if err != nil { - return bi, s3ToObjectError(errors.Trace(err), bucket) - } - - for _, bi := range buckets { - if bi.Name != bucket { - continue - } - - return BucketInfo{ - Name: bi.Name, - Created: bi.CreationDate, - }, nil - } - - return bi, errors.Trace(BucketNotFound{Bucket: bucket}) -} - -// ListBuckets lists all S3 buckets -func (l *s3Objects) ListBuckets() ([]BucketInfo, error) { - buckets, err := l.Client.ListBuckets() - if err != nil { - return nil, s3ToObjectError(errors.Trace(err)) - } - - b := make([]BucketInfo, len(buckets)) - for i, bi := range buckets { - b[i] = BucketInfo{ - Name: bi.Name, - Created: bi.CreationDate, - } - } - - return b, err -} - -// DeleteBucket deletes a bucket on S3 -func (l *s3Objects) DeleteBucket(bucket string) error { - err := l.Client.RemoveBucket(bucket) - if err != nil { - return s3ToObjectError(errors.Trace(err), bucket) - } - return nil -} - -// ListObjects lists all blobs in S3 bucket filtered by prefix -func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { - result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys) - if err != nil { - return loi, s3ToObjectError(errors.Trace(err), bucket) - } - - return fromMinioClientListBucketResult(bucket, result), nil -} - -// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix -func (l *s3Objects) 
ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, e error) { - result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys) - if err != nil { - return loi, s3ToObjectError(errors.Trace(err), bucket) - } - - return fromMinioClientListBucketV2Result(bucket, result), nil -} - -// fromMinioClientListBucketV2Result converts minio ListBucketResult to ListObjectsInfo -func fromMinioClientListBucketV2Result(bucket string, result minio.ListBucketV2Result) ListObjectsV2Info { - objects := make([]ObjectInfo, len(result.Contents)) - - for i, oi := range result.Contents { - objects[i] = fromMinioClientObjectInfo(bucket, oi) - } - - prefixes := make([]string, len(result.CommonPrefixes)) - for i, p := range result.CommonPrefixes { - prefixes[i] = p.Prefix - } - - return ListObjectsV2Info{ - IsTruncated: result.IsTruncated, - Prefixes: prefixes, - Objects: objects, - - ContinuationToken: result.ContinuationToken, - NextContinuationToken: result.NextContinuationToken, - } -} - -// fromMinioClientListBucketResult converts minio ListBucketResult to ListObjectsInfo -func fromMinioClientListBucketResult(bucket string, result minio.ListBucketResult) ListObjectsInfo { - objects := make([]ObjectInfo, len(result.Contents)) - - for i, oi := range result.Contents { - objects[i] = fromMinioClientObjectInfo(bucket, oi) - } - - prefixes := make([]string, len(result.CommonPrefixes)) - for i, p := range result.CommonPrefixes { - prefixes[i] = p.Prefix - } - - return ListObjectsInfo{ - IsTruncated: result.IsTruncated, - NextMarker: result.NextMarker, - Prefixes: prefixes, - Objects: objects, - } -} - -// GetObject reads an object from S3. Supports additional -// parameters like offset and length which are synonymous with -// HTTP Range requests. -// -// startOffset indicates the starting read location of the object. 
-// length indicates the total length of the object. -func (l *s3Objects) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error { - if length < 0 && length != -1 { - return s3ToObjectError(errors.Trace(errInvalidArgument), bucket, key) - } - - opts := minio.GetObjectOptions{} - if startOffset >= 0 && length >= 0 { - if err := opts.SetRange(startOffset, startOffset+length-1); err != nil { - return s3ToObjectError(errors.Trace(err), bucket, key) - } - } - object, _, err := l.Client.GetObject(bucket, key, opts) - if err != nil { - return s3ToObjectError(errors.Trace(err), bucket, key) - } - defer object.Close() - - if _, err := io.Copy(writer, object); err != nil { - return s3ToObjectError(errors.Trace(err), bucket, key) - } - return nil -} - -// fromMinioClientObjectInfo converts minio ObjectInfo to gateway ObjectInfo -func fromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo { - userDefined := fromMinioClientMetadata(oi.Metadata) - userDefined["Content-Type"] = oi.ContentType - - return ObjectInfo{ - Bucket: bucket, - Name: oi.Key, - ModTime: oi.LastModified, - Size: oi.Size, - ETag: canonicalizeETag(oi.ETag), - UserDefined: userDefined, - ContentType: oi.ContentType, - ContentEncoding: oi.Metadata.Get("Content-Encoding"), - } -} - -// GetObjectInfo reads object info and replies back ObjectInfo -func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) { - oi, err := l.Client.StatObject(bucket, object, minio.StatObjectOptions{}) - if err != nil { - return ObjectInfo{}, s3ToObjectError(errors.Trace(err), bucket, object) - } - - return fromMinioClientObjectInfo(bucket, oi), nil -} - -// PutObject creates a new object with the incoming data, -func (l *s3Objects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) { - oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5HexString(), 
data.SHA256HexString(), toMinioClientMetadata(metadata)) - if err != nil { - return objInfo, s3ToObjectError(errors.Trace(err), bucket, object) - } - - return fromMinioClientObjectInfo(bucket, oi), nil -} - -// CopyObject copies an object from source bucket to a destination bucket. -func (l *s3Objects) CopyObject(srcBucket string, srcObject string, dstBucket string, dstObject string, metadata map[string]string) (objInfo ObjectInfo, err error) { - // Set this header such that following CopyObject() always sets the right metadata on the destination. - // metadata input is already a trickled down value from interpreting x-amz-metadata-directive at - // handler layer. So what we have right now is supposed to be applied on the destination object anyways. - // So preserve it by adding "REPLACE" directive to save all the metadata set by CopyObject API. - metadata["x-amz-metadata-directive"] = "REPLACE" - if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, metadata); err != nil { - return objInfo, s3ToObjectError(errors.Trace(err), srcBucket, srcObject) - } - return l.GetObjectInfo(dstBucket, dstObject) -} - -// CopyObjectPart creates a part in a multipart upload by copying -// existing object or a part of it. 
-func (l *s3Objects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject, uploadID string, - partID int, startOffset, length int64, metadata map[string]string) (p PartInfo, err error) { - - completePart, err := l.Client.CopyObjectPart(srcBucket, srcObject, destBucket, destObject, - uploadID, partID, startOffset, length, metadata) - if err != nil { - return p, s3ToObjectError(errors.Trace(err), srcBucket, srcObject) - } - p.PartNumber = completePart.PartNumber - p.ETag = completePart.ETag - return p, nil -} - -// DeleteObject deletes a blob in bucket -func (l *s3Objects) DeleteObject(bucket string, object string) error { - err := l.Client.RemoveObject(bucket, object) - if err != nil { - return s3ToObjectError(errors.Trace(err), bucket, object) - } - - return nil -} - -// fromMinioClientMultipartInfo converts ObjectMultipartInfo to MultipartInfo -func fromMinioClientMultipartInfo(omi minio.ObjectMultipartInfo) MultipartInfo { - return MultipartInfo{ - Object: omi.Key, - UploadID: omi.UploadID, - Initiated: omi.Initiated, - } -} - -// fromMinioClientListMultipartsInfo converts minio ListMultipartUploadsResult to ListMultipartsInfo -func fromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) ListMultipartsInfo { - uploads := make([]MultipartInfo, len(lmur.Uploads)) - - for i, um := range lmur.Uploads { - uploads[i] = fromMinioClientMultipartInfo(um) - } - - commonPrefixes := make([]string, len(lmur.CommonPrefixes)) - for i, cp := range lmur.CommonPrefixes { - commonPrefixes[i] = cp.Prefix - } - - return ListMultipartsInfo{ - KeyMarker: lmur.KeyMarker, - UploadIDMarker: lmur.UploadIDMarker, - NextKeyMarker: lmur.NextKeyMarker, - NextUploadIDMarker: lmur.NextUploadIDMarker, - MaxUploads: int(lmur.MaxUploads), - IsTruncated: lmur.IsTruncated, - Uploads: uploads, - Prefix: lmur.Prefix, - Delimiter: lmur.Delimiter, - CommonPrefixes: commonPrefixes, - EncodingType: lmur.EncodingType, - } - -} - -// ListMultipartUploads lists all multipart uploads. 
-func (l *s3Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { - result, err := l.Client.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) - if err != nil { - return lmi, err - } - - return fromMinioClientListMultipartsInfo(result), nil -} - -// fromMinioClientMetadata converts minio metadata to map[string]string -func fromMinioClientMetadata(metadata map[string][]string) map[string]string { - mm := map[string]string{} - for k, v := range metadata { - mm[http.CanonicalHeaderKey(k)] = v[0] - } - return mm -} - -// toMinioClientMetadata converts metadata to map[string][]string -func toMinioClientMetadata(metadata map[string]string) map[string]string { - mm := map[string]string{} - for k, v := range metadata { - mm[http.CanonicalHeaderKey(k)] = v - } - return mm -} - -// NewMultipartUpload upload object in multiple parts -func (l *s3Objects) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) { - // Create PutObject options - opts := minio.PutObjectOptions{UserMetadata: metadata} - uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts) - if err != nil { - return uploadID, s3ToObjectError(errors.Trace(err), bucket, object) - } - return uploadID, nil -} - -// fromMinioClientObjectPart converts minio ObjectPart to PartInfo -func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo { - return PartInfo{ - Size: op.Size, - ETag: canonicalizeETag(op.ETag), - LastModified: op.LastModified, - PartNumber: op.PartNumber, - } -} - -// PutObjectPart puts a part of object in bucket -func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) { - info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5HexString(), data.SHA256HexString()) - if err 
!= nil { - return pi, s3ToObjectError(errors.Trace(err), bucket, object) - } - - return fromMinioClientObjectPart(info), nil -} - -// fromMinioClientObjectParts converts minio ObjectPart to PartInfo -func fromMinioClientObjectParts(parts []minio.ObjectPart) []PartInfo { - toParts := make([]PartInfo, len(parts)) - for i, part := range parts { - toParts[i] = fromMinioClientObjectPart(part) - } - return toParts -} - -// fromMinioClientListPartsInfo converts minio ListObjectPartsResult to ListPartsInfo -func fromMinioClientListPartsInfo(lopr minio.ListObjectPartsResult) ListPartsInfo { - return ListPartsInfo{ - UploadID: lopr.UploadID, - Bucket: lopr.Bucket, - Object: lopr.Key, - StorageClass: "", - PartNumberMarker: lopr.PartNumberMarker, - NextPartNumberMarker: lopr.NextPartNumberMarker, - MaxParts: lopr.MaxParts, - IsTruncated: lopr.IsTruncated, - EncodingType: lopr.EncodingType, - Parts: fromMinioClientObjectParts(lopr.ObjectParts), - } -} - -// ListObjectParts returns all object parts for specified object in specified bucket -func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, e error) { - result, err := l.Client.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts) - if err != nil { - return lpi, err - } - - return fromMinioClientListPartsInfo(result), nil -} - -// AbortMultipartUpload aborts a ongoing multipart upload -func (l *s3Objects) AbortMultipartUpload(bucket string, object string, uploadID string) error { - err := l.Client.AbortMultipartUpload(bucket, object, uploadID) - return s3ToObjectError(errors.Trace(err), bucket, object) -} - -// toMinioClientCompletePart converts CompletePart to minio CompletePart -func toMinioClientCompletePart(part CompletePart) minio.CompletePart { - return minio.CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - } -} - -// toMinioClientCompleteParts converts []CompletePart to minio []CompletePart -func 
toMinioClientCompleteParts(parts []CompletePart) []minio.CompletePart { - mparts := make([]minio.CompletePart, len(parts)) - for i, part := range parts { - mparts[i] = toMinioClientCompletePart(part) - } - return mparts -} - -// CompleteMultipartUpload completes ongoing multipart upload and finalizes object -func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, e error) { - err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, toMinioClientCompleteParts(uploadedParts)) - if err != nil { - return oi, s3ToObjectError(errors.Trace(err), bucket, object) - } - - return l.GetObjectInfo(bucket, object) -} - -// SetBucketPolicies sets policy on bucket -func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { - if err := l.Client.PutBucketPolicy(bucket, policyInfo); err != nil { - return s3ToObjectError(errors.Trace(err), bucket, "") - } - - return nil -} - -// GetBucketPolicies will get policy on bucket -func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) { - policyInfo, err := l.Client.GetBucketPolicy(bucket) - if err != nil { - return policy.BucketAccessPolicy{}, s3ToObjectError(errors.Trace(err), bucket, "") - } - return policyInfo, nil -} - -// DeleteBucketPolicies deletes all policies on bucket -func (l *s3Objects) DeleteBucketPolicies(bucket string) error { - if err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{}); err != nil { - return s3ToObjectError(errors.Trace(err), bucket, "") - } - return nil -} diff --git a/cmd/gateway-unsupported.go b/cmd/gateway-unsupported.go index 302652cfe..85d14817b 100644 --- a/cmd/gateway-unsupported.go +++ b/cmd/gateway-unsupported.go @@ -24,124 +24,125 @@ import ( "github.com/minio/minio/pkg/hash" ) -type gatewayUnsupported struct{} +// GatewayUnsupported list of unsupported call stubs for gateway. 
+type GatewayUnsupported struct{} // ListMultipartUploads lists all multipart uploads. -func (a gatewayUnsupported) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) { +func (a GatewayUnsupported) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) { return lmi, errors.Trace(NotImplemented{}) } // NewMultipartUpload upload object in multiple parts -func (a gatewayUnsupported) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) { +func (a GatewayUnsupported) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) { return "", errors.Trace(NotImplemented{}) } +// CopyObjectPart copy part of object to other bucket and object +func (a GatewayUnsupported) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string, uploadID string, partID int, startOffset int64, length int64, metadata map[string]string) (pi PartInfo, err error) { + return pi, errors.Trace(NotImplemented{}) +} + // PutObjectPart puts a part of object in bucket -func (a gatewayUnsupported) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, err error) { +func (a GatewayUnsupported) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, err error) { return pi, errors.Trace(NotImplemented{}) } // ListObjectParts returns all object parts for specified object in specified bucket -func (a gatewayUnsupported) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) { +func (a GatewayUnsupported) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts 
int) (lpi ListPartsInfo, err error) { return lpi, errors.Trace(NotImplemented{}) } // AbortMultipartUpload aborts a ongoing multipart upload -func (a gatewayUnsupported) AbortMultipartUpload(bucket string, object string, uploadID string) error { +func (a GatewayUnsupported) AbortMultipartUpload(bucket string, object string, uploadID string) error { return errors.Trace(NotImplemented{}) } // CompleteMultipartUpload completes ongoing multipart upload and finalizes object -func (a gatewayUnsupported) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, err error) { +func (a GatewayUnsupported) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, err error) { return oi, errors.Trace(NotImplemented{}) } // SetBucketPolicies sets policy on bucket -func (a gatewayUnsupported) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { +func (a GatewayUnsupported) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { return errors.Trace(NotImplemented{}) } // GetBucketPolicies will get policy on bucket -func (a gatewayUnsupported) GetBucketPolicies(bucket string) (bal policy.BucketAccessPolicy, err error) { +func (a GatewayUnsupported) GetBucketPolicies(bucket string) (bal policy.BucketAccessPolicy, err error) { return bal, errors.Trace(NotImplemented{}) } // DeleteBucketPolicies deletes all policies on bucket -func (a gatewayUnsupported) DeleteBucketPolicies(bucket string) error { +func (a GatewayUnsupported) DeleteBucketPolicies(bucket string) error { return errors.Trace(NotImplemented{}) } -// CopyObjectPart - Not implemented. 
-func (a gatewayUnsupported) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, - partID int, startOffset int64, length int64, metadata map[string]string) (info PartInfo, err error) { - return info, errors.Trace(NotImplemented{}) -} - -// HealBucket - Not relevant. -func (a gatewayUnsupported) HealBucket(bucket string) error { +// HealBucket - Not implemented stub +func (a GatewayUnsupported) HealBucket(bucket string) error { return errors.Trace(NotImplemented{}) } -// ListBucketsHeal - Not relevant. -func (a gatewayUnsupported) ListBucketsHeal() (buckets []BucketInfo, err error) { +// ListBucketsHeal - Not implemented stub +func (a GatewayUnsupported) ListBucketsHeal() (buckets []BucketInfo, err error) { return nil, errors.Trace(NotImplemented{}) } -// HealObject - Not relevant. -func (a gatewayUnsupported) HealObject(bucket, object string) (int, int, error) { +// HealObject - Not implemented stub +func (a GatewayUnsupported) HealObject(bucket, object string) (int, int, error) { return 0, 0, errors.Trace(NotImplemented{}) } -func (a gatewayUnsupported) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { +// ListObjectsV2 - Not implemented stub +func (a GatewayUnsupported) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { return result, errors.Trace(NotImplemented{}) } -// ListObjectsHeal - Not relevant. -func (a gatewayUnsupported) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { +// ListObjectsHeal - Not implemented stub +func (a GatewayUnsupported) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { return loi, errors.Trace(NotImplemented{}) } -// ListUploadsHeal - Not relevant. 
-func (a gatewayUnsupported) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker, +// ListUploadsHeal - Not implemented stub +func (a GatewayUnsupported) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { return lmi, errors.Trace(NotImplemented{}) } // AnonListObjects - List objects anonymously -func (a gatewayUnsupported) AnonListObjects(bucket string, prefix string, marker string, delimiter string, +func (a GatewayUnsupported) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { return loi, errors.Trace(NotImplemented{}) } // AnonListObjectsV2 - List objects in V2 mode, anonymously -func (a gatewayUnsupported) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, +func (a GatewayUnsupported) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, err error) { return loi, errors.Trace(NotImplemented{}) } // AnonGetBucketInfo - Get bucket metadata anonymously. -func (a gatewayUnsupported) AnonGetBucketInfo(bucket string) (bi BucketInfo, err error) { +func (a GatewayUnsupported) AnonGetBucketInfo(bucket string) (bi BucketInfo, err error) { return bi, errors.Trace(NotImplemented{}) } // AnonPutObject creates a new object anonymously with the incoming data, -func (a gatewayUnsupported) AnonPutObject(bucket, object string, data *hash.Reader, +func (a GatewayUnsupported) AnonPutObject(bucket, object string, data *hash.Reader, metadata map[string]string) (ObjectInfo, error) { return ObjectInfo{}, errors.Trace(NotImplemented{}) } // AnonGetObject downloads object anonymously. 
-func (a gatewayUnsupported) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) { +func (a GatewayUnsupported) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) { return errors.Trace(NotImplemented{}) } // AnonGetObjectInfo returns stat information about an object anonymously. -func (a gatewayUnsupported) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) { +func (a GatewayUnsupported) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) { return objInfo, errors.Trace(NotImplemented{}) } // CopyObject copies a blob from source container to destination container. -func (a gatewayUnsupported) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, +func (a GatewayUnsupported) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error) { return objInfo, errors.Trace(NotImplemented{}) } diff --git a/cmd/gateway-azure-anonymous.go b/cmd/gateway/azure/gateway-azure-anonymous.go similarity index 88% rename from cmd/gateway-azure-anonymous.go rename to cmd/gateway/azure/gateway-azure-anonymous.go index fa1bd58b2..532a8b7a6 100644 --- a/cmd/gateway-azure-anonymous.go +++ b/cmd/gateway/azure/gateway-azure-anonymous.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package cmd +package azure import ( "encoding/xml" @@ -29,6 +29,8 @@ import ( "github.com/Azure/azure-sdk-for-go/storage" "github.com/minio/minio/pkg/errors" + + minio "github.com/minio/minio/cmd" ) // Copied from github.com/Azure/azure-sdk-for-go/storage/container.go @@ -113,21 +115,21 @@ func azureAnonRequest(verb, urlStr string, header http.Header) (*http.Response, } // AnonGetBucketInfo - Get bucket metadata from azure anonymously. 
-func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) { +func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo minio.BucketInfo, err error) { blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL() url, err := url.Parse(blobURL) if err != nil { return bucketInfo, azureToObjectError(errors.Trace(err)) } url.RawQuery = "restype=container" - resp, err := azureAnonRequest(httpHEAD, url.String(), nil) + resp, err := azureAnonRequest(http.MethodHead, url.String(), nil) if err != nil { return bucketInfo, azureToObjectError(errors.Trace(err), bucket) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return bucketInfo, azureToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket)), bucket) + return bucketInfo, azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket)), bucket) } t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified")) @@ -135,12 +137,10 @@ func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, return bucketInfo, errors.Trace(err) } - bucketInfo = BucketInfo{ + return minio.BucketInfo{ Name: bucket, Created: t, - } - - return bucketInfo, nil + }, nil } // AnonGetObject - SendGET request without authentication. 
@@ -154,14 +154,14 @@ func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, l } blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL() - resp, err := azureAnonRequest(httpGET, blobURL, h) + resp, err := azureAnonRequest(http.MethodGet, blobURL, h) if err != nil { return azureToObjectError(errors.Trace(err), bucket, object) } defer resp.Body.Close() if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK { - return azureToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) + return azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) } _, err = io.Copy(writer, resp.Body) @@ -170,16 +170,16 @@ func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, l // AnonGetObjectInfo - Send HEAD request without authentication and convert the // result to ObjectInfo. -func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) { +func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo minio.ObjectInfo, err error) { blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL() - resp, err := azureAnonRequest(httpHEAD, blobURL, nil) + resp, err := azureAnonRequest(http.MethodHead, blobURL, nil) if err != nil { return objInfo, azureToObjectError(errors.Trace(err), bucket, object) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return objInfo, azureToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) + return objInfo, azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) } var contentLength int64 @@ -187,7 +187,7 @@ func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectI if contentLengthStr != "" { contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) if err != nil 
{ - return objInfo, azureToObjectError(errors.Trace(errUnexpected), bucket, object) + return objInfo, azureToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object) } } @@ -211,7 +211,7 @@ func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectI } // AnonListObjects - Use Azure equivalent ListBlobs. -func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) { +func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result minio.ListObjectsInfo, err error) { params := storage.ListBlobsParameters{ Prefix: prefix, Marker: marker, @@ -230,7 +230,7 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, } url.RawQuery = q.Encode() - resp, err := azureAnonRequest(httpGET, url.String(), nil) + resp, err := azureAnonRequest(http.MethodGet, url.String(), nil) if err != nil { return result, azureToObjectError(errors.Trace(err)) } @@ -250,7 +250,7 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, result.IsTruncated = listResp.NextMarker != "" result.NextMarker = listResp.NextMarker for _, object := range listResp.Blobs { - result.Objects = append(result.Objects, ObjectInfo{ + result.Objects = append(result.Objects, minio.ObjectInfo{ Bucket: bucket, Name: object.Name, ModTime: time.Time(object.Properties.LastModified), @@ -265,7 +265,7 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, } // AnonListObjectsV2 - List objects in V2 mode, anonymously -func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { +func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result minio.ListObjectsV2Info, err error) { params := 
storage.ListBlobsParameters{ Prefix: prefix, Marker: continuationToken, @@ -307,12 +307,12 @@ func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken, deli result.NextContinuationToken = listResp.NextMarker } for _, object := range listResp.Blobs { - result.Objects = append(result.Objects, ObjectInfo{ + result.Objects = append(result.Objects, minio.ObjectInfo{ Bucket: bucket, Name: object.Name, ModTime: time.Time(object.Properties.LastModified), Size: object.Properties.ContentLength, - ETag: canonicalizeETag(object.Properties.Etag), + ETag: minio.CanonicalizeETag(object.Properties.Etag), ContentType: object.Properties.ContentType, ContentEncoding: object.Properties.ContentEncoding, }) diff --git a/cmd/gateway-azure.go b/cmd/gateway/azure/gateway-azure.go similarity index 87% rename from cmd/gateway-azure.go rename to cmd/gateway/azure/gateway-azure.go index edcb45d1c..25fa6c860 100644 --- a/cmd/gateway-azure.go +++ b/cmd/gateway/azure/gateway-azure.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package cmd +package azure import ( "bytes" @@ -35,14 +35,18 @@ import ( humanize "github.com/dustin/go-humanize" "github.com/minio/cli" "github.com/minio/minio-go/pkg/policy" + "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/hash" + + minio "github.com/minio/minio/cmd" ) const ( globalAzureAPIVersion = "2016-05-31" azureBlockSize = 100 * humanize.MiByte - metadataObjectNameTemplate = globalMinioSysTmp + "multipart/v1/%s.%x/azure.json" + azureS3MinPartSize = 5 * humanize.MiByte + metadataObjectNameTemplate = minio.GatewayMinioSysTmp + "multipart/v1/%s.%x/azure.json" azureBackend = "azure" ) @@ -80,12 +84,11 @@ EXAMPLES: ` - MustRegisterGatewayCommand(cli.Command{ + minio.RegisterGatewayCommand(cli.Command{ Name: azureBackend, Usage: "Microsoft Azure Blob Storage.", Action: azureGatewayMain, CustomHelpTemplate: azureGatewayTemplate, - Flags: append(serverFlags, globalFlags...), HideHelpCommand: true, }) } @@ -95,24 +98,49 @@ func azureGatewayMain(ctx *cli.Context) { // Validate gateway arguments. host := ctx.Args().First() // Validate gateway arguments. - fatalIf(validateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument") + minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument") - startGateway(ctx, &AzureGateway{host}) + minio.StartGateway(ctx, &Azure{host}) } -// AzureGateway implements Gateway. -type AzureGateway struct { +// Azure implements Gateway. +type Azure struct { host string } // Name implements Gateway interface. -func (g *AzureGateway) Name() string { +func (g *Azure) Name() string { return azureBackend } // NewGatewayLayer initializes azure blob storage client and returns AzureObjects. 
-func (g *AzureGateway) NewGatewayLayer() (GatewayLayer, error) { - return newAzureLayer(g.host) +func (g *Azure) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) { + var err error + var endpoint = storage.DefaultBaseURL + var secure = true + + // If user provided some parameters + if g.host != "" { + endpoint, secure, err = minio.ParseGatewayEndpoint(g.host) + if err != nil { + return nil, err + } + } + + c, err := storage.NewClient(creds.AccessKey, creds.SecretKey, endpoint, globalAzureAPIVersion, secure) + if err != nil { + return &azureObjects{}, err + } + c.HTTPClient = &http.Client{Transport: minio.NewCustomHTTPTransport()} + + return &azureObjects{ + client: c.GetBlobService(), + }, nil +} + +// Production - Azure gateway is production ready. +func (g *Azure) Production() bool { + return true } // s3MetaToAzureProperties converts metadata meant for S3 PUT/COPY @@ -133,7 +161,7 @@ func s3MetaToAzureProperties(s3Metadata map[string]string) (storage.BlobMetadata storage.BlobProperties, error) { for k := range s3Metadata { if strings.Contains(k, "--") { - return storage.BlobMetadata{}, storage.BlobProperties{}, errors.Trace(UnsupportedMetadata{}) + return storage.BlobMetadata{}, storage.BlobProperties{}, errors.Trace(minio.UnsupportedMetadata{}) } } @@ -238,7 +266,7 @@ func azurePropertiesToS3Meta(meta storage.BlobMetadata, props storage.BlobProper // azureObjects - Implements Object layer for Azure blob storage. type azureObjects struct { - gatewayUnsupported + minio.GatewayUnsupported client storage.BlobStorageClient // Azure sdk client } @@ -252,7 +280,7 @@ func azureToObjectError(err error, params ...string) error { if !ok { // Code should be fixed if this function is called without doing errors.Trace() // Else handling different situations in this function makes this function complicated. 
- errorIf(err, "Expected type *Error") + minio.ErrorIf(err, "Expected type *Error") return err } @@ -275,23 +303,26 @@ func azureToObjectError(err error, params ...string) error { switch azureErr.Code { case "ContainerAlreadyExists": - err = BucketExists{Bucket: bucket} + err = minio.BucketExists{Bucket: bucket} case "InvalidResourceName": - err = BucketNameInvalid{Bucket: bucket} + err = minio.BucketNameInvalid{Bucket: bucket} case "RequestBodyTooLarge": - err = PartTooBig{} + err = minio.PartTooBig{} case "InvalidMetadata": - err = UnsupportedMetadata{} + err = minio.UnsupportedMetadata{} default: switch azureErr.StatusCode { case http.StatusNotFound: if object != "" { - err = ObjectNotFound{bucket, object} + err = minio.ObjectNotFound{ + Bucket: bucket, + Object: object, + } } else { - err = BucketNotFound{Bucket: bucket} + err = minio.BucketNotFound{Bucket: bucket} } case http.StatusBadRequest: - err = BucketNameInvalid{Bucket: bucket} + err = minio.BucketNameInvalid{Bucket: bucket} } } e.Cause = err @@ -316,11 +347,15 @@ func mustGetAzureUploadID() string { // checkAzureUploadID - returns error in case of given string is upload ID. func checkAzureUploadID(uploadID string) (err error) { if len(uploadID) != 16 { - return errors.Trace(MalformedUploadID{uploadID}) + return errors.Trace(minio.MalformedUploadID{ + UploadID: uploadID, + }) } if _, err = hex.DecodeString(uploadID); err != nil { - return errors.Trace(MalformedUploadID{uploadID}) + return errors.Trace(minio.MalformedUploadID{ + UploadID: uploadID, + }) } return nil @@ -360,32 +395,6 @@ func azureParseBlockID(blockID string) (partID, subPartNumber int, uploadID, md5 return } -// Inits azure blob storage client and returns AzureObjects. 
-func newAzureLayer(host string) (GatewayLayer, error) { - var err error - var endpoint = storage.DefaultBaseURL - var secure = true - - // If user provided some parameters - if host != "" { - endpoint, secure, err = parseGatewayEndpoint(host) - if err != nil { - return nil, err - } - } - - creds := globalServerConfig.GetCredential() - c, err := storage.NewClient(creds.AccessKey, creds.SecretKey, endpoint, globalAzureAPIVersion, secure) - if err != nil { - return &azureObjects{}, err - } - c.HTTPClient = &http.Client{Transport: newCustomHTTPTransport()} - - return &azureObjects{ - client: c.GetBlobService(), - }, nil -} - // Shutdown - save any gateway metadata to disk // if necessary and reload upon next restart. func (a *azureObjects) Shutdown() error { @@ -393,7 +402,7 @@ func (a *azureObjects) Shutdown() error { } // StorageInfo - Not relevant to Azure backend. -func (a *azureObjects) StorageInfo() (si StorageInfo) { +func (a *azureObjects) StorageInfo() (si minio.StorageInfo) { return si } @@ -407,13 +416,13 @@ func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error { } // GetBucketInfo - Get bucket metadata.. -func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) { +func (a *azureObjects) GetBucketInfo(bucket string) (bi minio.BucketInfo, e error) { // Verify if bucket (container-name) is valid. // IsValidBucketName has same restrictions as container names mentioned // in azure documentation, so we will simply use the same function here. 
// Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata - if !IsValidBucketName(bucket) { - return bi, errors.Trace(BucketNameInvalid{Bucket: bucket}) + if !minio.IsValidBucketName(bucket) { + return bi, errors.Trace(minio.BucketNameInvalid{Bucket: bucket}) } // Azure does not have an equivalent call, hence use @@ -428,18 +437,18 @@ func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) { if container.Name == bucket { t, e := time.Parse(time.RFC1123, container.Properties.LastModified) if e == nil { - return BucketInfo{ + return minio.BucketInfo{ Name: bucket, Created: t, }, nil } // else continue } } - return bi, errors.Trace(BucketNotFound{Bucket: bucket}) + return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket}) } // ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers. -func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) { +func (a *azureObjects) ListBuckets() (buckets []minio.BucketInfo, err error) { resp, err := a.client.ListContainers(storage.ListContainersParameters{}) if err != nil { return nil, azureToObjectError(errors.Trace(err)) @@ -449,7 +458,7 @@ func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) { if e != nil { return nil, errors.Trace(e) } - buckets = append(buckets, BucketInfo{ + buckets = append(buckets, minio.BucketInfo{ Name: container.Name, Created: t, }) @@ -465,8 +474,8 @@ func (a *azureObjects) DeleteBucket(bucket string) error { // ListObjects - lists all blobs on azure with in a container filtered by prefix // and marker, uses Azure equivalent ListBlobs. 
-func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) { - var objects []ObjectInfo +func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result minio.ListObjectsInfo, err error) { + var objects []minio.ObjectInfo var prefixes []string container := a.client.GetContainerReference(bucket) for len(objects) == 0 && len(prefixes) == 0 { @@ -481,15 +490,15 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max } for _, object := range resp.Blobs { - if strings.HasPrefix(object.Name, globalMinioSysTmp) { + if strings.HasPrefix(object.Name, minio.GatewayMinioSysTmp) { continue } - objects = append(objects, ObjectInfo{ + objects = append(objects, minio.ObjectInfo{ Bucket: bucket, Name: object.Name, ModTime: time.Time(object.Properties.LastModified), Size: object.Properties.ContentLength, - ETag: toS3ETag(object.Properties.Etag), + ETag: minio.ToS3ETag(object.Properties.Etag), ContentType: object.Properties.ContentType, ContentEncoding: object.Properties.ContentEncoding, }) @@ -497,7 +506,7 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max // Remove minio.sys.tmp prefix. 
for _, prefix := range resp.BlobPrefixes { - if prefix != globalMinioSysTmp { + if prefix != minio.GatewayMinioSysTmp { prefixes = append(prefixes, prefix) } } @@ -516,13 +525,13 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max } // ListObjectsV2 - list all blobs in Azure bucket filtered by prefix -func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { +func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result minio.ListObjectsV2Info, err error) { marker := continuationToken if startAfter != "" { marker = startAfter } - var resultV1 ListObjectsInfo + var resultV1 minio.ListObjectsInfo resultV1, err = a.ListObjects(bucket, prefix, marker, delimiter, maxKeys) if err != nil { return result, err @@ -545,7 +554,7 @@ func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken, delimite func (a *azureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error { // startOffset cannot be negative. if startOffset < 0 { - return toObjectErr(errors.Trace(errUnexpected), bucket, object) + return azureToObjectError(errors.Trace(minio.InvalidRange{}), bucket, object) } blobRange := &storage.BlobRange{Start: uint64(startOffset)} @@ -571,32 +580,30 @@ func (a *azureObjects) GetObject(bucket, object string, startOffset int64, lengt return errors.Trace(err) } -// GetObjectInfo - reads blob metadata properties and replies back ObjectInfo, +// GetObjectInfo - reads blob metadata properties and replies back minio.ObjectInfo, // uses zure equivalent GetBlobProperties. 
-func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) { +func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo minio.ObjectInfo, err error) { blob := a.client.GetContainerReference(bucket).GetBlobReference(object) err = blob.GetProperties(nil) if err != nil { return objInfo, azureToObjectError(errors.Trace(err), bucket, object) } - meta := azurePropertiesToS3Meta(blob.Metadata, blob.Properties) - objInfo = ObjectInfo{ + return minio.ObjectInfo{ Bucket: bucket, - UserDefined: meta, - ETag: toS3ETag(blob.Properties.Etag), + UserDefined: azurePropertiesToS3Meta(blob.Metadata, blob.Properties), + ETag: minio.ToS3ETag(blob.Properties.Etag), ModTime: time.Time(blob.Properties.LastModified), Name: object, Size: blob.Properties.ContentLength, ContentType: blob.Properties.ContentType, ContentEncoding: blob.Properties.ContentEncoding, - } - return objInfo, nil + }, nil } // PutObject - Create a new blob with the incoming data, // uses Azure equivalent CreateBlockBlobFromReader. -func (a *azureObjects) PutObject(bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) { +func (a *azureObjects) PutObject(bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) { blob := a.client.GetContainerReference(bucket).GetBlobReference(object) blob.Metadata, blob.Properties, err = s3MetaToAzureProperties(metadata) if err != nil { @@ -611,7 +618,7 @@ func (a *azureObjects) PutObject(bucket, object string, data *hash.Reader, metad // CopyObject - Copies a blob from source container to destination container. // Uses Azure equivalent CopyBlob API. 
-func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error) { +func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo minio.ObjectInfo, err error) { srcBlobURL := a.client.GetContainerReference(srcBucket).GetBlobReference(srcObject).GetURL() destBlob := a.client.GetContainerReference(destBucket).GetBlobReference(destObject) azureMeta, props, err := s3MetaToAzureProperties(metadata) @@ -643,7 +650,7 @@ func (a *azureObjects) DeleteObject(bucket, object string) error { } // ListMultipartUploads - It's decided not to support List Multipart Uploads, hence returning empty result. -func (a *azureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) { +func (a *azureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result minio.ListMultipartsInfo, err error) { // It's decided not to support List Multipart Uploads, hence returning empty result. return result, nil } @@ -662,9 +669,14 @@ func (a *azureObjects) checkUploadIDExists(bucketName, objectName, uploadID stri getAzureMetadataObjectName(objectName, uploadID)) err = blob.GetMetadata(nil) err = azureToObjectError(errors.Trace(err), bucketName, objectName) - oerr := ObjectNotFound{bucketName, objectName} + oerr := minio.ObjectNotFound{ + Bucket: bucketName, + Object: objectName, + } if errors.Cause(err) == oerr { - err = errors.Trace(InvalidUploadID{}) + err = errors.Trace(minio.InvalidUploadID{ + UploadID: uploadID, + }) } return err } @@ -692,7 +704,7 @@ func (a *azureObjects) NewMultipartUpload(bucket, object string, metadata map[st } // PutObjectPart - Use Azure equivalent PutBlockWithLength. 
-func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) { +func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (info minio.PartInfo, err error) { if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil { return info, err } @@ -703,7 +715,7 @@ func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int etag := data.MD5HexString() if etag == "" { - etag = genETag() + etag = minio.GenETag() } subPartSize, subPartNumber := int64(azureBlockSize), 1 @@ -728,13 +740,13 @@ func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int info.PartNumber = partID info.ETag = etag - info.LastModified = UTCNow() + info.LastModified = minio.UTCNow() info.Size = data.Size() return info, nil } // ListObjectParts - Use Azure equivalent GetBlockList. -func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) { +func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result minio.ListPartsInfo, err error) { if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil { return result, err } @@ -755,20 +767,20 @@ func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumb return result, azureToObjectError(errors.Trace(err), bucket, object) } // Build a sorted list of parts and return the requested entries. 
- partsMap := make(map[int]PartInfo) + partsMap := make(map[int]minio.PartInfo) for _, block := range resp.UncommittedBlocks { var partNumber int var parsedUploadID string var md5Hex string if partNumber, _, parsedUploadID, md5Hex, err = azureParseBlockID(block.Name); err != nil { - return result, azureToObjectError(errors.Trace(errUnexpected), bucket, object) + return result, azureToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object) } if parsedUploadID != uploadID { continue } part, ok := partsMap[partNumber] if !ok { - partsMap[partNumber] = PartInfo{ + partsMap[partNumber] = minio.PartInfo{ PartNumber: partNumber, Size: block.Size, ETag: md5Hex, @@ -778,12 +790,12 @@ func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumb if part.ETag != md5Hex { // If two parts of same partNumber were uploaded with different contents // return error as we won't be able to decide which the latest part is. - return result, azureToObjectError(errors.Trace(errUnexpected), bucket, object) + return result, azureToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object) } part.Size += block.Size partsMap[partNumber] = part } - var parts []PartInfo + var parts []minio.PartInfo for _, part := range partsMap { parts = append(parts, part) } @@ -831,7 +843,7 @@ func (a *azureObjects) AbortMultipartUpload(bucket, object, uploadID string) (er } // CompleteMultipartUpload - Use Azure equivalent PutBlockList. 
-func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) { +func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []minio.CompletePart) (objInfo minio.ObjectInfo, err error) { metadataObject := getAzureMetadataObjectName(object, uploadID) if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil { return objInfo, err @@ -859,7 +871,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject) derr := blob.Delete(nil) - errorIf(derr, "unable to remove meta data object for upload ID %s", uploadID) + minio.ErrorIf(derr, "unable to remove meta data object for upload ID %s", uploadID) }() objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object) @@ -888,7 +900,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, } if len(blocks) == 0 { - return nil, 0, InvalidPart{} + return nil, 0, minio.InvalidPart{} } return blocks, size, nil @@ -910,8 +922,8 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, // Error out if parts except last part sizing < 5MiB. for i, size := range partSizes[:len(partSizes)-1] { - if size < globalMinPartSize { - return objInfo, errors.Trace(PartTooSmall{ + if size < azureS3MinPartSize { + return objInfo, errors.Trace(minio.PartTooSmall{ PartNumber: uploadedParts[i].PartNumber, PartSize: size, PartETag: uploadedParts[i].ETag, @@ -947,23 +959,23 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, // As the common denominator for minio and azure is readonly and none, we support // these two policies at the bucket level. 
func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { - var policies []BucketAccessPolicy + var policies []minio.BucketAccessPolicy for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) { - policies = append(policies, BucketAccessPolicy{ + policies = append(policies, minio.BucketAccessPolicy{ Prefix: prefix, Policy: policy, }) } prefix := bucket + "/*" // For all objects inside the bucket. if len(policies) != 1 { - return errors.Trace(NotImplemented{}) + return errors.Trace(minio.NotImplemented{}) } if policies[0].Prefix != prefix { - return errors.Trace(NotImplemented{}) + return errors.Trace(minio.NotImplemented{}) } if policies[0].Policy != policy.BucketPolicyReadOnly { - return errors.Trace(NotImplemented{}) + return errors.Trace(minio.NotImplemented{}) } perm := storage.ContainerPermissions{ AccessType: storage.ContainerAccessTypeContainer, @@ -984,11 +996,11 @@ func (a *azureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPoli } switch perm.AccessType { case storage.ContainerAccessTypePrivate: - return policy.BucketAccessPolicy{}, errors.Trace(PolicyNotFound{Bucket: bucket}) + return policy.BucketAccessPolicy{}, errors.Trace(minio.PolicyNotFound{Bucket: bucket}) case storage.ContainerAccessTypeContainer: policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "") default: - return policy.BucketAccessPolicy{}, azureToObjectError(errors.Trace(NotImplemented{})) + return policy.BucketAccessPolicy{}, azureToObjectError(errors.Trace(minio.NotImplemented{})) } return policyInfo, nil } diff --git a/cmd/gateway-azure_test.go b/cmd/gateway/azure/gateway-azure_test.go similarity index 93% rename from cmd/gateway-azure_test.go rename to cmd/gateway/azure/gateway-azure_test.go index 33973a19a..bb79a4f02 100644 --- a/cmd/gateway-azure_test.go +++ b/cmd/gateway/azure/gateway-azure_test.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package cmd +package azure import ( "fmt" @@ -24,6 +24,7 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/storage" + minio "github.com/minio/minio/cmd" "github.com/minio/minio/pkg/errors" ) @@ -67,7 +68,7 @@ func TestS3MetaToAzureProperties(t *testing.T) { } _, _, err = s3MetaToAzureProperties(headers) if err = errors.Cause(err); err != nil { - if _, ok := err.(UnsupportedMetadata); !ok { + if _, ok := err.(minio.UnsupportedMetadata); !ok { t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err) } } @@ -150,27 +151,27 @@ func TestAzureToObjectError(t *testing.T) { { errors.Trace(storage.AzureStorageServiceError{ Code: "ContainerAlreadyExists", - }), BucketExists{Bucket: "bucket"}, "bucket", "", + }), minio.BucketExists{Bucket: "bucket"}, "bucket", "", }, { errors.Trace(storage.AzureStorageServiceError{ Code: "InvalidResourceName", - }), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "", + }), minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "", }, { errors.Trace(storage.AzureStorageServiceError{ Code: "RequestBodyTooLarge", - }), PartTooBig{}, "", "", + }), minio.PartTooBig{}, "", "", }, { errors.Trace(storage.AzureStorageServiceError{ Code: "InvalidMetadata", - }), UnsupportedMetadata{}, "", "", + }), minio.UnsupportedMetadata{}, "", "", }, { errors.Trace(storage.AzureStorageServiceError{ StatusCode: http.StatusNotFound, - }), ObjectNotFound{ + }), minio.ObjectNotFound{ Bucket: "bucket", Object: "object", }, "bucket", "object", @@ -178,12 +179,12 @@ func TestAzureToObjectError(t *testing.T) { { errors.Trace(storage.AzureStorageServiceError{ StatusCode: http.StatusNotFound, - }), BucketNotFound{Bucket: "bucket"}, "bucket", "", + }), minio.BucketNotFound{Bucket: "bucket"}, "bucket", "", }, { errors.Trace(storage.AzureStorageServiceError{ StatusCode: http.StatusBadRequest, - }), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "", + }), minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "", }, } for i, testCase := 
range testCases { @@ -321,32 +322,27 @@ func TestAnonErrToObjectErr(t *testing.T) { {"ObjectNotFound", http.StatusNotFound, []string{"testBucket", "testObject"}, - ObjectNotFound{Bucket: "testBucket", Object: "testObject"}, + minio.ObjectNotFound{Bucket: "testBucket", Object: "testObject"}, }, {"BucketNotFound", http.StatusNotFound, []string{"testBucket", ""}, - BucketNotFound{Bucket: "testBucket"}, + minio.BucketNotFound{Bucket: "testBucket"}, }, {"ObjectNameInvalid", http.StatusBadRequest, []string{"testBucket", "testObject"}, - ObjectNameInvalid{Bucket: "testBucket", Object: "testObject"}, + minio.ObjectNameInvalid{Bucket: "testBucket", Object: "testObject"}, }, {"BucketNameInvalid", http.StatusBadRequest, []string{"testBucket", ""}, - BucketNameInvalid{Bucket: "testBucket"}, - }, - {"UnexpectedError", - http.StatusBadGateway, - []string{"testBucket", "testObject"}, - errUnexpected, + minio.BucketNameInvalid{Bucket: "testBucket"}, }, } for _, test := range testCases { t.Run(test.name, func(t *testing.T) { - if err := anonErrToObjectErr(test.statusCode, test.params...); !reflect.DeepEqual(err, test.wantErr) { + if err := minio.AnonErrToObjectErr(test.statusCode, test.params...); !reflect.DeepEqual(err, test.wantErr) { t.Errorf("anonErrToObjectErr() error = %v, wantErr %v", err, test.wantErr) } }) diff --git a/cmd/gateway-b2-anonymous.go b/cmd/gateway/b2/gateway-b2-anonymous.go similarity index 94% rename from cmd/gateway-b2-anonymous.go rename to cmd/gateway/b2/gateway-b2-anonymous.go index 20375e9c8..6a3f550e1 100644 --- a/cmd/gateway-b2-anonymous.go +++ b/cmd/gateway/b2/gateway-b2-anonymous.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package cmd +package b2 import ( "fmt" @@ -26,6 +26,8 @@ import ( "time" "github.com/minio/minio/pkg/errors" + + minio "github.com/minio/minio/cmd" ) // mkRange converts offset, size into Range header equivalent. 
@@ -34,9 +36,9 @@ func mkRange(offset, size int64) string { return "" } if size == 0 { - return fmt.Sprintf("%s%d-", byteRangePrefix, offset) + return fmt.Sprintf("bytes=%d-", offset) } - return fmt.Sprintf("%s%d-%d", byteRangePrefix, offset, offset+size-1) + return fmt.Sprintf("bytes=%d-%d", offset, offset+size-1) } // AnonGetObject - performs a plain http GET request on a public resource, @@ -71,7 +73,7 @@ func (l *b2Objects) AnonGetObject(bucket string, object string, startOffset int6 // X-Bz-Info-
: is converted to
: // Content-Type is converted to ContentType. // X-Bz-Content-Sha1 is converted to ETag. -func headerToObjectInfo(bucket, object string, header http.Header) (objInfo ObjectInfo, err error) { +func headerToObjectInfo(bucket, object string, header http.Header) (objInfo minio.ObjectInfo, err error) { clen, err := strconv.ParseInt(header.Get("Content-Length"), 10, 64) if err != nil { return objInfo, b2ToObjectError(errors.Trace(err), bucket, object) @@ -103,7 +105,7 @@ func headerToObjectInfo(bucket, object string, header http.Header) (objInfo Obje } } - return ObjectInfo{ + return minio.ObjectInfo{ Bucket: bucket, Name: object, ContentType: header.Get("Content-Type"), @@ -116,7 +118,7 @@ func headerToObjectInfo(bucket, object string, header http.Header) (objInfo Obje // AnonGetObjectInfo - performs a plain http HEAD request on a public resource, // fails if the resource is not public. -func (l *b2Objects) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) { +func (l *b2Objects) AnonGetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) { uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object) req, err := http.NewRequest("HEAD", uri, nil) if err != nil { diff --git a/cmd/gateway-b2.go b/cmd/gateway/b2/gateway-b2.go similarity index 84% rename from cmd/gateway-b2.go rename to cmd/gateway/b2/gateway-b2.go index 35a37bf82..f4a054459 100644 --- a/cmd/gateway-b2.go +++ b/cmd/gateway/b2/gateway-b2.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package cmd +package b2 import ( "context" @@ -34,6 +34,8 @@ import ( "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/errors" h2 "github.com/minio/minio/pkg/hash" + + minio "github.com/minio/minio/cmd" ) // Supported bucket types by B2 backend. 
@@ -68,53 +70,33 @@ EXAMPLES: $ {{.HelpName}} ` - - MustRegisterGatewayCommand(cli.Command{ + minio.RegisterGatewayCommand(cli.Command{ Name: b2Backend, Usage: "Backblaze B2.", Action: b2GatewayMain, CustomHelpTemplate: b2GatewayTemplate, - Flags: append(serverFlags, globalFlags...), HideHelpCommand: true, }) } // Handler for 'minio gateway b2' command line. func b2GatewayMain(ctx *cli.Context) { - startGateway(ctx, &B2Gateway{}) + minio.StartGateway(ctx, &B2{}) } -// B2Gateway implements Gateway. -type B2Gateway struct{} +// B2 implements Minio Gateway +type B2 struct{} // Name implements Gateway interface. -func (g *B2Gateway) Name() string { +func (g *B2) Name() string { return b2Backend } // NewGatewayLayer returns b2 gateway layer, implements GatewayLayer interface to // talk to B2 remote backend. -func (g *B2Gateway) NewGatewayLayer() (GatewayLayer, error) { - log.Println(colorYellow("\n *** Warning: Not Ready for Production ***")) - return newB2GatewayLayer() -} - -// b2Object implements gateway for Minio and BackBlaze B2 compatible object storage servers. -type b2Objects struct { - gatewayUnsupported - mu sync.Mutex - creds auth.Credentials - b2Client *b2.B2 - anonClient *http.Client - ctx context.Context -} - -// newB2GatewayLayer returns b2 gateway layer. 
-func newB2GatewayLayer() (GatewayLayer, error) { +func (g *B2) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) { ctx := context.Background() - creds := globalServerConfig.GetCredential() - - client, err := b2.AuthorizeAccount(ctx, creds.AccessKey, creds.SecretKey, b2.Transport(newCustomHTTPTransport())) + client, err := b2.AuthorizeAccount(ctx, creds.AccessKey, creds.SecretKey, b2.Transport(minio.NewCustomHTTPTransport())) if err != nil { return nil, err } @@ -123,12 +105,28 @@ func newB2GatewayLayer() (GatewayLayer, error) { creds: creds, b2Client: client, anonClient: &http.Client{ - Transport: newCustomHTTPTransport(), + Transport: minio.NewCustomHTTPTransport(), }, ctx: ctx, }, nil } +// Production - Ready for production use? +func (g *B2) Production() bool { + // Not ready for production use just yet. + return false +} + +// b2Object implements gateway for Minio and BackBlaze B2 compatible object storage servers. +type b2Objects struct { + minio.GatewayUnsupported + mu sync.Mutex + creds auth.Credentials + b2Client *b2.B2 + anonClient *http.Client + ctx context.Context +} + // Convert B2 errors to minio object layer errors. func b2ToObjectError(err error, params ...string) error { if err == nil { @@ -139,7 +137,7 @@ func b2ToObjectError(err error, params ...string) error { if !ok { // Code should be fixed if this function is called without doing errors.Trace() // Else handling different situations in this function makes this function complicated. 
- errorIf(err, "Expected type *Error") + minio.ErrorIf(err, "Expected type *Error") return err } @@ -170,24 +168,30 @@ func b2ToObjectError(err error, params ...string) error { switch code { case "duplicate_bucket_name": - err = BucketAlreadyOwnedByYou{Bucket: bucket} + err = minio.BucketAlreadyOwnedByYou{Bucket: bucket} case "bad_request": if object != "" { - err = ObjectNameInvalid{bucket, object} + err = minio.ObjectNameInvalid{ + Bucket: bucket, + Object: object, + } } else if bucket != "" { - err = BucketNotFound{Bucket: bucket} + err = minio.BucketNotFound{Bucket: bucket} } case "bad_bucket_id": - err = BucketNotFound{Bucket: bucket} + err = minio.BucketNotFound{Bucket: bucket} case "file_not_present", "not_found": - err = ObjectNotFound{bucket, object} + err = minio.ObjectNotFound{ + Bucket: bucket, + Object: object, + } case "cannot_delete_non_empty_bucket": - err = BucketNotEmpty{bucket, ""} + err = minio.BucketNotEmpty{Bucket: bucket} } // Special interpretation like this is required for Multipart sessions. if strings.Contains(msg, "No active upload for") && uploadID != "" { - err = InvalidUploadID{uploadID} + err = minio.InvalidUploadID{UploadID: uploadID} } e.Cause = err @@ -202,7 +206,7 @@ func (l *b2Objects) Shutdown() error { } // StorageInfo is not relevant to B2 backend. 
-func (l *b2Objects) StorageInfo() (si StorageInfo) { +func (l *b2Objects) StorageInfo() (si minio.StorageInfo) { return si } @@ -216,7 +220,7 @@ func (l *b2Objects) MakeBucketWithLocation(bucket, location string) error { } func (l *b2Objects) reAuthorizeAccount() error { - client, err := b2.AuthorizeAccount(l.ctx, l.creds.AccessKey, l.creds.SecretKey, b2.Transport(newCustomHTTPTransport())) + client, err := b2.AuthorizeAccount(l.ctx, l.creds.AccessKey, l.creds.SecretKey, b2.Transport(minio.NewCustomHTTPTransport())) if err != nil { return err } @@ -260,29 +264,29 @@ func (l *b2Objects) Bucket(bucket string) (*b2.Bucket, error) { return bkt, nil } } - return nil, errors.Trace(BucketNotFound{Bucket: bucket}) + return nil, errors.Trace(minio.BucketNotFound{Bucket: bucket}) } // GetBucketInfo gets bucket metadata.. -func (l *b2Objects) GetBucketInfo(bucket string) (bi BucketInfo, err error) { +func (l *b2Objects) GetBucketInfo(bucket string) (bi minio.BucketInfo, err error) { if _, err = l.Bucket(bucket); err != nil { return bi, err } - return BucketInfo{ + return minio.BucketInfo{ Name: bucket, Created: time.Unix(0, 0), }, nil } // ListBuckets lists all B2 buckets -func (l *b2Objects) ListBuckets() ([]BucketInfo, error) { +func (l *b2Objects) ListBuckets() ([]minio.BucketInfo, error) { bktList, err := l.listBuckets(nil) if err != nil { return nil, err } - var bktInfo []BucketInfo + var bktInfo []minio.BucketInfo for _, bkt := range bktList { - bktInfo = append(bktInfo, BucketInfo{ + bktInfo = append(bktInfo, minio.BucketInfo{ Name: bkt.Name, Created: time.Unix(0, 0), }) @@ -301,12 +305,11 @@ func (l *b2Objects) DeleteBucket(bucket string) error { } // ListObjects lists all objects in B2 bucket filtered by prefix, returns upto at max 1000 entries at a time. 
-func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { +func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) { bkt, err := l.Bucket(bucket) if err != nil { return loi, err } - loi = ListObjectsInfo{} files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter) if lerr != nil { return loi, b2ToObjectError(errors.Trace(lerr), bucket) @@ -318,12 +321,12 @@ func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, del case "folder": loi.Prefixes = append(loi.Prefixes, file.Name) case "upload": - loi.Objects = append(loi.Objects, ObjectInfo{ + loi.Objects = append(loi.Objects, minio.ObjectInfo{ Bucket: bucket, Name: file.Name, ModTime: file.Timestamp, Size: file.Size, - ETag: toS3ETag(file.Info.ID), + ETag: minio.ToS3ETag(file.Info.ID), ContentType: file.Info.ContentType, UserDefined: file.Info.Info, }) @@ -334,13 +337,12 @@ func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, del // ListObjectsV2 lists all objects in B2 bucket filtered by prefix, returns upto max 1000 entries at a time. func (l *b2Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, - fetchOwner bool, startAfter string) (loi ListObjectsV2Info, err error) { + fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) { // fetchOwner, startAfter are not supported and unused. 
bkt, err := l.Bucket(bucket) if err != nil { return loi, err } - loi = ListObjectsV2Info{} files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, continuationToken, prefix, delimiter) if lerr != nil { return loi, b2ToObjectError(errors.Trace(lerr), bucket) @@ -353,12 +355,12 @@ func (l *b2Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter s case "folder": loi.Prefixes = append(loi.Prefixes, file.Name) case "upload": - loi.Objects = append(loi.Objects, ObjectInfo{ + loi.Objects = append(loi.Objects, minio.ObjectInfo{ Bucket: bucket, Name: file.Name, ModTime: file.Timestamp, Size: file.Size, - ETag: toS3ETag(file.Info.ID), + ETag: minio.ToS3ETag(file.Info.ID), ContentType: file.Info.ContentType, UserDefined: file.Info.Info, }) @@ -388,7 +390,7 @@ func (l *b2Objects) GetObject(bucket string, object string, startOffset int64, l } // GetObjectInfo reads object info and replies back ObjectInfo -func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) { +func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) { bkt, err := l.Bucket(bucket) if err != nil { return objInfo, err @@ -402,16 +404,15 @@ func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI if err != nil { return objInfo, b2ToObjectError(errors.Trace(err), bucket, object) } - objInfo = ObjectInfo{ + return minio.ObjectInfo{ Bucket: bucket, Name: object, - ETag: toS3ETag(fi.ID), + ETag: minio.ToS3ETag(fi.ID), Size: fi.Size, ModTime: fi.Timestamp, ContentType: fi.ContentType, UserDefined: fi.Info, - } - return objInfo, nil + }, nil } // In B2 - You must always include the X-Bz-Content-Sha1 header with @@ -421,10 +422,8 @@ func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI // (3) the string do_not_verify. 
// For more reference - https://www.backblaze.com/b2/docs/uploading.html // -const ( - sha1NoVerify = "do_not_verify" - sha1AtEOF = "hex_digits_at_end" -) +// In our case we are going to use (2) option +const sha1AtEOF = "hex_digits_at_end" // With the second option mentioned above, you append the 40-character hex sha1 // to the end of the request body, immediately after the contents of the file @@ -437,20 +436,20 @@ const ( // Additionally this reader also verifies Hash encapsulated inside hash.Reader // at io.EOF if the verification failed we return an error and do not send // the content to server. -func newB2Reader(r *h2.Reader, size int64) *B2Reader { - return &B2Reader{ +func newB2Reader(r *h2.Reader, size int64) *Reader { + return &Reader{ r: r, size: size, sha1Hash: sha1.New(), } } -// B2Reader - is a Reader wraps the hash.Reader which will emit out the sha1 +// Reader - is a Reader wraps the hash.Reader which will emit out the sha1 // hex digits at io.EOF. It also means that your overall content size is // now original size + 40 bytes. Additionally this reader also verifies // Hash encapsulated inside hash.Reader at io.EOF if the verification // failed we return an error and do not send the content to server. -type B2Reader struct { +type Reader struct { r *h2.Reader size int64 sha1Hash hash.Hash @@ -460,8 +459,8 @@ type B2Reader struct { } // Size - Returns the total size of Reader. -func (nb *B2Reader) Size() int64 { return nb.size + 40 } -func (nb *B2Reader) Read(p []byte) (int, error) { +func (nb *Reader) Size() int64 { return nb.size + 40 } +func (nb *Reader) Read(p []byte) (int, error) { if nb.isEOF { return nb.buf.Read(p) } @@ -480,8 +479,7 @@ func (nb *B2Reader) Read(p []byte) (int, error) { } // PutObject uploads the single upload to B2 backend by using *b2_upload_file* API, uploads upto 5GiB. 
-func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, metadata map[string]string) (ObjectInfo, error) { - var objInfo ObjectInfo +func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) { bkt, err := l.Bucket(bucket) if err != nil { return objInfo, err @@ -508,10 +506,10 @@ func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, met return objInfo, b2ToObjectError(errors.Trace(err), bucket, object) } - return ObjectInfo{ + return minio.ObjectInfo{ Bucket: bucket, Name: object, - ETag: toS3ETag(fi.ID), + ETag: minio.ToS3ETag(fi.ID), Size: fi.Size, ModTime: fi.Timestamp, ContentType: fi.ContentType, @@ -521,8 +519,8 @@ func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, met // CopyObject copies a blob from source container to destination container. func (l *b2Objects) CopyObject(srcBucket string, srcObject string, dstBucket string, - dstObject string, metadata map[string]string) (objInfo ObjectInfo, err error) { - return objInfo, errors.Trace(NotImplemented{}) + dstObject string, metadata map[string]string) (objInfo minio.ObjectInfo, err error) { + return objInfo, errors.Trace(minio.NotImplemented{}) } // DeleteObject deletes a blob in bucket @@ -543,7 +541,7 @@ func (l *b2Objects) DeleteObject(bucket string, object string) error { // ListMultipartUploads lists all multipart uploads. func (l *b2Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, - delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) { + delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, err error) { // keyMarker, prefix, delimiter are all ignored, Backblaze B2 doesn't support any // of these parameters only equivalent parameter is uploadIDMarker. 
bkt, err := l.Bucket(bucket) @@ -559,7 +557,7 @@ func (l *b2Objects) ListMultipartUploads(bucket string, prefix string, keyMarker if err != nil { return lmi, b2ToObjectError(errors.Trace(err), bucket) } - lmi = ListMultipartsInfo{ + lmi = minio.ListMultipartsInfo{ MaxUploads: maxUploads, } if nextMarker != "" { @@ -567,7 +565,7 @@ func (l *b2Objects) ListMultipartUploads(bucket string, prefix string, keyMarker lmi.NextUploadIDMarker = nextMarker } for _, largeFile := range largeFiles { - lmi.Uploads = append(lmi.Uploads, MultipartInfo{ + lmi.Uploads = append(lmi.Uploads, minio.MultipartInfo{ Object: largeFile.Name, UploadID: largeFile.ID, Initiated: largeFile.Timestamp, @@ -599,7 +597,7 @@ func (l *b2Objects) NewMultipartUpload(bucket string, object string, metadata ma } // PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API. -func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *h2.Reader) (pi PartInfo, err error) { +func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *h2.Reader) (pi minio.PartInfo, err error) { bkt, err := l.Bucket(bucket) if err != nil { return pi, err @@ -616,21 +614,21 @@ func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string, return pi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID) } - return PartInfo{ + return minio.PartInfo{ PartNumber: partID, - LastModified: UTCNow(), - ETag: toS3ETag(sha1), + LastModified: minio.UTCNow(), + ETag: minio.ToS3ETag(sha1), Size: data.Size(), }, nil } // ListObjectParts returns all object parts for specified object in specified bucket, uses B2's LargeFile upload API. 
-func (l *b2Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) { +func (l *b2Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi minio.ListPartsInfo, err error) { bkt, err := l.Bucket(bucket) if err != nil { return lpi, err } - lpi = ListPartsInfo{ + lpi = minio.ListPartsInfo{ Bucket: bucket, Object: object, UploadID: uploadID, @@ -648,9 +646,9 @@ func (l *b2Objects) ListObjectParts(bucket string, object string, uploadID strin lpi.NextPartNumberMarker = next } for _, part := range partsList { - lpi.Parts = append(lpi.Parts, PartInfo{ + lpi.Parts = append(lpi.Parts, minio.PartInfo{ PartNumber: part.Number, - ETag: toS3ETag(part.SHA1), + ETag: minio.ToS3ETag(part.SHA1), Size: part.Size, }) } @@ -668,7 +666,7 @@ func (l *b2Objects) AbortMultipartUpload(bucket string, object string, uploadID } // CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API. -func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, err error) { +func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, err error) { bkt, err := l.Bucket(bucket) if err != nil { return oi, err @@ -678,7 +676,7 @@ func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, upload // B2 requires contigous part numbers starting with 1, they do not support // hand picking part numbers, we return an S3 compatible error instead. if i+1 != uploadedPart.PartNumber { - return oi, b2ToObjectError(errors.Trace(InvalidPart{}), bucket, object, uploadID) + return oi, b2ToObjectError(errors.Trace(minio.InvalidPart{}), bucket, object, uploadID) } // Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag. 
@@ -697,23 +695,23 @@ func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, upload // bucketType.AllPrivate - bucketTypePrivate means that you need an authorization token to download them. // Default is AllPrivate for all buckets. func (l *b2Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { - var policies []BucketAccessPolicy + var policies []minio.BucketAccessPolicy for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) { - policies = append(policies, BucketAccessPolicy{ + policies = append(policies, minio.BucketAccessPolicy{ Prefix: prefix, Policy: policy, }) } prefix := bucket + "/*" // For all objects inside the bucket. if len(policies) != 1 { - return errors.Trace(NotImplemented{}) + return errors.Trace(minio.NotImplemented{}) } if policies[0].Prefix != prefix { - return errors.Trace(NotImplemented{}) + return errors.Trace(minio.NotImplemented{}) } if policies[0].Policy != policy.BucketPolicyReadOnly { - return errors.Trace(NotImplemented{}) + return errors.Trace(minio.NotImplemented{}) } bkt, err := l.Bucket(bucket) if err != nil { @@ -739,7 +737,7 @@ func (l *b2Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, // bkt.Type can also be snapshot, but it is only allowed through B2 browser console, // just return back as policy not found for all cases. // CreateBucket always sets the value to allPrivate by default. - return policy.BucketAccessPolicy{}, errors.Trace(PolicyNotFound{Bucket: bucket}) + return policy.BucketAccessPolicy{}, errors.Trace(minio.PolicyNotFound{Bucket: bucket}) } // DeleteBucketPolicies - resets the bucketType of bucket on B2 to 'allPrivate'. 
diff --git a/cmd/gateway-b2_test.go b/cmd/gateway/b2/gateway-b2_test.go similarity index 91% rename from cmd/gateway-b2_test.go rename to cmd/gateway/b2/gateway-b2_test.go index 2a50aaba4..b9de7cccc 100644 --- a/cmd/gateway-b2_test.go +++ b/cmd/gateway/b2/gateway-b2_test.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package cmd +package b2 import ( "fmt" @@ -23,6 +23,8 @@ import ( b2 "github.com/minio/blazer/base" "github.com/minio/minio/pkg/errors" + + minio "github.com/minio/minio/cmd" ) // Tests headerToObjectInfo @@ -30,7 +32,7 @@ func TestHeaderToObjectInfo(t *testing.T) { testCases := []struct { bucket, object string header http.Header - objInfo ObjectInfo + objInfo minio.ObjectInfo }{ { bucket: "bucket", @@ -42,7 +44,7 @@ func TestHeaderToObjectInfo(t *testing.T) { "X-Bz-Info-X-Amz-Meta-1": []string{"test1"}, "X-Bz-File-Id": []string{"xxxxx"}, }, - objInfo: ObjectInfo{ + objInfo: minio.ObjectInfo{ Bucket: "bucket", Name: "object", ContentType: "application/javascript", @@ -127,19 +129,23 @@ func TestB2ObjectError(t *testing.T) { []string{"bucket"}, errors.Trace(b2.Error{ StatusCode: 1, Code: "duplicate_bucket_name", - }), BucketAlreadyOwnedByYou{Bucket: "bucket"}, + }), minio.BucketAlreadyOwnedByYou{ + Bucket: "bucket", + }, }, { []string{"bucket"}, errors.Trace(b2.Error{ StatusCode: 1, Code: "bad_request", - }), BucketNotFound{Bucket: "bucket"}, + }), minio.BucketNotFound{ + Bucket: "bucket", + }, }, { []string{"bucket", "object"}, errors.Trace(b2.Error{ StatusCode: 1, Code: "bad_request", - }), ObjectNameInvalid{ + }), minio.ObjectNameInvalid{ Bucket: "bucket", Object: "object", }, @@ -148,13 +154,13 @@ func TestB2ObjectError(t *testing.T) { []string{"bucket"}, errors.Trace(b2.Error{ StatusCode: 1, Code: "bad_bucket_id", - }), BucketNotFound{Bucket: "bucket"}, + }), minio.BucketNotFound{Bucket: "bucket"}, }, { []string{"bucket", "object"}, errors.Trace(b2.Error{ StatusCode: 1, Code: "file_not_present", - }), ObjectNotFound{ + }), 
minio.ObjectNotFound{ Bucket: "bucket", Object: "object", }, @@ -163,7 +169,7 @@ func TestB2ObjectError(t *testing.T) { []string{"bucket", "object"}, errors.Trace(b2.Error{ StatusCode: 1, Code: "not_found", - }), ObjectNotFound{ + }), minio.ObjectNotFound{ Bucket: "bucket", Object: "object", }, @@ -172,13 +178,15 @@ func TestB2ObjectError(t *testing.T) { []string{"bucket"}, errors.Trace(b2.Error{ StatusCode: 1, Code: "cannot_delete_non_empty_bucket", - }), BucketNotEmpty{Bucket: "bucket"}, + }), minio.BucketNotEmpty{ + Bucket: "bucket", + }, }, { []string{"bucket", "object", "uploadID"}, errors.Trace(b2.Error{ StatusCode: 1, Message: "No active upload for", - }), InvalidUploadID{ + }), minio.InvalidUploadID{ UploadID: "uploadID", }, }, diff --git a/cmd/gateway/gateway.go b/cmd/gateway/gateway.go new file mode 100644 index 000000000..a50ae1c1c --- /dev/null +++ b/cmd/gateway/gateway.go @@ -0,0 +1,26 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package gateway + +import ( + // Import all gateways. 
+ _ "github.com/minio/minio/cmd/gateway/azure" + _ "github.com/minio/minio/cmd/gateway/b2" + _ "github.com/minio/minio/cmd/gateway/gcs" + _ "github.com/minio/minio/cmd/gateway/s3" + _ "github.com/minio/minio/cmd/gateway/sia" +) diff --git a/cmd/gateway-gcs-anonymous.go b/cmd/gateway/gcs/gateway-gcs-anonymous.go similarity index 78% rename from cmd/gateway-gcs-anonymous.go rename to cmd/gateway/gcs/gateway-gcs-anonymous.go index 723253315..c89cdc61f 100644 --- a/cmd/gateway-gcs-anonymous.go +++ b/cmd/gateway/gcs/gateway-gcs-anonymous.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package cmd +package gcs import ( "fmt" @@ -24,6 +24,8 @@ import ( "time" "github.com/minio/minio/pkg/errors" + + minio "github.com/minio/minio/cmd" ) func toGCSPublicURL(bucket, object string) string { @@ -50,7 +52,7 @@ func (l *gcsGateway) AnonGetObject(bucket string, object string, startOffset int defer resp.Body.Close() if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK { - return gcsToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) + return gcsToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) } _, err = io.Copy(writer, resp.Body) @@ -58,7 +60,7 @@ func (l *gcsGateway) AnonGetObject(bucket string, object string, startOffset int } // AnonGetObjectInfo - Get object info anonymously -func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) { +func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) { resp, err := http.Head(toGCSPublicURL(bucket, object)) if err != nil { return objInfo, gcsToObjectError(errors.Trace(err), bucket, object) @@ -66,7 +68,7 @@ func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo Ob defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return objInfo, 
gcsToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) + return objInfo, gcsToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) } var contentLength int64 @@ -74,7 +76,7 @@ func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo Ob if contentLengthStr != "" { contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) if err != nil { - return objInfo, gcsToObjectError(errors.Trace(errUnexpected), bucket, object) + return objInfo, gcsToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object) } } @@ -98,28 +100,28 @@ func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo Ob } // AnonListObjects - List objects anonymously -func (l *gcsGateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) { +func (l *gcsGateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (minio.ListObjectsInfo, error) { result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys) if err != nil { - return ListObjectsInfo{}, s3ToObjectError(errors.Trace(err), bucket) + return minio.ListObjectsInfo{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket) } - return fromMinioClientListBucketResult(bucket, result), nil + return minio.FromMinioClientListBucketResult(bucket, result), nil } // AnonListObjectsV2 - List objects in V2 mode, anonymously -func (l *gcsGateway) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) { +func (l *gcsGateway) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (minio.ListObjectsV2Info, error) { // Request V1 List Object to the backend result, err := l.anonClient.ListObjects(bucket, prefix, continuationToken, 
delimiter, maxKeys) if err != nil { - return ListObjectsV2Info{}, s3ToObjectError(errors.Trace(err), bucket) + return minio.ListObjectsV2Info{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket) } // translate V1 Result to V2Info - return fromMinioClientListBucketResultToV2Info(bucket, result), nil + return minio.FromMinioClientListBucketResultToV2Info(bucket, result), nil } // AnonGetBucketInfo - Get bucket metadata anonymously. -func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) { +func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo minio.BucketInfo, err error) { resp, err := http.Head(toGCSPublicURL(bucket, "")) if err != nil { return bucketInfo, gcsToObjectError(errors.Trace(err)) @@ -128,7 +130,7 @@ func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, er defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return bucketInfo, gcsToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket)), bucket) + return bucketInfo, gcsToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket)), bucket) } t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified")) @@ -137,7 +139,7 @@ func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, er } // Last-Modified date being returned by GCS - return BucketInfo{ + return minio.BucketInfo{ Name: bucket, Created: t, }, nil diff --git a/cmd/gateway-gcs.go b/cmd/gateway/gcs/gateway-gcs.go similarity index 77% rename from cmd/gateway-gcs.go rename to cmd/gateway/gcs/gateway-gcs.go index d34f3cb90..81db62320 100644 --- a/cmd/gateway-gcs.go +++ b/cmd/gateway/gcs/gateway-gcs.go @@ -14,13 +14,12 @@ * limitations under the License. 
*/ -package cmd +package gcs import ( "context" "encoding/base64" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -31,30 +30,37 @@ import ( "time" "cloud.google.com/go/storage" + humanize "github.com/dustin/go-humanize" "github.com/minio/cli" - minio "github.com/minio/minio-go" "github.com/minio/minio-go/pkg/policy" + "github.com/minio/minio/pkg/auth" + "github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/hash" + "google.golang.org/api/googleapi" "google.golang.org/api/iterator" "google.golang.org/api/option" - errors2 "github.com/minio/minio/pkg/errors" + miniogo "github.com/minio/minio-go" + minio "github.com/minio/minio/cmd" ) var ( // Project ID format is not valid. - errGCSInvalidProjectID = errors.New("GCS project id is either empty or invalid") + errGCSInvalidProjectID = fmt.Errorf("GCS project id is either empty or invalid") // Project ID not found - errGCSProjectIDNotFound = errors.New("unknown project id") + errGCSProjectIDNotFound = fmt.Errorf("Unknown project id") + + // Invalid format. + errGCSFormat = fmt.Errorf("Unknown format") ) const ( // Path where multipart objects are saved. // If we change the backend format we will use a different url path like /multipart/v2 // but we will not migrate old data. - gcsMinioMultipartPathV1 = globalMinioSysTmp + "multipart/v1" + gcsMinioMultipartPathV1 = minio.GatewayMinioSysTmp + "multipart/v1" // Multipart meta file. 
 gcsMinioMultipartMeta = "gcs.json" @@ -116,12 +122,11 @@ EXAMPLES: ` - MustRegisterGatewayCommand(cli.Command{ + minio.RegisterGatewayCommand(cli.Command{ Name: gcsBackend, Usage: "Google Cloud Storage.", Action: gcsGatewayMain, CustomHelpTemplate: gcsGatewayTemplate, - Flags: append(serverFlags, globalFlags...), HideHelpCommand: true, }) } @@ -130,31 +135,71 @@ EXAMPLES: func gcsGatewayMain(ctx *cli.Context) { projectID := ctx.Args().First() if projectID == "" && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" { - errorIf(errGCSProjectIDNotFound, "project-id should be provided as argument or GOOGLE_APPLICATION_CREDENTIALS should be set with path to credentials.json") + minio.ErrorIf(errGCSProjectIDNotFound, "project-id should be provided as argument or GOOGLE_APPLICATION_CREDENTIALS should be set with path to credentials.json") cli.ShowCommandHelpAndExit(ctx, "gcs", 1) } if projectID != "" && !isValidGCSProjectIDFormat(projectID) { - errorIf(errGCSInvalidProjectID, "Unable to start GCS gateway with %s", ctx.Args().First()) + minio.ErrorIf(errGCSInvalidProjectID, "Unable to start GCS gateway with %s", ctx.Args().First()) cli.ShowCommandHelpAndExit(ctx, "gcs", 1) } - startGateway(ctx, &GCSGateway{projectID}) + minio.StartGateway(ctx, &GCS{projectID}) } -// GCSGateway implements Gateway. -type GCSGateway struct { +// GCS implements Gateway. +type GCS struct { projectID string } // Name returns the name of gcs gatewaylayer. -func (g *GCSGateway) Name() string { +func (g *GCS) Name() string { return gcsBackend } // NewGatewayLayer returns gcs gatewaylayer. 
-func (g *GCSGateway) NewGatewayLayer() (GatewayLayer, error) { - log.Println(colorYellow("\n *** Warning: Not Ready for Production ***")) - return newGCSGatewayLayer(g.projectID) +func (g *GCS) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) { + ctx := context.Background() + + var err error + if g.projectID == "" { + // If project ID is not provided on command line, we figure it out + // from the credentials.json file. + g.projectID, err = gcsParseProjectID(os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")) + if err != nil { + return nil, err + } + } + + // Initialize a GCS client. + // Send user-agent in this format for Google to obtain usage insights while participating in the + // Google Cloud Technology Partners (https://cloud.google.com/partners/) + client, err := storage.NewClient(ctx, option.WithUserAgent(fmt.Sprintf("Minio/%s (GPN:Minio;)", minio.Version))) + if err != nil { + return nil, err + } + + // Initialize an anonymous client with minio core APIs. + anonClient, err := miniogo.NewCore(googleStorageEndpoint, "", "", true) + if err != nil { + return nil, err + } + anonClient.SetCustomTransport(minio.NewCustomHTTPTransport()) + + gcs := &gcsGateway{ + client: client, + projectID: g.projectID, + ctx: ctx, + anonClient: anonClient, + } + + // Start background process to cleanup old files in minio.sys.tmp + go gcs.CleanupGCSMinioSysTmp() + return gcs, nil +} + +// Production - FIXME: GCS is not production ready yet. +func (g *GCS) Production() bool { + return false } // Stored in gcs.json - Contents of this file is not used anywhere. 
It can be @@ -181,11 +226,11 @@ func gcsToObjectError(err error, params ...string) error { return nil } - e, ok := err.(*errors2.Error) + e, ok := err.(*errors.Error) if !ok { - // Code should be fixed if this function is called without doing errors2.Trace() + // Code should be fixed if this function is called without doing errors.Trace() // Else handling different situations in this function makes this function complicated. - errorIf(err, "Expected type *Error") + minio.ErrorIf(err, "Expected type *Error") return err } @@ -207,18 +252,18 @@ func gcsToObjectError(err error, params ...string) error { // in some cases just a plain error is being returned switch err.Error() { case "storage: bucket doesn't exist": - err = BucketNotFound{ + err = minio.BucketNotFound{ Bucket: bucket, } e.Cause = err return e case "storage: object doesn't exist": if uploadID != "" { - err = InvalidUploadID{ + err = minio.InvalidUploadID{ UploadID: uploadID, } } else { - err = ObjectNotFound{ + err = minio.ObjectNotFound{ Bucket: bucket, Object: object, } @@ -250,33 +295,33 @@ func gcsToObjectError(err error, params ...string) error { case "keyInvalid": fallthrough case "forbidden": - err = PrefixAccessDenied{ + err = minio.PrefixAccessDenied{ Bucket: bucket, Object: object, } case "invalid": - err = BucketNameInvalid{ + err = minio.BucketNameInvalid{ Bucket: bucket, } case "notFound": if object != "" { - err = ObjectNotFound{ + err = minio.ObjectNotFound{ Bucket: bucket, Object: object, } break } - err = BucketNotFound{Bucket: bucket} + err = minio.BucketNotFound{Bucket: bucket} case "conflict": if message == "You already own this bucket. Please select another name." { - err = BucketAlreadyOwnedByYou{Bucket: bucket} + err = minio.BucketAlreadyOwnedByYou{Bucket: bucket} break } if message == "Sorry, that name is not available. Please try a different one." 
{ - err = BucketAlreadyExists{Bucket: bucket} + err = minio.BucketAlreadyExists{Bucket: bucket} break } - err = BucketNotEmpty{Bucket: bucket} + err = minio.BucketNotEmpty{Bucket: bucket} default: err = fmt.Errorf("Unsupported error reason: %s", reason) } @@ -299,9 +344,9 @@ func isValidGCSProjectIDFormat(projectID string) bool { // gcsGateway - Implements gateway for Minio and GCS compatible object storage servers. type gcsGateway struct { - gatewayUnsupported + minio.GatewayUnsupported client *storage.Client - anonClient *minio.Core + anonClient *miniogo.Core projectID string ctx context.Context } @@ -321,54 +366,14 @@ func gcsParseProjectID(credsFile string) (projectID string, err error) { return googleCreds[gcsProjectIDKey], err } -// newGCSGatewayLayer returns gcs gatewaylayer -func newGCSGatewayLayer(projectID string) (GatewayLayer, error) { - ctx := context.Background() - - var err error - if projectID == "" { - // If project ID is not provided on command line, we figure it out - // from the credentials.json file. - projectID, err = gcsParseProjectID(os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")) - if err != nil { - return nil, err - } - } - - // Initialize a GCS client. - // Send user-agent in this format for Google to obtain usage insights while participating in the - // Google Cloud Technology Partners (https://cloud.google.com/partners/) - client, err := storage.NewClient(ctx, option.WithUserAgent(fmt.Sprintf("Minio/%s (GPN:Minio;)", Version))) - if err != nil { - return nil, err - } - - // Initialize a anonymous client with minio core APIs. 
- anonClient, err := minio.NewCore(googleStorageEndpoint, "", "", true) - if err != nil { - return nil, err - } - anonClient.SetCustomTransport(newCustomHTTPTransport()) - - gateway := &gcsGateway{ - client: client, - projectID: projectID, - ctx: ctx, - anonClient: anonClient, - } - // Start background process to cleanup old files in minio.sys.tmp - go gateway.CleanupGCSMinioSysTmp() - return gateway, nil -} - // Cleanup old files in minio.sys.tmp of the given bucket. func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(bucket string) { - it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Prefix: globalMinioSysTmp, Versions: false}) + it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Prefix: minio.GatewayMinioSysTmp, Versions: false}) for { attrs, err := it.Next() if err != nil { if err != iterator.Done { - errorIf(err, "Object listing error on bucket %s during purging of old files in minio.sys.tmp", bucket) + minio.ErrorIf(err, "Object listing error on bucket %s during purging of old files in minio.sys.tmp", bucket) } return } @@ -376,7 +381,7 @@ func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(bucket string) { // Delete files older than 2 weeks. err := l.client.Bucket(bucket).Object(attrs.Name).Delete(l.ctx) if err != nil { - errorIf(err, "Unable to delete %s/%s during purging of old files in minio.sys.tmp", bucket, attrs.Name) + minio.ErrorIf(err, "Unable to delete %s/%s during purging of old files in minio.sys.tmp", bucket, attrs.Name) return } } @@ -391,7 +396,7 @@ func (l *gcsGateway) CleanupGCSMinioSysTmp() { attrs, err := it.Next() if err != nil { if err != iterator.Done { - errorIf(err, "Bucket listing error during purging of old files in minio.sys.tmp") + minio.ErrorIf(err, "Bucket listing error during purging of old files in minio.sys.tmp") } break } @@ -409,8 +414,8 @@ func (l *gcsGateway) Shutdown() error { } // StorageInfo - Not relevant to GCS backend. 
-func (l *gcsGateway) StorageInfo() StorageInfo { - return StorageInfo{} +func (l *gcsGateway) StorageInfo() minio.StorageInfo { + return minio.StorageInfo{} } // MakeBucketWithLocation - Create a new container on GCS backend. @@ -426,24 +431,24 @@ func (l *gcsGateway) MakeBucketWithLocation(bucket, location string) error { Location: location, }) - return gcsToObjectError(errors2.Trace(err), bucket) + return gcsToObjectError(errors.Trace(err), bucket) } // GetBucketInfo - Get bucket metadata.. -func (l *gcsGateway) GetBucketInfo(bucket string) (BucketInfo, error) { +func (l *gcsGateway) GetBucketInfo(bucket string) (minio.BucketInfo, error) { attrs, err := l.client.Bucket(bucket).Attrs(l.ctx) if err != nil { - return BucketInfo{}, gcsToObjectError(errors2.Trace(err), bucket) + return minio.BucketInfo{}, gcsToObjectError(errors.Trace(err), bucket) } - return BucketInfo{ + return minio.BucketInfo{ Name: attrs.Name, Created: attrs.Created, }, nil } // ListBuckets lists all buckets under your project-id on GCS. -func (l *gcsGateway) ListBuckets() (buckets []BucketInfo, err error) { +func (l *gcsGateway) ListBuckets() (buckets []minio.BucketInfo, err error) { it := l.client.Buckets(l.ctx, l.projectID) // Iterate and capture all the buckets. @@ -454,10 +459,10 @@ func (l *gcsGateway) ListBuckets() (buckets []BucketInfo, err error) { } if ierr != nil { - return buckets, gcsToObjectError(errors2.Trace(ierr)) + return buckets, gcsToObjectError(errors.Trace(ierr)) } - buckets = append(buckets, BucketInfo{ + buckets = append(buckets, minio.BucketInfo{ Name: attrs.Name, Created: attrs.Created, }) @@ -468,7 +473,10 @@ func (l *gcsGateway) ListBuckets() (buckets []BucketInfo, err error) { // DeleteBucket delete a bucket on GCS. 
func (l *gcsGateway) DeleteBucket(bucket string) error { - itObject := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Delimiter: slashSeparator, Versions: false}) + itObject := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{ + Delimiter: "/", + Versions: false, + }) // We list the bucket and if we find any objects we return BucketNotEmpty error. If we // find only "minio.sys.tmp/" then we remove it before deleting the bucket. gcsMinioPathFound := false @@ -479,9 +487,9 @@ func (l *gcsGateway) DeleteBucket(bucket string) error { break } if err != nil { - return gcsToObjectError(errors2.Trace(err)) + return gcsToObjectError(errors.Trace(err)) } - if objAttrs.Prefix == globalMinioSysTmp { + if objAttrs.Prefix == minio.GatewayMinioSysTmp { gcsMinioPathFound = true continue } @@ -489,27 +497,27 @@ func (l *gcsGateway) DeleteBucket(bucket string) error { break } if nonGCSMinioPathFound { - return gcsToObjectError(errors2.Trace(BucketNotEmpty{})) + return gcsToObjectError(errors.Trace(minio.BucketNotEmpty{})) } if gcsMinioPathFound { // Remove minio.sys.tmp before deleting the bucket. 
- itObject = l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Versions: false, Prefix: globalMinioSysTmp}) + itObject = l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Versions: false, Prefix: minio.GatewayMinioSysTmp}) for { objAttrs, err := itObject.Next() if err == iterator.Done { break } if err != nil { - return gcsToObjectError(errors2.Trace(err)) + return gcsToObjectError(errors.Trace(err)) } err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(l.ctx) if err != nil { - return gcsToObjectError(errors2.Trace(err)) + return gcsToObjectError(errors.Trace(err)) } } } err := l.client.Bucket(bucket).Delete(l.ctx) - return gcsToObjectError(errors2.Trace(err), bucket) + return gcsToObjectError(errors.Trace(err), bucket) } func toGCSPageToken(name string) string { @@ -531,13 +539,13 @@ func toGCSPageToken(name string) string { } // Returns true if marker was returned by GCS, i.e prefixed with -// ##minio by minio gcs gateway. +// ##minio by minio gcs minio. func isGCSMarker(marker string) bool { return strings.HasPrefix(marker, gcsTokenPrefix) } // ListObjects - lists all blobs in GCS bucket filtered by prefix -func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) { +func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (minio.ListObjectsInfo, error) { it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{ Delimiter: delimiter, Prefix: prefix, @@ -570,7 +578,7 @@ func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, de it.PageInfo().MaxSize = maxKeys - objects := []ObjectInfo{} + objects := []minio.ObjectInfo{} for { if len(objects) >= maxKeys { // check if there is one next object and @@ -578,7 +586,7 @@ func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, de // metadata folder, then just break // otherwise we've truncated the output attrs, _ := it.Next() - if 
attrs != nil && attrs.Prefix == globalMinioSysTmp { + if attrs != nil && attrs.Prefix == minio.GatewayMinioSysTmp { break } @@ -591,21 +599,21 @@ func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, de break } if err != nil { - return ListObjectsInfo{}, gcsToObjectError(errors2.Trace(err), bucket, prefix) + return minio.ListObjectsInfo{}, gcsToObjectError(errors.Trace(err), bucket, prefix) } nextMarker = toGCSPageToken(attrs.Name) - if attrs.Prefix == globalMinioSysTmp { + if attrs.Prefix == minio.GatewayMinioSysTmp { // We don't return our metadata prefix. continue } - if !strings.HasPrefix(prefix, globalMinioSysTmp) { + if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) { // If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries. // But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/ // which will be helpful to observe the "directory structure" for debugging purposes. - if strings.HasPrefix(attrs.Prefix, globalMinioSysTmp) || - strings.HasPrefix(attrs.Name, globalMinioSysTmp) { + if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) || + strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) { continue } } @@ -619,19 +627,19 @@ func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, de continue } - objects = append(objects, ObjectInfo{ + objects = append(objects, minio.ObjectInfo{ Name: attrs.Name, Bucket: attrs.Bucket, ModTime: attrs.Updated, Size: attrs.Size, - ETag: toS3ETag(fmt.Sprintf("%d", attrs.CRC32C)), + ETag: minio.ToS3ETag(fmt.Sprintf("%d", attrs.CRC32C)), UserDefined: attrs.Metadata, ContentType: attrs.ContentType, ContentEncoding: attrs.ContentEncoding, }) } - return ListObjectsInfo{ + return minio.ListObjectsInfo{ IsTruncated: isTruncated, NextMarker: gcsTokenPrefix + nextMarker, Prefixes: prefixes, @@ -640,8 +648,7 @@ func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, de } // ListObjectsV2 - lists all 
blobs in GCS bucket filtered by prefix -func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) { - +func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (minio.ListObjectsV2Info, error) { it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{ Delimiter: delimiter, Prefix: prefix, @@ -664,8 +671,8 @@ func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken, delimiter } } - prefixes := []string{} - objects := []ObjectInfo{} + var prefixes []string + var objects []minio.ObjectInfo for { attrs, err := it.Next() @@ -674,19 +681,19 @@ func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken, delimiter } if err != nil { - return ListObjectsV2Info{}, gcsToObjectError(errors2.Trace(err), bucket, prefix) + return minio.ListObjectsV2Info{}, gcsToObjectError(errors.Trace(err), bucket, prefix) } - if attrs.Prefix == globalMinioSysTmp { + if attrs.Prefix == minio.GatewayMinioSysTmp { // We don't return our metadata prefix. continue } - if !strings.HasPrefix(prefix, globalMinioSysTmp) { + if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) { // If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries. // But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/ // which will be helpful to observe the "directory structure" for debugging purposes. 
- if strings.HasPrefix(attrs.Prefix, globalMinioSysTmp) || - strings.HasPrefix(attrs.Name, globalMinioSysTmp) { + if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) || + strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) { continue } } @@ -699,7 +706,7 @@ func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken, delimiter objects = append(objects, fromGCSAttrsToObjectInfo(attrs)) } - return ListObjectsV2Info{ + return minio.ListObjectsV2Info{ IsTruncated: isTruncated, ContinuationToken: continuationToken, NextContinuationToken: continuationToken, @@ -718,55 +725,33 @@ func (l *gcsGateway) GetObject(bucket string, key string, startOffset int64, len // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, // otherwise gcs will just return object not exist in case of non-existing bucket if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { - return gcsToObjectError(errors2.Trace(err), bucket) + return gcsToObjectError(errors.Trace(err), bucket) } object := l.client.Bucket(bucket).Object(key) r, err := object.NewRangeReader(l.ctx, startOffset, length) if err != nil { - return gcsToObjectError(errors2.Trace(err), bucket, key) + return gcsToObjectError(errors.Trace(err), bucket, key) } defer r.Close() if _, err := io.Copy(writer, r); err != nil { - return gcsToObjectError(errors2.Trace(err), bucket, key) + return gcsToObjectError(errors.Trace(err), bucket, key) } return nil } -// fromMinioClientListBucketResultToV2Info converts minio ListBucketResult to ListObjectsV2Info -func fromMinioClientListBucketResultToV2Info(bucket string, result minio.ListBucketResult) ListObjectsV2Info { - objects := make([]ObjectInfo, len(result.Contents)) - - for i, oi := range result.Contents { - objects[i] = fromMinioClientObjectInfo(bucket, oi) - } - - prefixes := make([]string, len(result.CommonPrefixes)) - for i, p := range result.CommonPrefixes { - prefixes[i] = p.Prefix - } - - return ListObjectsV2Info{ - IsTruncated: 
result.IsTruncated, - Prefixes: prefixes, - Objects: objects, - ContinuationToken: result.Marker, - NextContinuationToken: result.NextMarker, - } -} - // fromGCSAttrsToObjectInfo converts GCS BucketAttrs to gateway ObjectInfo -func fromGCSAttrsToObjectInfo(attrs *storage.ObjectAttrs) ObjectInfo { +func fromGCSAttrsToObjectInfo(attrs *storage.ObjectAttrs) minio.ObjectInfo { // All google cloud storage objects have a CRC32c hash, whereas composite objects may not have a MD5 hash // Refer https://cloud.google.com/storage/docs/hashes-etags. Use CRC32C for ETag - return ObjectInfo{ + return minio.ObjectInfo{ Name: attrs.Name, Bucket: attrs.Bucket, ModTime: attrs.Updated, Size: attrs.Size, - ETag: toS3ETag(fmt.Sprintf("%d", attrs.CRC32C)), + ETag: minio.ToS3ETag(fmt.Sprintf("%d", attrs.CRC32C)), UserDefined: attrs.Metadata, ContentType: attrs.ContentType, ContentEncoding: attrs.ContentEncoding, @@ -774,27 +759,27 @@ func fromGCSAttrsToObjectInfo(attrs *storage.ObjectAttrs) ObjectInfo { } // GetObjectInfo - reads object info and replies back ObjectInfo -func (l *gcsGateway) GetObjectInfo(bucket string, object string) (ObjectInfo, error) { +func (l *gcsGateway) GetObjectInfo(bucket string, object string) (minio.ObjectInfo, error) { // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, // otherwise gcs will just return object not exist in case of non-existing bucket if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { - return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket) } attrs, err := l.client.Bucket(bucket).Object(object).Attrs(l.ctx) if err != nil { - return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, object) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, object) } return fromGCSAttrsToObjectInfo(attrs), nil } // PutObject - Create a new object with the incoming data, -func (l *gcsGateway) 
PutObject(bucket string, key string, data *hash.Reader, metadata map[string]string) (ObjectInfo, error) { +func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, metadata map[string]string) (minio.ObjectInfo, error) { // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, // otherwise gcs will just return object not exist in case of non-existing bucket if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { - return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket) } object := l.client.Bucket(bucket).Object(key) @@ -808,7 +793,7 @@ func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, met if _, err := io.Copy(w, data); err != nil { // Close the object writer upon error. w.CloseWithError(err) - return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key) } // Close the object writer upon success. @@ -816,7 +801,7 @@ func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, met attrs, err := object.Attrs(l.ctx) if err != nil { - return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key) } return fromGCSAttrsToObjectInfo(attrs), nil @@ -824,7 +809,7 @@ func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, met // CopyObject - Copies a blob from source container to destination container. 
func (l *gcsGateway) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, - metadata map[string]string) (ObjectInfo, error) { + metadata map[string]string) (minio.ObjectInfo, error) { src := l.client.Bucket(srcBucket).Object(srcObject) dst := l.client.Bucket(destBucket).Object(destObject) @@ -834,7 +819,7 @@ func (l *gcsGateway) CopyObject(srcBucket string, srcObject string, destBucket s attrs, err := copier.Run(l.ctx) if err != nil { - return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), destBucket, destObject) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), destBucket, destObject) } return fromGCSAttrsToObjectInfo(attrs), nil @@ -844,7 +829,7 @@ func (l *gcsGateway) CopyObject(srcBucket string, srcObject string, destBucket s func (l *gcsGateway) DeleteObject(bucket string, object string) error { err := l.client.Bucket(bucket).Object(object).Delete(l.ctx) if err != nil { - return gcsToObjectError(errors2.Trace(err), bucket, object) + return gcsToObjectError(errors.Trace(err), bucket, object) } return nil @@ -853,7 +838,7 @@ func (l *gcsGateway) DeleteObject(bucket string, object string) error { // NewMultipartUpload - upload object in multiple parts func (l *gcsGateway) NewMultipartUpload(bucket string, key string, metadata map[string]string) (uploadID string, err error) { // generate new uploadid - uploadID = mustGetUUID() + uploadID = minio.MustGetUUID() // generate name for part zero meta := gcsMultipartMetaName(uploadID) @@ -870,14 +855,14 @@ func (l *gcsGateway) NewMultipartUpload(bucket string, key string, metadata map[ bucket, key, }); err != nil { - return "", gcsToObjectError(errors2.Trace(err), bucket, key) + return "", gcsToObjectError(errors.Trace(err), bucket, key) } return uploadID, nil } // ListMultipartUploads - lists all multipart uploads. 
-func (l *gcsGateway) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) { - return ListMultipartsInfo{ +func (l *gcsGateway) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (minio.ListMultipartsInfo, error) { + return minio.ListMultipartsInfo{ KeyMarker: keyMarker, UploadIDMarker: uploadIDMarker, MaxUploads: maxUploads, @@ -890,18 +875,18 @@ func (l *gcsGateway) ListMultipartUploads(bucket string, prefix string, keyMarke // an object layer compatible error upon any error. func (l *gcsGateway) checkUploadIDExists(bucket string, key string, uploadID string) error { _, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(l.ctx) - return gcsToObjectError(errors2.Trace(err), bucket, key, uploadID) + return gcsToObjectError(errors.Trace(err), bucket, key, uploadID) } // PutObjectPart puts a part of object in bucket -func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, partNumber int, data *hash.Reader) (PartInfo, error) { +func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, partNumber int, data *hash.Reader) (minio.PartInfo, error) { if err := l.checkUploadIDExists(bucket, key, uploadID); err != nil { - return PartInfo{}, err + return minio.PartInfo{}, err } etag := data.MD5HexString() if etag == "" { // Generate random ETag. - etag = genETag() + etag = minio.GenETag() } object := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, partNumber, etag)) w := object.NewWriter(l.ctx) @@ -911,22 +896,22 @@ func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, p if _, err := io.Copy(w, data); err != nil { // Make sure to close object writer upon error. 
w.Close() - return PartInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) + return minio.PartInfo{}, gcsToObjectError(errors.Trace(err), bucket, key) } // Make sure to close the object writer upon success. w.Close() - return PartInfo{ + return minio.PartInfo{ PartNumber: partNumber, ETag: etag, - LastModified: UTCNow(), + LastModified: minio.UTCNow(), Size: data.Size(), }, nil } // ListObjectParts returns all object parts for specified object in specified bucket -func (l *gcsGateway) ListObjectParts(bucket string, key string, uploadID string, partNumberMarker int, maxParts int) (ListPartsInfo, error) { - return ListPartsInfo{}, l.checkUploadIDExists(bucket, key, uploadID) +func (l *gcsGateway) ListObjectParts(bucket string, key string, uploadID string, partNumberMarker int, maxParts int) (minio.ListPartsInfo, error) { + return minio.ListPartsInfo{}, l.checkUploadIDExists(bucket, key, uploadID) } // Called by AbortMultipartUpload and CompleteMultipartUpload for cleaning up. @@ -942,7 +927,7 @@ func (l *gcsGateway) cleanupMultipartUpload(bucket, key, uploadID string) error break } if err != nil { - return gcsToObjectError(errors2.Trace(err), bucket, key) + return gcsToObjectError(errors.Trace(err), bucket, key) } object := l.client.Bucket(bucket).Object(attrs.Name) @@ -969,34 +954,34 @@ func (l *gcsGateway) AbortMultipartUpload(bucket string, key string, uploadID st // to the number of components you can compose per second. This rate counts both the // components being appended to a composite object as well as the components being // copied when the composite object of which they are a part is copied. 
-func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID string, uploadedParts []CompletePart) (ObjectInfo, error) { +func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID string, uploadedParts []minio.CompletePart) (minio.ObjectInfo, error) { meta := gcsMultipartMetaName(uploadID) object := l.client.Bucket(bucket).Object(meta) partZeroAttrs, err := object.Attrs(l.ctx) if err != nil { - return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key, uploadID) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key, uploadID) } r, err := object.NewReader(l.ctx) if err != nil { - return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key) } defer r.Close() // Check version compatibility of the meta file before compose() multipartMeta := gcsMultipartMetaV1{} if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil { - return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key) } if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion { - return ObjectInfo{}, gcsToObjectError(errors2.Trace(errFormatNotSupported), bucket, key) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(errGCSFormat), bucket, key) } // Validate if the gcs.json stores valid entries for the bucket and key. 
if multipartMeta.Bucket != bucket || multipartMeta.Object != key { - return ObjectInfo{}, gcsToObjectError(InvalidUploadID{ + return minio.ObjectInfo{}, gcsToObjectError(minio.InvalidUploadID{ UploadID: uploadID, }, bucket, key) } @@ -1008,15 +993,15 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID uploadedPart.PartNumber, uploadedPart.ETag))) partAttr, pErr := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, uploadedPart.PartNumber, uploadedPart.ETag)).Attrs(l.ctx) if pErr != nil { - return ObjectInfo{}, gcsToObjectError(errors2.Trace(pErr), bucket, key, uploadID) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(pErr), bucket, key, uploadID) } partSizes[i] = partAttr.Size } // Error out if parts except last part sizing < 5MiB. for i, size := range partSizes[:len(partSizes)-1] { - if size < globalMinPartSize { - return ObjectInfo{}, errors2.Trace(PartTooSmall{ + if size < 5*humanize.MiByte { + return minio.ObjectInfo{}, errors.Trace(minio.PartTooSmall{ PartNumber: uploadedParts[i].PartNumber, PartSize: size, PartETag: uploadedParts[i].ETag, @@ -1026,7 +1011,7 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID // Returns name of the composed object. 
gcsMultipartComposeName := func(uploadID string, composeNumber int) string { - return fmt.Sprintf("%s/tmp/%s/composed-object-%05d", globalMinioSysTmp, uploadID, composeNumber) + return fmt.Sprintf("%s/tmp/%s/composed-object-%05d", minio.GatewayMinioSysTmp, uploadID, composeNumber) } composeCount := int(math.Ceil(float64(len(parts)) / float64(gcsMaxComponents))) @@ -1047,7 +1032,7 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID composer.Metadata = partZeroAttrs.Metadata if _, err = composer.Run(l.ctx); err != nil { - return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key) } } @@ -1060,20 +1045,20 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID composer.Metadata = partZeroAttrs.Metadata attrs, err := composer.Run(l.ctx) if err != nil { - return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key) } if err = l.cleanupMultipartUpload(bucket, key, uploadID); err != nil { - return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) + return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key) } return fromGCSAttrsToObjectInfo(attrs), nil } // SetBucketPolicies - Set policy on bucket func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { - var policies []BucketAccessPolicy + var policies []minio.BucketAccessPolicy for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) { - policies = append(policies, BucketAccessPolicy{ + policies = append(policies, minio.BucketAccessPolicy{ Prefix: prefix, Policy: policy, }) @@ -1082,16 +1067,16 @@ func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAc prefix := bucket + "/*" // For all objects inside the bucket. 
if len(policies) != 1 { - return errors2.Trace(NotImplemented{}) + return errors.Trace(minio.NotImplemented{}) } if policies[0].Prefix != prefix { - return errors2.Trace(NotImplemented{}) + return errors.Trace(minio.NotImplemented{}) } acl := l.client.Bucket(bucket).ACL() if policies[0].Policy == policy.BucketPolicyNone { if err := acl.Delete(l.ctx, storage.AllUsers); err != nil { - return gcsToObjectError(errors2.Trace(err), bucket) + return gcsToObjectError(errors.Trace(err), bucket) } return nil } @@ -1103,11 +1088,11 @@ func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAc case policy.BucketPolicyWriteOnly: role = storage.RoleWriter default: - return errors2.Trace(NotImplemented{}) + return errors.Trace(minio.NotImplemented{}) } if err := acl.Set(l.ctx, storage.AllUsers, role); err != nil { - return gcsToObjectError(errors2.Trace(err), bucket) + return gcsToObjectError(errors.Trace(err), bucket) } return nil @@ -1117,7 +1102,7 @@ func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAc func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) { rules, err := l.client.Bucket(bucket).ACL().List(l.ctx) if err != nil { - return policy.BucketAccessPolicy{}, gcsToObjectError(errors2.Trace(err), bucket) + return policy.BucketAccessPolicy{}, gcsToObjectError(errors.Trace(err), bucket) } policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"} for _, r := range rules { @@ -1133,7 +1118,7 @@ func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy } // Return NoSuchBucketPolicy error, when policy is not set if len(policyInfo.Statements) == 0 { - return policy.BucketAccessPolicy{}, gcsToObjectError(errors2.Trace(PolicyNotFound{}), bucket) + return policy.BucketAccessPolicy{}, gcsToObjectError(errors.Trace(minio.PolicyNotFound{}), bucket) } return policyInfo, nil } @@ -1142,7 +1127,7 @@ func (l *gcsGateway) GetBucketPolicies(bucket string) 
(policy.BucketAccessPolicy func (l *gcsGateway) DeleteBucketPolicies(bucket string) error { // This only removes the storage.AllUsers policies if err := l.client.Bucket(bucket).ACL().Delete(l.ctx, storage.AllUsers); err != nil { - return gcsToObjectError(errors2.Trace(err), bucket) + return gcsToObjectError(errors.Trace(err), bucket) } return nil diff --git a/cmd/gateway-gcs_test.go b/cmd/gateway/gcs/gateway-gcs_test.go similarity index 90% rename from cmd/gateway-gcs_test.go rename to cmd/gateway/gcs/gateway-gcs_test.go index f31994d94..4af836c49 100644 --- a/cmd/gateway-gcs_test.go +++ b/cmd/gateway/gcs/gateway-gcs_test.go @@ -14,18 +14,21 @@ * limitations under the License. */ -package cmd +package gcs import ( "fmt" "io/ioutil" "os" + "path" "reflect" "testing" - "github.com/minio/minio-go" "github.com/minio/minio/pkg/errors" "google.golang.org/api/googleapi" + + miniogo "github.com/minio/minio-go" + minio "github.com/minio/minio/cmd" ) func TestToGCSPageToken(t *testing.T) { @@ -140,7 +143,7 @@ func TestIsGCSMarker(t *testing.T) { // Test for gcsMultipartMetaName. 
func TestGCSMultipartMetaName(t *testing.T) { uploadID := "a" - expected := pathJoin(gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta) + expected := path.Join(gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta) got := gcsMultipartMetaName(uploadID) if expected != got { t.Errorf("expected: %s, got: %s", expected, got) @@ -154,7 +157,7 @@ func TestGCSMultipartDataName(t *testing.T) { etag = "b" partNumber = 1 ) - expected := pathJoin(gcsMinioMultipartPathV1, uploadID, fmt.Sprintf("%05d.%s", partNumber, etag)) + expected := path.Join(gcsMinioMultipartPathV1, uploadID, fmt.Sprintf("%05d.%s", partNumber, etag)) got := gcsMultipartDataName(uploadID, partNumber, etag) if expected != got { t.Errorf("expected: %s, got: %s", expected, got) @@ -163,23 +166,23 @@ func TestGCSMultipartDataName(t *testing.T) { func TestFromMinioClientListBucketResultToV2Info(t *testing.T) { - listBucketResult := minio.ListBucketResult{ + listBucketResult := miniogo.ListBucketResult{ IsTruncated: false, Marker: "testMarker", NextMarker: "testMarker2", - CommonPrefixes: []minio.CommonPrefix{{Prefix: "one"}, {Prefix: "two"}}, - Contents: []minio.ObjectInfo{{Key: "testobj", ContentType: ""}}, + CommonPrefixes: []miniogo.CommonPrefix{{Prefix: "one"}, {Prefix: "two"}}, + Contents: []miniogo.ObjectInfo{{Key: "testobj", ContentType: ""}}, } - listBucketV2Info := ListObjectsV2Info{ + listBucketV2Info := minio.ListObjectsV2Info{ Prefixes: []string{"one", "two"}, - Objects: []ObjectInfo{{Name: "testobj", Bucket: "testbucket", UserDefined: map[string]string{"Content-Type": ""}}}, + Objects: []minio.ObjectInfo{{Name: "testobj", Bucket: "testbucket", UserDefined: map[string]string{"Content-Type": ""}}}, IsTruncated: false, ContinuationToken: "testMarker", NextContinuationToken: "testMarker2", } - if got := fromMinioClientListBucketResultToV2Info("testbucket", listBucketResult); !reflect.DeepEqual(got, listBucketV2Info) { + if got := minio.FromMinioClientListBucketResultToV2Info("testbucket", 
listBucketResult); !reflect.DeepEqual(got, listBucketV2Info) { t.Errorf("fromMinioClientListBucketResultToV2Info() = %v, want %v", got, listBucketV2Info) } } @@ -242,14 +245,14 @@ func TestGCSToObjectError(t *testing.T) { { []string{"bucket"}, errors.Trace(fmt.Errorf("storage: bucket doesn't exist")), - BucketNotFound{ + minio.BucketNotFound{ Bucket: "bucket", }, }, { []string{"bucket", "object"}, errors.Trace(fmt.Errorf("storage: object doesn't exist")), - ObjectNotFound{ + minio.ObjectNotFound{ Bucket: "bucket", Object: "object", }, @@ -257,7 +260,7 @@ func TestGCSToObjectError(t *testing.T) { { []string{"bucket", "object", "uploadID"}, errors.Trace(fmt.Errorf("storage: object doesn't exist")), - InvalidUploadID{ + minio.InvalidUploadID{ UploadID: "uploadID", }, }, @@ -283,7 +286,9 @@ func TestGCSToObjectError(t *testing.T) { Message: "You already own this bucket. Please select another name.", }}, }), - BucketAlreadyOwnedByYou{Bucket: "bucket"}, + minio.BucketAlreadyOwnedByYou{ + Bucket: "bucket", + }, }, { []string{"bucket", "object"}, @@ -293,7 +298,9 @@ func TestGCSToObjectError(t *testing.T) { Message: "Sorry, that name is not available. 
Please try a different one.", }}, }), - BucketAlreadyExists{Bucket: "bucket"}, + minio.BucketAlreadyExists{ + Bucket: "bucket", + }, }, { []string{"bucket", "object"}, @@ -302,7 +309,7 @@ func TestGCSToObjectError(t *testing.T) { Reason: "conflict", }}, }), - BucketNotEmpty{Bucket: "bucket"}, + minio.BucketNotEmpty{Bucket: "bucket"}, }, { []string{"bucket"}, @@ -311,7 +318,9 @@ func TestGCSToObjectError(t *testing.T) { Reason: "notFound", }}, }), - BucketNotFound{Bucket: "bucket"}, + minio.BucketNotFound{ + Bucket: "bucket", + }, }, { []string{"bucket", "object"}, @@ -320,7 +329,7 @@ func TestGCSToObjectError(t *testing.T) { Reason: "notFound", }}, }), - ObjectNotFound{ + minio.ObjectNotFound{ Bucket: "bucket", Object: "object", }, @@ -332,7 +341,7 @@ func TestGCSToObjectError(t *testing.T) { Reason: "invalid", }}, }), - BucketNameInvalid{ + minio.BucketNameInvalid{ Bucket: "bucket", }, }, @@ -343,7 +352,7 @@ func TestGCSToObjectError(t *testing.T) { Reason: "forbidden", }}, }), - PrefixAccessDenied{ + minio.PrefixAccessDenied{ Bucket: "bucket", Object: "object", }, @@ -355,7 +364,7 @@ func TestGCSToObjectError(t *testing.T) { Reason: "keyInvalid", }}, }), - PrefixAccessDenied{ + minio.PrefixAccessDenied{ Bucket: "bucket", Object: "object", }, @@ -367,7 +376,7 @@ func TestGCSToObjectError(t *testing.T) { Reason: "required", }}, }), - PrefixAccessDenied{ + minio.PrefixAccessDenied{ Bucket: "bucket", Object: "object", }, diff --git a/cmd/gateway-s3-anonymous.go b/cmd/gateway/s3/gateway-s3-anonymous.go similarity index 58% rename from cmd/gateway-s3-anonymous.go rename to cmd/gateway/s3/gateway-s3-anonymous.go index 3c4f36081..4ac61f4db 100644 --- a/cmd/gateway-s3-anonymous.go +++ b/cmd/gateway/s3/gateway-s3-anonymous.go @@ -14,87 +14,89 @@ * limitations under the License. 
*/ -package cmd +package s3 import ( "io" - minio "github.com/minio/minio-go" + miniogo "github.com/minio/minio-go" "github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/hash" + + minio "github.com/minio/minio/cmd" ) // AnonPutObject creates a new object anonymously with the incoming data, -func (l *s3Objects) AnonPutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, e error) { - oi, err := l.anonClient.PutObject(bucket, object, data, data.Size(), data.MD5HexString(), data.SHA256HexString(), toMinioClientMetadata(metadata)) +func (l *s3Objects) AnonPutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, e error) { + oi, err := l.anonClient.PutObject(bucket, object, data, data.Size(), data.MD5HexString(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata)) if err != nil { - return objInfo, s3ToObjectError(errors.Trace(err), bucket, object) + return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) } - return fromMinioClientObjectInfo(bucket, oi), nil + return minio.FromMinioClientObjectInfo(bucket, oi), nil } // AnonGetObject - Get object anonymously func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error { - opts := minio.GetObjectOptions{} + opts := miniogo.GetObjectOptions{} if err := opts.SetRange(startOffset, startOffset+length-1); err != nil { - return s3ToObjectError(errors.Trace(err), bucket, key) + return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key) } object, _, err := l.anonClient.GetObject(bucket, key, opts) if err != nil { - return s3ToObjectError(errors.Trace(err), bucket, key) + return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key) } defer object.Close() if _, err := io.CopyN(writer, object, length); err != nil { - return s3ToObjectError(errors.Trace(err), bucket, key) + return 
minio.ErrorRespToObjectError(errors.Trace(err), bucket, key) } return nil } // AnonGetObjectInfo - Get object info anonymously -func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, e error) { - oi, err := l.anonClient.StatObject(bucket, object, minio.StatObjectOptions{}) +func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, e error) { + oi, err := l.anonClient.StatObject(bucket, object, miniogo.StatObjectOptions{}) if err != nil { - return objInfo, s3ToObjectError(errors.Trace(err), bucket, object) + return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) } - return fromMinioClientObjectInfo(bucket, oi), nil + return minio.FromMinioClientObjectInfo(bucket, oi), nil } // AnonListObjects - List objects anonymously -func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { +func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) { result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys) if err != nil { - return loi, s3ToObjectError(errors.Trace(err), bucket) + return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket) } - return fromMinioClientListBucketResult(bucket, result), nil + return minio.FromMinioClientListBucketResult(bucket, result), nil } // AnonListObjectsV2 - List objects in V2 mode, anonymously -func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, e error) { +func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) { result, err := l.anonClient.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, 
delimiter, maxKeys) if err != nil { - return loi, s3ToObjectError(errors.Trace(err), bucket) + return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket) } - return fromMinioClientListBucketV2Result(bucket, result), nil + return minio.FromMinioClientListBucketV2Result(bucket, result), nil } // AnonGetBucketInfo - Get bucket metadata anonymously. -func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi BucketInfo, e error) { +func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi minio.BucketInfo, e error) { if exists, err := l.anonClient.BucketExists(bucket); err != nil { - return bi, s3ToObjectError(errors.Trace(err), bucket) + return bi, minio.ErrorRespToObjectError(errors.Trace(err), bucket) } else if !exists { - return bi, errors.Trace(BucketNotFound{Bucket: bucket}) + return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket}) } buckets, err := l.anonClient.ListBuckets() if err != nil { - return bi, s3ToObjectError(errors.Trace(err), bucket) + return bi, minio.ErrorRespToObjectError(errors.Trace(err), bucket) } for _, bi := range buckets { @@ -102,11 +104,11 @@ func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi BucketInfo, e error) { continue } - return BucketInfo{ + return minio.BucketInfo{ Name: bi.Name, Created: bi.CreationDate, }, nil } - return bi, errors.Trace(BucketNotFound{Bucket: bucket}) + return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket}) } diff --git a/cmd/gateway/s3/gateway-s3.go b/cmd/gateway/s3/gateway-s3.go new file mode 100644 index 000000000..5524676d5 --- /dev/null +++ b/cmd/gateway/s3/gateway-s3.go @@ -0,0 +1,416 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3 + +import ( + "io" + + "github.com/minio/cli" + miniogo "github.com/minio/minio-go" + "github.com/minio/minio-go/pkg/policy" + "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio/pkg/auth" + "github.com/minio/minio/pkg/errors" + "github.com/minio/minio/pkg/hash" + + minio "github.com/minio/minio/cmd" +) + +const ( + s3Backend = "s3" +) + +func init() { + const s3GatewayTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [ENDPOINT] +{{if .VisibleFlags}} +FLAGS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +ENDPOINT: + S3 server endpoint. Default ENDPOINT is https://s3.amazonaws.com + +ENVIRONMENT VARIABLES: + ACCESS: + MINIO_ACCESS_KEY: Username or access key of S3 storage. + MINIO_SECRET_KEY: Password or secret key of S3 storage. + + BROWSER: + MINIO_BROWSER: To disable web browser access, set this value to "off". + +EXAMPLES: + 1. Start minio gateway server for AWS S3 backend. + $ export MINIO_ACCESS_KEY=accesskey + $ export MINIO_SECRET_KEY=secretkey + $ {{.HelpName}} + + 2. Start minio gateway server for S3 backend on custom endpoint. + $ export MINIO_ACCESS_KEY=Q3AM3UQ867SPQQA43P2F + $ export MINIO_SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG + $ {{.HelpName}} https://play.minio.io:9000 +` + + minio.RegisterGatewayCommand(cli.Command{ + Name: s3Backend, + Usage: "Amazon Simple Storage Service (S3).", + Action: s3GatewayMain, + CustomHelpTemplate: s3GatewayTemplate, + HideHelpCommand: true, + }) +} + +// Handler for 'minio gateway s3' command line. 
+func s3GatewayMain(ctx *cli.Context) { + // Validate gateway arguments. + host := ctx.Args().First() + // Validate gateway arguments. + minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument") + + minio.StartGateway(ctx, &S3{host}) +} + +// S3 implements Gateway. +type S3 struct { + host string +} + +// Name implements Gateway interface. +func (g *S3) Name() string { + return s3Backend +} + +// NewGatewayLayer returns s3 gatewaylayer. +func (g *S3) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) { + var err error + var endpoint string + var secure = true + + // Validate host parameters. + if g.host != "" { + // Override default params if the host is provided + endpoint, secure, err = minio.ParseGatewayEndpoint(g.host) + if err != nil { + return nil, err + } + } + + // Default endpoint parameters + if endpoint == "" { + endpoint = "s3.amazonaws.com" + } + + // Initialize minio client object. + client, err := miniogo.NewCore(endpoint, creds.AccessKey, creds.SecretKey, secure) + if err != nil { + return nil, err + } + + anonClient, err := miniogo.NewCore(endpoint, "", "", secure) + if err != nil { + return nil, err + } + anonClient.SetCustomTransport(minio.NewCustomHTTPTransport()) + + return &s3Objects{ + Client: client, + anonClient: anonClient, + }, nil +} + +// Production - s3 gateway is not production ready. +func (g *S3) Production() bool { + return false +} + +// s3Objects implements gateway for Minio and S3 compatible object storage servers. +type s3Objects struct { + minio.GatewayUnsupported + Client *miniogo.Core + anonClient *miniogo.Core +} + +// Shutdown saves any gateway metadata to disk +// if necessary and reload upon next restart. +func (l *s3Objects) Shutdown() error { + return nil +} + +// StorageInfo is not relevant to S3 backend. +func (l *s3Objects) StorageInfo() (si minio.StorageInfo) { + return si +} + +// MakeBucket creates a new container on S3 backend. 
+func (l *s3Objects) MakeBucketWithLocation(bucket, location string) error { + err := l.Client.MakeBucket(bucket, location) + if err != nil { + return minio.ErrorRespToObjectError(errors.Trace(err), bucket) + } + return err +} + +// GetBucketInfo gets bucket metadata.. +func (l *s3Objects) GetBucketInfo(bucket string) (bi minio.BucketInfo, e error) { + // Verify if bucket name is valid. + // We are using a separate helper function here to validate bucket + // names instead of IsValidBucketName() because there is a possibility + // that certains users might have buckets which are non-DNS compliant + // in us-east-1 and we might severely restrict them by not allowing + // access to these buckets. + // Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html + if s3utils.CheckValidBucketName(bucket) != nil { + return bi, errors.Trace(minio.BucketNameInvalid{Bucket: bucket}) + } + + buckets, err := l.Client.ListBuckets() + if err != nil { + return bi, minio.ErrorRespToObjectError(errors.Trace(err), bucket) + } + + for _, bi := range buckets { + if bi.Name != bucket { + continue + } + + return minio.BucketInfo{ + Name: bi.Name, + Created: bi.CreationDate, + }, nil + } + + return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket}) +} + +// ListBuckets lists all S3 buckets +func (l *s3Objects) ListBuckets() ([]minio.BucketInfo, error) { + buckets, err := l.Client.ListBuckets() + if err != nil { + return nil, minio.ErrorRespToObjectError(errors.Trace(err)) + } + + b := make([]minio.BucketInfo, len(buckets)) + for i, bi := range buckets { + b[i] = minio.BucketInfo{ + Name: bi.Name, + Created: bi.CreationDate, + } + } + + return b, err +} + +// DeleteBucket deletes a bucket on S3 +func (l *s3Objects) DeleteBucket(bucket string) error { + err := l.Client.RemoveBucket(bucket) + if err != nil { + return minio.ErrorRespToObjectError(errors.Trace(err), bucket) + } + return nil +} + +// ListObjects lists all blobs in S3 bucket filtered by prefix +func (l 
*s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) { + result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys) + if err != nil { + return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket) + } + + return minio.FromMinioClientListBucketResult(bucket, result), nil +} + +// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix +func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) { + result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys) + if err != nil { + return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket) + } + + return minio.FromMinioClientListBucketV2Result(bucket, result), nil +} + +// GetObject reads an object from S3. Supports additional +// parameters like offset and length which are synonymous with +// HTTP Range requests. +// +// startOffset indicates the starting read location of the object. +// length indicates the total length of the object. 
+func (l *s3Objects) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error { + if length < 0 && length != -1 { + return minio.ErrorRespToObjectError(errors.Trace(minio.InvalidRange{}), bucket, key) + } + + opts := miniogo.GetObjectOptions{} + if startOffset >= 0 && length >= 0 { + if err := opts.SetRange(startOffset, startOffset+length-1); err != nil { + return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key) + } + } + object, _, err := l.Client.GetObject(bucket, key, opts) + if err != nil { + return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key) + } + defer object.Close() + + if _, err := io.Copy(writer, object); err != nil { + return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key) + } + return nil +} + +// GetObjectInfo reads object info and replies back ObjectInfo +func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) { + oi, err := l.Client.StatObject(bucket, object, miniogo.StatObjectOptions{}) + if err != nil { + return minio.ObjectInfo{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) + } + + return minio.FromMinioClientObjectInfo(bucket, oi), nil +} + +// PutObject creates a new object with the incoming data, +func (l *s3Objects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) { + oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5HexString(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata)) + if err != nil { + return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) + } + + return minio.FromMinioClientObjectInfo(bucket, oi), nil +} + +// CopyObject copies an object from source bucket to a destination bucket. 
+func (l *s3Objects) CopyObject(srcBucket string, srcObject string, dstBucket string, dstObject string, metadata map[string]string) (objInfo minio.ObjectInfo, err error) { + // Set this header such that following CopyObject() always sets the right metadata on the destination. + // metadata input is already a trickled down value from interpreting x-amz-metadata-directive at + // handler layer. So what we have right now is supposed to be applied on the destination object anyways. + // So preserve it by adding "REPLACE" directive to save all the metadata set by CopyObject API. + metadata["x-amz-metadata-directive"] = "REPLACE" + if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, metadata); err != nil { + return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), srcBucket, srcObject) + } + return l.GetObjectInfo(dstBucket, dstObject) +} + +// DeleteObject deletes a blob in bucket +func (l *s3Objects) DeleteObject(bucket string, object string) error { + err := l.Client.RemoveObject(bucket, object) + if err != nil { + return minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) + } + + return nil +} + +// ListMultipartUploads lists all multipart uploads. 
+func (l *s3Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, e error) { + result, err := l.Client.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) + if err != nil { + return lmi, err + } + + return minio.FromMinioClientListMultipartsInfo(result), nil +} + +// NewMultipartUpload upload object in multiple parts +func (l *s3Objects) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) { + // Create PutObject options + opts := miniogo.PutObjectOptions{UserMetadata: metadata} + uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts) + if err != nil { + return uploadID, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) + } + return uploadID, nil +} + +// PutObjectPart puts a part of object in bucket +func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi minio.PartInfo, e error) { + info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5HexString(), data.SHA256HexString()) + if err != nil { + return pi, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) + } + + return minio.FromMinioClientObjectPart(info), nil +} + +// CopyObjectPart creates a part in a multipart upload by copying +// existing object or a part of it. 
+func (l *s3Objects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject, uploadID string, + partID int, startOffset, length int64, metadata map[string]string) (p minio.PartInfo, err error) { + + completePart, err := l.Client.CopyObjectPart(srcBucket, srcObject, destBucket, destObject, + uploadID, partID, startOffset, length, metadata) + if err != nil { + return p, minio.ErrorRespToObjectError(errors.Trace(err), srcBucket, srcObject) + } + p.PartNumber = completePart.PartNumber + p.ETag = completePart.ETag + return p, nil +} + +// ListObjectParts returns all object parts for specified object in specified bucket +func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi minio.ListPartsInfo, e error) { + result, err := l.Client.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts) + if err != nil { + return lpi, err + } + + return minio.FromMinioClientListPartsInfo(result), nil +} + +// AbortMultipartUpload aborts a ongoing multipart upload +func (l *s3Objects) AbortMultipartUpload(bucket string, object string, uploadID string) error { + err := l.Client.AbortMultipartUpload(bucket, object, uploadID) + return minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) +} + +// CompleteMultipartUpload completes ongoing multipart upload and finalizes object +func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, e error) { + err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, minio.ToMinioClientCompleteParts(uploadedParts)) + if err != nil { + return oi, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) + } + + return l.GetObjectInfo(bucket, object) +} + +// SetBucketPolicies sets policy on bucket +func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { + if err := l.Client.PutBucketPolicy(bucket, policyInfo); err 
!= nil { + return minio.ErrorRespToObjectError(errors.Trace(err), bucket, "") + } + + return nil +} + +// GetBucketPolicies will get policy on bucket +func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) { + policyInfo, err := l.Client.GetBucketPolicy(bucket) + if err != nil { + return policy.BucketAccessPolicy{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket, "") + } + return policyInfo, nil +} + +// DeleteBucketPolicies deletes all policies on bucket +func (l *s3Objects) DeleteBucketPolicies(bucket string) error { + if err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{}); err != nil { + return minio.ErrorRespToObjectError(errors.Trace(err), bucket, "") + } + return nil +} diff --git a/cmd/gateway-s3_test.go b/cmd/gateway/s3/gateway-s3_test.go similarity index 60% rename from cmd/gateway-s3_test.go rename to cmd/gateway/s3/gateway-s3_test.go index e132d13cc..45ad0d6d5 100644 --- a/cmd/gateway-s3_test.go +++ b/cmd/gateway/s3/gateway-s3_test.go @@ -14,19 +14,21 @@ * limitations under the License. 
*/ -package cmd +package s3 import ( - "errors" + "fmt" "testing" - minio "github.com/minio/minio-go" - errors2 "github.com/minio/minio/pkg/errors" + miniogo "github.com/minio/minio-go" + "github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/hash" + + minio "github.com/minio/minio/cmd" ) -func errResponse(code string) minio.ErrorResponse { - return minio.ErrorResponse{ +func errResponse(code string) miniogo.ErrorResponse { + return miniogo.ErrorResponse{ Code: code, } } @@ -39,41 +41,41 @@ func TestS3ToObjectError(t *testing.T) { }{ { inputErr: errResponse("BucketAlreadyOwnedByYou"), - expectedErr: BucketAlreadyOwnedByYou{}, + expectedErr: minio.BucketAlreadyOwnedByYou{}, }, { inputErr: errResponse("BucketNotEmpty"), - expectedErr: BucketNotEmpty{}, + expectedErr: minio.BucketNotEmpty{}, }, { inputErr: errResponse("InvalidBucketName"), - expectedErr: BucketNameInvalid{}, + expectedErr: minio.BucketNameInvalid{}, }, { inputErr: errResponse("NoSuchBucketPolicy"), - expectedErr: PolicyNotFound{}, + expectedErr: minio.PolicyNotFound{}, }, { inputErr: errResponse("NoSuchBucket"), - expectedErr: BucketNotFound{}, + expectedErr: minio.BucketNotFound{}, }, - // with empty Object in minio.ErrorRepsonse, NoSuchKey + // with empty Object in miniogo.ErrorRepsonse, NoSuchKey // is interpreted as BucketNotFound { inputErr: errResponse("NoSuchKey"), - expectedErr: BucketNotFound{}, + expectedErr: minio.BucketNotFound{}, }, { inputErr: errResponse("NoSuchUpload"), - expectedErr: InvalidUploadID{}, + expectedErr: minio.InvalidUploadID{}, }, { inputErr: errResponse("XMinioInvalidObjectName"), - expectedErr: ObjectNameInvalid{}, + expectedErr: minio.ObjectNameInvalid{}, }, { inputErr: errResponse("AccessDenied"), - expectedErr: PrefixAccessDenied{}, + expectedErr: minio.PrefixAccessDenied{}, }, { inputErr: errResponse("XAmzContentSHA256Mismatch"), @@ -81,7 +83,7 @@ func TestS3ToObjectError(t *testing.T) { }, { inputErr: errResponse("EntityTooSmall"), - expectedErr: 
PartTooSmall{}, + expectedErr: minio.PartTooSmall{}, }, { inputErr: nil, @@ -89,34 +91,37 @@ func TestS3ToObjectError(t *testing.T) { }, // Special test case for NoSuchKey with object name { - inputErr: minio.ErrorResponse{ + inputErr: miniogo.ErrorResponse{ Code: "NoSuchKey", }, - expectedErr: ObjectNotFound{}, - bucket: "bucket", - object: "obbject", + expectedErr: minio.ObjectNotFound{ + Bucket: "bucket", + Object: "object", + }, + bucket: "bucket", + object: "object", }, // N B error values that aren't of expected types // should be left untouched. // Special test case for error that is not of type - // minio.ErrorResponse + // miniogo.ErrorResponse { - inputErr: errors.New("not a minio.ErrorResponse"), - expectedErr: errors.New("not a minio.ErrorResponse"), + inputErr: fmt.Errorf("not a minio.ErrorResponse"), + expectedErr: fmt.Errorf("not a minio.ErrorResponse"), }, // Special test case for error value that is not of // type (*Error) { - inputErr: errors.New("not a *Error"), - expectedErr: errors.New("not a *Error"), + inputErr: fmt.Errorf("not a *Error"), + expectedErr: fmt.Errorf("not a *Error"), }, } for i, tc := range testCases { - actualErr := s3ToObjectError(tc.inputErr, tc.bucket, tc.object) - if e, ok := actualErr.(*errors2.Error); ok && e.Cause != tc.expectedErr { - t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, e.Cause) + actualErr := minio.ErrorRespToObjectError(errors.Trace(tc.inputErr), tc.bucket, tc.object) + if e, ok := actualErr.(*errors.Error); ok && e.Cause.Error() != tc.expectedErr.Error() { + t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, e) } } } diff --git a/cmd/gateway-sia.go b/cmd/gateway/sia/gateway-sia.go similarity index 75% rename from cmd/gateway-sia.go rename to cmd/gateway/sia/gateway-sia.go index dd4ca25e1..961ec976f 100644 --- a/cmd/gateway-sia.go +++ b/cmd/gateway/sia/gateway-sia.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package cmd +package sia import ( "bytes" @@ -22,15 +22,21 @@ import ( "encoding/json" "fmt" "io" + "log" "net/http" "net/url" "os" + "path" "path/filepath" "strings" "time" + humanize "github.com/dustin/go-humanize" + "github.com/fatih/color" "github.com/minio/cli" "github.com/minio/minio-go/pkg/set" + minio "github.com/minio/minio/cmd" + "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/hash" "github.com/minio/sha256-simd" @@ -41,7 +47,7 @@ const ( ) type siaObjects struct { - gatewayUnsupported + minio.GatewayUnsupported Address string // Address and port of Sia Daemon. TempDir string // Temporary storage location for file transfers. RootDir string // Root directory to store files on Sia. @@ -72,12 +78,11 @@ EXAMPLES: ` - MustRegisterGatewayCommand(cli.Command{ + minio.RegisterGatewayCommand(cli.Command{ Name: siaBackend, Usage: "Sia Decentralized Cloud.", Action: siaGatewayMain, CustomHelpTemplate: siaGatewayTemplate, - Flags: append(serverFlags, globalFlags...), HideHelpCommand: true, }) } @@ -87,26 +92,67 @@ func siaGatewayMain(ctx *cli.Context) { // Validate gateway arguments. host := ctx.Args().First() // Validate gateway arguments. - fatalIf(validateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument") + minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument") - startGateway(ctx, &SiaGateway{host}) + minio.StartGateway(ctx, &Sia{host}) } -// SiaGateway implements Gateway. -type SiaGateway struct { +// Sia implements Gateway. +type Sia struct { host string // Sia daemon host address } // Name implements Gateway interface. -func (g *SiaGateway) Name() string { +func (g *Sia) Name() string { return siaBackend } -// NewGatewayLayer returns b2 gateway layer, implements GatewayLayer interface to -// talk to B2 remote backend. 
-func (g *SiaGateway) NewGatewayLayer() (GatewayLayer, error) { - log.Println(colorYellow("\n *** Warning: Not Ready for Production ***")) - return newSiaGatewayLayer(g.host) +// NewGatewayLayer returns Sia gateway layer, implements GatewayLayer interface to +// talk to Sia backend. +func (g *Sia) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) { + sia := &siaObjects{ + Address: g.host, + // RootDir uses access key directly, provides partitioning for + // concurrent users talking to same sia daemon. + RootDir: creds.AccessKey, + TempDir: os.Getenv("SIA_TEMP_DIR"), + password: os.Getenv("SIA_API_PASSWORD"), + } + + // If Address not provided on command line or ENV, default to: + if sia.Address == "" { + sia.Address = "127.0.0.1:9980" + } + + // If local Sia temp directory not specified, default to: + if sia.TempDir == "" { + sia.TempDir = ".sia_temp" + } + + var err error + sia.TempDir, err = filepath.Abs(sia.TempDir) + if err != nil { + return nil, err + } + + // Create the temp directory with proper permissions. + // Ignore error when dir already exists. + if err = os.MkdirAll(sia.TempDir, 0700); err != nil { + return nil, err + } + + colorBlue := color.New(color.FgBlue).SprintfFunc() + colorBold := color.New(color.Bold).SprintFunc() + + log.Println(colorBlue("\nSia Gateway Configuration:")) + log.Println(colorBlue(" Sia Daemon API Address:") + colorBold(fmt.Sprintf(" %s\n", sia.Address))) + log.Println(colorBlue(" Sia Temp Directory:") + colorBold(fmt.Sprintf(" %s\n", sia.TempDir))) + return sia, nil +} + +// Production - sia gateway is not ready for production use. +func (g *Sia) Production() bool { + return false } // non2xx returns true for non-success HTTP status codes. @@ -139,12 +185,12 @@ func decodeError(resp *http.Response) error { return apiErr } -// SiaMethodNotSupported - returned if call returned error. -type SiaMethodNotSupported struct { +// MethodNotSupported - returned if call returned error. 
+type MethodNotSupported struct { method string } -func (s SiaMethodNotSupported) Error() string { +func (s MethodNotSupported) Error() string { return fmt.Sprintf("API call not recognized: %s", s.method) } @@ -166,7 +212,7 @@ func apiGet(addr, call, apiPassword string) (*http.Response, error) { } if resp.StatusCode == http.StatusNotFound { resp.Body.Close() - return nil, SiaMethodNotSupported{call} + return nil, MethodNotSupported{call} } if non2xx(resp.StatusCode) { err := decodeError(resp) @@ -191,12 +237,12 @@ func apiPost(addr, call, vals, apiPassword string) (*http.Response, error) { } resp, err := http.DefaultClient.Do(req) if err != nil { - return nil, err + return nil, errors.Trace(err) } if resp.StatusCode == http.StatusNotFound { resp.Body.Close() - return nil, SiaMethodNotSupported{call} + return nil, MethodNotSupported{call} } if non2xx(resp.StatusCode) { @@ -244,45 +290,6 @@ func get(addr, call, apiPassword string) error { return nil } -// newSiaGatewayLayer returns Sia gatewaylayer -func newSiaGatewayLayer(host string) (GatewayLayer, error) { - sia := &siaObjects{ - Address: host, - // RootDir uses access key directly, provides partitioning for - // concurrent users talking to same sia daemon. - RootDir: os.Getenv("MINIO_ACCESS_KEY"), - TempDir: os.Getenv("SIA_TEMP_DIR"), - password: os.Getenv("SIA_API_PASSWORD"), - } - - // If Address not provided on command line or ENV, default to: - if sia.Address == "" { - sia.Address = "127.0.0.1:9980" - } - - // If local Sia temp directory not specified, default to: - if sia.TempDir == "" { - sia.TempDir = ".sia_temp" - } - - var err error - sia.TempDir, err = filepath.Abs(sia.TempDir) - if err != nil { - return nil, err - } - - // Create the temp directory with proper permissions. - // Ignore error when dir already exists. 
- if err = os.MkdirAll(sia.TempDir, 0700); err != nil { - return nil, err - } - - log.Println(colorBlue("\nSia Gateway Configuration:")) - log.Println(colorBlue(" Sia Daemon API Address:") + colorBold(fmt.Sprintf(" %s\n", sia.Address))) - log.Println(colorBlue(" Sia Temp Directory:") + colorBold(fmt.Sprintf(" %s\n", sia.TempDir))) - return sia, nil -} - // Shutdown saves any gateway metadata to disk // if necessary and reload upon next restart. func (s *siaObjects) Shutdown() error { @@ -290,40 +297,44 @@ func (s *siaObjects) Shutdown() error { } // StorageInfo is not relevant to Sia backend. -func (s *siaObjects) StorageInfo() (si StorageInfo) { +func (s *siaObjects) StorageInfo() (si minio.StorageInfo) { return si } // MakeBucket creates a new container on Sia backend. func (s *siaObjects) MakeBucketWithLocation(bucket, location string) error { - srcFile := pathJoin(s.TempDir, mustGetUUID()) - defer fsRemoveFile(srcFile) + srcFile := path.Join(s.TempDir, minio.MustGetUUID()) + defer os.Remove(srcFile) - if _, err := fsCreateFile(srcFile, bytes.NewReader([]byte("")), nil, 0); err != nil { + writer, err := os.Create(srcFile) + if err != nil { + return err + } + if _, err = io.Copy(writer, bytes.NewReader([]byte(""))); err != nil { return err } sha256sum := sha256.Sum256([]byte(bucket)) - var siaObj = pathJoin(s.RootDir, bucket, hex.EncodeToString(sha256sum[:])) + var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:])) return post(s.Address, "/renter/upload/"+siaObj, "source="+srcFile, s.password) } // GetBucketInfo gets bucket metadata. 
-func (s *siaObjects) GetBucketInfo(bucket string) (bi BucketInfo, err error) { +func (s *siaObjects) GetBucketInfo(bucket string) (bi minio.BucketInfo, err error) { sha256sum := sha256.Sum256([]byte(bucket)) - var siaObj = pathJoin(s.RootDir, bucket, hex.EncodeToString(sha256sum[:])) + var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:])) - dstFile := pathJoin(s.TempDir, mustGetUUID()) - defer fsRemoveFile(dstFile) + dstFile := path.Join(s.TempDir, minio.MustGetUUID()) + defer os.Remove(dstFile) if err := get(s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil { return bi, err } - return BucketInfo{Name: bucket}, nil + return minio.BucketInfo{Name: bucket}, nil } // ListBuckets will detect and return existing buckets on Sia. -func (s *siaObjects) ListBuckets() (buckets []BucketInfo, err error) { +func (s *siaObjects) ListBuckets() (buckets []minio.BucketInfo, err error) { sObjs, serr := s.listRenterFiles("") if serr != nil { return buckets, serr @@ -343,7 +354,7 @@ func (s *siaObjects) ListBuckets() (buckets []BucketInfo, err error) { } for _, bktName := range m.ToSlice() { - buckets = append(buckets, BucketInfo{ + buckets = append(buckets, minio.BucketInfo{ Name: bktName, Created: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), }) @@ -355,12 +366,12 @@ func (s *siaObjects) ListBuckets() (buckets []BucketInfo, err error) { // DeleteBucket deletes a bucket on Sia. 
func (s *siaObjects) DeleteBucket(bucket string) error { sha256sum := sha256.Sum256([]byte(bucket)) - var siaObj = pathJoin(s.RootDir, bucket, hex.EncodeToString(sha256sum[:])) + var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:])) return post(s.Address, "/renter/delete/"+siaObj, "", s.password) } -func (s *siaObjects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { +func (s *siaObjects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) { siaObjs, siaErr := s.listRenterFiles(bucket) if siaErr != nil { return loi, siaErr @@ -376,13 +387,13 @@ func (s *siaObjects) ListObjects(bucket string, prefix string, marker string, de // based filtering. Once list renter files API supports paginated output we can support // paginated results here as well - until then Listing is an expensive operation. for _, sObj := range siaObjs { - name := strings.TrimPrefix(sObj.SiaPath, pathJoin(root, bucket, "/")) + name := strings.TrimPrefix(sObj.SiaPath, path.Join(root, bucket)+"/") // Skip the file created specially when bucket was created. 
if name == hex.EncodeToString(sha256sum[:]) { continue } if strings.HasPrefix(name, prefix) { - loi.Objects = append(loi.Objects, ObjectInfo{ + loi.Objects = append(loi.Objects, minio.ObjectInfo{ Bucket: bucket, Name: name, Size: int64(sObj.Filesize), @@ -394,33 +405,45 @@ func (s *siaObjects) ListObjects(bucket string, prefix string, marker string, de } func (s *siaObjects) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error { - dstFile := pathJoin(s.TempDir, mustGetUUID()) - defer fsRemoveFile(dstFile) + dstFile := path.Join(s.TempDir, minio.MustGetUUID()) + defer os.Remove(dstFile) - var siaObj = pathJoin(s.RootDir, bucket, object) + var siaObj = path.Join(s.RootDir, bucket, object) if err := get(s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil { return err } - reader, size, err := fsOpenFile(dstFile, startOffset) + reader, err := os.Open(dstFile) if err != nil { - return toObjectErr(err, bucket, object) + return err } defer reader.Close() + st, err := reader.Stat() + if err != nil { + return err + } + size := st.Size() + if _, err = reader.Seek(startOffset, os.SEEK_SET); err != nil { + return err + } // For negative length we read everything. if length < 0 { length = size - startOffset } - bufSize := int64(readSizeV1) + bufSize := int64(1 * humanize.MiByte) if bufSize > length { bufSize = length } // Reply back invalid range if the input offset and length fall out of range. if startOffset > size || startOffset+length > size { - return errors.Trace(InvalidRange{startOffset, length, size}) + return errors.Trace(minio.InvalidRange{ + OffsetBegin: startOffset, + OffsetEnd: length, + ResourceSize: size, + }) } // Allocate a staging buffer. @@ -433,7 +456,9 @@ func (s *siaObjects) GetObject(bucket string, object string, startOffset int64, // findSiaObject retrieves the siaObjectInfo for the Sia object with the given // Sia path name. 
-func (s *siaObjects) findSiaObject(siaPath string) (siaObjectInfo, error) { +func (s *siaObjects) findSiaObject(bucket, object string) (siaObjectInfo, error) { + siaPath := path.Join(s.RootDir, bucket, object) + sObjs, err := s.listRenterFiles("") if err != nil { return siaObjectInfo{}, err @@ -445,64 +470,62 @@ func (s *siaObjects) findSiaObject(siaPath string) (siaObjectInfo, error) { } } - return siaObjectInfo{}, errors.Trace(ObjectNotFound{"", siaPath}) + return siaObjectInfo{}, errors.Trace(minio.ObjectNotFound{ + Bucket: bucket, + Object: object, + }) } // GetObjectInfo reads object info and replies back ObjectInfo -func (s *siaObjects) GetObjectInfo(bucket string, object string) (ObjectInfo, error) { - siaPath := pathJoin(s.RootDir, bucket, object) - so, err := s.findSiaObject(siaPath) +func (s *siaObjects) GetObjectInfo(bucket string, object string) (minio.ObjectInfo, error) { + so, err := s.findSiaObject(bucket, object) if err != nil { - return ObjectInfo{}, err + return minio.ObjectInfo{}, err } - // Metadata about sia objects is just quite minimal. Sia only provides - // file size. - return ObjectInfo{ - Bucket: bucket, - Name: object, - Size: int64(so.Filesize), - IsDir: false, + // Metadata about sia objects is just quite minimal. Sia only provides file size. 
+ return minio.ObjectInfo{ + Bucket: bucket, + Name: object, + ModTime: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), + Size: int64(so.Filesize), + IsDir: false, }, nil } // PutObject creates a new object with the incoming data, -func (s *siaObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) { - bufSize := int64(readSizeV1) - size := data.Size() - if size > 0 && bufSize > size { - bufSize = size - } - buf := make([]byte, int(bufSize)) - - srcFile := pathJoin(s.TempDir, mustGetUUID()) - - if _, err = fsCreateFile(srcFile, data, buf, data.Size()); err != nil { +func (s *siaObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) { + srcFile := path.Join(s.TempDir, minio.MustGetUUID()) + writer, err := os.Create(srcFile) + if err != nil { return objInfo, err } - var siaPath = pathJoin(s.RootDir, bucket, object) - if err = post(s.Address, "/renter/upload/"+siaPath, "source="+srcFile, s.password); err != nil { - fsRemoveFile(srcFile) + wsize, err := io.CopyN(writer, data, data.Size()) + if err != nil { + os.Remove(srcFile) return objInfo, err } - defer s.deleteTempFileWhenUploadCompletes(srcFile, siaPath) - objInfo = ObjectInfo{ + if err = post(s.Address, "/renter/upload/"+path.Join(s.RootDir, bucket, object), "source="+srcFile, s.password); err != nil { + os.Remove(srcFile) + return objInfo, err + } + defer s.deleteTempFileWhenUploadCompletes(srcFile, bucket, object) + + return minio.ObjectInfo{ Name: object, Bucket: bucket, ModTime: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), - Size: size, - ETag: genETag(), - } - - return objInfo, nil + Size: wsize, + ETag: minio.GenETag(), + }, nil } // DeleteObject deletes a blob in bucket func (s *siaObjects) DeleteObject(bucket string, object string) error { // Tell Sia daemon to delete the object - var siaObj = pathJoin(s.RootDir, bucket, object) + var siaObj = 
path.Join(s.RootDir, bucket, object) return post(s.Address, "/renter/delete/"+siaObj, "", s.password) } @@ -549,22 +572,23 @@ func (s *siaObjects) listRenterFiles(bucket string) (siaObjs []siaObjectInfo, er // deleteTempFileWhenUploadCompletes checks the status of a Sia file upload // until it reaches 100% upload progress, then deletes the local temp copy from // the filesystem. -func (s *siaObjects) deleteTempFileWhenUploadCompletes(tempFile string, siaPath string) { +func (s *siaObjects) deleteTempFileWhenUploadCompletes(tempFile string, bucket, object string) { var soi siaObjectInfo // Wait until 100% upload instead of 1x redundancy because if we delete // after 1x redundancy, the user has to pay the cost of other hosts // redistributing the file. for soi.UploadProgress < 100.0 { var err error - soi, err = s.findSiaObject(siaPath) + soi, err = s.findSiaObject(bucket, object) if err != nil { - errorIf(err, "Unable to find file uploaded to Sia path %s", siaPath) + minio.ErrorIf(err, "Unable to find file uploaded to Sia path %s/%s", bucket, object) break } - // Sleep between each check so that we're not hammering the Sia - // daemon with requests. + // Sleep between each check so that we're not hammering + // the Sia daemon with requests. time.Sleep(15 * time.Second) } - fsRemoveFile(tempFile) + + os.Remove(tempFile) } diff --git a/cmd/gateway-sia_test.go b/cmd/gateway/sia/gateway-sia_test.go similarity index 98% rename from cmd/gateway-sia_test.go rename to cmd/gateway/sia/gateway-sia_test.go index 600e24588..4942efb0d 100644 --- a/cmd/gateway-sia_test.go +++ b/cmd/gateway/sia/gateway-sia_test.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package cmd +package sia import ( "testing" diff --git a/cmd/globals.go b/cmd/globals.go index f0cd3c65f..439c8e251 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -53,9 +53,6 @@ const ( globalMinioModeDistXL = "mode-server-distributed-xl" globalMinioModeGatewayPrefix = "mode-gateway-" - // globalMinioSysTmp prefix is used in Azure/GCS gateway for save metadata sent by Initialize Multipart Upload API. - globalMinioSysTmp = "minio.sys.tmp/" - // Add new global values here. ) diff --git a/cmd/storage-errors.go b/cmd/storage-errors.go index b2a9fe02c..be083059c 100644 --- a/cmd/storage-errors.go +++ b/cmd/storage-errors.go @@ -27,10 +27,6 @@ var errUnexpected = errors.New("Unexpected error, please report this issue at ht // errCorruptedFormat - corrupted backend format. var errCorruptedFormat = errors.New("corrupted backend format, please join https://slack.minio.io for assistance") -// errFormatNotSupported - returned when older minio tries to parse metadata -// created by newer minio. -var errFormatNotSupported = errors.New("format not supported") - // errUnformattedDisk - unformatted disk found. 
var errUnformattedDisk = errors.New("unformatted disk found") diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index a9944645a..4e844516a 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -2394,13 +2394,4 @@ func TestToErrIsNil(t *testing.T) { if toAPIErrorCode(nil) != ErrNone { t.Errorf("Test expected error code to be ErrNone, failed instead provided %d", toAPIErrorCode(nil)) } - if s3ToObjectError(nil) != nil { - t.Errorf("Test expected to return nil, failed instead got a non-nil value %s", s3ToObjectError(nil)) - } - if azureToObjectError(nil) != nil { - t.Errorf("Test expected to return nil, failed instead got a non-nil value %s", azureToObjectError(nil)) - } - if gcsToObjectError(nil) != nil { - t.Errorf("Test expected to return nil, failed instead got a non-nil value %s", gcsToObjectError(nil)) - } } diff --git a/cmd/utils.go b/cmd/utils.go index 282adb0d8..c4d656f01 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -18,12 +18,14 @@ package cmd import ( "bytes" + "crypto/tls" "encoding/base64" "encoding/json" "encoding/xml" "errors" "fmt" "io" + "net" "net/http" "net/url" "os" @@ -212,13 +214,13 @@ func UTCNow() time.Time { return time.Now().UTC() } -// genETag - generate UUID based ETag -func genETag() string { - return toS3ETag(getMD5Hash([]byte(mustGetUUID()))) +// GenETag - generate UUID based ETag +func GenETag() string { + return ToS3ETag(getMD5Hash([]byte(mustGetUUID()))) } -// toS3ETag - return checksum to ETag -func toS3ETag(etag string) string { +// ToS3ETag - return checksum to ETag +func ToS3ETag(etag string) string { etag = canonicalizeETag(etag) if !strings.HasSuffix(etag, "-1") { @@ -229,3 +231,23 @@ func toS3ETag(etag string) string { return etag } + +// NewCustomHTTPTransport returns a new http configuration +// used while communicating with the cloud backends. +// This sets the value for MaxIdleConns from 2 (go default) to +// 100. 
+func NewCustomHTTPTransport() http.RoundTripper { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{RootCAs: globalRootCAs}, + DisableCompression: true, + } +} diff --git a/cmd/utils_test.go b/cmd/utils_test.go index 4ddf4d4f1..bcd7c86d6 100644 --- a/cmd/utils_test.go +++ b/cmd/utils_test.go @@ -294,7 +294,7 @@ func TestDumpRequest(t *testing.T) { } } -// Test toS3ETag() +// Test ToS3ETag() func TestToS3ETag(t *testing.T) { testCases := []struct { etag string @@ -306,7 +306,7 @@ func TestToS3ETag(t *testing.T) { {"5d57546eeb86b3eba68967292fba0644-1", "5d57546eeb86b3eba68967292fba0644-1"}, } for i, testCase := range testCases { - etag := toS3ETag(testCase.etag) + etag := ToS3ETag(testCase.etag) if etag != testCase.expectedETag { t.Fatalf("test %v: expected: %v, got: %v", i+1, testCase.expectedETag, etag) } diff --git a/main.go b/main.go index 7ad6076a4..3503e60da 100644 --- a/main.go +++ b/main.go @@ -30,6 +30,9 @@ import ( version "github.com/hashicorp/go-version" "github.com/minio/mc/pkg/console" minio "github.com/minio/minio/cmd" + + // Import gateway + _ "github.com/minio/minio/cmd/gateway" ) const (