Convert gateways into respective packages (#5200)

- Make azure gateway a package
- Make b2 gateway a package
- Make gcs gateway a package
- Make s3 gateway a package
- Make sia gateway a package
This commit is contained in:
Harshavardhana 2017-12-05 17:58:09 -08:00 committed by Dee Koder
parent 52e382b697
commit eb2894233c
31 changed files with 1586 additions and 1505 deletions

View file

@ -58,15 +58,13 @@ spelling:
check: test check: test
test: verifiers build test: verifiers build
@echo "Running unit tests" @echo "Running unit tests"
@go test $(GOFLAGS) . @go test $(GOFLAGS) ./...
@go test $(GOFLAGS) github.com/minio/minio/cmd...
@go test $(GOFLAGS) github.com/minio/minio/pkg...
@echo "Verifying build" @echo "Verifying build"
@(env bash $(PWD)/buildscripts/verify-build.sh) @(env bash $(PWD)/buildscripts/verify-build.sh)
coverage: build coverage: build
@echo "Running all coverage for minio" @echo "Running all coverage for minio"
@./buildscripts/go-coverage.sh @(env bash $(PWD)/buildscripts/go-coverage.sh)
# Builds minio locally. # Builds minio locally.
build: build:

View file

@ -38,21 +38,18 @@ test_script:
# Unit tests # Unit tests
- ps: Add-AppveyorTest "Unit Tests" -Outcome Running - ps: Add-AppveyorTest "Unit Tests" -Outcome Running
- mkdir build\coverage - mkdir build\coverage
- go test -v -timeout 17m -race github.com/minio/minio/cmd... - for /f "" %%G in ('go list github.com/minio/minio/... ^| find /i /v "browser/"') do ( go test -v -timeout 20m -race %%G )
- go test -v -race github.com/minio/minio/pkg... - go test -v -timeout 20m -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd
# FIXME(aead): enable codecov after issue https://github.com/golang/go/issues/18468 is solved.
# - go test -v -timeout 17m -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd
- ps: Update-AppveyorTest "Unit Tests" -Outcome Passed - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed
after_test: after_test:
# FIXME(aead): enable codecov after issue https://github.com/golang/go/issues/18468 is solved. - go tool cover -html=build\coverage\coverage.txt -o build\coverage\coverage.html
# - go tool cover -html=build\coverage\coverage.txt -o build\coverage\coverage.html - ps: Push-AppveyorArtifact build\coverage\coverage.txt
# - ps: Push-AppveyorArtifact build\coverage\coverage.txt - ps: Push-AppveyorArtifact build\coverage\coverage.html
# - ps: Push-AppveyorArtifact build\coverage\coverage.html
# Upload coverage report. # Upload coverage report.
# - "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%" - "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%"
# - pip install codecov - pip install codecov
# - codecov -X gcov -f "build\coverage\coverage.txt" - codecov -X gcov -f "build\coverage\coverage.txt"
# to disable deployment # to disable deployment
deploy: off deploy: off

View file

@ -3,8 +3,8 @@
set -e set -e
echo "" > coverage.txt echo "" > coverage.txt
for d in $(go list ./... | grep -v vendor); do for d in $(go list ./... | grep -v browser); do
go test -coverprofile=profile.out -covermode=atomic $d go test -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then if [ -f profile.out ]; then
cat profile.out >> coverage.txt cat profile.out >> coverage.txt
rm profile.out rm profile.out

View file

@ -1,74 +0,0 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"crypto/tls"
"net"
"net/http"
"time"
)
// anonErrToObjectErr converts a plain HTTP status code returned by an
// anonymous backend request into the matching object-layer error.
// params is positional: params[0] is the bucket, params[1] (if present)
// the object name.
func anonErrToObjectErr(statusCode int, params ...string) error {
	bucket := ""
	object := ""
	if len(params) >= 1 {
		bucket = params[0]
	}
	if len(params) == 2 {
		object = params[1]
	}
	switch statusCode {
	case http.StatusNotFound:
		// 404 is object-not-found only when an object was named.
		if object != "" {
			return ObjectNotFound{bucket, object}
		}
		return BucketNotFound{Bucket: bucket}
	case http.StatusBadRequest:
		if object != "" {
			return ObjectNameInvalid{bucket, object}
		}
		return BucketNameInvalid{Bucket: bucket}
	case http.StatusForbidden:
		// Forbidden and Unauthorized are mapped to the same error.
		fallthrough
	case http.StatusUnauthorized:
		return AllAccessDisabled{bucket, object}
	}
	// Any other status code has no specific mapping.
	return errUnexpected
}
// newCustomHTTPTransport returns a new http configuration
// used while communicating with the cloud backends.
// This sets the value for MaxIdleConns from 2 (go default) to
// 100.
func newCustomHTTPTransport() http.RoundTripper {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		MaxIdleConns:          100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
		// Trust the globally configured root CAs (may include custom CAs).
		TLSClientConfig: &tls.Config{RootCAs: globalRootCAs},
		// Do not transparently request/decompress gzip; bodies must pass
		// through to clients unchanged.
		DisableCompression: true,
	}
}

325
cmd/gateway-common.go Normal file
View file

@ -0,0 +1,325 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net/http"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio-go"
)
var (
// CanonicalizeETag provides canonicalizeETag function alias.
CanonicalizeETag = canonicalizeETag
// MustGetUUID function alias.
MustGetUUID = mustGetUUID
// ErrorIf provides errorIf function alias.
ErrorIf = errorIf
// FatalIf provides fatalIf function alias.
FatalIf = fatalIf
)
// AnonErrToObjectErr - converts standard http codes into meaningful object layer errors.
func AnonErrToObjectErr(statusCode int, params ...string) error {
bucket := ""
object := ""
if len(params) >= 1 {
bucket = params[0]
}
if len(params) == 2 {
object = params[1]
}
switch statusCode {
case http.StatusNotFound:
if object != "" {
return ObjectNotFound{bucket, object}
}
return BucketNotFound{Bucket: bucket}
case http.StatusBadRequest:
if object != "" {
return ObjectNameInvalid{bucket, object}
}
return BucketNameInvalid{Bucket: bucket}
case http.StatusForbidden:
fallthrough
case http.StatusUnauthorized:
return AllAccessDisabled{bucket, object}
}
return errUnexpected
}
// FromMinioClientMetadata converts minio metadata (one or more values per
// header key) to a flat map[string]string, canonicalizing each key and
// keeping only the first value. Keys with an empty value slice are skipped;
// indexing v[0] unconditionally would panic on such entries.
func FromMinioClientMetadata(metadata map[string][]string) map[string]string {
	mm := make(map[string]string, len(metadata))
	for k, v := range metadata {
		if len(v) == 0 {
			// Defensive: no value recorded for this key.
			continue
		}
		mm[http.CanonicalHeaderKey(k)] = v[0]
	}
	return mm
}
// FromMinioClientObjectPart converts minio ObjectPart to PartInfo
func FromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
	return PartInfo{
		Size: op.Size,
		// Normalize the backend ETag via the shared helper before storing.
		ETag:         canonicalizeETag(op.ETag),
		LastModified: op.LastModified,
		PartNumber:   op.PartNumber,
	}
}
// FromMinioClientListPartsInfo converts minio ListObjectPartsResult to ListPartsInfo
func FromMinioClientListPartsInfo(lopr minio.ListObjectPartsResult) ListPartsInfo {
	// Convert each minio ObjectPart into a gateway PartInfo up front.
	parts := make([]PartInfo, 0, len(lopr.ObjectParts))
	for _, op := range lopr.ObjectParts {
		parts = append(parts, FromMinioClientObjectPart(op))
	}
	return ListPartsInfo{
		UploadID:             lopr.UploadID,
		Bucket:               lopr.Bucket,
		Object:               lopr.Key,
		StorageClass:         "",
		PartNumberMarker:     lopr.PartNumberMarker,
		NextPartNumberMarker: lopr.NextPartNumberMarker,
		MaxParts:             lopr.MaxParts,
		IsTruncated:          lopr.IsTruncated,
		EncodingType:         lopr.EncodingType,
		Parts:                parts,
	}
}
// FromMinioClientListMultipartsInfo converts minio ListMultipartUploadsResult to ListMultipartsInfo
func FromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) ListMultipartsInfo {
	// Translate each in-progress upload entry.
	uploads := make([]MultipartInfo, 0, len(lmur.Uploads))
	for _, u := range lmur.Uploads {
		uploads = append(uploads, MultipartInfo{
			Object:    u.Key,
			UploadID:  u.UploadID,
			Initiated: u.Initiated,
		})
	}

	// Flatten common prefixes into plain strings.
	prefixes := make([]string, 0, len(lmur.CommonPrefixes))
	for _, cp := range lmur.CommonPrefixes {
		prefixes = append(prefixes, cp.Prefix)
	}

	return ListMultipartsInfo{
		KeyMarker:          lmur.KeyMarker,
		UploadIDMarker:     lmur.UploadIDMarker,
		NextKeyMarker:      lmur.NextKeyMarker,
		NextUploadIDMarker: lmur.NextUploadIDMarker,
		MaxUploads:         int(lmur.MaxUploads),
		IsTruncated:        lmur.IsTruncated,
		Uploads:            uploads,
		Prefix:             lmur.Prefix,
		Delimiter:          lmur.Delimiter,
		CommonPrefixes:     prefixes,
		EncodingType:       lmur.EncodingType,
	}
}
// FromMinioClientObjectInfo converts minio ObjectInfo to gateway ObjectInfo
func FromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo {
	// Carry all backend metadata over, then force Content-Type so it is
	// always present in UserDefined.
	userDefined := FromMinioClientMetadata(oi.Metadata)
	userDefined["Content-Type"] = oi.ContentType
	return ObjectInfo{
		Bucket:  bucket,
		Name:    oi.Key,
		ModTime: oi.LastModified,
		Size:    oi.Size,
		// Normalize the ETag via the shared helper.
		ETag:            canonicalizeETag(oi.ETag),
		UserDefined:     userDefined,
		ContentType:     oi.ContentType,
		ContentEncoding: oi.Metadata.Get("Content-Encoding"),
	}
}
// FromMinioClientListBucketV2Result converts minio ListBucketV2Result to ListObjectsV2Info
func FromMinioClientListBucketV2Result(bucket string, result minio.ListBucketV2Result) ListObjectsV2Info {
	objects := make([]ObjectInfo, 0, len(result.Contents))
	for _, entry := range result.Contents {
		objects = append(objects, FromMinioClientObjectInfo(bucket, entry))
	}

	prefixes := make([]string, 0, len(result.CommonPrefixes))
	for _, cp := range result.CommonPrefixes {
		prefixes = append(prefixes, cp.Prefix)
	}

	return ListObjectsV2Info{
		IsTruncated:           result.IsTruncated,
		Prefixes:              prefixes,
		Objects:               objects,
		ContinuationToken:     result.ContinuationToken,
		NextContinuationToken: result.NextContinuationToken,
	}
}
// FromMinioClientListBucketResult converts minio ListBucketResult to ListObjectsInfo
func FromMinioClientListBucketResult(bucket string, result minio.ListBucketResult) ListObjectsInfo {
	objects := make([]ObjectInfo, 0, len(result.Contents))
	for _, entry := range result.Contents {
		objects = append(objects, FromMinioClientObjectInfo(bucket, entry))
	}

	prefixes := make([]string, 0, len(result.CommonPrefixes))
	for _, cp := range result.CommonPrefixes {
		prefixes = append(prefixes, cp.Prefix)
	}

	return ListObjectsInfo{
		IsTruncated: result.IsTruncated,
		NextMarker:  result.NextMarker,
		Prefixes:    prefixes,
		Objects:     objects,
	}
}
// FromMinioClientListBucketResultToV2Info converts minio ListBucketResult to ListObjectsV2Info
func FromMinioClientListBucketResultToV2Info(bucket string, result minio.ListBucketResult) ListObjectsV2Info {
	objects := make([]ObjectInfo, len(result.Contents))
	for i, oi := range result.Contents {
		objects[i] = FromMinioClientObjectInfo(bucket, oi)
	}
	prefixes := make([]string, len(result.CommonPrefixes))
	for i, p := range result.CommonPrefixes {
		prefixes[i] = p.Prefix
	}
	return ListObjectsV2Info{
		IsTruncated: result.IsTruncated,
		Prefixes:    prefixes,
		Objects:     objects,
		// V1 markers are surfaced as V2 continuation tokens.
		ContinuationToken:     result.Marker,
		NextContinuationToken: result.NextMarker,
	}
}
// ToMinioClientMetadata converts gateway metadata to the flat
// map[string]string form expected by the minio client, canonicalizing
// each header key. (The previous comment incorrectly described the
// return type as map[string][]string.)
func ToMinioClientMetadata(metadata map[string]string) map[string]string {
	// Pre-size: output has exactly one entry per input key.
	mm := make(map[string]string, len(metadata))
	for k, v := range metadata {
		mm[http.CanonicalHeaderKey(k)] = v
	}
	return mm
}
// ToMinioClientCompletePart converts CompletePart to minio CompletePart
func ToMinioClientCompletePart(part CompletePart) minio.CompletePart {
	// ETag is passed through verbatim; no canonicalization here.
	return minio.CompletePart{
		ETag:       part.ETag,
		PartNumber: part.PartNumber,
	}
}
// ToMinioClientCompleteParts converts []CompletePart to minio []CompletePart
func ToMinioClientCompleteParts(parts []CompletePart) []minio.CompletePart {
	out := make([]minio.CompletePart, 0, len(parts))
	for _, p := range parts {
		out = append(out, ToMinioClientCompletePart(p))
	}
	return out
}
// ErrorRespToObjectError converts Minio errors to minio object layer errors.
// params is positional: params[0] is the bucket, params[1] (if present) the
// object. The incoming error is expected to be wrapped by errors.Trace; the
// underlying Cause is rewritten in place and the wrapper returned.
func ErrorRespToObjectError(err error, params ...string) error {
	if err == nil {
		return nil
	}
	e, ok := err.(*errors.Error)
	if !ok {
		// Code should be fixed if this function is called without doing traceError()
		// Else handling different situations in this function makes this function complicated.
		errorIf(err, "Expected type *Error")
		return err
	}
	err = e.Cause
	bucket := ""
	object := ""
	if len(params) >= 1 {
		bucket = params[0]
	}
	if len(params) == 2 {
		object = params[1]
	}
	minioErr, ok := err.(minio.ErrorResponse)
	if !ok {
		// We don't interpret non Minio errors. As minio errors will
		// have StatusCode to help to convert to object errors.
		return e
	}
	// Map well-known S3 error codes onto object-layer error types.
	// Unrecognized codes leave the cause untouched.
	switch minioErr.Code {
	case "BucketAlreadyOwnedByYou":
		err = BucketAlreadyOwnedByYou{}
	case "BucketNotEmpty":
		err = BucketNotEmpty{}
	case "NoSuchBucketPolicy":
		err = PolicyNotFound{}
	case "InvalidBucketName":
		err = BucketNameInvalid{Bucket: bucket}
	case "NoSuchBucket":
		err = BucketNotFound{Bucket: bucket}
	case "NoSuchKey":
		// Without an object name a missing key degrades to a missing bucket.
		if object != "" {
			err = ObjectNotFound{Bucket: bucket, Object: object}
		} else {
			err = BucketNotFound{Bucket: bucket}
		}
	case "XMinioInvalidObjectName":
		err = ObjectNameInvalid{}
	case "AccessDenied":
		err = PrefixAccessDenied{
			Bucket: bucket,
			Object: object,
		}
	case "XAmzContentSHA256Mismatch":
		err = hash.SHA256Mismatch{}
	case "NoSuchUpload":
		err = InvalidUploadID{}
	case "EntityTooSmall":
		err = PartTooSmall{}
	}
	// Preserve the trace wrapper, with the translated cause inside.
	e.Cause = err
	return e
}

View file

@ -40,36 +40,15 @@ var (
} }
) )
// Gateway represents a gateway backend.
type Gateway interface {
// Name returns the unique name of the gateway.
Name() string
// NewGatewayLayer returns a new gateway layer.
NewGatewayLayer() (GatewayLayer, error)
}
// RegisterGatewayCommand registers a new command for gateway. // RegisterGatewayCommand registers a new command for gateway.
func RegisterGatewayCommand(cmd cli.Command) error { func RegisterGatewayCommand(cmd cli.Command) error {
// We should not have multiple subcommands with same name. cmd.Flags = append(append(cmd.Flags, append(cmd.Flags, serverFlags...)...), globalFlags...)
for _, c := range gatewayCmd.Subcommands {
if c.Name == cmd.Name {
return fmt.Errorf("duplicate gateway: %s", cmd.Name)
}
}
gatewayCmd.Subcommands = append(gatewayCmd.Subcommands, cmd) gatewayCmd.Subcommands = append(gatewayCmd.Subcommands, cmd)
return nil return nil
} }
// MustRegisterGatewayCommand is like RegisterGatewayCommand but panics instead of returning error. // ParseGatewayEndpoint - Return endpoint.
func MustRegisterGatewayCommand(cmd cli.Command) { func ParseGatewayEndpoint(arg string) (endPoint string, secure bool, err error) {
if err := RegisterGatewayCommand(cmd); err != nil {
panic(err)
}
}
// Return endpoint.
func parseGatewayEndpoint(arg string) (endPoint string, secure bool, err error) {
schemeSpecified := len(strings.Split(arg, "://")) > 1 schemeSpecified := len(strings.Split(arg, "://")) > 1
if !schemeSpecified { if !schemeSpecified {
// Default connection will be "secure". // Default connection will be "secure".
@ -91,8 +70,8 @@ func parseGatewayEndpoint(arg string) (endPoint string, secure bool, err error)
} }
} }
// Validate gateway arguments. // ValidateGatewayArguments - Validate gateway arguments.
func validateGatewayArguments(serverAddr, endpointAddr string) error { func ValidateGatewayArguments(serverAddr, endpointAddr string) error {
if err := CheckLocalServerAddr(serverAddr); err != nil { if err := CheckLocalServerAddr(serverAddr); err != nil {
return err return err
} }
@ -121,8 +100,18 @@ func validateGatewayArguments(serverAddr, endpointAddr string) error {
return nil return nil
} }
// Handler for 'minio gateway <name>'. // StartGateway - handler for 'minio gateway <name>'.
func startGateway(ctx *cli.Context, gw Gateway) { func StartGateway(ctx *cli.Context, gw Gateway) {
if gw == nil {
fatalIf(errUnexpected, "Gateway implementation not initialized, exiting.")
}
// Validate if we have access, secret set through environment.
gatewayName := gw.Name()
if ctx.Args().First() == "help" {
cli.ShowCommandHelpAndExit(ctx, gatewayName, 1)
}
// Get quiet flag from command line argument. // Get quiet flag from command line argument.
quietFlag := ctx.Bool("quiet") || ctx.GlobalBool("quiet") quietFlag := ctx.Bool("quiet") || ctx.GlobalBool("quiet")
if quietFlag { if quietFlag {
@ -142,7 +131,6 @@ func startGateway(ctx *cli.Context, gw Gateway) {
handleCommonEnvVars() handleCommonEnvVars()
// Validate if we have access, secret set through environment. // Validate if we have access, secret set through environment.
gatewayName := gw.Name()
if !globalIsEnvCreds { if !globalIsEnvCreds {
errorIf(fmt.Errorf("Access and secret keys not set"), "Access and Secret keys should be set through ENVs for backend [%s]", gatewayName) errorIf(fmt.Errorf("Access and secret keys not set"), "Access and Secret keys should be set through ENVs for backend [%s]", gatewayName)
cli.ShowCommandHelpAndExit(ctx, gatewayName, 1) cli.ShowCommandHelpAndExit(ctx, gatewayName, 1)
@ -167,11 +155,7 @@ func startGateway(ctx *cli.Context, gw Gateway) {
initNSLock(false) // Enable local namespace lock. initNSLock(false) // Enable local namespace lock.
if ctx.Args().First() == "help" { newObject, err := gw.NewGatewayLayer(globalServerConfig.GetCredential())
cli.ShowCommandHelpAndExit(ctx, gatewayName, 1)
}
newObject, err := gw.NewGatewayLayer()
fatalIf(err, "Unable to initialize gateway layer") fatalIf(err, "Unable to initialize gateway layer")
router := mux.NewRouter().SkipClean(true) router := mux.NewRouter().SkipClean(true)
@ -230,6 +214,11 @@ func startGateway(ctx *cli.Context, gw Gateway) {
// Check update mode. // Check update mode.
checkUpdate(mode) checkUpdate(mode)
// Print a warning message if gateway is not ready for production before the startup banner.
if !gw.Production() {
log.Println(colorYellow("\n *** Warning: Not Ready for Production ***"))
}
// Print gateway startup message. // Print gateway startup message.
printGatewayStartupMessage(getAPIEndpoints(gatewayAddr), gatewayName) printGatewayStartupMessage(getAPIEndpoints(gatewayAddr), gatewayName)
} }

View file

@ -32,16 +32,6 @@ func TestRegisterGatewayCommand(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("RegisterGatewayCommand got unexpected error: %s", err) t.Errorf("RegisterGatewayCommand got unexpected error: %s", err)
} }
// Should returns 'duplicated' error
err = RegisterGatewayCommand(cmd)
if err == nil {
t.Errorf("RegisterGatewayCommand twice with same name should return error")
} else {
if err.Error() != "duplicate gateway: test" {
t.Errorf("RegisterGatewayCommand got unexpected error: %s", err)
}
}
} }
// Test parseGatewayEndpoint // Test parseGatewayEndpoint
@ -62,7 +52,7 @@ func TestParseGatewayEndpoint(t *testing.T) {
} }
for i, test := range testCases { for i, test := range testCases {
endPoint, secure, err := parseGatewayEndpoint(test.arg) endPoint, secure, err := ParseGatewayEndpoint(test.arg)
errReturned := err != nil errReturned := err != nil
if endPoint != test.endPoint || if endPoint != test.endPoint ||
@ -97,7 +87,7 @@ func TestValidateGatewayArguments(t *testing.T) {
{":9000", nonLoopBackIP + ":9000", false}, {":9000", nonLoopBackIP + ":9000", false},
} }
for i, test := range testCases { for i, test := range testCases {
err := validateGatewayArguments(test.serverAddr, test.endpointAddr) err := ValidateGatewayArguments(test.serverAddr, test.endpointAddr)
if test.valid && err != nil { if test.valid && err != nil {
t.Errorf("Test %d expected not to return error but got %s", i+1, err) t.Errorf("Test %d expected not to return error but got %s", i+1, err)
} }

View file

@ -22,10 +22,26 @@ import (
router "github.com/gorilla/mux" router "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
) )
// GatewayLayer - Interface to implement gateway mode. // GatewayMinioSysTmp prefix is used in Azure/GCS gateway for save metadata sent by Initialize Multipart Upload API.
const GatewayMinioSysTmp = "minio.sys.tmp/"
// Gateway represents a gateway backend.
type Gateway interface {
	// Name returns the unique name of the gateway.
	Name() string

	// NewGatewayLayer returns a new gateway layer using the supplied
	// credentials.
	NewGatewayLayer(creds auth.Credentials) (GatewayLayer, error)

	// Production returns true if gateway is ready for production.
	Production() bool
}
// GatewayLayer - interface to implement gateway mode.
type GatewayLayer interface { type GatewayLayer interface {
ObjectLayer ObjectLayer

View file

@ -1,655 +0,0 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"io"
"net/http"
"github.com/minio/cli"
minio "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
const (
s3Backend = "s3"
)
// init registers the "s3" gateway subcommand together with its CLI help
// template. MustRegisterGatewayCommand panics on duplicate names, which is
// acceptable at init time.
func init() {
	const s3GatewayTemplate = `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [ENDPOINT]
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
ENDPOINT:
S3 server endpoint. Default ENDPOINT is https://s3.amazonaws.com
ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Username or access key of S3 storage.
MINIO_SECRET_KEY: Password or secret key of S3 storage.
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
EXAMPLES:
1. Start minio gateway server for AWS S3 backend.
$ export MINIO_ACCESS_KEY=accesskey
$ export MINIO_SECRET_KEY=secretkey
$ {{.HelpName}}
2. Start minio gateway server for S3 backend on custom endpoint.
$ export MINIO_ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
$ export MINIO_SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
$ {{.HelpName}} https://play.minio.io:9000
`
	MustRegisterGatewayCommand(cli.Command{
		Name:               s3Backend,
		Usage:              "Amazon Simple Storage Service (S3).",
		Action:             s3GatewayMain,
		CustomHelpTemplate: s3GatewayTemplate,
		Flags:              append(serverFlags, globalFlags...),
		HideHelpCommand:    true,
	})
}
// s3GatewayMain is the handler for the 'minio gateway s3' command line.
func s3GatewayMain(ctx *cli.Context) {
	// Optional backend endpoint; empty means the default S3 endpoint.
	host := ctx.Args().First()
	// Validate gateway arguments.
	fatalIf(validateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
	startGateway(ctx, &S3Gateway{host})
}
// S3Gateway implements Gateway.
type S3Gateway struct {
	// host is the backend S3 endpoint; empty selects the default endpoint.
	host string
}
// Name implements Gateway interface; it returns the backend identifier "s3".
func (g *S3Gateway) Name() string {
	return s3Backend
}
// NewGatewayLayer returns s3 gatewaylayer, connected to g.host.
func (g *S3Gateway) NewGatewayLayer() (GatewayLayer, error) {
	return newS3GatewayLayer(g.host)
}
// s3ToObjectError converts Minio errors to minio object layer errors.
// params is positional: params[0] is the bucket, params[1] (if present) the
// object. The incoming error must be wrapped via errors.Trace; its Cause is
// rewritten in place and the wrapper returned.
func s3ToObjectError(err error, params ...string) error {
	if err == nil {
		return nil
	}
	e, ok := err.(*errors.Error)
	if !ok {
		// Code should be fixed if this function is called without doing errors.Trace()
		// Else handling different situations in this function makes this function complicated.
		errorIf(err, "Expected type *Error")
		return err
	}
	err = e.Cause
	bucket := ""
	object := ""
	if len(params) >= 1 {
		bucket = params[0]
	}
	if len(params) == 2 {
		object = params[1]
	}
	minioErr, ok := err.(minio.ErrorResponse)
	if !ok {
		// We don't interpret non Minio errors. As minio errors will
		// have StatusCode to help to convert to object errors.
		return e
	}
	// Map well-known S3 error codes onto object-layer error types;
	// unrecognized codes leave the cause unchanged.
	switch minioErr.Code {
	case "BucketAlreadyOwnedByYou":
		err = BucketAlreadyOwnedByYou{}
	case "BucketNotEmpty":
		err = BucketNotEmpty{}
	case "NoSuchBucketPolicy":
		err = PolicyNotFound{}
	case "InvalidBucketName":
		err = BucketNameInvalid{Bucket: bucket}
	case "NoSuchBucket":
		err = BucketNotFound{Bucket: bucket}
	case "NoSuchKey":
		// Without an object name a missing key degrades to a missing bucket.
		if object != "" {
			err = ObjectNotFound{Bucket: bucket, Object: object}
		} else {
			err = BucketNotFound{Bucket: bucket}
		}
	case "XMinioInvalidObjectName":
		err = ObjectNameInvalid{}
	case "AccessDenied":
		err = PrefixAccessDenied{
			Bucket: bucket,
			Object: object,
		}
	case "XAmzContentSHA256Mismatch":
		err = hash.SHA256Mismatch{}
	case "NoSuchUpload":
		err = InvalidUploadID{}
	case "EntityTooSmall":
		err = PartTooSmall{}
	}
	// Keep the trace wrapper with the translated cause inside.
	e.Cause = err
	return e
}
// s3Objects implements gateway for Minio and S3 compatible object storage servers.
type s3Objects struct {
	gatewayUnsupported
	// Client carries the configured credentials.
	Client *s3minio.Core
	// anonClient is credential-less, used for anonymous requests.
	anonClient *s3minio.Core
}
// newS3GatewayLayer returns s3 gatewaylayer for the given host. An empty
// host selects the default AWS endpoint over HTTPS. Credentials are read
// from the global server configuration.
func newS3GatewayLayer(host string) (GatewayLayer, error) {
	var err error
	var endpoint string
	var secure = true

	// Validate host parameters.
	if host != "" {
		// Override default params if the host is provided
		endpoint, secure, err = parseGatewayEndpoint(host)
		if err != nil {
			return nil, err
		}
	}

	// Default endpoint parameters
	if endpoint == "" {
		endpoint = "s3.amazonaws.com"
	}

	creds := globalServerConfig.GetCredential()

	// Initialize minio client object.
	client, err := minio.NewCore(endpoint, creds.AccessKey, creds.SecretKey, secure)
	if err != nil {
		return nil, err
	}

	// Second, anonymous client for requests made without credentials.
	anonClient, err := minio.NewCore(endpoint, "", "", secure)
	if err != nil {
		return nil, err
	}
	anonClient.SetCustomTransport(newCustomHTTPTransport())

	return &s3Objects{
		Client:     client,
		anonClient: anonClient,
	}, nil
}
// Shutdown saves any gateway metadata to disk
// if necessary and reload upon next restart.
func (l *s3Objects) Shutdown() error {
	// TODO: currently a no-op; nothing to persist for the S3 backend.
	return nil
}
// StorageInfo is not relevant to S3 backend; the zero value is returned.
func (l *s3Objects) StorageInfo() (si StorageInfo) {
	return si
}
// MakeBucketWithLocation creates a new bucket on the S3 backend in the
// given location.
func (l *s3Objects) MakeBucketWithLocation(bucket, location string) error {
	if err := l.Client.MakeBucket(bucket, location); err != nil {
		return s3ToObjectError(errors.Trace(err), bucket)
	}
	return nil
}
// GetBucketInfo gets bucket metadata by listing all buckets and picking
// the matching one.
func (l *s3Objects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
	// Verify if bucket name is valid.
	// We are using a separate helper function here to validate bucket
	// names instead of IsValidBucketName() because there is a possibility
	// that certains users might have buckets which are non-DNS compliant
	// in us-east-1 and we might severely restrict them by not allowing
	// access to these buckets.
	// Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
	if s3utils.CheckValidBucketName(bucket) != nil {
		return bi, errors.Trace(BucketNameInvalid{Bucket: bucket})
	}

	buckets, err := l.Client.ListBuckets()
	if err != nil {
		return bi, s3ToObjectError(errors.Trace(err), bucket)
	}

	// NOTE: the loop variable bi shadows the named return value here.
	for _, bi := range buckets {
		if bi.Name != bucket {
			continue
		}
		return BucketInfo{
			Name:    bi.Name,
			Created: bi.CreationDate,
		}, nil
	}

	return bi, errors.Trace(BucketNotFound{Bucket: bucket})
}
// ListBuckets lists all S3 buckets accessible with the configured
// credentials.
func (l *s3Objects) ListBuckets() ([]BucketInfo, error) {
	buckets, err := l.Client.ListBuckets()
	if err != nil {
		return nil, s3ToObjectError(errors.Trace(err))
	}

	infos := make([]BucketInfo, 0, len(buckets))
	for _, bkt := range buckets {
		infos = append(infos, BucketInfo{
			Name:    bkt.Name,
			Created: bkt.CreationDate,
		})
	}
	return infos, nil
}
// DeleteBucket deletes the named bucket on the S3 backend.
func (l *s3Objects) DeleteBucket(bucket string) error {
	if err := l.Client.RemoveBucket(bucket); err != nil {
		return s3ToObjectError(errors.Trace(err), bucket)
	}
	return nil
}
// ListObjects lists all blobs in S3 bucket filtered by prefix
func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
	result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
	if err != nil {
		return loi, s3ToObjectError(errors.Trace(err), bucket)
	}
	// Translate the minio-go result into the gateway's list type.
	return fromMinioClientListBucketResult(bucket, result), nil
}
// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix
func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, e error) {
	// NOTE: startAfter is accepted but not forwarded to the client call.
	result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
	if err != nil {
		return loi, s3ToObjectError(errors.Trace(err), bucket)
	}
	return fromMinioClientListBucketV2Result(bucket, result), nil
}
// fromMinioClientListBucketV2Result converts minio ListBucketV2Result to ListObjectsV2Info
func fromMinioClientListBucketV2Result(bucket string, result minio.ListBucketV2Result) ListObjectsV2Info {
	objects := make([]ObjectInfo, len(result.Contents))
	for i, oi := range result.Contents {
		objects[i] = fromMinioClientObjectInfo(bucket, oi)
	}
	prefixes := make([]string, len(result.CommonPrefixes))
	for i, p := range result.CommonPrefixes {
		prefixes[i] = p.Prefix
	}
	return ListObjectsV2Info{
		IsTruncated:           result.IsTruncated,
		Prefixes:              prefixes,
		Objects:               objects,
		ContinuationToken:     result.ContinuationToken,
		NextContinuationToken: result.NextContinuationToken,
	}
}
// fromMinioClientListBucketResult converts minio ListBucketResult to ListObjectsInfo
func fromMinioClientListBucketResult(bucket string, result minio.ListBucketResult) ListObjectsInfo {
	objects := make([]ObjectInfo, len(result.Contents))
	for i, oi := range result.Contents {
		objects[i] = fromMinioClientObjectInfo(bucket, oi)
	}
	prefixes := make([]string, len(result.CommonPrefixes))
	for i, p := range result.CommonPrefixes {
		prefixes[i] = p.Prefix
	}
	return ListObjectsInfo{
		IsTruncated: result.IsTruncated,
		NextMarker:  result.NextMarker,
		Prefixes:    prefixes,
		Objects:     objects,
	}
}
// GetObject reads an object from S3. Supports additional
// parameters like offset and length which are synonymous with
// HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (l *s3Objects) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
	// length == -1 is the only permitted negative value (read to end).
	if length < 0 && length != -1 {
		return s3ToObjectError(errors.Trace(errInvalidArgument), bucket, key)
	}
	opts := minio.GetObjectOptions{}
	if startOffset >= 0 && length >= 0 {
		// HTTP Range is inclusive, hence the -1 on the end offset.
		if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
			return s3ToObjectError(errors.Trace(err), bucket, key)
		}
	}
	object, _, err := l.Client.GetObject(bucket, key, opts)
	if err != nil {
		return s3ToObjectError(errors.Trace(err), bucket, key)
	}
	defer object.Close()

	// Stream the (possibly ranged) body straight into the caller's writer.
	if _, err := io.Copy(writer, object); err != nil {
		return s3ToObjectError(errors.Trace(err), bucket, key)
	}
	return nil
}
// fromMinioClientObjectInfo converts minio ObjectInfo to gateway ObjectInfo
func fromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo {
	// Carry all backend metadata over, then force Content-Type so it is
	// always present in UserDefined.
	userDefined := fromMinioClientMetadata(oi.Metadata)
	userDefined["Content-Type"] = oi.ContentType
	return ObjectInfo{
		Bucket:  bucket,
		Name:    oi.Key,
		ModTime: oi.LastModified,
		Size:    oi.Size,
		// Normalize the ETag via the shared helper.
		ETag:            canonicalizeETag(oi.ETag),
		UserDefined:     userDefined,
		ContentType:     oi.ContentType,
		ContentEncoding: oi.Metadata.Get("Content-Encoding"),
	}
}
// GetObjectInfo reads object info and replies back ObjectInfo
func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
	oi, err := l.Client.StatObject(bucket, object, minio.StatObjectOptions{})
	if err != nil {
		return ObjectInfo{}, s3ToObjectError(errors.Trace(err), bucket, object)
	}
	return fromMinioClientObjectInfo(bucket, oi), nil
}
// PutObject creates a new object with the incoming data,
// using the reader's size and checksums for integrity verification.
func (l *s3Objects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
	oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5HexString(), data.SHA256HexString(), toMinioClientMetadata(metadata))
	if err != nil {
		return objInfo, s3ToObjectError(errors.Trace(err), bucket, object)
	}
	return fromMinioClientObjectInfo(bucket, oi), nil
}
// CopyObject copies an object from source bucket to a destination bucket.
func (l *s3Objects) CopyObject(srcBucket string, srcObject string, dstBucket string, dstObject string, metadata map[string]string) (objInfo ObjectInfo, err error) {
	// The metadata received here was already resolved at the handler layer
	// from x-amz-metadata-directive, so it is the final set meant for the
	// destination object. Force "REPLACE" so the backend CopyObject stores
	// exactly this metadata instead of copying the source's.
	metadata["x-amz-metadata-directive"] = "REPLACE"
	_, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, metadata)
	if err != nil {
		return objInfo, s3ToObjectError(errors.Trace(err), srcBucket, srcObject)
	}
	// Stat the freshly written destination to build the returned ObjectInfo.
	return l.GetObjectInfo(dstBucket, dstObject)
}
// CopyObjectPart creates a part in a multipart upload by copying
// existing object or a part of it.
func (l *s3Objects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject, uploadID string,
	partID int, startOffset, length int64, metadata map[string]string) (p PartInfo, err error) {
	part, copyErr := l.Client.CopyObjectPart(srcBucket, srcObject, destBucket, destObject,
		uploadID, partID, startOffset, length, metadata)
	if copyErr != nil {
		return p, s3ToObjectError(errors.Trace(copyErr), srcBucket, srcObject)
	}
	// Only the part number and ETag of the new part are surfaced to callers.
	p.PartNumber = part.PartNumber
	p.ETag = part.ETag
	return p, nil
}
// DeleteObject deletes a blob in bucket
func (l *s3Objects) DeleteObject(bucket string, object string) error {
	if err := l.Client.RemoveObject(bucket, object); err != nil {
		return s3ToObjectError(errors.Trace(err), bucket, object)
	}
	return nil
}
// fromMinioClientMultipartInfo converts ObjectMultipartInfo to MultipartInfo
func fromMinioClientMultipartInfo(omi minio.ObjectMultipartInfo) MultipartInfo {
	var mi MultipartInfo
	mi.Object = omi.Key
	mi.UploadID = omi.UploadID
	mi.Initiated = omi.Initiated
	return mi
}
// fromMinioClientListMultipartsInfo converts minio ListMultipartUploadsResult to ListMultipartsInfo
func fromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) ListMultipartsInfo {
	// Translate every in-progress upload entry.
	uploads := make([]MultipartInfo, 0, len(lmur.Uploads))
	for _, u := range lmur.Uploads {
		uploads = append(uploads, fromMinioClientMultipartInfo(u))
	}

	// Flatten the delimiter-collapsed common prefixes.
	prefixes := make([]string, 0, len(lmur.CommonPrefixes))
	for _, cp := range lmur.CommonPrefixes {
		prefixes = append(prefixes, cp.Prefix)
	}

	return ListMultipartsInfo{
		KeyMarker:          lmur.KeyMarker,
		UploadIDMarker:     lmur.UploadIDMarker,
		NextKeyMarker:      lmur.NextKeyMarker,
		NextUploadIDMarker: lmur.NextUploadIDMarker,
		MaxUploads:         int(lmur.MaxUploads),
		IsTruncated:        lmur.IsTruncated,
		Uploads:            uploads,
		Prefix:             lmur.Prefix,
		Delimiter:          lmur.Delimiter,
		CommonPrefixes:     prefixes,
		EncodingType:       lmur.EncodingType,
	}
}
// ListMultipartUploads lists all multipart uploads.
func (l *s3Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
	result, err := l.Client.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
	if err != nil {
		// NOTE(review): unlike the sibling methods, the backend error is
		// returned as-is, without s3ToObjectError/errors.Trace — confirm
		// this is intentional before relying on typed errors upstream.
		return lmi, err
	}
	return fromMinioClientListMultipartsInfo(result), nil
}
// fromMinioClientMetadata converts minio metadata to map[string]string.
//
// Header keys are canonicalized (e.g. "content-type" -> "Content-Type")
// and only the first value of each header is kept. Keys whose value
// slice is empty are skipped rather than panicking on v[0].
func fromMinioClientMetadata(metadata map[string][]string) map[string]string {
	mm := make(map[string]string, len(metadata))
	for k, v := range metadata {
		if len(v) == 0 {
			// Defensive: indexing v[0] on an empty slice would panic.
			continue
		}
		mm[http.CanonicalHeaderKey(k)] = v[0]
	}
	return mm
}
// toMinioClientMetadata converts metadata to map[string][]string
func toMinioClientMetadata(metadata map[string]string) map[string]string {
	// Re-key every entry under its canonical HTTP header form.
	out := map[string]string{}
	for key, value := range metadata {
		out[http.CanonicalHeaderKey(key)] = value
	}
	return out
}
// NewMultipartUpload upload object in multiple parts
func (l *s3Objects) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) {
	// Attach the caller-supplied metadata to the new upload session.
	opts := minio.PutObjectOptions{UserMetadata: metadata}
	if uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts); err != nil {
		err = s3ToObjectError(errors.Trace(err), bucket, object)
	}
	return uploadID, err
}
// fromMinioClientObjectPart converts minio ObjectPart to PartInfo
func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
	var pi PartInfo
	pi.Size = op.Size
	pi.ETag = canonicalizeETag(op.ETag)
	pi.LastModified = op.LastModified
	pi.PartNumber = op.PartNumber
	return pi
}
// PutObjectPart puts a part of object in bucket
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
	// Pass the reader with its known size and checksums to the backend.
	part, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data,
		data.Size(), data.MD5HexString(), data.SHA256HexString())
	if err != nil {
		return pi, s3ToObjectError(errors.Trace(err), bucket, object)
	}
	return fromMinioClientObjectPart(part), nil
}
// fromMinioClientObjectParts converts minio ObjectPart to PartInfo
func fromMinioClientObjectParts(parts []minio.ObjectPart) []PartInfo {
	converted := make([]PartInfo, len(parts))
	for i := range parts {
		converted[i] = fromMinioClientObjectPart(parts[i])
	}
	return converted
}
// fromMinioClientListPartsInfo converts minio ListObjectPartsResult to ListPartsInfo
func fromMinioClientListPartsInfo(lopr minio.ListObjectPartsResult) ListPartsInfo {
	var lpi ListPartsInfo
	lpi.UploadID = lopr.UploadID
	lpi.Bucket = lopr.Bucket
	lpi.Object = lopr.Key
	// Storage class is not carried over from the backend listing.
	lpi.StorageClass = ""
	lpi.PartNumberMarker = lopr.PartNumberMarker
	lpi.NextPartNumberMarker = lopr.NextPartNumberMarker
	lpi.MaxParts = lopr.MaxParts
	lpi.IsTruncated = lopr.IsTruncated
	lpi.EncodingType = lopr.EncodingType
	lpi.Parts = fromMinioClientObjectParts(lopr.ObjectParts)
	return lpi
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, e error) {
	result, err := l.Client.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
	if err != nil {
		// NOTE(review): error is returned untranslated (no s3ToObjectError/
		// errors.Trace), unlike the sibling methods — confirm intentional.
		return lpi, err
	}
	return fromMinioClientListPartsInfo(result), nil
}
// AbortMultipartUpload aborts a ongoing multipart upload
func (l *s3Objects) AbortMultipartUpload(bucket string, object string, uploadID string) error {
	// The (possibly nil) backend error is unconditionally wrapped, same as
	// the original — Trace/s3ToObjectError are relied on to pass nil through.
	abortErr := l.Client.AbortMultipartUpload(bucket, object, uploadID)
	return s3ToObjectError(errors.Trace(abortErr), bucket, object)
}
// toMinioClientCompletePart converts CompletePart to minio CompletePart
func toMinioClientCompletePart(part CompletePart) minio.CompletePart {
	var mcp minio.CompletePart
	mcp.ETag = part.ETag
	mcp.PartNumber = part.PartNumber
	return mcp
}
// toMinioClientCompleteParts converts []CompletePart to minio []CompletePart
func toMinioClientCompleteParts(parts []CompletePart) []minio.CompletePart {
	out := make([]minio.CompletePart, len(parts))
	for i := range parts {
		out[i] = toMinioClientCompletePart(parts[i])
	}
	return out
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, e error) {
	if err := l.Client.CompleteMultipartUpload(bucket, object, uploadID,
		toMinioClientCompleteParts(uploadedParts)); err != nil {
		return oi, s3ToObjectError(errors.Trace(err), bucket, object)
	}
	// Stat the finalized object to build the returned ObjectInfo.
	return l.GetObjectInfo(bucket, object)
}
// SetBucketPolicies sets policy on bucket
func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
	err := l.Client.PutBucketPolicy(bucket, policyInfo)
	if err != nil {
		return s3ToObjectError(errors.Trace(err), bucket, "")
	}
	return nil
}
// GetBucketPolicies will get policy on bucket
func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
	policyInfo, fetchErr := l.Client.GetBucketPolicy(bucket)
	if fetchErr != nil {
		// Return the zero policy on failure, never a partial one.
		return policy.BucketAccessPolicy{}, s3ToObjectError(errors.Trace(fetchErr), bucket, "")
	}
	return policyInfo, nil
}
// DeleteBucketPolicies deletes all policies on bucket
func (l *s3Objects) DeleteBucketPolicies(bucket string) error {
	// Deletion is implemented by writing an empty access policy.
	err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{})
	if err != nil {
		return s3ToObjectError(errors.Trace(err), bucket, "")
	}
	return nil
}

View file

@ -24,124 +24,125 @@ import (
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
) )
type gatewayUnsupported struct{} // GatewayUnsupported list of unsupported call stubs for gateway.
type GatewayUnsupported struct{}
// ListMultipartUploads lists all multipart uploads. // ListMultipartUploads lists all multipart uploads.
func (a gatewayUnsupported) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) { func (a GatewayUnsupported) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) {
return lmi, errors.Trace(NotImplemented{}) return lmi, errors.Trace(NotImplemented{})
} }
// NewMultipartUpload upload object in multiple parts // NewMultipartUpload upload object in multiple parts
func (a gatewayUnsupported) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) { func (a GatewayUnsupported) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) {
return "", errors.Trace(NotImplemented{}) return "", errors.Trace(NotImplemented{})
} }
// CopyObjectPart copy part of object to other bucket and object
func (a GatewayUnsupported) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string, uploadID string, partID int, startOffset int64, length int64, metadata map[string]string) (pi PartInfo, err error) {
return pi, errors.Trace(NotImplemented{})
}
// PutObjectPart puts a part of object in bucket // PutObjectPart puts a part of object in bucket
func (a gatewayUnsupported) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, err error) { func (a GatewayUnsupported) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, err error) {
return pi, errors.Trace(NotImplemented{}) return pi, errors.Trace(NotImplemented{})
} }
// ListObjectParts returns all object parts for specified object in specified bucket // ListObjectParts returns all object parts for specified object in specified bucket
func (a gatewayUnsupported) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) { func (a GatewayUnsupported) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) {
return lpi, errors.Trace(NotImplemented{}) return lpi, errors.Trace(NotImplemented{})
} }
// AbortMultipartUpload aborts a ongoing multipart upload // AbortMultipartUpload aborts a ongoing multipart upload
func (a gatewayUnsupported) AbortMultipartUpload(bucket string, object string, uploadID string) error { func (a GatewayUnsupported) AbortMultipartUpload(bucket string, object string, uploadID string) error {
return errors.Trace(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object // CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (a gatewayUnsupported) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, err error) { func (a GatewayUnsupported) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, err error) {
return oi, errors.Trace(NotImplemented{}) return oi, errors.Trace(NotImplemented{})
} }
// SetBucketPolicies sets policy on bucket // SetBucketPolicies sets policy on bucket
func (a gatewayUnsupported) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { func (a GatewayUnsupported) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
return errors.Trace(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
// GetBucketPolicies will get policy on bucket // GetBucketPolicies will get policy on bucket
func (a gatewayUnsupported) GetBucketPolicies(bucket string) (bal policy.BucketAccessPolicy, err error) { func (a GatewayUnsupported) GetBucketPolicies(bucket string) (bal policy.BucketAccessPolicy, err error) {
return bal, errors.Trace(NotImplemented{}) return bal, errors.Trace(NotImplemented{})
} }
// DeleteBucketPolicies deletes all policies on bucket // DeleteBucketPolicies deletes all policies on bucket
func (a gatewayUnsupported) DeleteBucketPolicies(bucket string) error { func (a GatewayUnsupported) DeleteBucketPolicies(bucket string) error {
return errors.Trace(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
// CopyObjectPart - Not implemented. // HealBucket - Not implemented stub
func (a gatewayUnsupported) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, func (a GatewayUnsupported) HealBucket(bucket string) error {
partID int, startOffset int64, length int64, metadata map[string]string) (info PartInfo, err error) {
return info, errors.Trace(NotImplemented{})
}
// HealBucket - Not relevant.
func (a gatewayUnsupported) HealBucket(bucket string) error {
return errors.Trace(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
// ListBucketsHeal - Not relevant. // ListBucketsHeal - Not implemented stub
func (a gatewayUnsupported) ListBucketsHeal() (buckets []BucketInfo, err error) { func (a GatewayUnsupported) ListBucketsHeal() (buckets []BucketInfo, err error) {
return nil, errors.Trace(NotImplemented{}) return nil, errors.Trace(NotImplemented{})
} }
// HealObject - Not relevant. // HealObject - Not implemented stub
func (a gatewayUnsupported) HealObject(bucket, object string) (int, int, error) { func (a GatewayUnsupported) HealObject(bucket, object string) (int, int, error) {
return 0, 0, errors.Trace(NotImplemented{}) return 0, 0, errors.Trace(NotImplemented{})
} }
func (a gatewayUnsupported) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { // ListObjectsV2 - Not implemented stub
func (a GatewayUnsupported) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
return result, errors.Trace(NotImplemented{}) return result, errors.Trace(NotImplemented{})
} }
// ListObjectsHeal - Not relevant. // ListObjectsHeal - Not implemented stub
func (a gatewayUnsupported) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { func (a GatewayUnsupported) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, errors.Trace(NotImplemented{}) return loi, errors.Trace(NotImplemented{})
} }
// ListUploadsHeal - Not relevant. // ListUploadsHeal - Not implemented stub
func (a gatewayUnsupported) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker, func (a GatewayUnsupported) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
return lmi, errors.Trace(NotImplemented{}) return lmi, errors.Trace(NotImplemented{})
} }
// AnonListObjects - List objects anonymously // AnonListObjects - List objects anonymously
func (a gatewayUnsupported) AnonListObjects(bucket string, prefix string, marker string, delimiter string, func (a GatewayUnsupported) AnonListObjects(bucket string, prefix string, marker string, delimiter string,
maxKeys int) (loi ListObjectsInfo, err error) { maxKeys int) (loi ListObjectsInfo, err error) {
return loi, errors.Trace(NotImplemented{}) return loi, errors.Trace(NotImplemented{})
} }
// AnonListObjectsV2 - List objects in V2 mode, anonymously // AnonListObjectsV2 - List objects in V2 mode, anonymously
func (a gatewayUnsupported) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, func (a GatewayUnsupported) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int,
fetchOwner bool, startAfter string) (loi ListObjectsV2Info, err error) { fetchOwner bool, startAfter string) (loi ListObjectsV2Info, err error) {
return loi, errors.Trace(NotImplemented{}) return loi, errors.Trace(NotImplemented{})
} }
// AnonGetBucketInfo - Get bucket metadata anonymously. // AnonGetBucketInfo - Get bucket metadata anonymously.
func (a gatewayUnsupported) AnonGetBucketInfo(bucket string) (bi BucketInfo, err error) { func (a GatewayUnsupported) AnonGetBucketInfo(bucket string) (bi BucketInfo, err error) {
return bi, errors.Trace(NotImplemented{}) return bi, errors.Trace(NotImplemented{})
} }
// AnonPutObject creates a new object anonymously with the incoming data, // AnonPutObject creates a new object anonymously with the incoming data,
func (a gatewayUnsupported) AnonPutObject(bucket, object string, data *hash.Reader, func (a GatewayUnsupported) AnonPutObject(bucket, object string, data *hash.Reader,
metadata map[string]string) (ObjectInfo, error) { metadata map[string]string) (ObjectInfo, error) {
return ObjectInfo{}, errors.Trace(NotImplemented{}) return ObjectInfo{}, errors.Trace(NotImplemented{})
} }
// AnonGetObject downloads object anonymously. // AnonGetObject downloads object anonymously.
func (a gatewayUnsupported) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) { func (a GatewayUnsupported) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) {
return errors.Trace(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
// AnonGetObjectInfo returns stat information about an object anonymously. // AnonGetObjectInfo returns stat information about an object anonymously.
func (a gatewayUnsupported) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) { func (a GatewayUnsupported) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
return objInfo, errors.Trace(NotImplemented{}) return objInfo, errors.Trace(NotImplemented{})
} }
// CopyObject copies a blob from source container to destination container. // CopyObject copies a blob from source container to destination container.
func (a gatewayUnsupported) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, func (a GatewayUnsupported) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string,
metadata map[string]string) (objInfo ObjectInfo, err error) { metadata map[string]string) (objInfo ObjectInfo, err error) {
return objInfo, errors.Trace(NotImplemented{}) return objInfo, errors.Trace(NotImplemented{})
} }

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package cmd package azure
import ( import (
"encoding/xml" "encoding/xml"
@ -29,6 +29,8 @@ import (
"github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/azure-sdk-for-go/storage"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
minio "github.com/minio/minio/cmd"
) )
// Copied from github.com/Azure/azure-sdk-for-go/storage/container.go // Copied from github.com/Azure/azure-sdk-for-go/storage/container.go
@ -113,21 +115,21 @@ func azureAnonRequest(verb, urlStr string, header http.Header) (*http.Response,
} }
// AnonGetBucketInfo - Get bucket metadata from azure anonymously. // AnonGetBucketInfo - Get bucket metadata from azure anonymously.
func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) { func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo minio.BucketInfo, err error) {
blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL() blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
url, err := url.Parse(blobURL) url, err := url.Parse(blobURL)
if err != nil { if err != nil {
return bucketInfo, azureToObjectError(errors.Trace(err)) return bucketInfo, azureToObjectError(errors.Trace(err))
} }
url.RawQuery = "restype=container" url.RawQuery = "restype=container"
resp, err := azureAnonRequest(httpHEAD, url.String(), nil) resp, err := azureAnonRequest(http.MethodHead, url.String(), nil)
if err != nil { if err != nil {
return bucketInfo, azureToObjectError(errors.Trace(err), bucket) return bucketInfo, azureToObjectError(errors.Trace(err), bucket)
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return bucketInfo, azureToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket)), bucket) return bucketInfo, azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket)), bucket)
} }
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified")) t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
@ -135,12 +137,10 @@ func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo,
return bucketInfo, errors.Trace(err) return bucketInfo, errors.Trace(err)
} }
bucketInfo = BucketInfo{ return minio.BucketInfo{
Name: bucket, Name: bucket,
Created: t, Created: t,
} }, nil
return bucketInfo, nil
} }
// AnonGetObject - SendGET request without authentication. // AnonGetObject - SendGET request without authentication.
@ -154,14 +154,14 @@ func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, l
} }
blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL() blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL()
resp, err := azureAnonRequest(httpGET, blobURL, h) resp, err := azureAnonRequest(http.MethodGet, blobURL, h)
if err != nil { if err != nil {
return azureToObjectError(errors.Trace(err), bucket, object) return azureToObjectError(errors.Trace(err), bucket, object)
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
return azureToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) return azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
} }
_, err = io.Copy(writer, resp.Body) _, err = io.Copy(writer, resp.Body)
@ -170,16 +170,16 @@ func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, l
// AnonGetObjectInfo - Send HEAD request without authentication and convert the // AnonGetObjectInfo - Send HEAD request without authentication and convert the
// result to ObjectInfo. // result to ObjectInfo.
func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) { func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo minio.ObjectInfo, err error) {
blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL() blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL()
resp, err := azureAnonRequest(httpHEAD, blobURL, nil) resp, err := azureAnonRequest(http.MethodHead, blobURL, nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object) return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return objInfo, azureToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) return objInfo, azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
} }
var contentLength int64 var contentLength int64
@ -187,7 +187,7 @@ func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectI
if contentLengthStr != "" { if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
if err != nil { if err != nil {
return objInfo, azureToObjectError(errors.Trace(errUnexpected), bucket, object) return objInfo, azureToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object)
} }
} }
@ -211,7 +211,7 @@ func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectI
} }
// AnonListObjects - Use Azure equivalent ListBlobs. // AnonListObjects - Use Azure equivalent ListBlobs.
func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) { func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result minio.ListObjectsInfo, err error) {
params := storage.ListBlobsParameters{ params := storage.ListBlobsParameters{
Prefix: prefix, Prefix: prefix,
Marker: marker, Marker: marker,
@ -230,7 +230,7 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string,
} }
url.RawQuery = q.Encode() url.RawQuery = q.Encode()
resp, err := azureAnonRequest(httpGET, url.String(), nil) resp, err := azureAnonRequest(http.MethodGet, url.String(), nil)
if err != nil { if err != nil {
return result, azureToObjectError(errors.Trace(err)) return result, azureToObjectError(errors.Trace(err))
} }
@ -250,7 +250,7 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string,
result.IsTruncated = listResp.NextMarker != "" result.IsTruncated = listResp.NextMarker != ""
result.NextMarker = listResp.NextMarker result.NextMarker = listResp.NextMarker
for _, object := range listResp.Blobs { for _, object := range listResp.Blobs {
result.Objects = append(result.Objects, ObjectInfo{ result.Objects = append(result.Objects, minio.ObjectInfo{
Bucket: bucket, Bucket: bucket,
Name: object.Name, Name: object.Name,
ModTime: time.Time(object.Properties.LastModified), ModTime: time.Time(object.Properties.LastModified),
@ -265,7 +265,7 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string,
} }
// AnonListObjectsV2 - List objects in V2 mode, anonymously // AnonListObjectsV2 - List objects in V2 mode, anonymously
func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result minio.ListObjectsV2Info, err error) {
params := storage.ListBlobsParameters{ params := storage.ListBlobsParameters{
Prefix: prefix, Prefix: prefix,
Marker: continuationToken, Marker: continuationToken,
@ -307,12 +307,12 @@ func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken, deli
result.NextContinuationToken = listResp.NextMarker result.NextContinuationToken = listResp.NextMarker
} }
for _, object := range listResp.Blobs { for _, object := range listResp.Blobs {
result.Objects = append(result.Objects, ObjectInfo{ result.Objects = append(result.Objects, minio.ObjectInfo{
Bucket: bucket, Bucket: bucket,
Name: object.Name, Name: object.Name,
ModTime: time.Time(object.Properties.LastModified), ModTime: time.Time(object.Properties.LastModified),
Size: object.Properties.ContentLength, Size: object.Properties.ContentLength,
ETag: canonicalizeETag(object.Properties.Etag), ETag: minio.CanonicalizeETag(object.Properties.Etag),
ContentType: object.Properties.ContentType, ContentType: object.Properties.ContentType,
ContentEncoding: object.Properties.ContentEncoding, ContentEncoding: object.Properties.ContentEncoding,
}) })

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package cmd package azure
import ( import (
"bytes" "bytes"
@ -35,14 +35,18 @@ import (
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd"
) )
const ( const (
globalAzureAPIVersion = "2016-05-31" globalAzureAPIVersion = "2016-05-31"
azureBlockSize = 100 * humanize.MiByte azureBlockSize = 100 * humanize.MiByte
metadataObjectNameTemplate = globalMinioSysTmp + "multipart/v1/%s.%x/azure.json" azureS3MinPartSize = 5 * humanize.MiByte
metadataObjectNameTemplate = minio.GatewayMinioSysTmp + "multipart/v1/%s.%x/azure.json"
azureBackend = "azure" azureBackend = "azure"
) )
@ -80,12 +84,11 @@ EXAMPLES:
` `
MustRegisterGatewayCommand(cli.Command{ minio.RegisterGatewayCommand(cli.Command{
Name: azureBackend, Name: azureBackend,
Usage: "Microsoft Azure Blob Storage.", Usage: "Microsoft Azure Blob Storage.",
Action: azureGatewayMain, Action: azureGatewayMain,
CustomHelpTemplate: azureGatewayTemplate, CustomHelpTemplate: azureGatewayTemplate,
Flags: append(serverFlags, globalFlags...),
HideHelpCommand: true, HideHelpCommand: true,
}) })
} }
@ -95,24 +98,49 @@ func azureGatewayMain(ctx *cli.Context) {
// Validate gateway arguments. // Validate gateway arguments.
host := ctx.Args().First() host := ctx.Args().First()
// Validate gateway arguments. // Validate gateway arguments.
fatalIf(validateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument") minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
startGateway(ctx, &AzureGateway{host}) minio.StartGateway(ctx, &Azure{host})
} }
// AzureGateway implements Gateway. // Azure implements Gateway.
type AzureGateway struct { type Azure struct {
host string host string
} }
// Name implements Gateway interface. // Name implements Gateway interface.
func (g *AzureGateway) Name() string { func (g *Azure) Name() string {
return azureBackend return azureBackend
} }
// NewGatewayLayer initializes azure blob storage client and returns AzureObjects. // NewGatewayLayer initializes azure blob storage client and returns AzureObjects.
func (g *AzureGateway) NewGatewayLayer() (GatewayLayer, error) { func (g *Azure) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
return newAzureLayer(g.host) var err error
var endpoint = storage.DefaultBaseURL
var secure = true
// If user provided some parameters
if g.host != "" {
endpoint, secure, err = minio.ParseGatewayEndpoint(g.host)
if err != nil {
return nil, err
}
}
c, err := storage.NewClient(creds.AccessKey, creds.SecretKey, endpoint, globalAzureAPIVersion, secure)
if err != nil {
return &azureObjects{}, err
}
c.HTTPClient = &http.Client{Transport: minio.NewCustomHTTPTransport()}
return &azureObjects{
client: c.GetBlobService(),
}, nil
}
// Production - Azure gateway is production ready.
func (g *Azure) Production() bool {
return true
} }
// s3MetaToAzureProperties converts metadata meant for S3 PUT/COPY // s3MetaToAzureProperties converts metadata meant for S3 PUT/COPY
@ -133,7 +161,7 @@ func s3MetaToAzureProperties(s3Metadata map[string]string) (storage.BlobMetadata
storage.BlobProperties, error) { storage.BlobProperties, error) {
for k := range s3Metadata { for k := range s3Metadata {
if strings.Contains(k, "--") { if strings.Contains(k, "--") {
return storage.BlobMetadata{}, storage.BlobProperties{}, errors.Trace(UnsupportedMetadata{}) return storage.BlobMetadata{}, storage.BlobProperties{}, errors.Trace(minio.UnsupportedMetadata{})
} }
} }
@ -238,7 +266,7 @@ func azurePropertiesToS3Meta(meta storage.BlobMetadata, props storage.BlobProper
// azureObjects - Implements Object layer for Azure blob storage. // azureObjects - Implements Object layer for Azure blob storage.
type azureObjects struct { type azureObjects struct {
gatewayUnsupported minio.GatewayUnsupported
client storage.BlobStorageClient // Azure sdk client client storage.BlobStorageClient // Azure sdk client
} }
@ -252,7 +280,7 @@ func azureToObjectError(err error, params ...string) error {
if !ok { if !ok {
// Code should be fixed if this function is called without doing errors.Trace() // Code should be fixed if this function is called without doing errors.Trace()
// Else handling different situations in this function makes this function complicated. // Else handling different situations in this function makes this function complicated.
errorIf(err, "Expected type *Error") minio.ErrorIf(err, "Expected type *Error")
return err return err
} }
@ -275,23 +303,26 @@ func azureToObjectError(err error, params ...string) error {
switch azureErr.Code { switch azureErr.Code {
case "ContainerAlreadyExists": case "ContainerAlreadyExists":
err = BucketExists{Bucket: bucket} err = minio.BucketExists{Bucket: bucket}
case "InvalidResourceName": case "InvalidResourceName":
err = BucketNameInvalid{Bucket: bucket} err = minio.BucketNameInvalid{Bucket: bucket}
case "RequestBodyTooLarge": case "RequestBodyTooLarge":
err = PartTooBig{} err = minio.PartTooBig{}
case "InvalidMetadata": case "InvalidMetadata":
err = UnsupportedMetadata{} err = minio.UnsupportedMetadata{}
default: default:
switch azureErr.StatusCode { switch azureErr.StatusCode {
case http.StatusNotFound: case http.StatusNotFound:
if object != "" { if object != "" {
err = ObjectNotFound{bucket, object} err = minio.ObjectNotFound{
Bucket: bucket,
Object: object,
}
} else { } else {
err = BucketNotFound{Bucket: bucket} err = minio.BucketNotFound{Bucket: bucket}
} }
case http.StatusBadRequest: case http.StatusBadRequest:
err = BucketNameInvalid{Bucket: bucket} err = minio.BucketNameInvalid{Bucket: bucket}
} }
} }
e.Cause = err e.Cause = err
@ -316,11 +347,15 @@ func mustGetAzureUploadID() string {
// checkAzureUploadID - returns error in case of given string is upload ID. // checkAzureUploadID - returns error in case of given string is upload ID.
func checkAzureUploadID(uploadID string) (err error) { func checkAzureUploadID(uploadID string) (err error) {
if len(uploadID) != 16 { if len(uploadID) != 16 {
return errors.Trace(MalformedUploadID{uploadID}) return errors.Trace(minio.MalformedUploadID{
UploadID: uploadID,
})
} }
if _, err = hex.DecodeString(uploadID); err != nil { if _, err = hex.DecodeString(uploadID); err != nil {
return errors.Trace(MalformedUploadID{uploadID}) return errors.Trace(minio.MalformedUploadID{
UploadID: uploadID,
})
} }
return nil return nil
@ -360,32 +395,6 @@ func azureParseBlockID(blockID string) (partID, subPartNumber int, uploadID, md5
return return
} }
// Inits azure blob storage client and returns AzureObjects.
func newAzureLayer(host string) (GatewayLayer, error) {
var err error
var endpoint = storage.DefaultBaseURL
var secure = true
// If user provided some parameters
if host != "" {
endpoint, secure, err = parseGatewayEndpoint(host)
if err != nil {
return nil, err
}
}
creds := globalServerConfig.GetCredential()
c, err := storage.NewClient(creds.AccessKey, creds.SecretKey, endpoint, globalAzureAPIVersion, secure)
if err != nil {
return &azureObjects{}, err
}
c.HTTPClient = &http.Client{Transport: newCustomHTTPTransport()}
return &azureObjects{
client: c.GetBlobService(),
}, nil
}
// Shutdown - save any gateway metadata to disk // Shutdown - save any gateway metadata to disk
// if necessary and reload upon next restart. // if necessary and reload upon next restart.
func (a *azureObjects) Shutdown() error { func (a *azureObjects) Shutdown() error {
@ -393,7 +402,7 @@ func (a *azureObjects) Shutdown() error {
} }
// StorageInfo - Not relevant to Azure backend. // StorageInfo - Not relevant to Azure backend.
func (a *azureObjects) StorageInfo() (si StorageInfo) { func (a *azureObjects) StorageInfo() (si minio.StorageInfo) {
return si return si
} }
@ -407,13 +416,13 @@ func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error {
} }
// GetBucketInfo - Get bucket metadata.. // GetBucketInfo - Get bucket metadata..
func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) { func (a *azureObjects) GetBucketInfo(bucket string) (bi minio.BucketInfo, e error) {
// Verify if bucket (container-name) is valid. // Verify if bucket (container-name) is valid.
// IsValidBucketName has same restrictions as container names mentioned // IsValidBucketName has same restrictions as container names mentioned
// in azure documentation, so we will simply use the same function here. // in azure documentation, so we will simply use the same function here.
// Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata // Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata
if !IsValidBucketName(bucket) { if !minio.IsValidBucketName(bucket) {
return bi, errors.Trace(BucketNameInvalid{Bucket: bucket}) return bi, errors.Trace(minio.BucketNameInvalid{Bucket: bucket})
} }
// Azure does not have an equivalent call, hence use // Azure does not have an equivalent call, hence use
@ -428,18 +437,18 @@ func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
if container.Name == bucket { if container.Name == bucket {
t, e := time.Parse(time.RFC1123, container.Properties.LastModified) t, e := time.Parse(time.RFC1123, container.Properties.LastModified)
if e == nil { if e == nil {
return BucketInfo{ return minio.BucketInfo{
Name: bucket, Name: bucket,
Created: t, Created: t,
}, nil }, nil
} // else continue } // else continue
} }
} }
return bi, errors.Trace(BucketNotFound{Bucket: bucket}) return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket})
} }
// ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers. // ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers.
func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) { func (a *azureObjects) ListBuckets() (buckets []minio.BucketInfo, err error) {
resp, err := a.client.ListContainers(storage.ListContainersParameters{}) resp, err := a.client.ListContainers(storage.ListContainersParameters{})
if err != nil { if err != nil {
return nil, azureToObjectError(errors.Trace(err)) return nil, azureToObjectError(errors.Trace(err))
@ -449,7 +458,7 @@ func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) {
if e != nil { if e != nil {
return nil, errors.Trace(e) return nil, errors.Trace(e)
} }
buckets = append(buckets, BucketInfo{ buckets = append(buckets, minio.BucketInfo{
Name: container.Name, Name: container.Name,
Created: t, Created: t,
}) })
@ -465,8 +474,8 @@ func (a *azureObjects) DeleteBucket(bucket string) error {
// ListObjects - lists all blobs on azure with in a container filtered by prefix // ListObjects - lists all blobs on azure with in a container filtered by prefix
// and marker, uses Azure equivalent ListBlobs. // and marker, uses Azure equivalent ListBlobs.
func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) { func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result minio.ListObjectsInfo, err error) {
var objects []ObjectInfo var objects []minio.ObjectInfo
var prefixes []string var prefixes []string
container := a.client.GetContainerReference(bucket) container := a.client.GetContainerReference(bucket)
for len(objects) == 0 && len(prefixes) == 0 { for len(objects) == 0 && len(prefixes) == 0 {
@ -481,15 +490,15 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max
} }
for _, object := range resp.Blobs { for _, object := range resp.Blobs {
if strings.HasPrefix(object.Name, globalMinioSysTmp) { if strings.HasPrefix(object.Name, minio.GatewayMinioSysTmp) {
continue continue
} }
objects = append(objects, ObjectInfo{ objects = append(objects, minio.ObjectInfo{
Bucket: bucket, Bucket: bucket,
Name: object.Name, Name: object.Name,
ModTime: time.Time(object.Properties.LastModified), ModTime: time.Time(object.Properties.LastModified),
Size: object.Properties.ContentLength, Size: object.Properties.ContentLength,
ETag: toS3ETag(object.Properties.Etag), ETag: minio.ToS3ETag(object.Properties.Etag),
ContentType: object.Properties.ContentType, ContentType: object.Properties.ContentType,
ContentEncoding: object.Properties.ContentEncoding, ContentEncoding: object.Properties.ContentEncoding,
}) })
@ -497,7 +506,7 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max
// Remove minio.sys.tmp prefix. // Remove minio.sys.tmp prefix.
for _, prefix := range resp.BlobPrefixes { for _, prefix := range resp.BlobPrefixes {
if prefix != globalMinioSysTmp { if prefix != minio.GatewayMinioSysTmp {
prefixes = append(prefixes, prefix) prefixes = append(prefixes, prefix)
} }
} }
@ -516,13 +525,13 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max
} }
// ListObjectsV2 - list all blobs in Azure bucket filtered by prefix // ListObjectsV2 - list all blobs in Azure bucket filtered by prefix
func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result minio.ListObjectsV2Info, err error) {
marker := continuationToken marker := continuationToken
if startAfter != "" { if startAfter != "" {
marker = startAfter marker = startAfter
} }
var resultV1 ListObjectsInfo var resultV1 minio.ListObjectsInfo
resultV1, err = a.ListObjects(bucket, prefix, marker, delimiter, maxKeys) resultV1, err = a.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil { if err != nil {
return result, err return result, err
@ -545,7 +554,7 @@ func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken, delimite
func (a *azureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error { func (a *azureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
// startOffset cannot be negative. // startOffset cannot be negative.
if startOffset < 0 { if startOffset < 0 {
return toObjectErr(errors.Trace(errUnexpected), bucket, object) return azureToObjectError(errors.Trace(minio.InvalidRange{}), bucket, object)
} }
blobRange := &storage.BlobRange{Start: uint64(startOffset)} blobRange := &storage.BlobRange{Start: uint64(startOffset)}
@ -571,32 +580,30 @@ func (a *azureObjects) GetObject(bucket, object string, startOffset int64, lengt
return errors.Trace(err) return errors.Trace(err)
} }
// GetObjectInfo - reads blob metadata properties and replies back ObjectInfo, // GetObjectInfo - reads blob metadata properties and replies back minio.ObjectInfo,
// uses zure equivalent GetBlobProperties. // uses zure equivalent GetBlobProperties.
func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) { func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo minio.ObjectInfo, err error) {
blob := a.client.GetContainerReference(bucket).GetBlobReference(object) blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err = blob.GetProperties(nil) err = blob.GetProperties(nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object) return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
} }
meta := azurePropertiesToS3Meta(blob.Metadata, blob.Properties) return minio.ObjectInfo{
objInfo = ObjectInfo{
Bucket: bucket, Bucket: bucket,
UserDefined: meta, UserDefined: azurePropertiesToS3Meta(blob.Metadata, blob.Properties),
ETag: toS3ETag(blob.Properties.Etag), ETag: minio.ToS3ETag(blob.Properties.Etag),
ModTime: time.Time(blob.Properties.LastModified), ModTime: time.Time(blob.Properties.LastModified),
Name: object, Name: object,
Size: blob.Properties.ContentLength, Size: blob.Properties.ContentLength,
ContentType: blob.Properties.ContentType, ContentType: blob.Properties.ContentType,
ContentEncoding: blob.Properties.ContentEncoding, ContentEncoding: blob.Properties.ContentEncoding,
} }, nil
return objInfo, nil
} }
// PutObject - Create a new blob with the incoming data, // PutObject - Create a new blob with the incoming data,
// uses Azure equivalent CreateBlockBlobFromReader. // uses Azure equivalent CreateBlockBlobFromReader.
func (a *azureObjects) PutObject(bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) { func (a *azureObjects) PutObject(bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
blob := a.client.GetContainerReference(bucket).GetBlobReference(object) blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
blob.Metadata, blob.Properties, err = s3MetaToAzureProperties(metadata) blob.Metadata, blob.Properties, err = s3MetaToAzureProperties(metadata)
if err != nil { if err != nil {
@ -611,7 +618,7 @@ func (a *azureObjects) PutObject(bucket, object string, data *hash.Reader, metad
// CopyObject - Copies a blob from source container to destination container. // CopyObject - Copies a blob from source container to destination container.
// Uses Azure equivalent CopyBlob API. // Uses Azure equivalent CopyBlob API.
func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error) { func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
srcBlobURL := a.client.GetContainerReference(srcBucket).GetBlobReference(srcObject).GetURL() srcBlobURL := a.client.GetContainerReference(srcBucket).GetBlobReference(srcObject).GetURL()
destBlob := a.client.GetContainerReference(destBucket).GetBlobReference(destObject) destBlob := a.client.GetContainerReference(destBucket).GetBlobReference(destObject)
azureMeta, props, err := s3MetaToAzureProperties(metadata) azureMeta, props, err := s3MetaToAzureProperties(metadata)
@ -643,7 +650,7 @@ func (a *azureObjects) DeleteObject(bucket, object string) error {
} }
// ListMultipartUploads - It's decided not to support List Multipart Uploads, hence returning empty result. // ListMultipartUploads - It's decided not to support List Multipart Uploads, hence returning empty result.
func (a *azureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) { func (a *azureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result minio.ListMultipartsInfo, err error) {
// It's decided not to support List Multipart Uploads, hence returning empty result. // It's decided not to support List Multipart Uploads, hence returning empty result.
return result, nil return result, nil
} }
@ -662,9 +669,14 @@ func (a *azureObjects) checkUploadIDExists(bucketName, objectName, uploadID stri
getAzureMetadataObjectName(objectName, uploadID)) getAzureMetadataObjectName(objectName, uploadID))
err = blob.GetMetadata(nil) err = blob.GetMetadata(nil)
err = azureToObjectError(errors.Trace(err), bucketName, objectName) err = azureToObjectError(errors.Trace(err), bucketName, objectName)
oerr := ObjectNotFound{bucketName, objectName} oerr := minio.ObjectNotFound{
Bucket: bucketName,
Object: objectName,
}
if errors.Cause(err) == oerr { if errors.Cause(err) == oerr {
err = errors.Trace(InvalidUploadID{}) err = errors.Trace(minio.InvalidUploadID{
UploadID: uploadID,
})
} }
return err return err
} }
@ -692,7 +704,7 @@ func (a *azureObjects) NewMultipartUpload(bucket, object string, metadata map[st
} }
// PutObjectPart - Use Azure equivalent PutBlockWithLength. // PutObjectPart - Use Azure equivalent PutBlockWithLength.
func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) { func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (info minio.PartInfo, err error) {
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil { if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
return info, err return info, err
} }
@ -703,7 +715,7 @@ func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int
etag := data.MD5HexString() etag := data.MD5HexString()
if etag == "" { if etag == "" {
etag = genETag() etag = minio.GenETag()
} }
subPartSize, subPartNumber := int64(azureBlockSize), 1 subPartSize, subPartNumber := int64(azureBlockSize), 1
@ -728,13 +740,13 @@ func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int
info.PartNumber = partID info.PartNumber = partID
info.ETag = etag info.ETag = etag
info.LastModified = UTCNow() info.LastModified = minio.UTCNow()
info.Size = data.Size() info.Size = data.Size()
return info, nil return info, nil
} }
// ListObjectParts - Use Azure equivalent GetBlockList. // ListObjectParts - Use Azure equivalent GetBlockList.
func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) { func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result minio.ListPartsInfo, err error) {
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil { if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
return result, err return result, err
} }
@ -755,20 +767,20 @@ func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumb
return result, azureToObjectError(errors.Trace(err), bucket, object) return result, azureToObjectError(errors.Trace(err), bucket, object)
} }
// Build a sorted list of parts and return the requested entries. // Build a sorted list of parts and return the requested entries.
partsMap := make(map[int]PartInfo) partsMap := make(map[int]minio.PartInfo)
for _, block := range resp.UncommittedBlocks { for _, block := range resp.UncommittedBlocks {
var partNumber int var partNumber int
var parsedUploadID string var parsedUploadID string
var md5Hex string var md5Hex string
if partNumber, _, parsedUploadID, md5Hex, err = azureParseBlockID(block.Name); err != nil { if partNumber, _, parsedUploadID, md5Hex, err = azureParseBlockID(block.Name); err != nil {
return result, azureToObjectError(errors.Trace(errUnexpected), bucket, object) return result, azureToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object)
} }
if parsedUploadID != uploadID { if parsedUploadID != uploadID {
continue continue
} }
part, ok := partsMap[partNumber] part, ok := partsMap[partNumber]
if !ok { if !ok {
partsMap[partNumber] = PartInfo{ partsMap[partNumber] = minio.PartInfo{
PartNumber: partNumber, PartNumber: partNumber,
Size: block.Size, Size: block.Size,
ETag: md5Hex, ETag: md5Hex,
@ -778,12 +790,12 @@ func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumb
if part.ETag != md5Hex { if part.ETag != md5Hex {
// If two parts of same partNumber were uploaded with different contents // If two parts of same partNumber were uploaded with different contents
// return error as we won't be able to decide which the latest part is. // return error as we won't be able to decide which the latest part is.
return result, azureToObjectError(errors.Trace(errUnexpected), bucket, object) return result, azureToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object)
} }
part.Size += block.Size part.Size += block.Size
partsMap[partNumber] = part partsMap[partNumber] = part
} }
var parts []PartInfo var parts []minio.PartInfo
for _, part := range partsMap { for _, part := range partsMap {
parts = append(parts, part) parts = append(parts, part)
} }
@ -831,7 +843,7 @@ func (a *azureObjects) AbortMultipartUpload(bucket, object, uploadID string) (er
} }
// CompleteMultipartUpload - Use Azure equivalent PutBlockList. // CompleteMultipartUpload - Use Azure equivalent PutBlockList.
func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) { func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []minio.CompletePart) (objInfo minio.ObjectInfo, err error) {
metadataObject := getAzureMetadataObjectName(object, uploadID) metadataObject := getAzureMetadataObjectName(object, uploadID)
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil { if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
return objInfo, err return objInfo, err
@ -859,7 +871,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject) blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
derr := blob.Delete(nil) derr := blob.Delete(nil)
errorIf(derr, "unable to remove meta data object for upload ID %s", uploadID) minio.ErrorIf(derr, "unable to remove meta data object for upload ID %s", uploadID)
}() }()
objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object) objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object)
@ -888,7 +900,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
} }
if len(blocks) == 0 { if len(blocks) == 0 {
return nil, 0, InvalidPart{} return nil, 0, minio.InvalidPart{}
} }
return blocks, size, nil return blocks, size, nil
@ -910,8 +922,8 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
// Error out if parts except last part sizing < 5MiB. // Error out if parts except last part sizing < 5MiB.
for i, size := range partSizes[:len(partSizes)-1] { for i, size := range partSizes[:len(partSizes)-1] {
if size < globalMinPartSize { if size < azureS3MinPartSize {
return objInfo, errors.Trace(PartTooSmall{ return objInfo, errors.Trace(minio.PartTooSmall{
PartNumber: uploadedParts[i].PartNumber, PartNumber: uploadedParts[i].PartNumber,
PartSize: size, PartSize: size,
PartETag: uploadedParts[i].ETag, PartETag: uploadedParts[i].ETag,
@ -947,23 +959,23 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
// As the common denominator for minio and azure is readonly and none, we support // As the common denominator for minio and azure is readonly and none, we support
// these two policies at the bucket level. // these two policies at the bucket level.
func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
var policies []BucketAccessPolicy var policies []minio.BucketAccessPolicy
for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) { for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
policies = append(policies, BucketAccessPolicy{ policies = append(policies, minio.BucketAccessPolicy{
Prefix: prefix, Prefix: prefix,
Policy: policy, Policy: policy,
}) })
} }
prefix := bucket + "/*" // For all objects inside the bucket. prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 { if len(policies) != 1 {
return errors.Trace(NotImplemented{}) return errors.Trace(minio.NotImplemented{})
} }
if policies[0].Prefix != prefix { if policies[0].Prefix != prefix {
return errors.Trace(NotImplemented{}) return errors.Trace(minio.NotImplemented{})
} }
if policies[0].Policy != policy.BucketPolicyReadOnly { if policies[0].Policy != policy.BucketPolicyReadOnly {
return errors.Trace(NotImplemented{}) return errors.Trace(minio.NotImplemented{})
} }
perm := storage.ContainerPermissions{ perm := storage.ContainerPermissions{
AccessType: storage.ContainerAccessTypeContainer, AccessType: storage.ContainerAccessTypeContainer,
@ -984,11 +996,11 @@ func (a *azureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPoli
} }
switch perm.AccessType { switch perm.AccessType {
case storage.ContainerAccessTypePrivate: case storage.ContainerAccessTypePrivate:
return policy.BucketAccessPolicy{}, errors.Trace(PolicyNotFound{Bucket: bucket}) return policy.BucketAccessPolicy{}, errors.Trace(minio.PolicyNotFound{Bucket: bucket})
case storage.ContainerAccessTypeContainer: case storage.ContainerAccessTypeContainer:
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "") policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "")
default: default:
return policy.BucketAccessPolicy{}, azureToObjectError(errors.Trace(NotImplemented{})) return policy.BucketAccessPolicy{}, azureToObjectError(errors.Trace(minio.NotImplemented{}))
} }
return policyInfo, nil return policyInfo, nil
} }

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package cmd package azure
import ( import (
"fmt" "fmt"
@ -24,6 +24,7 @@ import (
"testing" "testing"
"github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/azure-sdk-for-go/storage"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
) )
@ -67,7 +68,7 @@ func TestS3MetaToAzureProperties(t *testing.T) {
} }
_, _, err = s3MetaToAzureProperties(headers) _, _, err = s3MetaToAzureProperties(headers)
if err = errors.Cause(err); err != nil { if err = errors.Cause(err); err != nil {
if _, ok := err.(UnsupportedMetadata); !ok { if _, ok := err.(minio.UnsupportedMetadata); !ok {
t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err) t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err)
} }
} }
@ -150,27 +151,27 @@ func TestAzureToObjectError(t *testing.T) {
{ {
errors.Trace(storage.AzureStorageServiceError{ errors.Trace(storage.AzureStorageServiceError{
Code: "ContainerAlreadyExists", Code: "ContainerAlreadyExists",
}), BucketExists{Bucket: "bucket"}, "bucket", "", }), minio.BucketExists{Bucket: "bucket"}, "bucket", "",
}, },
{ {
errors.Trace(storage.AzureStorageServiceError{ errors.Trace(storage.AzureStorageServiceError{
Code: "InvalidResourceName", Code: "InvalidResourceName",
}), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "", }), minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
}, },
{ {
errors.Trace(storage.AzureStorageServiceError{ errors.Trace(storage.AzureStorageServiceError{
Code: "RequestBodyTooLarge", Code: "RequestBodyTooLarge",
}), PartTooBig{}, "", "", }), minio.PartTooBig{}, "", "",
}, },
{ {
errors.Trace(storage.AzureStorageServiceError{ errors.Trace(storage.AzureStorageServiceError{
Code: "InvalidMetadata", Code: "InvalidMetadata",
}), UnsupportedMetadata{}, "", "", }), minio.UnsupportedMetadata{}, "", "",
}, },
{ {
errors.Trace(storage.AzureStorageServiceError{ errors.Trace(storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound, StatusCode: http.StatusNotFound,
}), ObjectNotFound{ }), minio.ObjectNotFound{
Bucket: "bucket", Bucket: "bucket",
Object: "object", Object: "object",
}, "bucket", "object", }, "bucket", "object",
@ -178,12 +179,12 @@ func TestAzureToObjectError(t *testing.T) {
{ {
errors.Trace(storage.AzureStorageServiceError{ errors.Trace(storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound, StatusCode: http.StatusNotFound,
}), BucketNotFound{Bucket: "bucket"}, "bucket", "", }), minio.BucketNotFound{Bucket: "bucket"}, "bucket", "",
}, },
{ {
errors.Trace(storage.AzureStorageServiceError{ errors.Trace(storage.AzureStorageServiceError{
StatusCode: http.StatusBadRequest, StatusCode: http.StatusBadRequest,
}), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "", }), minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
}, },
} }
for i, testCase := range testCases { for i, testCase := range testCases {
@ -321,32 +322,27 @@ func TestAnonErrToObjectErr(t *testing.T) {
{"ObjectNotFound", {"ObjectNotFound",
http.StatusNotFound, http.StatusNotFound,
[]string{"testBucket", "testObject"}, []string{"testBucket", "testObject"},
ObjectNotFound{Bucket: "testBucket", Object: "testObject"}, minio.ObjectNotFound{Bucket: "testBucket", Object: "testObject"},
}, },
{"BucketNotFound", {"BucketNotFound",
http.StatusNotFound, http.StatusNotFound,
[]string{"testBucket", ""}, []string{"testBucket", ""},
BucketNotFound{Bucket: "testBucket"}, minio.BucketNotFound{Bucket: "testBucket"},
}, },
{"ObjectNameInvalid", {"ObjectNameInvalid",
http.StatusBadRequest, http.StatusBadRequest,
[]string{"testBucket", "testObject"}, []string{"testBucket", "testObject"},
ObjectNameInvalid{Bucket: "testBucket", Object: "testObject"}, minio.ObjectNameInvalid{Bucket: "testBucket", Object: "testObject"},
}, },
{"BucketNameInvalid", {"BucketNameInvalid",
http.StatusBadRequest, http.StatusBadRequest,
[]string{"testBucket", ""}, []string{"testBucket", ""},
BucketNameInvalid{Bucket: "testBucket"}, minio.BucketNameInvalid{Bucket: "testBucket"},
},
{"UnexpectedError",
http.StatusBadGateway,
[]string{"testBucket", "testObject"},
errUnexpected,
}, },
} }
for _, test := range testCases { for _, test := range testCases {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
if err := anonErrToObjectErr(test.statusCode, test.params...); !reflect.DeepEqual(err, test.wantErr) { if err := minio.AnonErrToObjectErr(test.statusCode, test.params...); !reflect.DeepEqual(err, test.wantErr) {
t.Errorf("anonErrToObjectErr() error = %v, wantErr %v", err, test.wantErr) t.Errorf("anonErrToObjectErr() error = %v, wantErr %v", err, test.wantErr)
} }
}) })

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package cmd package b2
import ( import (
"fmt" "fmt"
@ -26,6 +26,8 @@ import (
"time" "time"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
minio "github.com/minio/minio/cmd"
) )
// mkRange converts offset, size into Range header equivalent. // mkRange converts offset, size into Range header equivalent.
@ -34,9 +36,9 @@ func mkRange(offset, size int64) string {
return "" return ""
} }
if size == 0 { if size == 0 {
return fmt.Sprintf("%s%d-", byteRangePrefix, offset) return fmt.Sprintf("bytes=%d-", offset)
} }
return fmt.Sprintf("%s%d-%d", byteRangePrefix, offset, offset+size-1) return fmt.Sprintf("bytes=%d-%d", offset, offset+size-1)
} }
// AnonGetObject - performs a plain http GET request on a public resource, // AnonGetObject - performs a plain http GET request on a public resource,
@ -71,7 +73,7 @@ func (l *b2Objects) AnonGetObject(bucket string, object string, startOffset int6
// X-Bz-Info-<header>:<value> is converted to <header>:<value> // X-Bz-Info-<header>:<value> is converted to <header>:<value>
// Content-Type is converted to ContentType. // Content-Type is converted to ContentType.
// X-Bz-Content-Sha1 is converted to ETag. // X-Bz-Content-Sha1 is converted to ETag.
func headerToObjectInfo(bucket, object string, header http.Header) (objInfo ObjectInfo, err error) { func headerToObjectInfo(bucket, object string, header http.Header) (objInfo minio.ObjectInfo, err error) {
clen, err := strconv.ParseInt(header.Get("Content-Length"), 10, 64) clen, err := strconv.ParseInt(header.Get("Content-Length"), 10, 64)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
@ -103,7 +105,7 @@ func headerToObjectInfo(bucket, object string, header http.Header) (objInfo Obje
} }
} }
return ObjectInfo{ return minio.ObjectInfo{
Bucket: bucket, Bucket: bucket,
Name: object, Name: object,
ContentType: header.Get("Content-Type"), ContentType: header.Get("Content-Type"),
@ -116,7 +118,7 @@ func headerToObjectInfo(bucket, object string, header http.Header) (objInfo Obje
// AnonGetObjectInfo - performs a plain http HEAD request on a public resource, // AnonGetObjectInfo - performs a plain http HEAD request on a public resource,
// fails if the resource is not public. // fails if the resource is not public.
func (l *b2Objects) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) { func (l *b2Objects) AnonGetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) {
uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object) uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object)
req, err := http.NewRequest("HEAD", uri, nil) req, err := http.NewRequest("HEAD", uri, nil)
if err != nil { if err != nil {

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package cmd package b2
import ( import (
"context" "context"
@ -34,6 +34,8 @@ import (
"github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
h2 "github.com/minio/minio/pkg/hash" h2 "github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd"
) )
// Supported bucket types by B2 backend. // Supported bucket types by B2 backend.
@ -68,53 +70,33 @@ EXAMPLES:
$ {{.HelpName}} $ {{.HelpName}}
` `
minio.RegisterGatewayCommand(cli.Command{
MustRegisterGatewayCommand(cli.Command{
Name: b2Backend, Name: b2Backend,
Usage: "Backblaze B2.", Usage: "Backblaze B2.",
Action: b2GatewayMain, Action: b2GatewayMain,
CustomHelpTemplate: b2GatewayTemplate, CustomHelpTemplate: b2GatewayTemplate,
Flags: append(serverFlags, globalFlags...),
HideHelpCommand: true, HideHelpCommand: true,
}) })
} }
// Handler for 'minio gateway b2' command line. // Handler for 'minio gateway b2' command line.
func b2GatewayMain(ctx *cli.Context) { func b2GatewayMain(ctx *cli.Context) {
startGateway(ctx, &B2Gateway{}) minio.StartGateway(ctx, &B2{})
} }
// B2Gateway implements Gateway. // B2 implements Minio Gateway
type B2Gateway struct{} type B2 struct{}
// Name implements Gateway interface. // Name implements Gateway interface.
func (g *B2Gateway) Name() string { func (g *B2) Name() string {
return b2Backend return b2Backend
} }
// NewGatewayLayer returns b2 gateway layer, implements GatewayLayer interface to // NewGatewayLayer returns b2 gateway layer, implements GatewayLayer interface to
// talk to B2 remote backend. // talk to B2 remote backend.
func (g *B2Gateway) NewGatewayLayer() (GatewayLayer, error) { func (g *B2) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
log.Println(colorYellow("\n *** Warning: Not Ready for Production ***"))
return newB2GatewayLayer()
}
// b2Object implements gateway for Minio and BackBlaze B2 compatible object storage servers.
type b2Objects struct {
gatewayUnsupported
mu sync.Mutex
creds auth.Credentials
b2Client *b2.B2
anonClient *http.Client
ctx context.Context
}
// newB2GatewayLayer returns b2 gateway layer.
func newB2GatewayLayer() (GatewayLayer, error) {
ctx := context.Background() ctx := context.Background()
creds := globalServerConfig.GetCredential() client, err := b2.AuthorizeAccount(ctx, creds.AccessKey, creds.SecretKey, b2.Transport(minio.NewCustomHTTPTransport()))
client, err := b2.AuthorizeAccount(ctx, creds.AccessKey, creds.SecretKey, b2.Transport(newCustomHTTPTransport()))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -123,12 +105,28 @@ func newB2GatewayLayer() (GatewayLayer, error) {
creds: creds, creds: creds,
b2Client: client, b2Client: client,
anonClient: &http.Client{ anonClient: &http.Client{
Transport: newCustomHTTPTransport(), Transport: minio.NewCustomHTTPTransport(),
}, },
ctx: ctx, ctx: ctx,
}, nil }, nil
} }
// Production - Ready for production use?
func (g *B2) Production() bool {
// Not ready for production use just yet.
return false
}
// b2Object implements gateway for Minio and BackBlaze B2 compatible object storage servers.
type b2Objects struct {
minio.GatewayUnsupported
mu sync.Mutex
creds auth.Credentials
b2Client *b2.B2
anonClient *http.Client
ctx context.Context
}
// Convert B2 errors to minio object layer errors. // Convert B2 errors to minio object layer errors.
func b2ToObjectError(err error, params ...string) error { func b2ToObjectError(err error, params ...string) error {
if err == nil { if err == nil {
@ -139,7 +137,7 @@ func b2ToObjectError(err error, params ...string) error {
if !ok { if !ok {
// Code should be fixed if this function is called without doing errors.Trace() // Code should be fixed if this function is called without doing errors.Trace()
// Else handling different situations in this function makes this function complicated. // Else handling different situations in this function makes this function complicated.
errorIf(err, "Expected type *Error") minio.ErrorIf(err, "Expected type *Error")
return err return err
} }
@ -170,24 +168,30 @@ func b2ToObjectError(err error, params ...string) error {
switch code { switch code {
case "duplicate_bucket_name": case "duplicate_bucket_name":
err = BucketAlreadyOwnedByYou{Bucket: bucket} err = minio.BucketAlreadyOwnedByYou{Bucket: bucket}
case "bad_request": case "bad_request":
if object != "" { if object != "" {
err = ObjectNameInvalid{bucket, object} err = minio.ObjectNameInvalid{
Bucket: bucket,
Object: object,
}
} else if bucket != "" { } else if bucket != "" {
err = BucketNotFound{Bucket: bucket} err = minio.BucketNotFound{Bucket: bucket}
} }
case "bad_bucket_id": case "bad_bucket_id":
err = BucketNotFound{Bucket: bucket} err = minio.BucketNotFound{Bucket: bucket}
case "file_not_present", "not_found": case "file_not_present", "not_found":
err = ObjectNotFound{bucket, object} err = minio.ObjectNotFound{
Bucket: bucket,
Object: object,
}
case "cannot_delete_non_empty_bucket": case "cannot_delete_non_empty_bucket":
err = BucketNotEmpty{bucket, ""} err = minio.BucketNotEmpty{Bucket: bucket}
} }
// Special interpretation like this is required for Multipart sessions. // Special interpretation like this is required for Multipart sessions.
if strings.Contains(msg, "No active upload for") && uploadID != "" { if strings.Contains(msg, "No active upload for") && uploadID != "" {
err = InvalidUploadID{uploadID} err = minio.InvalidUploadID{UploadID: uploadID}
} }
e.Cause = err e.Cause = err
@ -202,7 +206,7 @@ func (l *b2Objects) Shutdown() error {
} }
// StorageInfo is not relevant to B2 backend. // StorageInfo is not relevant to B2 backend.
func (l *b2Objects) StorageInfo() (si StorageInfo) { func (l *b2Objects) StorageInfo() (si minio.StorageInfo) {
return si return si
} }
@ -216,7 +220,7 @@ func (l *b2Objects) MakeBucketWithLocation(bucket, location string) error {
} }
func (l *b2Objects) reAuthorizeAccount() error { func (l *b2Objects) reAuthorizeAccount() error {
client, err := b2.AuthorizeAccount(l.ctx, l.creds.AccessKey, l.creds.SecretKey, b2.Transport(newCustomHTTPTransport())) client, err := b2.AuthorizeAccount(l.ctx, l.creds.AccessKey, l.creds.SecretKey, b2.Transport(minio.NewCustomHTTPTransport()))
if err != nil { if err != nil {
return err return err
} }
@ -260,29 +264,29 @@ func (l *b2Objects) Bucket(bucket string) (*b2.Bucket, error) {
return bkt, nil return bkt, nil
} }
} }
return nil, errors.Trace(BucketNotFound{Bucket: bucket}) return nil, errors.Trace(minio.BucketNotFound{Bucket: bucket})
} }
// GetBucketInfo gets bucket metadata.. // GetBucketInfo gets bucket metadata..
func (l *b2Objects) GetBucketInfo(bucket string) (bi BucketInfo, err error) { func (l *b2Objects) GetBucketInfo(bucket string) (bi minio.BucketInfo, err error) {
if _, err = l.Bucket(bucket); err != nil { if _, err = l.Bucket(bucket); err != nil {
return bi, err return bi, err
} }
return BucketInfo{ return minio.BucketInfo{
Name: bucket, Name: bucket,
Created: time.Unix(0, 0), Created: time.Unix(0, 0),
}, nil }, nil
} }
// ListBuckets lists all B2 buckets // ListBuckets lists all B2 buckets
func (l *b2Objects) ListBuckets() ([]BucketInfo, error) { func (l *b2Objects) ListBuckets() ([]minio.BucketInfo, error) {
bktList, err := l.listBuckets(nil) bktList, err := l.listBuckets(nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var bktInfo []BucketInfo var bktInfo []minio.BucketInfo
for _, bkt := range bktList { for _, bkt := range bktList {
bktInfo = append(bktInfo, BucketInfo{ bktInfo = append(bktInfo, minio.BucketInfo{
Name: bkt.Name, Name: bkt.Name,
Created: time.Unix(0, 0), Created: time.Unix(0, 0),
}) })
@ -301,12 +305,11 @@ func (l *b2Objects) DeleteBucket(bucket string) error {
} }
// ListObjects lists all objects in B2 bucket filtered by prefix, returns upto at max 1000 entries at a time. // ListObjects lists all objects in B2 bucket filtered by prefix, returns upto at max 1000 entries at a time.
func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
bkt, err := l.Bucket(bucket) bkt, err := l.Bucket(bucket)
if err != nil { if err != nil {
return loi, err return loi, err
} }
loi = ListObjectsInfo{}
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter) files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
if lerr != nil { if lerr != nil {
return loi, b2ToObjectError(errors.Trace(lerr), bucket) return loi, b2ToObjectError(errors.Trace(lerr), bucket)
@ -318,12 +321,12 @@ func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, del
case "folder": case "folder":
loi.Prefixes = append(loi.Prefixes, file.Name) loi.Prefixes = append(loi.Prefixes, file.Name)
case "upload": case "upload":
loi.Objects = append(loi.Objects, ObjectInfo{ loi.Objects = append(loi.Objects, minio.ObjectInfo{
Bucket: bucket, Bucket: bucket,
Name: file.Name, Name: file.Name,
ModTime: file.Timestamp, ModTime: file.Timestamp,
Size: file.Size, Size: file.Size,
ETag: toS3ETag(file.Info.ID), ETag: minio.ToS3ETag(file.Info.ID),
ContentType: file.Info.ContentType, ContentType: file.Info.ContentType,
UserDefined: file.Info.Info, UserDefined: file.Info.Info,
}) })
@ -334,13 +337,12 @@ func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, del
// ListObjectsV2 lists all objects in B2 bucket filtered by prefix, returns upto max 1000 entries at a time. // ListObjectsV2 lists all objects in B2 bucket filtered by prefix, returns upto max 1000 entries at a time.
func (l *b2Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, func (l *b2Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int,
fetchOwner bool, startAfter string) (loi ListObjectsV2Info, err error) { fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
// fetchOwner, startAfter are not supported and unused. // fetchOwner, startAfter are not supported and unused.
bkt, err := l.Bucket(bucket) bkt, err := l.Bucket(bucket)
if err != nil { if err != nil {
return loi, err return loi, err
} }
loi = ListObjectsV2Info{}
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, continuationToken, prefix, delimiter) files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, continuationToken, prefix, delimiter)
if lerr != nil { if lerr != nil {
return loi, b2ToObjectError(errors.Trace(lerr), bucket) return loi, b2ToObjectError(errors.Trace(lerr), bucket)
@ -353,12 +355,12 @@ func (l *b2Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter s
case "folder": case "folder":
loi.Prefixes = append(loi.Prefixes, file.Name) loi.Prefixes = append(loi.Prefixes, file.Name)
case "upload": case "upload":
loi.Objects = append(loi.Objects, ObjectInfo{ loi.Objects = append(loi.Objects, minio.ObjectInfo{
Bucket: bucket, Bucket: bucket,
Name: file.Name, Name: file.Name,
ModTime: file.Timestamp, ModTime: file.Timestamp,
Size: file.Size, Size: file.Size,
ETag: toS3ETag(file.Info.ID), ETag: minio.ToS3ETag(file.Info.ID),
ContentType: file.Info.ContentType, ContentType: file.Info.ContentType,
UserDefined: file.Info.Info, UserDefined: file.Info.Info,
}) })
@ -388,7 +390,7 @@ func (l *b2Objects) GetObject(bucket string, object string, startOffset int64, l
} }
// GetObjectInfo reads object info and replies back ObjectInfo // GetObjectInfo reads object info and replies back ObjectInfo
func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) { func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) {
bkt, err := l.Bucket(bucket) bkt, err := l.Bucket(bucket)
if err != nil { if err != nil {
return objInfo, err return objInfo, err
@ -402,16 +404,15 @@ func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
if err != nil { if err != nil {
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
} }
objInfo = ObjectInfo{ return minio.ObjectInfo{
Bucket: bucket, Bucket: bucket,
Name: object, Name: object,
ETag: toS3ETag(fi.ID), ETag: minio.ToS3ETag(fi.ID),
Size: fi.Size, Size: fi.Size,
ModTime: fi.Timestamp, ModTime: fi.Timestamp,
ContentType: fi.ContentType, ContentType: fi.ContentType,
UserDefined: fi.Info, UserDefined: fi.Info,
} }, nil
return objInfo, nil
} }
// In B2 - You must always include the X-Bz-Content-Sha1 header with // In B2 - You must always include the X-Bz-Content-Sha1 header with
@ -421,10 +422,8 @@ func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
// (3) the string do_not_verify. // (3) the string do_not_verify.
// For more reference - https://www.backblaze.com/b2/docs/uploading.html // For more reference - https://www.backblaze.com/b2/docs/uploading.html
// //
const ( // In our case we are going to use (2) option
sha1NoVerify = "do_not_verify" const sha1AtEOF = "hex_digits_at_end"
sha1AtEOF = "hex_digits_at_end"
)
// With the second option mentioned above, you append the 40-character hex sha1 // With the second option mentioned above, you append the 40-character hex sha1
// to the end of the request body, immediately after the contents of the file // to the end of the request body, immediately after the contents of the file
@ -437,20 +436,20 @@ const (
// Additionally this reader also verifies Hash encapsulated inside hash.Reader // Additionally this reader also verifies Hash encapsulated inside hash.Reader
// at io.EOF if the verification failed we return an error and do not send // at io.EOF if the verification failed we return an error and do not send
// the content to server. // the content to server.
func newB2Reader(r *h2.Reader, size int64) *B2Reader { func newB2Reader(r *h2.Reader, size int64) *Reader {
return &B2Reader{ return &Reader{
r: r, r: r,
size: size, size: size,
sha1Hash: sha1.New(), sha1Hash: sha1.New(),
} }
} }
// B2Reader - is a Reader wraps the hash.Reader which will emit out the sha1 // Reader - is a Reader wraps the hash.Reader which will emit out the sha1
// hex digits at io.EOF. It also means that your overall content size is // hex digits at io.EOF. It also means that your overall content size is
// now original size + 40 bytes. Additionally this reader also verifies // now original size + 40 bytes. Additionally this reader also verifies
// Hash encapsulated inside hash.Reader at io.EOF if the verification // Hash encapsulated inside hash.Reader at io.EOF if the verification
// failed we return an error and do not send the content to server. // failed we return an error and do not send the content to server.
type B2Reader struct { type Reader struct {
r *h2.Reader r *h2.Reader
size int64 size int64
sha1Hash hash.Hash sha1Hash hash.Hash
@ -460,8 +459,8 @@ type B2Reader struct {
} }
// Size - Returns the total size of Reader. // Size - Returns the total size of Reader.
func (nb *B2Reader) Size() int64 { return nb.size + 40 } func (nb *Reader) Size() int64 { return nb.size + 40 }
func (nb *B2Reader) Read(p []byte) (int, error) { func (nb *Reader) Read(p []byte) (int, error) {
if nb.isEOF { if nb.isEOF {
return nb.buf.Read(p) return nb.buf.Read(p)
} }
@ -480,8 +479,7 @@ func (nb *B2Reader) Read(p []byte) (int, error) {
} }
// PutObject uploads the single upload to B2 backend by using *b2_upload_file* API, uploads upto 5GiB. // PutObject uploads the single upload to B2 backend by using *b2_upload_file* API, uploads upto 5GiB.
func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, metadata map[string]string) (ObjectInfo, error) { func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
var objInfo ObjectInfo
bkt, err := l.Bucket(bucket) bkt, err := l.Bucket(bucket)
if err != nil { if err != nil {
return objInfo, err return objInfo, err
@ -508,10 +506,10 @@ func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, met
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
} }
return ObjectInfo{ return minio.ObjectInfo{
Bucket: bucket, Bucket: bucket,
Name: object, Name: object,
ETag: toS3ETag(fi.ID), ETag: minio.ToS3ETag(fi.ID),
Size: fi.Size, Size: fi.Size,
ModTime: fi.Timestamp, ModTime: fi.Timestamp,
ContentType: fi.ContentType, ContentType: fi.ContentType,
@ -521,8 +519,8 @@ func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, met
// CopyObject copies a blob from source container to destination container. // CopyObject copies a blob from source container to destination container.
func (l *b2Objects) CopyObject(srcBucket string, srcObject string, dstBucket string, func (l *b2Objects) CopyObject(srcBucket string, srcObject string, dstBucket string,
dstObject string, metadata map[string]string) (objInfo ObjectInfo, err error) { dstObject string, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
return objInfo, errors.Trace(NotImplemented{}) return objInfo, errors.Trace(minio.NotImplemented{})
} }
// DeleteObject deletes a blob in bucket // DeleteObject deletes a blob in bucket
@ -543,7 +541,7 @@ func (l *b2Objects) DeleteObject(bucket string, object string) error {
// ListMultipartUploads lists all multipart uploads. // ListMultipartUploads lists all multipart uploads.
func (l *b2Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, func (l *b2Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string,
delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) { delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, err error) {
// keyMarker, prefix, delimiter are all ignored, Backblaze B2 doesn't support any // keyMarker, prefix, delimiter are all ignored, Backblaze B2 doesn't support any
// of these parameters only equivalent parameter is uploadIDMarker. // of these parameters only equivalent parameter is uploadIDMarker.
bkt, err := l.Bucket(bucket) bkt, err := l.Bucket(bucket)
@ -559,7 +557,7 @@ func (l *b2Objects) ListMultipartUploads(bucket string, prefix string, keyMarker
if err != nil { if err != nil {
return lmi, b2ToObjectError(errors.Trace(err), bucket) return lmi, b2ToObjectError(errors.Trace(err), bucket)
} }
lmi = ListMultipartsInfo{ lmi = minio.ListMultipartsInfo{
MaxUploads: maxUploads, MaxUploads: maxUploads,
} }
if nextMarker != "" { if nextMarker != "" {
@ -567,7 +565,7 @@ func (l *b2Objects) ListMultipartUploads(bucket string, prefix string, keyMarker
lmi.NextUploadIDMarker = nextMarker lmi.NextUploadIDMarker = nextMarker
} }
for _, largeFile := range largeFiles { for _, largeFile := range largeFiles {
lmi.Uploads = append(lmi.Uploads, MultipartInfo{ lmi.Uploads = append(lmi.Uploads, minio.MultipartInfo{
Object: largeFile.Name, Object: largeFile.Name,
UploadID: largeFile.ID, UploadID: largeFile.ID,
Initiated: largeFile.Timestamp, Initiated: largeFile.Timestamp,
@ -599,7 +597,7 @@ func (l *b2Objects) NewMultipartUpload(bucket string, object string, metadata ma
} }
// PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API. // PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API.
func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *h2.Reader) (pi PartInfo, err error) { func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *h2.Reader) (pi minio.PartInfo, err error) {
bkt, err := l.Bucket(bucket) bkt, err := l.Bucket(bucket)
if err != nil { if err != nil {
return pi, err return pi, err
@ -616,21 +614,21 @@ func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string,
return pi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID) return pi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
} }
return PartInfo{ return minio.PartInfo{
PartNumber: partID, PartNumber: partID,
LastModified: UTCNow(), LastModified: minio.UTCNow(),
ETag: toS3ETag(sha1), ETag: minio.ToS3ETag(sha1),
Size: data.Size(), Size: data.Size(),
}, nil }, nil
} }
// ListObjectParts returns all object parts for specified object in specified bucket, uses B2's LargeFile upload API. // ListObjectParts returns all object parts for specified object in specified bucket, uses B2's LargeFile upload API.
func (l *b2Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) { func (l *b2Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi minio.ListPartsInfo, err error) {
bkt, err := l.Bucket(bucket) bkt, err := l.Bucket(bucket)
if err != nil { if err != nil {
return lpi, err return lpi, err
} }
lpi = ListPartsInfo{ lpi = minio.ListPartsInfo{
Bucket: bucket, Bucket: bucket,
Object: object, Object: object,
UploadID: uploadID, UploadID: uploadID,
@ -648,9 +646,9 @@ func (l *b2Objects) ListObjectParts(bucket string, object string, uploadID strin
lpi.NextPartNumberMarker = next lpi.NextPartNumberMarker = next
} }
for _, part := range partsList { for _, part := range partsList {
lpi.Parts = append(lpi.Parts, PartInfo{ lpi.Parts = append(lpi.Parts, minio.PartInfo{
PartNumber: part.Number, PartNumber: part.Number,
ETag: toS3ETag(part.SHA1), ETag: minio.ToS3ETag(part.SHA1),
Size: part.Size, Size: part.Size,
}) })
} }
@ -668,7 +666,7 @@ func (l *b2Objects) AbortMultipartUpload(bucket string, object string, uploadID
} }
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API. // CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API.
func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, err error) { func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, err error) {
bkt, err := l.Bucket(bucket) bkt, err := l.Bucket(bucket)
if err != nil { if err != nil {
return oi, err return oi, err
@ -678,7 +676,7 @@ func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, upload
// B2 requires contigous part numbers starting with 1, they do not support // B2 requires contigous part numbers starting with 1, they do not support
// hand picking part numbers, we return an S3 compatible error instead. // hand picking part numbers, we return an S3 compatible error instead.
if i+1 != uploadedPart.PartNumber { if i+1 != uploadedPart.PartNumber {
return oi, b2ToObjectError(errors.Trace(InvalidPart{}), bucket, object, uploadID) return oi, b2ToObjectError(errors.Trace(minio.InvalidPart{}), bucket, object, uploadID)
} }
// Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag. // Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag.
@ -697,23 +695,23 @@ func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, upload
// bucketType.AllPrivate - bucketTypePrivate means that you need an authorization token to download them. // bucketType.AllPrivate - bucketTypePrivate means that you need an authorization token to download them.
// Default is AllPrivate for all buckets. // Default is AllPrivate for all buckets.
func (l *b2Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { func (l *b2Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
var policies []BucketAccessPolicy var policies []minio.BucketAccessPolicy
for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) { for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
policies = append(policies, BucketAccessPolicy{ policies = append(policies, minio.BucketAccessPolicy{
Prefix: prefix, Prefix: prefix,
Policy: policy, Policy: policy,
}) })
} }
prefix := bucket + "/*" // For all objects inside the bucket. prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 { if len(policies) != 1 {
return errors.Trace(NotImplemented{}) return errors.Trace(minio.NotImplemented{})
} }
if policies[0].Prefix != prefix { if policies[0].Prefix != prefix {
return errors.Trace(NotImplemented{}) return errors.Trace(minio.NotImplemented{})
} }
if policies[0].Policy != policy.BucketPolicyReadOnly { if policies[0].Policy != policy.BucketPolicyReadOnly {
return errors.Trace(NotImplemented{}) return errors.Trace(minio.NotImplemented{})
} }
bkt, err := l.Bucket(bucket) bkt, err := l.Bucket(bucket)
if err != nil { if err != nil {
@ -739,7 +737,7 @@ func (l *b2Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy,
// bkt.Type can also be snapshot, but it is only allowed through B2 browser console, // bkt.Type can also be snapshot, but it is only allowed through B2 browser console,
// just return back as policy not found for all cases. // just return back as policy not found for all cases.
// CreateBucket always sets the value to allPrivate by default. // CreateBucket always sets the value to allPrivate by default.
return policy.BucketAccessPolicy{}, errors.Trace(PolicyNotFound{Bucket: bucket}) return policy.BucketAccessPolicy{}, errors.Trace(minio.PolicyNotFound{Bucket: bucket})
} }
// DeleteBucketPolicies - resets the bucketType of bucket on B2 to 'allPrivate'. // DeleteBucketPolicies - resets the bucketType of bucket on B2 to 'allPrivate'.

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package cmd package b2
import ( import (
"fmt" "fmt"
@ -23,6 +23,8 @@ import (
b2 "github.com/minio/blazer/base" b2 "github.com/minio/blazer/base"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
minio "github.com/minio/minio/cmd"
) )
// Tests headerToObjectInfo // Tests headerToObjectInfo
@ -30,7 +32,7 @@ func TestHeaderToObjectInfo(t *testing.T) {
testCases := []struct { testCases := []struct {
bucket, object string bucket, object string
header http.Header header http.Header
objInfo ObjectInfo objInfo minio.ObjectInfo
}{ }{
{ {
bucket: "bucket", bucket: "bucket",
@ -42,7 +44,7 @@ func TestHeaderToObjectInfo(t *testing.T) {
"X-Bz-Info-X-Amz-Meta-1": []string{"test1"}, "X-Bz-Info-X-Amz-Meta-1": []string{"test1"},
"X-Bz-File-Id": []string{"xxxxx"}, "X-Bz-File-Id": []string{"xxxxx"},
}, },
objInfo: ObjectInfo{ objInfo: minio.ObjectInfo{
Bucket: "bucket", Bucket: "bucket",
Name: "object", Name: "object",
ContentType: "application/javascript", ContentType: "application/javascript",
@ -127,19 +129,23 @@ func TestB2ObjectError(t *testing.T) {
[]string{"bucket"}, errors.Trace(b2.Error{ []string{"bucket"}, errors.Trace(b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "duplicate_bucket_name", Code: "duplicate_bucket_name",
}), BucketAlreadyOwnedByYou{Bucket: "bucket"}, }), minio.BucketAlreadyOwnedByYou{
Bucket: "bucket",
},
}, },
{ {
[]string{"bucket"}, errors.Trace(b2.Error{ []string{"bucket"}, errors.Trace(b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "bad_request", Code: "bad_request",
}), BucketNotFound{Bucket: "bucket"}, }), minio.BucketNotFound{
Bucket: "bucket",
},
}, },
{ {
[]string{"bucket", "object"}, errors.Trace(b2.Error{ []string{"bucket", "object"}, errors.Trace(b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "bad_request", Code: "bad_request",
}), ObjectNameInvalid{ }), minio.ObjectNameInvalid{
Bucket: "bucket", Bucket: "bucket",
Object: "object", Object: "object",
}, },
@ -148,13 +154,13 @@ func TestB2ObjectError(t *testing.T) {
[]string{"bucket"}, errors.Trace(b2.Error{ []string{"bucket"}, errors.Trace(b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "bad_bucket_id", Code: "bad_bucket_id",
}), BucketNotFound{Bucket: "bucket"}, }), minio.BucketNotFound{Bucket: "bucket"},
}, },
{ {
[]string{"bucket", "object"}, errors.Trace(b2.Error{ []string{"bucket", "object"}, errors.Trace(b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "file_not_present", Code: "file_not_present",
}), ObjectNotFound{ }), minio.ObjectNotFound{
Bucket: "bucket", Bucket: "bucket",
Object: "object", Object: "object",
}, },
@ -163,7 +169,7 @@ func TestB2ObjectError(t *testing.T) {
[]string{"bucket", "object"}, errors.Trace(b2.Error{ []string{"bucket", "object"}, errors.Trace(b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "not_found", Code: "not_found",
}), ObjectNotFound{ }), minio.ObjectNotFound{
Bucket: "bucket", Bucket: "bucket",
Object: "object", Object: "object",
}, },
@ -172,13 +178,15 @@ func TestB2ObjectError(t *testing.T) {
[]string{"bucket"}, errors.Trace(b2.Error{ []string{"bucket"}, errors.Trace(b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "cannot_delete_non_empty_bucket", Code: "cannot_delete_non_empty_bucket",
}), BucketNotEmpty{Bucket: "bucket"}, }), minio.BucketNotEmpty{
Bucket: "bucket",
},
}, },
{ {
[]string{"bucket", "object", "uploadID"}, errors.Trace(b2.Error{ []string{"bucket", "object", "uploadID"}, errors.Trace(b2.Error{
StatusCode: 1, StatusCode: 1,
Message: "No active upload for", Message: "No active upload for",
}), InvalidUploadID{ }), minio.InvalidUploadID{
UploadID: "uploadID", UploadID: "uploadID",
}, },
}, },

26
cmd/gateway/gateway.go Normal file
View file

@ -0,0 +1,26 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gateway
import (
// Import all gateways.
_ "github.com/minio/minio/cmd/gateway/azure"
_ "github.com/minio/minio/cmd/gateway/b2"
_ "github.com/minio/minio/cmd/gateway/gcs"
_ "github.com/minio/minio/cmd/gateway/s3"
_ "github.com/minio/minio/cmd/gateway/sia"
)

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package cmd package gcs
import ( import (
"fmt" "fmt"
@ -24,6 +24,8 @@ import (
"time" "time"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
minio "github.com/minio/minio/cmd"
) )
func toGCSPublicURL(bucket, object string) string { func toGCSPublicURL(bucket, object string) string {
@ -50,7 +52,7 @@ func (l *gcsGateway) AnonGetObject(bucket string, object string, startOffset int
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
return gcsToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) return gcsToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
} }
_, err = io.Copy(writer, resp.Body) _, err = io.Copy(writer, resp.Body)
@ -58,7 +60,7 @@ func (l *gcsGateway) AnonGetObject(bucket string, object string, startOffset int
} }
// AnonGetObjectInfo - Get object info anonymously // AnonGetObjectInfo - Get object info anonymously
func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) { func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) {
resp, err := http.Head(toGCSPublicURL(bucket, object)) resp, err := http.Head(toGCSPublicURL(bucket, object))
if err != nil { if err != nil {
return objInfo, gcsToObjectError(errors.Trace(err), bucket, object) return objInfo, gcsToObjectError(errors.Trace(err), bucket, object)
@ -66,7 +68,7 @@ func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo Ob
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return objInfo, gcsToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) return objInfo, gcsToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
} }
var contentLength int64 var contentLength int64
@ -74,7 +76,7 @@ func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo Ob
if contentLengthStr != "" { if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
if err != nil { if err != nil {
return objInfo, gcsToObjectError(errors.Trace(errUnexpected), bucket, object) return objInfo, gcsToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object)
} }
} }
@ -98,28 +100,28 @@ func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo Ob
} }
// AnonListObjects - List objects anonymously // AnonListObjects - List objects anonymously
func (l *gcsGateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) { func (l *gcsGateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (minio.ListObjectsInfo, error) {
result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys) result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil { if err != nil {
return ListObjectsInfo{}, s3ToObjectError(errors.Trace(err), bucket) return minio.ListObjectsInfo{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
} }
return fromMinioClientListBucketResult(bucket, result), nil return minio.FromMinioClientListBucketResult(bucket, result), nil
} }
// AnonListObjectsV2 - List objects in V2 mode, anonymously // AnonListObjectsV2 - List objects in V2 mode, anonymously
func (l *gcsGateway) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) { func (l *gcsGateway) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (minio.ListObjectsV2Info, error) {
// Request V1 List Object to the backend // Request V1 List Object to the backend
result, err := l.anonClient.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys) result, err := l.anonClient.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys)
if err != nil { if err != nil {
return ListObjectsV2Info{}, s3ToObjectError(errors.Trace(err), bucket) return minio.ListObjectsV2Info{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
} }
// translate V1 Result to V2Info // translate V1 Result to V2Info
return fromMinioClientListBucketResultToV2Info(bucket, result), nil return minio.FromMinioClientListBucketResultToV2Info(bucket, result), nil
} }
// AnonGetBucketInfo - Get bucket metadata anonymously. // AnonGetBucketInfo - Get bucket metadata anonymously.
func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) { func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo minio.BucketInfo, err error) {
resp, err := http.Head(toGCSPublicURL(bucket, "")) resp, err := http.Head(toGCSPublicURL(bucket, ""))
if err != nil { if err != nil {
return bucketInfo, gcsToObjectError(errors.Trace(err)) return bucketInfo, gcsToObjectError(errors.Trace(err))
@ -128,7 +130,7 @@ func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, er
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return bucketInfo, gcsToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket)), bucket) return bucketInfo, gcsToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket)), bucket)
} }
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified")) t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
@ -137,7 +139,7 @@ func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, er
} }
// Last-Modified date being returned by GCS // Last-Modified date being returned by GCS
return BucketInfo{ return minio.BucketInfo{
Name: bucket, Name: bucket,
Created: t, Created: t,
}, nil }, nil

View file

@ -14,13 +14,12 @@
* limitations under the License. * limitations under the License.
*/ */
package cmd package gcs
import ( import (
"context" "context"
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -31,30 +30,37 @@ import (
"time" "time"
"cloud.google.com/go/storage" "cloud.google.com/go/storage"
humanize "github.com/dustin/go-humanize"
"github.com/minio/cli" "github.com/minio/cli"
minio "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
"google.golang.org/api/iterator" "google.golang.org/api/iterator"
"google.golang.org/api/option" "google.golang.org/api/option"
errors2 "github.com/minio/minio/pkg/errors" miniogo "github.com/minio/minio-go"
minio "github.com/minio/minio/cmd"
) )
var ( var (
// Project ID format is not valid. // Project ID format is not valid.
errGCSInvalidProjectID = errors.New("GCS project id is either empty or invalid") errGCSInvalidProjectID = fmt.Errorf("GCS project id is either empty or invalid")
// Project ID not found // Project ID not found
errGCSProjectIDNotFound = errors.New("unknown project id") errGCSProjectIDNotFound = fmt.Errorf("Unknown project id")
// Invalid format.
errGCSFormat = fmt.Errorf("Unknown format")
) )
const ( const (
// Path where multipart objects are saved. // Path where multipart objects are saved.
// If we change the backend format we will use a different url path like /multipart/v2 // If we change the backend format we will use a different url path like /multipart/v2
// but we will not migrate old data. // but we will not migrate old data.
gcsMinioMultipartPathV1 = globalMinioSysTmp + "multipart/v1" gcsMinioMultipartPathV1 = minio.GatewayMinioSysTmp + "multipart/v1"
// Multipart meta file. // Multipart meta file.
gcsMinioMultipartMeta = "gcs.json" gcsMinioMultipartMeta = "gcs.json"
@ -116,12 +122,11 @@ EXAMPLES:
` `
MustRegisterGatewayCommand(cli.Command{ minio.RegisterGatewayCommand(cli.Command{
Name: gcsBackend, Name: gcsBackend,
Usage: "Google Cloud Storage.", Usage: "Google Cloud Storage.",
Action: gcsGatewayMain, Action: gcsGatewayMain,
CustomHelpTemplate: gcsGatewayTemplate, CustomHelpTemplate: gcsGatewayTemplate,
Flags: append(serverFlags, globalFlags...),
HideHelpCommand: true, HideHelpCommand: true,
}) })
} }
@ -130,31 +135,71 @@ EXAMPLES:
func gcsGatewayMain(ctx *cli.Context) { func gcsGatewayMain(ctx *cli.Context) {
projectID := ctx.Args().First() projectID := ctx.Args().First()
if projectID == "" && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" { if projectID == "" && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" {
errorIf(errGCSProjectIDNotFound, "project-id should be provided as argument or GOOGLE_APPLICATION_CREDENTIALS should be set with path to credentials.json") minio.ErrorIf(errGCSProjectIDNotFound, "project-id should be provided as argument or GOOGLE_APPLICATION_CREDENTIALS should be set with path to credentials.json")
cli.ShowCommandHelpAndExit(ctx, "gcs", 1) cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
} }
if projectID != "" && !isValidGCSProjectIDFormat(projectID) { if projectID != "" && !isValidGCSProjectIDFormat(projectID) {
errorIf(errGCSInvalidProjectID, "Unable to start GCS gateway with %s", ctx.Args().First()) minio.ErrorIf(errGCSInvalidProjectID, "Unable to start GCS gateway with %s", ctx.Args().First())
cli.ShowCommandHelpAndExit(ctx, "gcs", 1) cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
} }
startGateway(ctx, &GCSGateway{projectID}) minio.StartGateway(ctx, &GCS{projectID})
} }
// GCSGateway implements Gateway. // GCS implements Azure.
type GCSGateway struct { type GCS struct {
projectID string projectID string
} }
// Name returns the name of gcs gatewaylayer. // Name returns the name of gcs gatewaylayer.
func (g *GCSGateway) Name() string { func (g *GCS) Name() string {
return gcsBackend return gcsBackend
} }
// NewGatewayLayer returns gcs gatewaylayer. // NewGatewayLayer returns gcs gatewaylayer.
func (g *GCSGateway) NewGatewayLayer() (GatewayLayer, error) { func (g *GCS) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
log.Println(colorYellow("\n *** Warning: Not Ready for Production ***")) ctx := context.Background()
return newGCSGatewayLayer(g.projectID)
var err error
if g.projectID == "" {
// If project ID is not provided on command line, we figure it out
// from the credentials.json file.
g.projectID, err = gcsParseProjectID(os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"))
if err != nil {
return nil, err
}
}
// Initialize a GCS client.
// Send user-agent in this format for Google to obtain usage insights while participating in the
// Google Cloud Technology Partners (https://cloud.google.com/partners/)
client, err := storage.NewClient(ctx, option.WithUserAgent(fmt.Sprintf("Minio/%s (GPN:Minio;)", minio.Version)))
if err != nil {
return nil, err
}
// Initialize a anonymous client with minio core APIs.
anonClient, err := miniogo.NewCore(googleStorageEndpoint, "", "", true)
if err != nil {
return nil, err
}
anonClient.SetCustomTransport(minio.NewCustomHTTPTransport())
gcs := &gcsGateway{
client: client,
projectID: g.projectID,
ctx: ctx,
anonClient: anonClient,
}
// Start background process to cleanup old files in minio.sys.tmp
go gcs.CleanupGCSMinioSysTmp()
return gcs, nil
}
// Production - FIXME: GCS is not production ready yet.
func (g *GCS) Production() bool {
return false
} }
// Stored in gcs.json - Contents of this file is not used anywhere. It can be // Stored in gcs.json - Contents of this file is not used anywhere. It can be
@ -181,11 +226,11 @@ func gcsToObjectError(err error, params ...string) error {
return nil return nil
} }
e, ok := err.(*errors2.Error) e, ok := err.(*errors.Error)
if !ok { if !ok {
// Code should be fixed if this function is called without doing errors2.Trace() // Code should be fixed if this function is called without doing errors.Trace()
// Else handling different situations in this function makes this function complicated. // Else handling different situations in this function makes this function complicated.
errorIf(err, "Expected type *Error") minio.ErrorIf(err, "Expected type *Error")
return err return err
} }
@ -207,18 +252,18 @@ func gcsToObjectError(err error, params ...string) error {
// in some cases just a plain error is being returned // in some cases just a plain error is being returned
switch err.Error() { switch err.Error() {
case "storage: bucket doesn't exist": case "storage: bucket doesn't exist":
err = BucketNotFound{ err = minio.BucketNotFound{
Bucket: bucket, Bucket: bucket,
} }
e.Cause = err e.Cause = err
return e return e
case "storage: object doesn't exist": case "storage: object doesn't exist":
if uploadID != "" { if uploadID != "" {
err = InvalidUploadID{ err = minio.InvalidUploadID{
UploadID: uploadID, UploadID: uploadID,
} }
} else { } else {
err = ObjectNotFound{ err = minio.ObjectNotFound{
Bucket: bucket, Bucket: bucket,
Object: object, Object: object,
} }
@ -250,33 +295,33 @@ func gcsToObjectError(err error, params ...string) error {
case "keyInvalid": case "keyInvalid":
fallthrough fallthrough
case "forbidden": case "forbidden":
err = PrefixAccessDenied{ err = minio.PrefixAccessDenied{
Bucket: bucket, Bucket: bucket,
Object: object, Object: object,
} }
case "invalid": case "invalid":
err = BucketNameInvalid{ err = minio.BucketNameInvalid{
Bucket: bucket, Bucket: bucket,
} }
case "notFound": case "notFound":
if object != "" { if object != "" {
err = ObjectNotFound{ err = minio.ObjectNotFound{
Bucket: bucket, Bucket: bucket,
Object: object, Object: object,
} }
break break
} }
err = BucketNotFound{Bucket: bucket} err = minio.BucketNotFound{Bucket: bucket}
case "conflict": case "conflict":
if message == "You already own this bucket. Please select another name." { if message == "You already own this bucket. Please select another name." {
err = BucketAlreadyOwnedByYou{Bucket: bucket} err = minio.BucketAlreadyOwnedByYou{Bucket: bucket}
break break
} }
if message == "Sorry, that name is not available. Please try a different one." { if message == "Sorry, that name is not available. Please try a different one." {
err = BucketAlreadyExists{Bucket: bucket} err = minio.BucketAlreadyExists{Bucket: bucket}
break break
} }
err = BucketNotEmpty{Bucket: bucket} err = minio.BucketNotEmpty{Bucket: bucket}
default: default:
err = fmt.Errorf("Unsupported error reason: %s", reason) err = fmt.Errorf("Unsupported error reason: %s", reason)
} }
@ -299,9 +344,9 @@ func isValidGCSProjectIDFormat(projectID string) bool {
// gcsGateway - Implements gateway for Minio and GCS compatible object storage servers. // gcsGateway - Implements gateway for Minio and GCS compatible object storage servers.
type gcsGateway struct { type gcsGateway struct {
gatewayUnsupported minio.GatewayUnsupported
client *storage.Client client *storage.Client
anonClient *minio.Core anonClient *miniogo.Core
projectID string projectID string
ctx context.Context ctx context.Context
} }
@ -321,54 +366,14 @@ func gcsParseProjectID(credsFile string) (projectID string, err error) {
return googleCreds[gcsProjectIDKey], err return googleCreds[gcsProjectIDKey], err
} }
// newGCSGatewayLayer returns gcs gatewaylayer
func newGCSGatewayLayer(projectID string) (GatewayLayer, error) {
ctx := context.Background()
var err error
if projectID == "" {
// If project ID is not provided on command line, we figure it out
// from the credentials.json file.
projectID, err = gcsParseProjectID(os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"))
if err != nil {
return nil, err
}
}
// Initialize a GCS client.
// Send user-agent in this format for Google to obtain usage insights while participating in the
// Google Cloud Technology Partners (https://cloud.google.com/partners/)
client, err := storage.NewClient(ctx, option.WithUserAgent(fmt.Sprintf("Minio/%s (GPN:Minio;)", Version)))
if err != nil {
return nil, err
}
// Initialize a anonymous client with minio core APIs.
anonClient, err := minio.NewCore(googleStorageEndpoint, "", "", true)
if err != nil {
return nil, err
}
anonClient.SetCustomTransport(newCustomHTTPTransport())
gateway := &gcsGateway{
client: client,
projectID: projectID,
ctx: ctx,
anonClient: anonClient,
}
// Start background process to cleanup old files in minio.sys.tmp
go gateway.CleanupGCSMinioSysTmp()
return gateway, nil
}
// Cleanup old files in minio.sys.tmp of the given bucket. // Cleanup old files in minio.sys.tmp of the given bucket.
func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(bucket string) { func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(bucket string) {
it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Prefix: globalMinioSysTmp, Versions: false}) it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Prefix: minio.GatewayMinioSysTmp, Versions: false})
for { for {
attrs, err := it.Next() attrs, err := it.Next()
if err != nil { if err != nil {
if err != iterator.Done { if err != iterator.Done {
errorIf(err, "Object listing error on bucket %s during purging of old files in minio.sys.tmp", bucket) minio.ErrorIf(err, "Object listing error on bucket %s during purging of old files in minio.sys.tmp", bucket)
} }
return return
} }
@ -376,7 +381,7 @@ func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(bucket string) {
// Delete files older than 2 weeks. // Delete files older than 2 weeks.
err := l.client.Bucket(bucket).Object(attrs.Name).Delete(l.ctx) err := l.client.Bucket(bucket).Object(attrs.Name).Delete(l.ctx)
if err != nil { if err != nil {
errorIf(err, "Unable to delete %s/%s during purging of old files in minio.sys.tmp", bucket, attrs.Name) minio.ErrorIf(err, "Unable to delete %s/%s during purging of old files in minio.sys.tmp", bucket, attrs.Name)
return return
} }
} }
@ -391,7 +396,7 @@ func (l *gcsGateway) CleanupGCSMinioSysTmp() {
attrs, err := it.Next() attrs, err := it.Next()
if err != nil { if err != nil {
if err != iterator.Done { if err != iterator.Done {
errorIf(err, "Bucket listing error during purging of old files in minio.sys.tmp") minio.ErrorIf(err, "Bucket listing error during purging of old files in minio.sys.tmp")
} }
break break
} }
@ -409,8 +414,8 @@ func (l *gcsGateway) Shutdown() error {
} }
// StorageInfo - Not relevant to GCS backend. // StorageInfo - Not relevant to GCS backend.
func (l *gcsGateway) StorageInfo() StorageInfo { func (l *gcsGateway) StorageInfo() minio.StorageInfo {
return StorageInfo{} return minio.StorageInfo{}
} }
// MakeBucketWithLocation - Create a new container on GCS backend. // MakeBucketWithLocation - Create a new container on GCS backend.
@ -426,24 +431,24 @@ func (l *gcsGateway) MakeBucketWithLocation(bucket, location string) error {
Location: location, Location: location,
}) })
return gcsToObjectError(errors2.Trace(err), bucket) return gcsToObjectError(errors.Trace(err), bucket)
} }
// GetBucketInfo - Get bucket metadata.. // GetBucketInfo - Get bucket metadata..
func (l *gcsGateway) GetBucketInfo(bucket string) (BucketInfo, error) { func (l *gcsGateway) GetBucketInfo(bucket string) (minio.BucketInfo, error) {
attrs, err := l.client.Bucket(bucket).Attrs(l.ctx) attrs, err := l.client.Bucket(bucket).Attrs(l.ctx)
if err != nil { if err != nil {
return BucketInfo{}, gcsToObjectError(errors2.Trace(err), bucket) return minio.BucketInfo{}, gcsToObjectError(errors.Trace(err), bucket)
} }
return BucketInfo{ return minio.BucketInfo{
Name: attrs.Name, Name: attrs.Name,
Created: attrs.Created, Created: attrs.Created,
}, nil }, nil
} }
// ListBuckets lists all buckets under your project-id on GCS. // ListBuckets lists all buckets under your project-id on GCS.
func (l *gcsGateway) ListBuckets() (buckets []BucketInfo, err error) { func (l *gcsGateway) ListBuckets() (buckets []minio.BucketInfo, err error) {
it := l.client.Buckets(l.ctx, l.projectID) it := l.client.Buckets(l.ctx, l.projectID)
// Iterate and capture all the buckets. // Iterate and capture all the buckets.
@ -454,10 +459,10 @@ func (l *gcsGateway) ListBuckets() (buckets []BucketInfo, err error) {
} }
if ierr != nil { if ierr != nil {
return buckets, gcsToObjectError(errors2.Trace(ierr)) return buckets, gcsToObjectError(errors.Trace(ierr))
} }
buckets = append(buckets, BucketInfo{ buckets = append(buckets, minio.BucketInfo{
Name: attrs.Name, Name: attrs.Name,
Created: attrs.Created, Created: attrs.Created,
}) })
@ -468,7 +473,10 @@ func (l *gcsGateway) ListBuckets() (buckets []BucketInfo, err error) {
// DeleteBucket delete a bucket on GCS. // DeleteBucket delete a bucket on GCS.
func (l *gcsGateway) DeleteBucket(bucket string) error { func (l *gcsGateway) DeleteBucket(bucket string) error {
itObject := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Delimiter: slashSeparator, Versions: false}) itObject := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{
Delimiter: "/",
Versions: false,
})
// We list the bucket and if we find any objects we return BucketNotEmpty error. If we // We list the bucket and if we find any objects we return BucketNotEmpty error. If we
// find only "minio.sys.tmp/" then we remove it before deleting the bucket. // find only "minio.sys.tmp/" then we remove it before deleting the bucket.
gcsMinioPathFound := false gcsMinioPathFound := false
@ -479,9 +487,9 @@ func (l *gcsGateway) DeleteBucket(bucket string) error {
break break
} }
if err != nil { if err != nil {
return gcsToObjectError(errors2.Trace(err)) return gcsToObjectError(errors.Trace(err))
} }
if objAttrs.Prefix == globalMinioSysTmp { if objAttrs.Prefix == minio.GatewayMinioSysTmp {
gcsMinioPathFound = true gcsMinioPathFound = true
continue continue
} }
@ -489,27 +497,27 @@ func (l *gcsGateway) DeleteBucket(bucket string) error {
break break
} }
if nonGCSMinioPathFound { if nonGCSMinioPathFound {
return gcsToObjectError(errors2.Trace(BucketNotEmpty{})) return gcsToObjectError(errors.Trace(minio.BucketNotEmpty{}))
} }
if gcsMinioPathFound { if gcsMinioPathFound {
// Remove minio.sys.tmp before deleting the bucket. // Remove minio.sys.tmp before deleting the bucket.
itObject = l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Versions: false, Prefix: globalMinioSysTmp}) itObject = l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Versions: false, Prefix: minio.GatewayMinioSysTmp})
for { for {
objAttrs, err := itObject.Next() objAttrs, err := itObject.Next()
if err == iterator.Done { if err == iterator.Done {
break break
} }
if err != nil { if err != nil {
return gcsToObjectError(errors2.Trace(err)) return gcsToObjectError(errors.Trace(err))
} }
err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(l.ctx) err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(l.ctx)
if err != nil { if err != nil {
return gcsToObjectError(errors2.Trace(err)) return gcsToObjectError(errors.Trace(err))
} }
} }
} }
err := l.client.Bucket(bucket).Delete(l.ctx) err := l.client.Bucket(bucket).Delete(l.ctx)
return gcsToObjectError(errors2.Trace(err), bucket) return gcsToObjectError(errors.Trace(err), bucket)
} }
func toGCSPageToken(name string) string { func toGCSPageToken(name string) string {
@ -531,13 +539,13 @@ func toGCSPageToken(name string) string {
} }
// Returns true if marker was returned by GCS, i.e prefixed with // Returns true if marker was returned by GCS, i.e prefixed with
// ##minio by minio gcs gateway. // ##minio by minio gcs minio.
func isGCSMarker(marker string) bool { func isGCSMarker(marker string) bool {
return strings.HasPrefix(marker, gcsTokenPrefix) return strings.HasPrefix(marker, gcsTokenPrefix)
} }
// ListObjects - lists all blobs in GCS bucket filtered by prefix // ListObjects - lists all blobs in GCS bucket filtered by prefix
func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) { func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (minio.ListObjectsInfo, error) {
it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{ it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{
Delimiter: delimiter, Delimiter: delimiter,
Prefix: prefix, Prefix: prefix,
@ -570,7 +578,7 @@ func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, de
it.PageInfo().MaxSize = maxKeys it.PageInfo().MaxSize = maxKeys
objects := []ObjectInfo{} objects := []minio.ObjectInfo{}
for { for {
if len(objects) >= maxKeys { if len(objects) >= maxKeys {
// check if there is one next object and // check if there is one next object and
@ -578,7 +586,7 @@ func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, de
// metadata folder, then just break // metadata folder, then just break
// otherwise we've truncated the output // otherwise we've truncated the output
attrs, _ := it.Next() attrs, _ := it.Next()
if attrs != nil && attrs.Prefix == globalMinioSysTmp { if attrs != nil && attrs.Prefix == minio.GatewayMinioSysTmp {
break break
} }
@ -591,21 +599,21 @@ func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, de
break break
} }
if err != nil { if err != nil {
return ListObjectsInfo{}, gcsToObjectError(errors2.Trace(err), bucket, prefix) return minio.ListObjectsInfo{}, gcsToObjectError(errors.Trace(err), bucket, prefix)
} }
nextMarker = toGCSPageToken(attrs.Name) nextMarker = toGCSPageToken(attrs.Name)
if attrs.Prefix == globalMinioSysTmp { if attrs.Prefix == minio.GatewayMinioSysTmp {
// We don't return our metadata prefix. // We don't return our metadata prefix.
continue continue
} }
if !strings.HasPrefix(prefix, globalMinioSysTmp) { if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) {
// If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries. // If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries.
// But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/ // But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/
// which will be helpful to observe the "directory structure" for debugging purposes. // which will be helpful to observe the "directory structure" for debugging purposes.
if strings.HasPrefix(attrs.Prefix, globalMinioSysTmp) || if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) ||
strings.HasPrefix(attrs.Name, globalMinioSysTmp) { strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) {
continue continue
} }
} }
@ -619,19 +627,19 @@ func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, de
continue continue
} }
objects = append(objects, ObjectInfo{ objects = append(objects, minio.ObjectInfo{
Name: attrs.Name, Name: attrs.Name,
Bucket: attrs.Bucket, Bucket: attrs.Bucket,
ModTime: attrs.Updated, ModTime: attrs.Updated,
Size: attrs.Size, Size: attrs.Size,
ETag: toS3ETag(fmt.Sprintf("%d", attrs.CRC32C)), ETag: minio.ToS3ETag(fmt.Sprintf("%d", attrs.CRC32C)),
UserDefined: attrs.Metadata, UserDefined: attrs.Metadata,
ContentType: attrs.ContentType, ContentType: attrs.ContentType,
ContentEncoding: attrs.ContentEncoding, ContentEncoding: attrs.ContentEncoding,
}) })
} }
return ListObjectsInfo{ return minio.ListObjectsInfo{
IsTruncated: isTruncated, IsTruncated: isTruncated,
NextMarker: gcsTokenPrefix + nextMarker, NextMarker: gcsTokenPrefix + nextMarker,
Prefixes: prefixes, Prefixes: prefixes,
@ -640,8 +648,7 @@ func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, de
} }
// ListObjectsV2 - lists all blobs in GCS bucket filtered by prefix // ListObjectsV2 - lists all blobs in GCS bucket filtered by prefix
func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) { func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (minio.ListObjectsV2Info, error) {
it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{ it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{
Delimiter: delimiter, Delimiter: delimiter,
Prefix: prefix, Prefix: prefix,
@ -664,8 +671,8 @@ func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken, delimiter
} }
} }
prefixes := []string{} var prefixes []string
objects := []ObjectInfo{} var objects []minio.ObjectInfo
for { for {
attrs, err := it.Next() attrs, err := it.Next()
@ -674,19 +681,19 @@ func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken, delimiter
} }
if err != nil { if err != nil {
return ListObjectsV2Info{}, gcsToObjectError(errors2.Trace(err), bucket, prefix) return minio.ListObjectsV2Info{}, gcsToObjectError(errors.Trace(err), bucket, prefix)
} }
if attrs.Prefix == globalMinioSysTmp { if attrs.Prefix == minio.GatewayMinioSysTmp {
// We don't return our metadata prefix. // We don't return our metadata prefix.
continue continue
} }
if !strings.HasPrefix(prefix, globalMinioSysTmp) { if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) {
// If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries. // If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries.
// But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/ // But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/
// which will be helpful to observe the "directory structure" for debugging purposes. // which will be helpful to observe the "directory structure" for debugging purposes.
if strings.HasPrefix(attrs.Prefix, globalMinioSysTmp) || if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) ||
strings.HasPrefix(attrs.Name, globalMinioSysTmp) { strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) {
continue continue
} }
} }
@ -699,7 +706,7 @@ func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken, delimiter
objects = append(objects, fromGCSAttrsToObjectInfo(attrs)) objects = append(objects, fromGCSAttrsToObjectInfo(attrs))
} }
return ListObjectsV2Info{ return minio.ListObjectsV2Info{
IsTruncated: isTruncated, IsTruncated: isTruncated,
ContinuationToken: continuationToken, ContinuationToken: continuationToken,
NextContinuationToken: continuationToken, NextContinuationToken: continuationToken,
@ -718,55 +725,33 @@ func (l *gcsGateway) GetObject(bucket string, key string, startOffset int64, len
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket // otherwise gcs will just return object not exist in case of non-existing bucket
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return gcsToObjectError(errors2.Trace(err), bucket) return gcsToObjectError(errors.Trace(err), bucket)
} }
object := l.client.Bucket(bucket).Object(key) object := l.client.Bucket(bucket).Object(key)
r, err := object.NewRangeReader(l.ctx, startOffset, length) r, err := object.NewRangeReader(l.ctx, startOffset, length)
if err != nil { if err != nil {
return gcsToObjectError(errors2.Trace(err), bucket, key) return gcsToObjectError(errors.Trace(err), bucket, key)
} }
defer r.Close() defer r.Close()
if _, err := io.Copy(writer, r); err != nil { if _, err := io.Copy(writer, r); err != nil {
return gcsToObjectError(errors2.Trace(err), bucket, key) return gcsToObjectError(errors.Trace(err), bucket, key)
} }
return nil return nil
} }
// fromMinioClientListBucketResultToV2Info converts minio ListBucketResult to ListObjectsV2Info
func fromMinioClientListBucketResultToV2Info(bucket string, result minio.ListBucketResult) ListObjectsV2Info {
objects := make([]ObjectInfo, len(result.Contents))
for i, oi := range result.Contents {
objects[i] = fromMinioClientObjectInfo(bucket, oi)
}
prefixes := make([]string, len(result.CommonPrefixes))
for i, p := range result.CommonPrefixes {
prefixes[i] = p.Prefix
}
return ListObjectsV2Info{
IsTruncated: result.IsTruncated,
Prefixes: prefixes,
Objects: objects,
ContinuationToken: result.Marker,
NextContinuationToken: result.NextMarker,
}
}
// fromGCSAttrsToObjectInfo converts GCS BucketAttrs to gateway ObjectInfo // fromGCSAttrsToObjectInfo converts GCS BucketAttrs to gateway ObjectInfo
func fromGCSAttrsToObjectInfo(attrs *storage.ObjectAttrs) ObjectInfo { func fromGCSAttrsToObjectInfo(attrs *storage.ObjectAttrs) minio.ObjectInfo {
// All google cloud storage objects have a CRC32c hash, whereas composite objects may not have a MD5 hash // All google cloud storage objects have a CRC32c hash, whereas composite objects may not have a MD5 hash
// Refer https://cloud.google.com/storage/docs/hashes-etags. Use CRC32C for ETag // Refer https://cloud.google.com/storage/docs/hashes-etags. Use CRC32C for ETag
return ObjectInfo{ return minio.ObjectInfo{
Name: attrs.Name, Name: attrs.Name,
Bucket: attrs.Bucket, Bucket: attrs.Bucket,
ModTime: attrs.Updated, ModTime: attrs.Updated,
Size: attrs.Size, Size: attrs.Size,
ETag: toS3ETag(fmt.Sprintf("%d", attrs.CRC32C)), ETag: minio.ToS3ETag(fmt.Sprintf("%d", attrs.CRC32C)),
UserDefined: attrs.Metadata, UserDefined: attrs.Metadata,
ContentType: attrs.ContentType, ContentType: attrs.ContentType,
ContentEncoding: attrs.ContentEncoding, ContentEncoding: attrs.ContentEncoding,
@ -774,27 +759,27 @@ func fromGCSAttrsToObjectInfo(attrs *storage.ObjectAttrs) ObjectInfo {
} }
// GetObjectInfo - reads object info and replies back ObjectInfo // GetObjectInfo - reads object info and replies back ObjectInfo
func (l *gcsGateway) GetObjectInfo(bucket string, object string) (ObjectInfo, error) { func (l *gcsGateway) GetObjectInfo(bucket string, object string) (minio.ObjectInfo, error) {
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket // otherwise gcs will just return object not exist in case of non-existing bucket
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket)
} }
attrs, err := l.client.Bucket(bucket).Object(object).Attrs(l.ctx) attrs, err := l.client.Bucket(bucket).Object(object).Attrs(l.ctx)
if err != nil { if err != nil {
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, object) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, object)
} }
return fromGCSAttrsToObjectInfo(attrs), nil return fromGCSAttrsToObjectInfo(attrs), nil
} }
// PutObject - Create a new object with the incoming data, // PutObject - Create a new object with the incoming data,
func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, metadata map[string]string) (ObjectInfo, error) { func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, metadata map[string]string) (minio.ObjectInfo, error) {
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket // otherwise gcs will just return object not exist in case of non-existing bucket
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket)
} }
object := l.client.Bucket(bucket).Object(key) object := l.client.Bucket(bucket).Object(key)
@ -808,7 +793,7 @@ func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, met
if _, err := io.Copy(w, data); err != nil { if _, err := io.Copy(w, data); err != nil {
// Close the object writer upon error. // Close the object writer upon error.
w.CloseWithError(err) w.CloseWithError(err)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
} }
// Close the object writer upon success. // Close the object writer upon success.
@ -816,7 +801,7 @@ func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, met
attrs, err := object.Attrs(l.ctx) attrs, err := object.Attrs(l.ctx)
if err != nil { if err != nil {
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
} }
return fromGCSAttrsToObjectInfo(attrs), nil return fromGCSAttrsToObjectInfo(attrs), nil
@ -824,7 +809,7 @@ func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, met
// CopyObject - Copies a blob from source container to destination container. // CopyObject - Copies a blob from source container to destination container.
func (l *gcsGateway) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, func (l *gcsGateway) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string,
metadata map[string]string) (ObjectInfo, error) { metadata map[string]string) (minio.ObjectInfo, error) {
src := l.client.Bucket(srcBucket).Object(srcObject) src := l.client.Bucket(srcBucket).Object(srcObject)
dst := l.client.Bucket(destBucket).Object(destObject) dst := l.client.Bucket(destBucket).Object(destObject)
@ -834,7 +819,7 @@ func (l *gcsGateway) CopyObject(srcBucket string, srcObject string, destBucket s
attrs, err := copier.Run(l.ctx) attrs, err := copier.Run(l.ctx)
if err != nil { if err != nil {
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), destBucket, destObject) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), destBucket, destObject)
} }
return fromGCSAttrsToObjectInfo(attrs), nil return fromGCSAttrsToObjectInfo(attrs), nil
@ -844,7 +829,7 @@ func (l *gcsGateway) CopyObject(srcBucket string, srcObject string, destBucket s
func (l *gcsGateway) DeleteObject(bucket string, object string) error { func (l *gcsGateway) DeleteObject(bucket string, object string) error {
err := l.client.Bucket(bucket).Object(object).Delete(l.ctx) err := l.client.Bucket(bucket).Object(object).Delete(l.ctx)
if err != nil { if err != nil {
return gcsToObjectError(errors2.Trace(err), bucket, object) return gcsToObjectError(errors.Trace(err), bucket, object)
} }
return nil return nil
@ -853,7 +838,7 @@ func (l *gcsGateway) DeleteObject(bucket string, object string) error {
// NewMultipartUpload - upload object in multiple parts // NewMultipartUpload - upload object in multiple parts
func (l *gcsGateway) NewMultipartUpload(bucket string, key string, metadata map[string]string) (uploadID string, err error) { func (l *gcsGateway) NewMultipartUpload(bucket string, key string, metadata map[string]string) (uploadID string, err error) {
// generate new uploadid // generate new uploadid
uploadID = mustGetUUID() uploadID = minio.MustGetUUID()
// generate name for part zero // generate name for part zero
meta := gcsMultipartMetaName(uploadID) meta := gcsMultipartMetaName(uploadID)
@ -870,14 +855,14 @@ func (l *gcsGateway) NewMultipartUpload(bucket string, key string, metadata map[
bucket, bucket,
key, key,
}); err != nil { }); err != nil {
return "", gcsToObjectError(errors2.Trace(err), bucket, key) return "", gcsToObjectError(errors.Trace(err), bucket, key)
} }
return uploadID, nil return uploadID, nil
} }
// ListMultipartUploads - lists all multipart uploads. // ListMultipartUploads - lists all multipart uploads.
func (l *gcsGateway) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) { func (l *gcsGateway) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (minio.ListMultipartsInfo, error) {
return ListMultipartsInfo{ return minio.ListMultipartsInfo{
KeyMarker: keyMarker, KeyMarker: keyMarker,
UploadIDMarker: uploadIDMarker, UploadIDMarker: uploadIDMarker,
MaxUploads: maxUploads, MaxUploads: maxUploads,
@ -890,18 +875,18 @@ func (l *gcsGateway) ListMultipartUploads(bucket string, prefix string, keyMarke
// an object layer compatible error upon any error. // an object layer compatible error upon any error.
func (l *gcsGateway) checkUploadIDExists(bucket string, key string, uploadID string) error { func (l *gcsGateway) checkUploadIDExists(bucket string, key string, uploadID string) error {
_, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(l.ctx) _, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(l.ctx)
return gcsToObjectError(errors2.Trace(err), bucket, key, uploadID) return gcsToObjectError(errors.Trace(err), bucket, key, uploadID)
} }
// PutObjectPart puts a part of object in bucket // PutObjectPart puts a part of object in bucket
func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, partNumber int, data *hash.Reader) (PartInfo, error) { func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, partNumber int, data *hash.Reader) (minio.PartInfo, error) {
if err := l.checkUploadIDExists(bucket, key, uploadID); err != nil { if err := l.checkUploadIDExists(bucket, key, uploadID); err != nil {
return PartInfo{}, err return minio.PartInfo{}, err
} }
etag := data.MD5HexString() etag := data.MD5HexString()
if etag == "" { if etag == "" {
// Generate random ETag. // Generate random ETag.
etag = genETag() etag = minio.GenETag()
} }
object := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, partNumber, etag)) object := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, partNumber, etag))
w := object.NewWriter(l.ctx) w := object.NewWriter(l.ctx)
@ -911,22 +896,22 @@ func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, p
if _, err := io.Copy(w, data); err != nil { if _, err := io.Copy(w, data); err != nil {
// Make sure to close object writer upon error. // Make sure to close object writer upon error.
w.Close() w.Close()
return PartInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) return minio.PartInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
} }
// Make sure to close the object writer upon success. // Make sure to close the object writer upon success.
w.Close() w.Close()
return PartInfo{ return minio.PartInfo{
PartNumber: partNumber, PartNumber: partNumber,
ETag: etag, ETag: etag,
LastModified: UTCNow(), LastModified: minio.UTCNow(),
Size: data.Size(), Size: data.Size(),
}, nil }, nil
} }
// ListObjectParts returns all object parts for specified object in specified bucket // ListObjectParts returns all object parts for specified object in specified bucket
func (l *gcsGateway) ListObjectParts(bucket string, key string, uploadID string, partNumberMarker int, maxParts int) (ListPartsInfo, error) { func (l *gcsGateway) ListObjectParts(bucket string, key string, uploadID string, partNumberMarker int, maxParts int) (minio.ListPartsInfo, error) {
return ListPartsInfo{}, l.checkUploadIDExists(bucket, key, uploadID) return minio.ListPartsInfo{}, l.checkUploadIDExists(bucket, key, uploadID)
} }
// Called by AbortMultipartUpload and CompleteMultipartUpload for cleaning up. // Called by AbortMultipartUpload and CompleteMultipartUpload for cleaning up.
@ -942,7 +927,7 @@ func (l *gcsGateway) cleanupMultipartUpload(bucket, key, uploadID string) error
break break
} }
if err != nil { if err != nil {
return gcsToObjectError(errors2.Trace(err), bucket, key) return gcsToObjectError(errors.Trace(err), bucket, key)
} }
object := l.client.Bucket(bucket).Object(attrs.Name) object := l.client.Bucket(bucket).Object(attrs.Name)
@ -969,34 +954,34 @@ func (l *gcsGateway) AbortMultipartUpload(bucket string, key string, uploadID st
// to the number of components you can compose per second. This rate counts both the // to the number of components you can compose per second. This rate counts both the
// components being appended to a composite object as well as the components being // components being appended to a composite object as well as the components being
// copied when the composite object of which they are a part is copied. // copied when the composite object of which they are a part is copied.
func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID string, uploadedParts []CompletePart) (ObjectInfo, error) { func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID string, uploadedParts []minio.CompletePart) (minio.ObjectInfo, error) {
meta := gcsMultipartMetaName(uploadID) meta := gcsMultipartMetaName(uploadID)
object := l.client.Bucket(bucket).Object(meta) object := l.client.Bucket(bucket).Object(meta)
partZeroAttrs, err := object.Attrs(l.ctx) partZeroAttrs, err := object.Attrs(l.ctx)
if err != nil { if err != nil {
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key, uploadID) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key, uploadID)
} }
r, err := object.NewReader(l.ctx) r, err := object.NewReader(l.ctx)
if err != nil { if err != nil {
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
} }
defer r.Close() defer r.Close()
// Check version compatibility of the meta file before compose() // Check version compatibility of the meta file before compose()
multipartMeta := gcsMultipartMetaV1{} multipartMeta := gcsMultipartMetaV1{}
if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil { if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil {
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
} }
if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion { if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion {
return ObjectInfo{}, gcsToObjectError(errors2.Trace(errFormatNotSupported), bucket, key) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(errGCSFormat), bucket, key)
} }
// Validate if the gcs.json stores valid entries for the bucket and key. // Validate if the gcs.json stores valid entries for the bucket and key.
if multipartMeta.Bucket != bucket || multipartMeta.Object != key { if multipartMeta.Bucket != bucket || multipartMeta.Object != key {
return ObjectInfo{}, gcsToObjectError(InvalidUploadID{ return minio.ObjectInfo{}, gcsToObjectError(minio.InvalidUploadID{
UploadID: uploadID, UploadID: uploadID,
}, bucket, key) }, bucket, key)
} }
@ -1008,15 +993,15 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
uploadedPart.PartNumber, uploadedPart.ETag))) uploadedPart.PartNumber, uploadedPart.ETag)))
partAttr, pErr := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, uploadedPart.PartNumber, uploadedPart.ETag)).Attrs(l.ctx) partAttr, pErr := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, uploadedPart.PartNumber, uploadedPart.ETag)).Attrs(l.ctx)
if pErr != nil { if pErr != nil {
return ObjectInfo{}, gcsToObjectError(errors2.Trace(pErr), bucket, key, uploadID) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(pErr), bucket, key, uploadID)
} }
partSizes[i] = partAttr.Size partSizes[i] = partAttr.Size
} }
// Error out if parts except last part sizing < 5MiB. // Error out if parts except last part sizing < 5MiB.
for i, size := range partSizes[:len(partSizes)-1] { for i, size := range partSizes[:len(partSizes)-1] {
if size < globalMinPartSize { if size < 5*humanize.MiByte {
return ObjectInfo{}, errors2.Trace(PartTooSmall{ return minio.ObjectInfo{}, errors.Trace(minio.PartTooSmall{
PartNumber: uploadedParts[i].PartNumber, PartNumber: uploadedParts[i].PartNumber,
PartSize: size, PartSize: size,
PartETag: uploadedParts[i].ETag, PartETag: uploadedParts[i].ETag,
@ -1026,7 +1011,7 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
// Returns name of the composed object. // Returns name of the composed object.
gcsMultipartComposeName := func(uploadID string, composeNumber int) string { gcsMultipartComposeName := func(uploadID string, composeNumber int) string {
return fmt.Sprintf("%s/tmp/%s/composed-object-%05d", globalMinioSysTmp, uploadID, composeNumber) return fmt.Sprintf("%s/tmp/%s/composed-object-%05d", minio.GatewayMinioSysTmp, uploadID, composeNumber)
} }
composeCount := int(math.Ceil(float64(len(parts)) / float64(gcsMaxComponents))) composeCount := int(math.Ceil(float64(len(parts)) / float64(gcsMaxComponents)))
@ -1047,7 +1032,7 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
composer.Metadata = partZeroAttrs.Metadata composer.Metadata = partZeroAttrs.Metadata
if _, err = composer.Run(l.ctx); err != nil { if _, err = composer.Run(l.ctx); err != nil {
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
} }
} }
@ -1060,20 +1045,20 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
composer.Metadata = partZeroAttrs.Metadata composer.Metadata = partZeroAttrs.Metadata
attrs, err := composer.Run(l.ctx) attrs, err := composer.Run(l.ctx)
if err != nil { if err != nil {
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
} }
if err = l.cleanupMultipartUpload(bucket, key, uploadID); err != nil { if err = l.cleanupMultipartUpload(bucket, key, uploadID); err != nil {
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key) return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
} }
return fromGCSAttrsToObjectInfo(attrs), nil return fromGCSAttrsToObjectInfo(attrs), nil
} }
// SetBucketPolicies - Set policy on bucket // SetBucketPolicies - Set policy on bucket
func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
var policies []BucketAccessPolicy var policies []minio.BucketAccessPolicy
for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) { for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
policies = append(policies, BucketAccessPolicy{ policies = append(policies, minio.BucketAccessPolicy{
Prefix: prefix, Prefix: prefix,
Policy: policy, Policy: policy,
}) })
@ -1082,16 +1067,16 @@ func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAc
prefix := bucket + "/*" // For all objects inside the bucket. prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 { if len(policies) != 1 {
return errors2.Trace(NotImplemented{}) return errors.Trace(minio.NotImplemented{})
} }
if policies[0].Prefix != prefix { if policies[0].Prefix != prefix {
return errors2.Trace(NotImplemented{}) return errors.Trace(minio.NotImplemented{})
} }
acl := l.client.Bucket(bucket).ACL() acl := l.client.Bucket(bucket).ACL()
if policies[0].Policy == policy.BucketPolicyNone { if policies[0].Policy == policy.BucketPolicyNone {
if err := acl.Delete(l.ctx, storage.AllUsers); err != nil { if err := acl.Delete(l.ctx, storage.AllUsers); err != nil {
return gcsToObjectError(errors2.Trace(err), bucket) return gcsToObjectError(errors.Trace(err), bucket)
} }
return nil return nil
} }
@ -1103,11 +1088,11 @@ func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAc
case policy.BucketPolicyWriteOnly: case policy.BucketPolicyWriteOnly:
role = storage.RoleWriter role = storage.RoleWriter
default: default:
return errors2.Trace(NotImplemented{}) return errors.Trace(minio.NotImplemented{})
} }
if err := acl.Set(l.ctx, storage.AllUsers, role); err != nil { if err := acl.Set(l.ctx, storage.AllUsers, role); err != nil {
return gcsToObjectError(errors2.Trace(err), bucket) return gcsToObjectError(errors.Trace(err), bucket)
} }
return nil return nil
@ -1117,7 +1102,7 @@ func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAc
func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) { func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
rules, err := l.client.Bucket(bucket).ACL().List(l.ctx) rules, err := l.client.Bucket(bucket).ACL().List(l.ctx)
if err != nil { if err != nil {
return policy.BucketAccessPolicy{}, gcsToObjectError(errors2.Trace(err), bucket) return policy.BucketAccessPolicy{}, gcsToObjectError(errors.Trace(err), bucket)
} }
policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"} policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
for _, r := range rules { for _, r := range rules {
@ -1133,7 +1118,7 @@ func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy
} }
// Return NoSuchBucketPolicy error, when policy is not set // Return NoSuchBucketPolicy error, when policy is not set
if len(policyInfo.Statements) == 0 { if len(policyInfo.Statements) == 0 {
return policy.BucketAccessPolicy{}, gcsToObjectError(errors2.Trace(PolicyNotFound{}), bucket) return policy.BucketAccessPolicy{}, gcsToObjectError(errors.Trace(minio.PolicyNotFound{}), bucket)
} }
return policyInfo, nil return policyInfo, nil
} }
@ -1142,7 +1127,7 @@ func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy
func (l *gcsGateway) DeleteBucketPolicies(bucket string) error { func (l *gcsGateway) DeleteBucketPolicies(bucket string) error {
// This only removes the storage.AllUsers policies // This only removes the storage.AllUsers policies
if err := l.client.Bucket(bucket).ACL().Delete(l.ctx, storage.AllUsers); err != nil { if err := l.client.Bucket(bucket).ACL().Delete(l.ctx, storage.AllUsers); err != nil {
return gcsToObjectError(errors2.Trace(err), bucket) return gcsToObjectError(errors.Trace(err), bucket)
} }
return nil return nil

View file

@ -14,18 +14,21 @@
* limitations under the License. * limitations under the License.
*/ */
package cmd package gcs
import ( import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"path"
"reflect" "reflect"
"testing" "testing"
"github.com/minio/minio-go"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
miniogo "github.com/minio/minio-go"
minio "github.com/minio/minio/cmd"
) )
func TestToGCSPageToken(t *testing.T) { func TestToGCSPageToken(t *testing.T) {
@ -140,7 +143,7 @@ func TestIsGCSMarker(t *testing.T) {
// Test for gcsMultipartMetaName. // Test for gcsMultipartMetaName.
func TestGCSMultipartMetaName(t *testing.T) { func TestGCSMultipartMetaName(t *testing.T) {
uploadID := "a" uploadID := "a"
expected := pathJoin(gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta) expected := path.Join(gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta)
got := gcsMultipartMetaName(uploadID) got := gcsMultipartMetaName(uploadID)
if expected != got { if expected != got {
t.Errorf("expected: %s, got: %s", expected, got) t.Errorf("expected: %s, got: %s", expected, got)
@ -154,7 +157,7 @@ func TestGCSMultipartDataName(t *testing.T) {
etag = "b" etag = "b"
partNumber = 1 partNumber = 1
) )
expected := pathJoin(gcsMinioMultipartPathV1, uploadID, fmt.Sprintf("%05d.%s", partNumber, etag)) expected := path.Join(gcsMinioMultipartPathV1, uploadID, fmt.Sprintf("%05d.%s", partNumber, etag))
got := gcsMultipartDataName(uploadID, partNumber, etag) got := gcsMultipartDataName(uploadID, partNumber, etag)
if expected != got { if expected != got {
t.Errorf("expected: %s, got: %s", expected, got) t.Errorf("expected: %s, got: %s", expected, got)
@ -163,23 +166,23 @@ func TestGCSMultipartDataName(t *testing.T) {
func TestFromMinioClientListBucketResultToV2Info(t *testing.T) { func TestFromMinioClientListBucketResultToV2Info(t *testing.T) {
listBucketResult := minio.ListBucketResult{ listBucketResult := miniogo.ListBucketResult{
IsTruncated: false, IsTruncated: false,
Marker: "testMarker", Marker: "testMarker",
NextMarker: "testMarker2", NextMarker: "testMarker2",
CommonPrefixes: []minio.CommonPrefix{{Prefix: "one"}, {Prefix: "two"}}, CommonPrefixes: []miniogo.CommonPrefix{{Prefix: "one"}, {Prefix: "two"}},
Contents: []minio.ObjectInfo{{Key: "testobj", ContentType: ""}}, Contents: []miniogo.ObjectInfo{{Key: "testobj", ContentType: ""}},
} }
listBucketV2Info := ListObjectsV2Info{ listBucketV2Info := minio.ListObjectsV2Info{
Prefixes: []string{"one", "two"}, Prefixes: []string{"one", "two"},
Objects: []ObjectInfo{{Name: "testobj", Bucket: "testbucket", UserDefined: map[string]string{"Content-Type": ""}}}, Objects: []minio.ObjectInfo{{Name: "testobj", Bucket: "testbucket", UserDefined: map[string]string{"Content-Type": ""}}},
IsTruncated: false, IsTruncated: false,
ContinuationToken: "testMarker", ContinuationToken: "testMarker",
NextContinuationToken: "testMarker2", NextContinuationToken: "testMarker2",
} }
if got := fromMinioClientListBucketResultToV2Info("testbucket", listBucketResult); !reflect.DeepEqual(got, listBucketV2Info) { if got := minio.FromMinioClientListBucketResultToV2Info("testbucket", listBucketResult); !reflect.DeepEqual(got, listBucketV2Info) {
t.Errorf("fromMinioClientListBucketResultToV2Info() = %v, want %v", got, listBucketV2Info) t.Errorf("fromMinioClientListBucketResultToV2Info() = %v, want %v", got, listBucketV2Info)
} }
} }
@ -242,14 +245,14 @@ func TestGCSToObjectError(t *testing.T) {
{ {
[]string{"bucket"}, []string{"bucket"},
errors.Trace(fmt.Errorf("storage: bucket doesn't exist")), errors.Trace(fmt.Errorf("storage: bucket doesn't exist")),
BucketNotFound{ minio.BucketNotFound{
Bucket: "bucket", Bucket: "bucket",
}, },
}, },
{ {
[]string{"bucket", "object"}, []string{"bucket", "object"},
errors.Trace(fmt.Errorf("storage: object doesn't exist")), errors.Trace(fmt.Errorf("storage: object doesn't exist")),
ObjectNotFound{ minio.ObjectNotFound{
Bucket: "bucket", Bucket: "bucket",
Object: "object", Object: "object",
}, },
@ -257,7 +260,7 @@ func TestGCSToObjectError(t *testing.T) {
{ {
[]string{"bucket", "object", "uploadID"}, []string{"bucket", "object", "uploadID"},
errors.Trace(fmt.Errorf("storage: object doesn't exist")), errors.Trace(fmt.Errorf("storage: object doesn't exist")),
InvalidUploadID{ minio.InvalidUploadID{
UploadID: "uploadID", UploadID: "uploadID",
}, },
}, },
@ -283,7 +286,9 @@ func TestGCSToObjectError(t *testing.T) {
Message: "You already own this bucket. Please select another name.", Message: "You already own this bucket. Please select another name.",
}}, }},
}), }),
BucketAlreadyOwnedByYou{Bucket: "bucket"}, minio.BucketAlreadyOwnedByYou{
Bucket: "bucket",
},
}, },
{ {
[]string{"bucket", "object"}, []string{"bucket", "object"},
@ -293,7 +298,9 @@ func TestGCSToObjectError(t *testing.T) {
Message: "Sorry, that name is not available. Please try a different one.", Message: "Sorry, that name is not available. Please try a different one.",
}}, }},
}), }),
BucketAlreadyExists{Bucket: "bucket"}, minio.BucketAlreadyExists{
Bucket: "bucket",
},
}, },
{ {
[]string{"bucket", "object"}, []string{"bucket", "object"},
@ -302,7 +309,7 @@ func TestGCSToObjectError(t *testing.T) {
Reason: "conflict", Reason: "conflict",
}}, }},
}), }),
BucketNotEmpty{Bucket: "bucket"}, minio.BucketNotEmpty{Bucket: "bucket"},
}, },
{ {
[]string{"bucket"}, []string{"bucket"},
@ -311,7 +318,9 @@ func TestGCSToObjectError(t *testing.T) {
Reason: "notFound", Reason: "notFound",
}}, }},
}), }),
BucketNotFound{Bucket: "bucket"}, minio.BucketNotFound{
Bucket: "bucket",
},
}, },
{ {
[]string{"bucket", "object"}, []string{"bucket", "object"},
@ -320,7 +329,7 @@ func TestGCSToObjectError(t *testing.T) {
Reason: "notFound", Reason: "notFound",
}}, }},
}), }),
ObjectNotFound{ minio.ObjectNotFound{
Bucket: "bucket", Bucket: "bucket",
Object: "object", Object: "object",
}, },
@ -332,7 +341,7 @@ func TestGCSToObjectError(t *testing.T) {
Reason: "invalid", Reason: "invalid",
}}, }},
}), }),
BucketNameInvalid{ minio.BucketNameInvalid{
Bucket: "bucket", Bucket: "bucket",
}, },
}, },
@ -343,7 +352,7 @@ func TestGCSToObjectError(t *testing.T) {
Reason: "forbidden", Reason: "forbidden",
}}, }},
}), }),
PrefixAccessDenied{ minio.PrefixAccessDenied{
Bucket: "bucket", Bucket: "bucket",
Object: "object", Object: "object",
}, },
@ -355,7 +364,7 @@ func TestGCSToObjectError(t *testing.T) {
Reason: "keyInvalid", Reason: "keyInvalid",
}}, }},
}), }),
PrefixAccessDenied{ minio.PrefixAccessDenied{
Bucket: "bucket", Bucket: "bucket",
Object: "object", Object: "object",
}, },
@ -367,7 +376,7 @@ func TestGCSToObjectError(t *testing.T) {
Reason: "required", Reason: "required",
}}, }},
}), }),
PrefixAccessDenied{ minio.PrefixAccessDenied{
Bucket: "bucket", Bucket: "bucket",
Object: "object", Object: "object",
}, },

View file

@ -14,87 +14,89 @@
* limitations under the License. * limitations under the License.
*/ */
package cmd package s3
import ( import (
"io" "io"
minio "github.com/minio/minio-go" miniogo "github.com/minio/minio-go"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd"
) )
// AnonPutObject creates a new object anonymously with the incoming data, // AnonPutObject creates a new object anonymously with the incoming data,
func (l *s3Objects) AnonPutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, e error) { func (l *s3Objects) AnonPutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, e error) {
oi, err := l.anonClient.PutObject(bucket, object, data, data.Size(), data.MD5HexString(), data.SHA256HexString(), toMinioClientMetadata(metadata)) oi, err := l.anonClient.PutObject(bucket, object, data, data.Size(), data.MD5HexString(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata))
if err != nil { if err != nil {
return objInfo, s3ToObjectError(errors.Trace(err), bucket, object) return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
} }
return fromMinioClientObjectInfo(bucket, oi), nil return minio.FromMinioClientObjectInfo(bucket, oi), nil
} }
// AnonGetObject - Get object anonymously // AnonGetObject - Get object anonymously
func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error { func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
opts := minio.GetObjectOptions{} opts := miniogo.GetObjectOptions{}
if err := opts.SetRange(startOffset, startOffset+length-1); err != nil { if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
return s3ToObjectError(errors.Trace(err), bucket, key) return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
} }
object, _, err := l.anonClient.GetObject(bucket, key, opts) object, _, err := l.anonClient.GetObject(bucket, key, opts)
if err != nil { if err != nil {
return s3ToObjectError(errors.Trace(err), bucket, key) return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
} }
defer object.Close() defer object.Close()
if _, err := io.CopyN(writer, object, length); err != nil { if _, err := io.CopyN(writer, object, length); err != nil {
return s3ToObjectError(errors.Trace(err), bucket, key) return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
} }
return nil return nil
} }
// AnonGetObjectInfo - Get object info anonymously // AnonGetObjectInfo - Get object info anonymously
func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, e error) { func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, e error) {
oi, err := l.anonClient.StatObject(bucket, object, minio.StatObjectOptions{}) oi, err := l.anonClient.StatObject(bucket, object, miniogo.StatObjectOptions{})
if err != nil { if err != nil {
return objInfo, s3ToObjectError(errors.Trace(err), bucket, object) return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
} }
return fromMinioClientObjectInfo(bucket, oi), nil return minio.FromMinioClientObjectInfo(bucket, oi), nil
} }
// AnonListObjects - List objects anonymously // AnonListObjects - List objects anonymously
func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) {
result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys) result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil { if err != nil {
return loi, s3ToObjectError(errors.Trace(err), bucket) return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
} }
return fromMinioClientListBucketResult(bucket, result), nil return minio.FromMinioClientListBucketResult(bucket, result), nil
} }
// AnonListObjectsV2 - List objects in V2 mode, anonymously // AnonListObjectsV2 - List objects in V2 mode, anonymously
func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, e error) { func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
result, err := l.anonClient.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys) result, err := l.anonClient.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
if err != nil { if err != nil {
return loi, s3ToObjectError(errors.Trace(err), bucket) return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
} }
return fromMinioClientListBucketV2Result(bucket, result), nil return minio.FromMinioClientListBucketV2Result(bucket, result), nil
} }
// AnonGetBucketInfo - Get bucket metadata anonymously. // AnonGetBucketInfo - Get bucket metadata anonymously.
func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi BucketInfo, e error) { func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi minio.BucketInfo, e error) {
if exists, err := l.anonClient.BucketExists(bucket); err != nil { if exists, err := l.anonClient.BucketExists(bucket); err != nil {
return bi, s3ToObjectError(errors.Trace(err), bucket) return bi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
} else if !exists { } else if !exists {
return bi, errors.Trace(BucketNotFound{Bucket: bucket}) return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket})
} }
buckets, err := l.anonClient.ListBuckets() buckets, err := l.anonClient.ListBuckets()
if err != nil { if err != nil {
return bi, s3ToObjectError(errors.Trace(err), bucket) return bi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
} }
for _, bi := range buckets { for _, bi := range buckets {
@ -102,11 +104,11 @@ func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi BucketInfo, e error) {
continue continue
} }
return BucketInfo{ return minio.BucketInfo{
Name: bi.Name, Name: bi.Name,
Created: bi.CreationDate, Created: bi.CreationDate,
}, nil }, nil
} }
return bi, errors.Trace(BucketNotFound{Bucket: bucket}) return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket})
} }

View file

@ -0,0 +1,416 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3
import (
"io"
"github.com/minio/cli"
miniogo "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd"
)
const (
	// s3Backend is the gateway name under which this package registers
	// itself with the minio gateway command.
	s3Backend = "s3"
)
// init registers the "s3" gateway subcommand (with its help template)
// so that `minio gateway s3` becomes available at startup.
func init() {
	const s3GatewayTemplate = `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [ENDPOINT]
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
ENDPOINT:
S3 server endpoint. Default ENDPOINT is https://s3.amazonaws.com
ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Username or access key of S3 storage.
MINIO_SECRET_KEY: Password or secret key of S3 storage.
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
EXAMPLES:
1. Start minio gateway server for AWS S3 backend.
$ export MINIO_ACCESS_KEY=accesskey
$ export MINIO_SECRET_KEY=secretkey
$ {{.HelpName}}
2. Start minio gateway server for S3 backend on custom endpoint.
$ export MINIO_ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
$ export MINIO_SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
$ {{.HelpName}} https://play.minio.io:9000
`
	// Register the command; its Action wires up s3GatewayMain below.
	minio.RegisterGatewayCommand(cli.Command{
		Name:               s3Backend,
		Usage:              "Amazon Simple Storage Service (S3).",
		Action:             s3GatewayMain,
		CustomHelpTemplate: s3GatewayTemplate,
		HideHelpCommand:    true,
	})
}
// Handler for 'minio gateway s3' command line.
func s3GatewayMain(ctx *cli.Context) {
	// The optional first positional argument is the remote S3 endpoint.
	host := ctx.Args().First()
	// Validate gateway arguments.
	minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
	minio.StartGateway(ctx, &S3{host})
}
// S3 implements the minio Gateway interface for an S3-compatible
// remote backend.
type S3 struct {
	// host is the optional endpoint override supplied on the command
	// line; empty means the AWS default endpoint is used.
	host string
}
// Name implements Gateway interface; it returns the backend name "s3".
func (g *S3) Name() string {
	return s3Backend
}
// NewGatewayLayer returns the s3 gateway layer, which proxies object
// operations to the remote S3-compatible backend using creds.
func (g *S3) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
	var (
		err      error
		endpoint string
		secure   = true
	)

	// A host given on the command line overrides the defaults.
	if g.host != "" {
		endpoint, secure, err = minio.ParseGatewayEndpoint(g.host)
		if err != nil {
			return nil, err
		}
	}

	// Fall back to the public AWS endpoint when none was supplied.
	if endpoint == "" {
		endpoint = "s3.amazonaws.com"
	}

	// Authenticated client used for all regular requests.
	client, err := miniogo.NewCore(endpoint, creds.AccessKey, creds.SecretKey, secure)
	if err != nil {
		return nil, err
	}

	// Anonymous client services unauthenticated (public) requests.
	anonClient, err := miniogo.NewCore(endpoint, "", "", secure)
	if err != nil {
		return nil, err
	}
	anonClient.SetCustomTransport(minio.NewCustomHTTPTransport())

	return &s3Objects{
		Client:     client,
		anonClient: anonClient,
	}, nil
}
// Production - s3 gateway is not production ready.
func (g *S3) Production() bool {
	// Flip to true once this gateway is considered stable.
	return false
}
// s3Objects implements gateway for Minio and S3 compatible object
// storage servers.
type s3Objects struct {
	// GatewayUnsupported is embedded to supply stubs for operations this
	// gateway does not implement itself (presumably returning
	// not-supported errors — see minio.GatewayUnsupported).
	minio.GatewayUnsupported
	// Client is the authenticated client for regular requests.
	Client *miniogo.Core
	// anonClient serves anonymous (unauthenticated) requests.
	anonClient *miniogo.Core
}
// Shutdown saves any gateway metadata to disk
// if necessary and reload upon next restart.
func (l *s3Objects) Shutdown() error {
	// The S3 gateway keeps no local state, so there is nothing to save.
	return nil
}
// StorageInfo is not relevant to S3 backend; it always returns the
// zero-value StorageInfo.
func (l *s3Objects) StorageInfo() (si minio.StorageInfo) {
	return si
}
// MakeBucketWithLocation creates a new bucket on the S3 backend in the
// given location, translating any client error to an object-layer error.
func (l *s3Objects) MakeBucketWithLocation(bucket, location string) error {
	if err := l.Client.MakeBucket(bucket, location); err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket)
	}
	// Success: return nil explicitly instead of the stale err variable.
	return nil
}
// GetBucketInfo gets bucket metadata by scanning the backend's bucket
// listing for the requested name.
func (l *s3Objects) GetBucketInfo(bucket string) (bi minio.BucketInfo, e error) {
	// Use the lenient client-side check here instead of IsValidBucketName()
	// because some users have non-DNS-compliant buckets in us-east-1 and a
	// stricter check would lock them out.
	// Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
	if s3utils.CheckValidBucketName(bucket) != nil {
		return bi, errors.Trace(minio.BucketNameInvalid{Bucket: bucket})
	}

	buckets, err := l.Client.ListBuckets()
	if err != nil {
		return bi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
	}

	// Find the matching bucket in the listing (avoid shadowing the named
	// return by using a distinct loop variable).
	for _, b := range buckets {
		if b.Name == bucket {
			return minio.BucketInfo{
				Name:    b.Name,
				Created: b.CreationDate,
			}, nil
		}
	}

	return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket})
}
// ListBuckets lists all S3 buckets visible to the configured credentials.
func (l *s3Objects) ListBuckets() ([]minio.BucketInfo, error) {
	buckets, err := l.Client.ListBuckets()
	if err != nil {
		return nil, minio.ErrorRespToObjectError(errors.Trace(err))
	}

	// Pre-sized: one entry per backend bucket.
	b := make([]minio.BucketInfo, len(buckets))
	for i, bi := range buckets {
		b[i] = minio.BucketInfo{
			Name:    bi.Name,
			Created: bi.CreationDate,
		}
	}

	// Success: return nil explicitly instead of the stale err variable.
	return b, nil
}
// DeleteBucket removes the named bucket from the S3 backend.
func (l *s3Objects) DeleteBucket(bucket string) error {
	if err := l.Client.RemoveBucket(bucket); err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket)
	}
	return nil
}
// ListObjects lists objects in an S3 bucket filtered by prefix, using
// the v1 listing API.
func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) {
	result, lerr := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
	if lerr != nil {
		return loi, minio.ErrorRespToObjectError(errors.Trace(lerr), bucket)
	}
	// Convert the client listing into the object-layer representation.
	return minio.FromMinioClientListBucketResult(bucket, result), nil
}
// ListObjectsV2 lists objects in an S3 bucket filtered by prefix, using
// the v2 listing API with continuation tokens.
func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
	result, lerr := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
	if lerr != nil {
		return loi, minio.ErrorRespToObjectError(errors.Trace(lerr), bucket)
	}
	// Convert the client listing into the object-layer representation.
	return minio.FromMinioClientListBucketV2Result(bucket, result), nil
}
// GetObject reads an object from S3 and streams it into writer.
// Supports additional parameters like offset and length which are
// synonymous with HTTP Range requests.
//
// startOffset indicates the starting read location of the object;
// length is the number of bytes to read, or -1 for the whole object.
func (l *s3Objects) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
	// Any negative length other than the "whole object" sentinel -1 is
	// an invalid range request.
	if length < 0 && length != -1 {
		return minio.ErrorRespToObjectError(errors.Trace(minio.InvalidRange{}), bucket, key)
	}

	opts := miniogo.GetObjectOptions{}
	if startOffset >= 0 && length >= 0 {
		// Translate (offset, length) into an inclusive byte range.
		if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
			return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
		}
	}

	reader, _, err := l.Client.GetObject(bucket, key, opts)
	if err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
	}
	defer reader.Close()

	if _, err = io.Copy(writer, reader); err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
	}
	return nil
}
// GetObjectInfo stats the object on the S3 backend and replies back
// the corresponding object-layer ObjectInfo.
func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) {
	oi, serr := l.Client.StatObject(bucket, object, miniogo.StatObjectOptions{})
	if serr != nil {
		return minio.ObjectInfo{}, minio.ErrorRespToObjectError(errors.Trace(serr), bucket, object)
	}
	return minio.FromMinioClientObjectInfo(bucket, oi), nil
}
// PutObject creates a new object on the S3 backend from the incoming
// data stream and returns its metadata.
func (l *s3Objects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
	oi, perr := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5HexString(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata))
	if perr != nil {
		return objInfo, minio.ErrorRespToObjectError(errors.Trace(perr), bucket, object)
	}
	return minio.FromMinioClientObjectInfo(bucket, oi), nil
}
// CopyObject copies an object from source bucket to a destination bucket.
func (l *s3Objects) CopyObject(srcBucket string, srcObject string, dstBucket string, dstObject string, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
	// The metadata we receive is the fully resolved set trickled down from
	// the handler layer (x-amz-metadata-directive has already been
	// interpreted there). Force REPLACE so the remote S3 applies exactly
	// this metadata to the destination object.
	metadata["x-amz-metadata-directive"] = "REPLACE"

	_, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, metadata)
	if err != nil {
		return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), srcBucket, srcObject)
	}

	// Stat the destination to build the object info we return.
	return l.GetObjectInfo(dstBucket, dstObject)
}
// DeleteObject removes an object from the given bucket.
func (l *s3Objects) DeleteObject(bucket string, object string) error {
	if err := l.Client.RemoveObject(bucket, object); err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
	}
	return nil
}
// ListMultipartUploads lists all ongoing multipart uploads, filtered by
// the given prefix/marker parameters.
func (l *s3Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, e error) {
	result, err := l.Client.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
	if err != nil {
		// Translate the client error like every other method in this file,
		// so callers receive object-layer error types rather than a raw
		// miniogo error.
		return lmi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
	}
	return minio.FromMinioClientListMultipartsInfo(result), nil
}
// NewMultipartUpload initiates a multipart upload on the backend and
// returns the new upload ID.
func (l *s3Objects) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) {
	// Forward the caller-supplied metadata as user metadata.
	opts := miniogo.PutObjectOptions{UserMetadata: metadata}
	if uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts); err != nil {
		return uploadID, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
	}
	return uploadID, nil
}
// PutObjectPart uploads a single part of an ongoing multipart upload.
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi minio.PartInfo, e error) {
	info, perr := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5HexString(), data.SHA256HexString())
	if perr != nil {
		return pi, minio.ErrorRespToObjectError(errors.Trace(perr), bucket, object)
	}
	return minio.FromMinioClientObjectPart(info), nil
}
// CopyObjectPart creates a part in a multipart upload by copying an
// existing object, or a byte range of it.
func (l *s3Objects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject, uploadID string,
	partID int, startOffset, length int64, metadata map[string]string) (p minio.PartInfo, err error) {

	completePart, err := l.Client.CopyObjectPart(srcBucket, srcObject, destBucket, destObject,
		uploadID, partID, startOffset, length, metadata)
	if err != nil {
		return p, minio.ErrorRespToObjectError(errors.Trace(err), srcBucket, srcObject)
	}

	// Only the part number and ETag are meaningful to the caller.
	p.PartNumber = completePart.PartNumber
	p.ETag = completePart.ETag
	return p, nil
}
// ListObjectParts returns all uploaded parts of the given multipart
// upload in the specified bucket.
func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi minio.ListPartsInfo, e error) {
	result, err := l.Client.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
	if err != nil {
		// Translate the client error like every other method in this file,
		// so callers receive object-layer error types rather than a raw
		// miniogo error.
		return lpi, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
	}
	return minio.FromMinioClientListPartsInfo(result), nil
}
// AbortMultipartUpload aborts an ongoing multipart upload.
func (l *s3Objects) AbortMultipartUpload(bucket string, object string, uploadID string) error {
	err := l.Client.AbortMultipartUpload(bucket, object, uploadID)
	// NOTE(review): err is fed through the translator even when nil;
	// presumably errors.Trace(nil) and ErrorRespToObjectError(nil, ...)
	// both yield nil on success — confirm against pkg/errors and cmd.
	return minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
}
// CompleteMultipartUpload completes an ongoing multipart upload and
// finalizes the object from the uploaded parts.
func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, e error) {
	if err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, minio.ToMinioClientCompleteParts(uploadedParts)); err != nil {
		return oi, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
	}
	// Stat the finalized object to build the returned metadata.
	return l.GetObjectInfo(bucket, object)
}
// SetBucketPolicies applies the given access policy to the bucket.
func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
	err := l.Client.PutBucketPolicy(bucket, policyInfo)
	if err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket, "")
	}
	return nil
}
// GetBucketPolicies fetches the current access policy of the bucket.
func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
	policyInfo, gerr := l.Client.GetBucketPolicy(bucket)
	if gerr != nil {
		return policy.BucketAccessPolicy{}, minio.ErrorRespToObjectError(errors.Trace(gerr), bucket, "")
	}
	return policyInfo, nil
}
// DeleteBucketPolicies removes all policies from the bucket by writing
// an empty policy document.
func (l *s3Objects) DeleteBucketPolicies(bucket string) error {
	err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{})
	if err != nil {
		return minio.ErrorRespToObjectError(errors.Trace(err), bucket, "")
	}
	return nil
}

View file

@ -14,19 +14,21 @@
* limitations under the License. * limitations under the License.
*/ */
package cmd package s3
import ( import (
"errors" "fmt"
"testing" "testing"
minio "github.com/minio/minio-go" miniogo "github.com/minio/minio-go"
errors2 "github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd"
) )
func errResponse(code string) minio.ErrorResponse { func errResponse(code string) miniogo.ErrorResponse {
return minio.ErrorResponse{ return miniogo.ErrorResponse{
Code: code, Code: code,
} }
} }
@ -39,41 +41,41 @@ func TestS3ToObjectError(t *testing.T) {
}{ }{
{ {
inputErr: errResponse("BucketAlreadyOwnedByYou"), inputErr: errResponse("BucketAlreadyOwnedByYou"),
expectedErr: BucketAlreadyOwnedByYou{}, expectedErr: minio.BucketAlreadyOwnedByYou{},
}, },
{ {
inputErr: errResponse("BucketNotEmpty"), inputErr: errResponse("BucketNotEmpty"),
expectedErr: BucketNotEmpty{}, expectedErr: minio.BucketNotEmpty{},
}, },
{ {
inputErr: errResponse("InvalidBucketName"), inputErr: errResponse("InvalidBucketName"),
expectedErr: BucketNameInvalid{}, expectedErr: minio.BucketNameInvalid{},
}, },
{ {
inputErr: errResponse("NoSuchBucketPolicy"), inputErr: errResponse("NoSuchBucketPolicy"),
expectedErr: PolicyNotFound{}, expectedErr: minio.PolicyNotFound{},
}, },
{ {
inputErr: errResponse("NoSuchBucket"), inputErr: errResponse("NoSuchBucket"),
expectedErr: BucketNotFound{}, expectedErr: minio.BucketNotFound{},
}, },
// with empty Object in minio.ErrorRepsonse, NoSuchKey // with empty Object in miniogo.ErrorRepsonse, NoSuchKey
// is interpreted as BucketNotFound // is interpreted as BucketNotFound
{ {
inputErr: errResponse("NoSuchKey"), inputErr: errResponse("NoSuchKey"),
expectedErr: BucketNotFound{}, expectedErr: minio.BucketNotFound{},
}, },
{ {
inputErr: errResponse("NoSuchUpload"), inputErr: errResponse("NoSuchUpload"),
expectedErr: InvalidUploadID{}, expectedErr: minio.InvalidUploadID{},
}, },
{ {
inputErr: errResponse("XMinioInvalidObjectName"), inputErr: errResponse("XMinioInvalidObjectName"),
expectedErr: ObjectNameInvalid{}, expectedErr: minio.ObjectNameInvalid{},
}, },
{ {
inputErr: errResponse("AccessDenied"), inputErr: errResponse("AccessDenied"),
expectedErr: PrefixAccessDenied{}, expectedErr: minio.PrefixAccessDenied{},
}, },
{ {
inputErr: errResponse("XAmzContentSHA256Mismatch"), inputErr: errResponse("XAmzContentSHA256Mismatch"),
@ -81,7 +83,7 @@ func TestS3ToObjectError(t *testing.T) {
}, },
{ {
inputErr: errResponse("EntityTooSmall"), inputErr: errResponse("EntityTooSmall"),
expectedErr: PartTooSmall{}, expectedErr: minio.PartTooSmall{},
}, },
{ {
inputErr: nil, inputErr: nil,
@ -89,34 +91,37 @@ func TestS3ToObjectError(t *testing.T) {
}, },
// Special test case for NoSuchKey with object name // Special test case for NoSuchKey with object name
{ {
inputErr: minio.ErrorResponse{ inputErr: miniogo.ErrorResponse{
Code: "NoSuchKey", Code: "NoSuchKey",
}, },
expectedErr: ObjectNotFound{}, expectedErr: minio.ObjectNotFound{
bucket: "bucket", Bucket: "bucket",
object: "obbject", Object: "object",
},
bucket: "bucket",
object: "object",
}, },
// N B error values that aren't of expected types // N B error values that aren't of expected types
// should be left untouched. // should be left untouched.
// Special test case for error that is not of type // Special test case for error that is not of type
// minio.ErrorResponse // miniogo.ErrorResponse
{ {
inputErr: errors.New("not a minio.ErrorResponse"), inputErr: fmt.Errorf("not a minio.ErrorResponse"),
expectedErr: errors.New("not a minio.ErrorResponse"), expectedErr: fmt.Errorf("not a minio.ErrorResponse"),
}, },
// Special test case for error value that is not of // Special test case for error value that is not of
// type (*Error) // type (*Error)
{ {
inputErr: errors.New("not a *Error"), inputErr: fmt.Errorf("not a *Error"),
expectedErr: errors.New("not a *Error"), expectedErr: fmt.Errorf("not a *Error"),
}, },
} }
for i, tc := range testCases { for i, tc := range testCases {
actualErr := s3ToObjectError(tc.inputErr, tc.bucket, tc.object) actualErr := minio.ErrorRespToObjectError(errors.Trace(tc.inputErr), tc.bucket, tc.object)
if e, ok := actualErr.(*errors2.Error); ok && e.Cause != tc.expectedErr { if e, ok := actualErr.(*errors.Error); ok && e.Cause.Error() != tc.expectedErr.Error() {
t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, e.Cause) t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, e)
} }
} }
} }

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package cmd package sia
import ( import (
"bytes" "bytes"
@ -22,15 +22,21 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"log"
"net/http" "net/http"
"net/url" "net/url"
"os" "os"
"path"
"path/filepath" "path/filepath"
"strings" "strings"
"time" "time"
humanize "github.com/dustin/go-humanize"
"github.com/fatih/color"
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/minio-go/pkg/set" "github.com/minio/minio-go/pkg/set"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
"github.com/minio/sha256-simd" "github.com/minio/sha256-simd"
@ -41,7 +47,7 @@ const (
) )
type siaObjects struct { type siaObjects struct {
gatewayUnsupported minio.GatewayUnsupported
Address string // Address and port of Sia Daemon. Address string // Address and port of Sia Daemon.
TempDir string // Temporary storage location for file transfers. TempDir string // Temporary storage location for file transfers.
RootDir string // Root directory to store files on Sia. RootDir string // Root directory to store files on Sia.
@ -72,12 +78,11 @@ EXAMPLES:
` `
MustRegisterGatewayCommand(cli.Command{ minio.RegisterGatewayCommand(cli.Command{
Name: siaBackend, Name: siaBackend,
Usage: "Sia Decentralized Cloud.", Usage: "Sia Decentralized Cloud.",
Action: siaGatewayMain, Action: siaGatewayMain,
CustomHelpTemplate: siaGatewayTemplate, CustomHelpTemplate: siaGatewayTemplate,
Flags: append(serverFlags, globalFlags...),
HideHelpCommand: true, HideHelpCommand: true,
}) })
} }
@ -87,26 +92,67 @@ func siaGatewayMain(ctx *cli.Context) {
// Validate gateway arguments. // Validate gateway arguments.
host := ctx.Args().First() host := ctx.Args().First()
// Validate gateway arguments. // Validate gateway arguments.
fatalIf(validateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument") minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
startGateway(ctx, &SiaGateway{host}) minio.StartGateway(ctx, &Sia{host})
} }
// SiaGateway implements Gateway. // Sia implements Gateway.
type SiaGateway struct { type Sia struct {
host string // Sia daemon host address host string // Sia daemon host address
} }
// Name implements Gateway interface. // Name implements Gateway interface.
func (g *SiaGateway) Name() string { func (g *Sia) Name() string {
return siaBackend return siaBackend
} }
// NewGatewayLayer returns b2 gateway layer, implements GatewayLayer interface to // NewGatewayLayer returns Sia gateway layer, implements GatewayLayer interface to
// talk to B2 remote backend. // talk to Sia backend.
func (g *SiaGateway) NewGatewayLayer() (GatewayLayer, error) { func (g *Sia) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
log.Println(colorYellow("\n *** Warning: Not Ready for Production ***")) sia := &siaObjects{
return newSiaGatewayLayer(g.host) Address: g.host,
// RootDir uses access key directly, provides partitioning for
// concurrent users talking to same sia daemon.
RootDir: creds.AccessKey,
TempDir: os.Getenv("SIA_TEMP_DIR"),
password: os.Getenv("SIA_API_PASSWORD"),
}
// If Address not provided on command line or ENV, default to:
if sia.Address == "" {
sia.Address = "127.0.0.1:9980"
}
// If local Sia temp directory not specified, default to:
if sia.TempDir == "" {
sia.TempDir = ".sia_temp"
}
var err error
sia.TempDir, err = filepath.Abs(sia.TempDir)
if err != nil {
return nil, err
}
// Create the temp directory with proper permissions.
// Ignore error when dir already exists.
if err = os.MkdirAll(sia.TempDir, 0700); err != nil {
return nil, err
}
colorBlue := color.New(color.FgBlue).SprintfFunc()
colorBold := color.New(color.Bold).SprintFunc()
log.Println(colorBlue("\nSia Gateway Configuration:"))
log.Println(colorBlue(" Sia Daemon API Address:") + colorBold(fmt.Sprintf(" %s\n", sia.Address)))
log.Println(colorBlue(" Sia Temp Directory:") + colorBold(fmt.Sprintf(" %s\n", sia.TempDir)))
return sia, nil
}
// Production - sia gateway is not ready for production use.
func (g *Sia) Production() bool {
return false
} }
// non2xx returns true for non-success HTTP status codes. // non2xx returns true for non-success HTTP status codes.
@ -139,12 +185,12 @@ func decodeError(resp *http.Response) error {
return apiErr return apiErr
} }
// SiaMethodNotSupported - returned if call returned error. // MethodNotSupported - returned if call returned error.
type SiaMethodNotSupported struct { type MethodNotSupported struct {
method string method string
} }
func (s SiaMethodNotSupported) Error() string { func (s MethodNotSupported) Error() string {
return fmt.Sprintf("API call not recognized: %s", s.method) return fmt.Sprintf("API call not recognized: %s", s.method)
} }
@ -166,7 +212,7 @@ func apiGet(addr, call, apiPassword string) (*http.Response, error) {
} }
if resp.StatusCode == http.StatusNotFound { if resp.StatusCode == http.StatusNotFound {
resp.Body.Close() resp.Body.Close()
return nil, SiaMethodNotSupported{call} return nil, MethodNotSupported{call}
} }
if non2xx(resp.StatusCode) { if non2xx(resp.StatusCode) {
err := decodeError(resp) err := decodeError(resp)
@ -191,12 +237,12 @@ func apiPost(addr, call, vals, apiPassword string) (*http.Response, error) {
} }
resp, err := http.DefaultClient.Do(req) resp, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return nil, err return nil, errors.Trace(err)
} }
if resp.StatusCode == http.StatusNotFound { if resp.StatusCode == http.StatusNotFound {
resp.Body.Close() resp.Body.Close()
return nil, SiaMethodNotSupported{call} return nil, MethodNotSupported{call}
} }
if non2xx(resp.StatusCode) { if non2xx(resp.StatusCode) {
@ -244,45 +290,6 @@ func get(addr, call, apiPassword string) error {
return nil return nil
} }
// newSiaGatewayLayer returns Sia gatewaylayer
func newSiaGatewayLayer(host string) (GatewayLayer, error) {
sia := &siaObjects{
Address: host,
// RootDir uses access key directly, provides partitioning for
// concurrent users talking to same sia daemon.
RootDir: os.Getenv("MINIO_ACCESS_KEY"),
TempDir: os.Getenv("SIA_TEMP_DIR"),
password: os.Getenv("SIA_API_PASSWORD"),
}
// If Address not provided on command line or ENV, default to:
if sia.Address == "" {
sia.Address = "127.0.0.1:9980"
}
// If local Sia temp directory not specified, default to:
if sia.TempDir == "" {
sia.TempDir = ".sia_temp"
}
var err error
sia.TempDir, err = filepath.Abs(sia.TempDir)
if err != nil {
return nil, err
}
// Create the temp directory with proper permissions.
// Ignore error when dir already exists.
if err = os.MkdirAll(sia.TempDir, 0700); err != nil {
return nil, err
}
log.Println(colorBlue("\nSia Gateway Configuration:"))
log.Println(colorBlue(" Sia Daemon API Address:") + colorBold(fmt.Sprintf(" %s\n", sia.Address)))
log.Println(colorBlue(" Sia Temp Directory:") + colorBold(fmt.Sprintf(" %s\n", sia.TempDir)))
return sia, nil
}
// Shutdown saves any gateway metadata to disk // Shutdown saves any gateway metadata to disk
// if necessary and reload upon next restart. // if necessary and reload upon next restart.
func (s *siaObjects) Shutdown() error { func (s *siaObjects) Shutdown() error {
@ -290,40 +297,44 @@ func (s *siaObjects) Shutdown() error {
} }
// StorageInfo is not relevant to Sia backend. // StorageInfo is not relevant to Sia backend.
func (s *siaObjects) StorageInfo() (si StorageInfo) { func (s *siaObjects) StorageInfo() (si minio.StorageInfo) {
return si return si
} }
// MakeBucket creates a new container on Sia backend. // MakeBucket creates a new container on Sia backend.
func (s *siaObjects) MakeBucketWithLocation(bucket, location string) error { func (s *siaObjects) MakeBucketWithLocation(bucket, location string) error {
srcFile := pathJoin(s.TempDir, mustGetUUID()) srcFile := path.Join(s.TempDir, minio.MustGetUUID())
defer fsRemoveFile(srcFile) defer os.Remove(srcFile)
if _, err := fsCreateFile(srcFile, bytes.NewReader([]byte("")), nil, 0); err != nil { writer, err := os.Create(srcFile)
if err != nil {
return err
}
if _, err = io.Copy(writer, bytes.NewReader([]byte(""))); err != nil {
return err return err
} }
sha256sum := sha256.Sum256([]byte(bucket)) sha256sum := sha256.Sum256([]byte(bucket))
var siaObj = pathJoin(s.RootDir, bucket, hex.EncodeToString(sha256sum[:])) var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
return post(s.Address, "/renter/upload/"+siaObj, "source="+srcFile, s.password) return post(s.Address, "/renter/upload/"+siaObj, "source="+srcFile, s.password)
} }
// GetBucketInfo gets bucket metadata. // GetBucketInfo gets bucket metadata.
func (s *siaObjects) GetBucketInfo(bucket string) (bi BucketInfo, err error) { func (s *siaObjects) GetBucketInfo(bucket string) (bi minio.BucketInfo, err error) {
sha256sum := sha256.Sum256([]byte(bucket)) sha256sum := sha256.Sum256([]byte(bucket))
var siaObj = pathJoin(s.RootDir, bucket, hex.EncodeToString(sha256sum[:])) var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
dstFile := pathJoin(s.TempDir, mustGetUUID()) dstFile := path.Join(s.TempDir, minio.MustGetUUID())
defer fsRemoveFile(dstFile) defer os.Remove(dstFile)
if err := get(s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil { if err := get(s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil {
return bi, err return bi, err
} }
return BucketInfo{Name: bucket}, nil return minio.BucketInfo{Name: bucket}, nil
} }
// ListBuckets will detect and return existing buckets on Sia. // ListBuckets will detect and return existing buckets on Sia.
func (s *siaObjects) ListBuckets() (buckets []BucketInfo, err error) { func (s *siaObjects) ListBuckets() (buckets []minio.BucketInfo, err error) {
sObjs, serr := s.listRenterFiles("") sObjs, serr := s.listRenterFiles("")
if serr != nil { if serr != nil {
return buckets, serr return buckets, serr
@ -343,7 +354,7 @@ func (s *siaObjects) ListBuckets() (buckets []BucketInfo, err error) {
} }
for _, bktName := range m.ToSlice() { for _, bktName := range m.ToSlice() {
buckets = append(buckets, BucketInfo{ buckets = append(buckets, minio.BucketInfo{
Name: bktName, Name: bktName,
Created: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), Created: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
}) })
@ -355,12 +366,12 @@ func (s *siaObjects) ListBuckets() (buckets []BucketInfo, err error) {
// DeleteBucket deletes a bucket on Sia. // DeleteBucket deletes a bucket on Sia.
func (s *siaObjects) DeleteBucket(bucket string) error { func (s *siaObjects) DeleteBucket(bucket string) error {
sha256sum := sha256.Sum256([]byte(bucket)) sha256sum := sha256.Sum256([]byte(bucket))
var siaObj = pathJoin(s.RootDir, bucket, hex.EncodeToString(sha256sum[:])) var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
return post(s.Address, "/renter/delete/"+siaObj, "", s.password) return post(s.Address, "/renter/delete/"+siaObj, "", s.password)
} }
func (s *siaObjects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { func (s *siaObjects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
siaObjs, siaErr := s.listRenterFiles(bucket) siaObjs, siaErr := s.listRenterFiles(bucket)
if siaErr != nil { if siaErr != nil {
return loi, siaErr return loi, siaErr
@ -376,13 +387,13 @@ func (s *siaObjects) ListObjects(bucket string, prefix string, marker string, de
// based filtering. Once list renter files API supports paginated output we can support // based filtering. Once list renter files API supports paginated output we can support
// paginated results here as well - until then Listing is an expensive operation. // paginated results here as well - until then Listing is an expensive operation.
for _, sObj := range siaObjs { for _, sObj := range siaObjs {
name := strings.TrimPrefix(sObj.SiaPath, pathJoin(root, bucket, "/")) name := strings.TrimPrefix(sObj.SiaPath, path.Join(root, bucket)+"/")
// Skip the file created specially when bucket was created. // Skip the file created specially when bucket was created.
if name == hex.EncodeToString(sha256sum[:]) { if name == hex.EncodeToString(sha256sum[:]) {
continue continue
} }
if strings.HasPrefix(name, prefix) { if strings.HasPrefix(name, prefix) {
loi.Objects = append(loi.Objects, ObjectInfo{ loi.Objects = append(loi.Objects, minio.ObjectInfo{
Bucket: bucket, Bucket: bucket,
Name: name, Name: name,
Size: int64(sObj.Filesize), Size: int64(sObj.Filesize),
@ -394,33 +405,45 @@ func (s *siaObjects) ListObjects(bucket string, prefix string, marker string, de
} }
func (s *siaObjects) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error { func (s *siaObjects) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error {
dstFile := pathJoin(s.TempDir, mustGetUUID()) dstFile := path.Join(s.TempDir, minio.MustGetUUID())
defer fsRemoveFile(dstFile) defer os.Remove(dstFile)
var siaObj = pathJoin(s.RootDir, bucket, object) var siaObj = path.Join(s.RootDir, bucket, object)
if err := get(s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil { if err := get(s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil {
return err return err
} }
reader, size, err := fsOpenFile(dstFile, startOffset) reader, err := os.Open(dstFile)
if err != nil { if err != nil {
return toObjectErr(err, bucket, object) return err
} }
defer reader.Close() defer reader.Close()
st, err := reader.Stat()
if err != nil {
return err
}
size := st.Size()
if _, err = reader.Seek(startOffset, os.SEEK_SET); err != nil {
return err
}
// For negative length we read everything. // For negative length we read everything.
if length < 0 { if length < 0 {
length = size - startOffset length = size - startOffset
} }
bufSize := int64(readSizeV1) bufSize := int64(1 * humanize.MiByte)
if bufSize > length { if bufSize > length {
bufSize = length bufSize = length
} }
// Reply back invalid range if the input offset and length fall out of range. // Reply back invalid range if the input offset and length fall out of range.
if startOffset > size || startOffset+length > size { if startOffset > size || startOffset+length > size {
return errors.Trace(InvalidRange{startOffset, length, size}) return errors.Trace(minio.InvalidRange{
OffsetBegin: startOffset,
OffsetEnd: length,
ResourceSize: size,
})
} }
// Allocate a staging buffer. // Allocate a staging buffer.
@ -433,7 +456,9 @@ func (s *siaObjects) GetObject(bucket string, object string, startOffset int64,
// findSiaObject retrieves the siaObjectInfo for the Sia object with the given // findSiaObject retrieves the siaObjectInfo for the Sia object with the given
// Sia path name. // Sia path name.
func (s *siaObjects) findSiaObject(siaPath string) (siaObjectInfo, error) { func (s *siaObjects) findSiaObject(bucket, object string) (siaObjectInfo, error) {
siaPath := path.Join(s.RootDir, bucket, object)
sObjs, err := s.listRenterFiles("") sObjs, err := s.listRenterFiles("")
if err != nil { if err != nil {
return siaObjectInfo{}, err return siaObjectInfo{}, err
@ -445,64 +470,62 @@ func (s *siaObjects) findSiaObject(siaPath string) (siaObjectInfo, error) {
} }
} }
return siaObjectInfo{}, errors.Trace(ObjectNotFound{"", siaPath}) return siaObjectInfo{}, errors.Trace(minio.ObjectNotFound{
Bucket: bucket,
Object: object,
})
} }
// GetObjectInfo reads object info and replies back ObjectInfo // GetObjectInfo reads object info and replies back ObjectInfo
func (s *siaObjects) GetObjectInfo(bucket string, object string) (ObjectInfo, error) { func (s *siaObjects) GetObjectInfo(bucket string, object string) (minio.ObjectInfo, error) {
siaPath := pathJoin(s.RootDir, bucket, object) so, err := s.findSiaObject(bucket, object)
so, err := s.findSiaObject(siaPath)
if err != nil { if err != nil {
return ObjectInfo{}, err return minio.ObjectInfo{}, err
} }
// Metadata about sia objects is just quite minimal. Sia only provides // Metadata about sia objects is just quite minimal. Sia only provides file size.
// file size. return minio.ObjectInfo{
return ObjectInfo{ Bucket: bucket,
Bucket: bucket, Name: object,
Name: object, ModTime: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
Size: int64(so.Filesize), Size: int64(so.Filesize),
IsDir: false, IsDir: false,
}, nil }, nil
} }
// PutObject creates a new object with the incoming data, // PutObject creates a new object with the incoming data,
func (s *siaObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) { func (s *siaObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
bufSize := int64(readSizeV1) srcFile := path.Join(s.TempDir, minio.MustGetUUID())
size := data.Size() writer, err := os.Create(srcFile)
if size > 0 && bufSize > size { if err != nil {
bufSize = size
}
buf := make([]byte, int(bufSize))
srcFile := pathJoin(s.TempDir, mustGetUUID())
if _, err = fsCreateFile(srcFile, data, buf, data.Size()); err != nil {
return objInfo, err return objInfo, err
} }
var siaPath = pathJoin(s.RootDir, bucket, object) wsize, err := io.CopyN(writer, data, data.Size())
if err = post(s.Address, "/renter/upload/"+siaPath, "source="+srcFile, s.password); err != nil { if err != nil {
fsRemoveFile(srcFile) os.Remove(srcFile)
return objInfo, err return objInfo, err
} }
defer s.deleteTempFileWhenUploadCompletes(srcFile, siaPath)
objInfo = ObjectInfo{ if err = post(s.Address, "/renter/upload/"+path.Join(s.RootDir, bucket, object), "source="+srcFile, s.password); err != nil {
os.Remove(srcFile)
return objInfo, err
}
defer s.deleteTempFileWhenUploadCompletes(srcFile, bucket, object)
return minio.ObjectInfo{
Name: object, Name: object,
Bucket: bucket, Bucket: bucket,
ModTime: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), ModTime: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
Size: size, Size: wsize,
ETag: genETag(), ETag: minio.GenETag(),
} }, nil
return objInfo, nil
} }
// DeleteObject deletes a blob in bucket // DeleteObject deletes a blob in bucket
func (s *siaObjects) DeleteObject(bucket string, object string) error { func (s *siaObjects) DeleteObject(bucket string, object string) error {
// Tell Sia daemon to delete the object // Tell Sia daemon to delete the object
var siaObj = pathJoin(s.RootDir, bucket, object) var siaObj = path.Join(s.RootDir, bucket, object)
return post(s.Address, "/renter/delete/"+siaObj, "", s.password) return post(s.Address, "/renter/delete/"+siaObj, "", s.password)
} }
@ -549,22 +572,23 @@ func (s *siaObjects) listRenterFiles(bucket string) (siaObjs []siaObjectInfo, er
// deleteTempFileWhenUploadCompletes checks the status of a Sia file upload // deleteTempFileWhenUploadCompletes checks the status of a Sia file upload
// until it reaches 100% upload progress, then deletes the local temp copy from // until it reaches 100% upload progress, then deletes the local temp copy from
// the filesystem. // the filesystem.
func (s *siaObjects) deleteTempFileWhenUploadCompletes(tempFile string, siaPath string) { func (s *siaObjects) deleteTempFileWhenUploadCompletes(tempFile string, bucket, object string) {
var soi siaObjectInfo var soi siaObjectInfo
// Wait until 100% upload instead of 1x redundancy because if we delete // Wait until 100% upload instead of 1x redundancy because if we delete
// after 1x redundancy, the user has to pay the cost of other hosts // after 1x redundancy, the user has to pay the cost of other hosts
// redistributing the file. // redistributing the file.
for soi.UploadProgress < 100.0 { for soi.UploadProgress < 100.0 {
var err error var err error
soi, err = s.findSiaObject(siaPath) soi, err = s.findSiaObject(bucket, object)
if err != nil { if err != nil {
errorIf(err, "Unable to find file uploaded to Sia path %s", siaPath) minio.ErrorIf(err, "Unable to find file uploaded to Sia path %s/%s", bucket, object)
break break
} }
// Sleep between each check so that we're not hammering the Sia // Sleep between each check so that we're not hammering
// daemon with requests. // the Sia daemon with requests.
time.Sleep(15 * time.Second) time.Sleep(15 * time.Second)
} }
fsRemoveFile(tempFile)
os.Remove(tempFile)
} }

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package cmd package sia
import ( import (
"testing" "testing"

View file

@ -53,9 +53,6 @@ const (
globalMinioModeDistXL = "mode-server-distributed-xl" globalMinioModeDistXL = "mode-server-distributed-xl"
globalMinioModeGatewayPrefix = "mode-gateway-" globalMinioModeGatewayPrefix = "mode-gateway-"
// globalMinioSysTmp prefix is used in Azure/GCS gateway for save metadata sent by Initialize Multipart Upload API.
globalMinioSysTmp = "minio.sys.tmp/"
// Add new global values here. // Add new global values here.
) )

View file

@ -27,10 +27,6 @@ var errUnexpected = errors.New("Unexpected error, please report this issue at ht
// errCorruptedFormat - corrupted backend format. // errCorruptedFormat - corrupted backend format.
var errCorruptedFormat = errors.New("corrupted backend format, please join https://slack.minio.io for assistance") var errCorruptedFormat = errors.New("corrupted backend format, please join https://slack.minio.io for assistance")
// errFormatNotSupported - returned when older minio tries to parse metadata
// created by newer minio.
var errFormatNotSupported = errors.New("format not supported")
// errUnformattedDisk - unformatted disk found. // errUnformattedDisk - unformatted disk found.
var errUnformattedDisk = errors.New("unformatted disk found") var errUnformattedDisk = errors.New("unformatted disk found")

View file

@ -2394,13 +2394,4 @@ func TestToErrIsNil(t *testing.T) {
if toAPIErrorCode(nil) != ErrNone { if toAPIErrorCode(nil) != ErrNone {
t.Errorf("Test expected error code to be ErrNone, failed instead provided %d", toAPIErrorCode(nil)) t.Errorf("Test expected error code to be ErrNone, failed instead provided %d", toAPIErrorCode(nil))
} }
if s3ToObjectError(nil) != nil {
t.Errorf("Test expected to return nil, failed instead got a non-nil value %s", s3ToObjectError(nil))
}
if azureToObjectError(nil) != nil {
t.Errorf("Test expected to return nil, failed instead got a non-nil value %s", azureToObjectError(nil))
}
if gcsToObjectError(nil) != nil {
t.Errorf("Test expected to return nil, failed instead got a non-nil value %s", gcsToObjectError(nil))
}
} }

View file

@ -18,12 +18,14 @@ package cmd
import ( import (
"bytes" "bytes"
"crypto/tls"
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
"encoding/xml" "encoding/xml"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"net"
"net/http" "net/http"
"net/url" "net/url"
"os" "os"
@ -212,13 +214,13 @@ func UTCNow() time.Time {
return time.Now().UTC() return time.Now().UTC()
} }
// genETag - generate UUID based ETag // GenETag - generate UUID based ETag
func genETag() string { func GenETag() string {
return toS3ETag(getMD5Hash([]byte(mustGetUUID()))) return ToS3ETag(getMD5Hash([]byte(mustGetUUID())))
} }
// toS3ETag - return checksum to ETag // ToS3ETag - return checksum to ETag
func toS3ETag(etag string) string { func ToS3ETag(etag string) string {
etag = canonicalizeETag(etag) etag = canonicalizeETag(etag)
if !strings.HasSuffix(etag, "-1") { if !strings.HasSuffix(etag, "-1") {
@ -229,3 +231,23 @@ func toS3ETag(etag string) string {
return etag return etag
} }
// NewCustomHTTPTransport returns a new http configuration
// used while communicating with the cloud backends.
// This sets the value for MaxIdleConns from 2 (go default) to
// 100.
func NewCustomHTTPTransport() http.RoundTripper {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{RootCAs: globalRootCAs},
DisableCompression: true,
}
}

View file

@ -294,7 +294,7 @@ func TestDumpRequest(t *testing.T) {
} }
} }
// Test toS3ETag() // Test ToS3ETag()
func TestToS3ETag(t *testing.T) { func TestToS3ETag(t *testing.T) {
testCases := []struct { testCases := []struct {
etag string etag string
@ -306,7 +306,7 @@ func TestToS3ETag(t *testing.T) {
{"5d57546eeb86b3eba68967292fba0644-1", "5d57546eeb86b3eba68967292fba0644-1"}, {"5d57546eeb86b3eba68967292fba0644-1", "5d57546eeb86b3eba68967292fba0644-1"},
} }
for i, testCase := range testCases { for i, testCase := range testCases {
etag := toS3ETag(testCase.etag) etag := ToS3ETag(testCase.etag)
if etag != testCase.expectedETag { if etag != testCase.expectedETag {
t.Fatalf("test %v: expected: %v, got: %v", i+1, testCase.expectedETag, etag) t.Fatalf("test %v: expected: %v, got: %v", i+1, testCase.expectedETag, etag)
} }

View file

@ -30,6 +30,9 @@ import (
version "github.com/hashicorp/go-version" version "github.com/hashicorp/go-version"
"github.com/minio/mc/pkg/console" "github.com/minio/mc/pkg/console"
minio "github.com/minio/minio/cmd" minio "github.com/minio/minio/cmd"
// Import gateway
_ "github.com/minio/minio/cmd/gateway"
) )
const ( const (