diff --git a/cmd/admin-rpc-client.go b/cmd/admin-rpc-client.go index 6cb0709be..8604f18a6 100644 --- a/cmd/admin-rpc-client.go +++ b/cmd/admin-rpc-client.go @@ -150,7 +150,7 @@ func makeAdminPeers(eps []*url.URL) adminPeers { secretKey: serverCred.SecretKey, serverAddr: ep.Host, secureConn: globalIsSSL, - serviceEndpoint: path.Join(reservedBucket, adminPath), + serviceEndpoint: path.Join(minioReservedBucketPath, adminPath), serviceName: "Admin", } diff --git a/cmd/admin-rpc-server.go b/cmd/admin-rpc-server.go index d5f8178bd..b11eb8092 100644 --- a/cmd/admin-rpc-server.go +++ b/cmd/admin-rpc-server.go @@ -141,7 +141,7 @@ func registerAdminRPCRouter(mux *router.Router) error { if err != nil { return traceError(err) } - adminRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter() + adminRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter() adminRouter.Path(adminPath).Handler(adminRPCServer) return nil } diff --git a/cmd/browser-peer-rpc.go b/cmd/browser-peer-rpc.go index 398b907bf..f078c43d0 100644 --- a/cmd/browser-peer-rpc.go +++ b/cmd/browser-peer-rpc.go @@ -111,7 +111,7 @@ func updateCredsOnPeers(creds credential) map[string]error { secretKey: serverCred.SecretKey, serverAddr: peers[ix], secureConn: globalIsSSL, - serviceEndpoint: path.Join(reservedBucket, browserPeerPath), + serviceEndpoint: path.Join(minioReservedBucketPath, browserPeerPath), serviceName: "BrowserPeer", }) diff --git a/cmd/browser-peer-rpc_test.go b/cmd/browser-peer-rpc_test.go index 5554c4f32..ccf3301cf 100644 --- a/cmd/browser-peer-rpc_test.go +++ b/cmd/browser-peer-rpc_test.go @@ -36,7 +36,7 @@ func (s *TestRPCBrowserPeerSuite) SetUpSuite(c *testing.T) { serverAddr: s.testServer.Server.Listener.Addr().String(), accessKey: s.testServer.AccessKey, secretKey: s.testServer.SecretKey, - serviceEndpoint: path.Join(reservedBucket, browserPeerPath), + serviceEndpoint: path.Join(minioReservedBucketPath, browserPeerPath), serviceName: "BrowserPeer", } } diff --git 
a/cmd/browser-rpc-router.go b/cmd/browser-rpc-router.go index f7b762441..d88c5779d 100644 --- a/cmd/browser-rpc-router.go +++ b/cmd/browser-rpc-router.go @@ -45,7 +45,7 @@ func registerBrowserPeerRPCRouter(mux *router.Router) error { return traceError(err) } - bpRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter() + bpRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter() bpRouter.Path(browserPeerPath).Handler(bpRPCServer) return nil } diff --git a/cmd/bucket-notification-utils.go b/cmd/bucket-notification-utils.go index 8845c5cdb..39a719f12 100644 --- a/cmd/bucket-notification-utils.go +++ b/cmd/bucket-notification-utils.go @@ -253,19 +253,19 @@ func unmarshalSqsARN(queueARN string) (mSqs arnSQS) { } sqsType := strings.TrimPrefix(queueARN, minioSqs+serverConfig.GetRegion()+":") switch { - case strings.HasSuffix(sqsType, queueTypeAMQP): + case hasSuffix(sqsType, queueTypeAMQP): mSqs.Type = queueTypeAMQP - case strings.HasSuffix(sqsType, queueTypeNATS): + case hasSuffix(sqsType, queueTypeNATS): mSqs.Type = queueTypeNATS - case strings.HasSuffix(sqsType, queueTypeElastic): + case hasSuffix(sqsType, queueTypeElastic): mSqs.Type = queueTypeElastic - case strings.HasSuffix(sqsType, queueTypeRedis): + case hasSuffix(sqsType, queueTypeRedis): mSqs.Type = queueTypeRedis - case strings.HasSuffix(sqsType, queueTypePostgreSQL): + case hasSuffix(sqsType, queueTypePostgreSQL): mSqs.Type = queueTypePostgreSQL - case strings.HasSuffix(sqsType, queueTypeKafka): + case hasSuffix(sqsType, queueTypeKafka): mSqs.Type = queueTypeKafka - case strings.HasSuffix(sqsType, queueTypeWebhook): + case hasSuffix(sqsType, queueTypeWebhook): mSqs.Type = queueTypeWebhook } // Add more queues here. 
mSqs.AccountID = strings.TrimSuffix(sqsType, ":"+mSqs.Type) diff --git a/cmd/errors.go b/cmd/errors.go index b61685af8..0476a3231 100644 --- a/cmd/errors.go +++ b/cmd/errors.go @@ -86,10 +86,10 @@ func traceError(e error, errs ...error) error { fn := runtime.FuncForPC(pc) file, line := fn.FileLine(pc) name := fn.Name() - if strings.HasSuffix(name, "ServeHTTP") { + if hasSuffix(name, "ServeHTTP") { break } - if strings.HasSuffix(name, "runtime.") { + if hasSuffix(name, "runtime.") { break } diff --git a/cmd/fs-v1-multipart.go b/cmd/fs-v1-multipart.go index 4ae57635c..b28eaf24a 100644 --- a/cmd/fs-v1-multipart.go +++ b/cmd/fs-v1-multipart.go @@ -278,7 +278,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark } entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket)) - if strings.HasSuffix(walkResult.entry, slashSeparator) { + if hasSuffix(walkResult.entry, slashSeparator) { uploads = append(uploads, uploadMetadata{ Object: entry, }) @@ -314,7 +314,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark for _, upload := range uploads { var objectName string var uploadID string - if strings.HasSuffix(upload.Object, slashSeparator) { + if hasSuffix(upload.Object, slashSeparator) { // All directory entries are common prefixes. uploadID = "" // Upload ids are empty for CommonPrefixes. objectName = upload.Object diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index 16f3263a3..de778b32b 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -19,7 +19,6 @@ package cmd import ( "crypto/md5" "encoding/hex" - "errors" "fmt" "hash" "io" @@ -291,12 +290,11 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) { return nil, toObjectErr(traceError(errDiskNotFound)) } - var invalidBucketNames []string for _, entry := range entries { - if entry == minioMetaBucket+"/" || !strings.HasSuffix(entry, slashSeparator) { + // Ignore all reserved bucket names and invalid bucket names. 
+ if isReservedOrInvalidBucket(entry) { continue } - var fi os.FileInfo fi, err = fsStatDir(pathJoin(fs.fsPath, entry)) if err != nil { @@ -310,24 +308,13 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) { return nil, err } - if !IsValidBucketName(fi.Name()) { - invalidBucketNames = append(invalidBucketNames, fi.Name()) - continue - } - bucketInfos = append(bucketInfos, BucketInfo{ Name: fi.Name(), - // As os.Stat() doesn't carry other than ModTime(), use ModTime() as CreatedTime. + // As os.Stat() doesn't carry CreatedTime, use ModTime() as CreatedTime. Created: fi.ModTime(), }) } - // Print a user friendly message if we indeed skipped certain directories which are - // incompatible with S3's bucket name restrictions. - if len(invalidBucketNames) > 0 { - errorIf(errors.New("One or more invalid bucket names found"), "Skipping %s", invalidBucketNames) - } - // Sort bucket infos by bucket name. sort.Sort(byBucketName(bucketInfos)) @@ -780,7 +767,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey // Convert entry to ObjectInfo entryToObjectInfo := func(entry string) (objInfo ObjectInfo, err error) { - if strings.HasSuffix(entry, slashSeparator) { + if hasSuffix(entry, slashSeparator) { // Object name needs to be full path. objInfo.Name = entry objInfo.IsDir = true @@ -804,7 +791,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey // bucket argument is unused as we don't need to StatFile // to figure if it's a file, just need to check that the // object string does not end with "/". 
- return !strings.HasSuffix(object, slashSeparator) + return !hasSuffix(object, slashSeparator) } listDir := fs.listDirFactory(isLeaf) walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh) diff --git a/cmd/generic-handlers.go b/cmd/generic-handlers.go index 0bf9fd9da..0b3c7f3dc 100644 --- a/cmd/generic-handlers.go +++ b/cmd/generic-handlers.go @@ -68,7 +68,8 @@ func (h requestSizeLimitHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques // Reserved bucket. const ( - reservedBucket = "/minio" + minioReservedBucket = "minio" + minioReservedBucketPath = "/" + minioReservedBucket ) // Adds redirect rules for incoming requests. @@ -86,8 +87,8 @@ func setBrowserRedirectHandler(h http.Handler) http.Handler { // serves only limited purpose on redirect-handler for // browser requests. func getRedirectLocation(urlPath string) (rLocation string) { - if urlPath == reservedBucket { - rLocation = reservedBucket + "/" + if urlPath == minioReservedBucketPath { + rLocation = minioReservedBucketPath + "/" } if contains([]string{ "/", @@ -95,7 +96,7 @@ func getRedirectLocation(urlPath string) (rLocation string) { "/login", "/favicon.ico", }, urlPath) { - rLocation = reservedBucket + urlPath + rLocation = minioReservedBucketPath + urlPath } return rLocation } @@ -143,8 +144,8 @@ func setBrowserCacheControlHandler(h http.Handler) http.Handler { func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if r.Method == httpGET && guessIsBrowserReq(r) && globalIsBrowserEnabled { // For all browser requests set appropriate Cache-Control policies - if hasPrefix(r.URL.Path, reservedBucket+"/") { - if hasSuffix(r.URL.Path, ".js") || r.URL.Path == reservedBucket+"/favicon.ico" { + if hasPrefix(r.URL.Path, minioReservedBucketPath+"/") { + if hasSuffix(r.URL.Path, ".js") || r.URL.Path == minioReservedBucketPath+"/favicon.ico" { // For assets set cache expiry of one year. 
For each release, the name // of the asset name will change and hence it can not be served from cache. w.Header().Set("Cache-Control", "max-age=31536000") @@ -160,17 +161,17 @@ func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Adds verification for incoming paths. type minioPrivateBucketHandler struct { - handler http.Handler - reservedBucket string + handler http.Handler + reservedBucketPath string } func setPrivateBucketHandler(h http.Handler) http.Handler { - return minioPrivateBucketHandler{handler: h, reservedBucket: reservedBucket} + return minioPrivateBucketHandler{h, minioReservedBucketPath} } func (h minioPrivateBucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // For all non browser requests, reject access to 'reservedBucket'. - if !guessIsBrowserReq(r) && path.Clean(r.URL.Path) == reservedBucket { + // For all non browser requests, reject access to 'reservedBucketPath'. + if !guessIsBrowserReq(r) && path.Clean(r.URL.Path) == h.reservedBucketPath { writeErrorResponse(w, ErrAllAccessDisabled, r.URL) return } diff --git a/cmd/generic-handlers_test.go b/cmd/generic-handlers_test.go index f1645d030..f9872709e 100644 --- a/cmd/generic-handlers_test.go +++ b/cmd/generic-handlers_test.go @@ -29,28 +29,28 @@ func TestRedirectLocation(t *testing.T) { }{ { // 1. When urlPath is '/minio' - urlPath: reservedBucket, - location: reservedBucket + "/", + urlPath: minioReservedBucketPath, + location: minioReservedBucketPath + "/", }, { // 2. When urlPath is '/' urlPath: "/", - location: reservedBucket + "/", + location: minioReservedBucketPath + "/", }, { // 3. When urlPath is '/webrpc' urlPath: "/webrpc", - location: reservedBucket + "/webrpc", + location: minioReservedBucketPath + "/webrpc", }, { // 4. When urlPath is '/login' urlPath: "/login", - location: reservedBucket + "/login", + location: minioReservedBucketPath + "/login", }, { // 5. 
When urlPath is '/favicon.ico' urlPath: "/favicon.ico", - location: reservedBucket + "/favicon.ico", + location: minioReservedBucketPath + "/favicon.ico", }, { // 6. When urlPath is '/unknown' diff --git a/cmd/humanized-duration_test.go b/cmd/humanized-duration_test.go index 9f51b1296..193710767 100644 --- a/cmd/humanized-duration_test.go +++ b/cmd/humanized-duration_test.go @@ -17,7 +17,6 @@ package cmd import ( - "strings" "testing" "time" ) @@ -26,10 +25,10 @@ import ( func TestHumanizedDuration(t *testing.T) { duration := time.Duration(90487000000000) humanDuration := timeDurationToHumanizedDuration(duration) - if !strings.HasSuffix(humanDuration.String(), "seconds") { + if !hasSuffix(humanDuration.String(), "seconds") { t.Fatal("Stringer method for humanized duration should have seconds.", humanDuration.String()) } - if strings.HasSuffix(humanDuration.StringShort(), "seconds") { + if hasSuffix(humanDuration.StringShort(), "seconds") { t.Fatal("StringShorter method for humanized duration should not have seconds.", humanDuration.StringShort()) } @@ -42,9 +41,9 @@ func TestHumanizedDuration(t *testing.T) { t.Fatalf("Expected %#v, got %#v incorrect conversion of duration to humanized form", expectedHumanSecDuration, humanSecDuration) } - if strings.HasSuffix(humanSecDuration.String(), "days") || - strings.HasSuffix(humanSecDuration.String(), "hours") || - strings.HasSuffix(humanSecDuration.String(), "minutes") { + if hasSuffix(humanSecDuration.String(), "days") || + hasSuffix(humanSecDuration.String(), "hours") || + hasSuffix(humanSecDuration.String(), "minutes") { t.Fatal("Stringer method for humanized duration should have only seconds.", humanSecDuration.String()) } @@ -57,7 +56,7 @@ func TestHumanizedDuration(t *testing.T) { t.Fatalf("Expected %#v, got %#v incorrect conversion of duration to humanized form", expectedHumanMinDuration, humanMinDuration) } - if strings.HasSuffix(humanMinDuration.String(), "hours") { + if hasSuffix(humanMinDuration.String(), 
"hours") { t.Fatal("Stringer method for humanized duration should have only minutes.", humanMinDuration.String()) } @@ -70,7 +69,7 @@ func TestHumanizedDuration(t *testing.T) { t.Fatalf("Expected %#v, got %#v incorrect conversion of duration to humanized form", expectedHumanHourDuration, humanHourDuration) } - if strings.HasSuffix(humanHourDuration.String(), "days") { + if hasSuffix(humanHourDuration.String(), "days") { t.Fatal("Stringer method for humanized duration should have hours.", humanHourDuration.String()) } } diff --git a/cmd/lock-rpc-server.go b/cmd/lock-rpc-server.go index c1761dbd7..ad86b035d 100644 --- a/cmd/lock-rpc-server.go +++ b/cmd/lock-rpc-server.go @@ -30,7 +30,7 @@ import ( const ( // Lock rpc server endpoint. - lockRPCPath = "/minio/lock" + lockRPCPath = "/lock" // Lock maintenance interval. lockMaintenanceInterval = 1 * time.Minute // 1 minute. @@ -122,8 +122,8 @@ func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error if err := lockRPCServer.RegisterName("Dsync", lockServer); err != nil { return traceError(err) } - lockRouter := mux.PathPrefix(reservedBucket).Subrouter() - lockRouter.Path(path.Join("/lock", lockServer.rpcPath)).Handler(lockRPCServer) + lockRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter() + lockRouter.Path(path.Join(lockRPCPath, lockServer.rpcPath)).Handler(lockRPCServer) } return nil } diff --git a/cmd/object-api-common.go b/cmd/object-api-common.go index 0f0c4a414..e27131ac6 100644 --- a/cmd/object-api-common.go +++ b/cmd/object-api-common.go @@ -20,7 +20,6 @@ import ( "net" "net/url" "runtime" - "strings" "sync" "time" @@ -59,7 +58,7 @@ func isRemoteDisk(disk StorageAPI) bool { // if size == 0 and object ends with slashSeparator then // returns true. 
func isObjectDir(object string, size int64) bool { - return strings.HasSuffix(object, slashSeparator) && size == 0 + return hasSuffix(object, slashSeparator) && size == 0 } // Converts just bucket, object metadata into ObjectInfo datatype. @@ -284,7 +283,7 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error { var delFunc func(string) error // Function to delete entries recursively. delFunc = func(entryPath string) error { - if !strings.HasSuffix(entryPath, slashSeparator) { + if !hasSuffix(entryPath, slashSeparator) { // Delete the file entry. return traceError(storage.DeleteFile(volume, entryPath)) } diff --git a/cmd/object-api-utils.go b/cmd/object-api-utils.go index 206c0c1a0..4cf72acde 100644 --- a/cmd/object-api-utils.go +++ b/cmd/object-api-utils.go @@ -129,7 +129,7 @@ func retainSlash(s string) string { func pathJoin(elem ...string) string { trailingSlash := "" if len(elem) > 0 { - if strings.HasSuffix(elem[len(elem)-1], slashSeparator) { + if hasSuffix(elem[len(elem)-1], slashSeparator) { trailingSlash = "/" } } @@ -180,6 +180,15 @@ func hasSuffix(s string, suffix string) bool { return strings.HasSuffix(s, suffix) } +// Ignores all reserved bucket names or invalid bucket names. +func isReservedOrInvalidBucket(bucketEntry string) bool { + bucketEntry = strings.TrimSuffix(bucketEntry, slashSeparator) + if !IsValidBucketName(bucketEntry) { + return true + } + return bucketEntry == minioMetaBucket || bucketEntry == minioReservedBucket +} + // byBucketName is a collection satisfying sort.Interface. 
type byBucketName []BucketInfo diff --git a/cmd/posix.go b/cmd/posix.go index 89d1c77ed..12d9045ae 100644 --- a/cmd/posix.go +++ b/cmd/posix.go @@ -24,7 +24,6 @@ import ( slashpath "path" "path/filepath" "runtime" - "strings" "sync" "sync/atomic" "syscall" @@ -319,7 +318,7 @@ func listVols(dirPath string) ([]VolInfo, error) { } var volsInfo []VolInfo for _, entry := range entries { - if !strings.HasSuffix(entry, slashSeparator) || !isValidVolname(slashpath.Clean(entry)) { + if !hasSuffix(entry, slashSeparator) || !isValidVolname(slashpath.Clean(entry)) { // Skip if entry is neither a directory not a valid volume name. continue } @@ -917,8 +916,8 @@ func (s *posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err e } } - srcIsDir := strings.HasSuffix(srcPath, slashSeparator) - dstIsDir := strings.HasSuffix(dstPath, slashSeparator) + srcIsDir := hasSuffix(srcPath, slashSeparator) + dstIsDir := hasSuffix(dstPath, slashSeparator) // Either src and dst have to be directories or files, else return error. 
if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) { return errFileAccessDenied diff --git a/cmd/s3-peer-client.go b/cmd/s3-peer-client.go index 6358f5881..a90022eb4 100644 --- a/cmd/s3-peer-client.go +++ b/cmd/s3-peer-client.go @@ -66,7 +66,7 @@ func makeS3Peers(eps []*url.URL) s3Peers { accessKey: serverCred.AccessKey, secretKey: serverCred.SecretKey, serverAddr: ep.Host, - serviceEndpoint: path.Join(reservedBucket, s3Path), + serviceEndpoint: path.Join(minioReservedBucketPath, s3Path), secureConn: globalIsSSL, serviceName: "S3", } diff --git a/cmd/s3-peer-router.go b/cmd/s3-peer-router.go index e843c17bb..3701bc8ab 100644 --- a/cmd/s3-peer-router.go +++ b/cmd/s3-peer-router.go @@ -45,7 +45,7 @@ func registerS3PeerRPCRouter(mux *router.Router) error { return traceError(err) } - s3PeerRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter() + s3PeerRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter() s3PeerRouter.Path(s3Path).Handler(s3PeerRPCServer) return nil } diff --git a/cmd/s3-peer-rpc-handlers_test.go b/cmd/s3-peer-rpc-handlers_test.go index a9e813cb7..4e7378cb9 100644 --- a/cmd/s3-peer-rpc-handlers_test.go +++ b/cmd/s3-peer-rpc-handlers_test.go @@ -36,7 +36,7 @@ func (s *TestRPCS3PeerSuite) SetUpSuite(t *testing.T) { serverAddr: s.testServer.Server.Listener.Addr().String(), accessKey: s.testServer.AccessKey, secretKey: s.testServer.SecretKey, - serviceEndpoint: path.Join(reservedBucket, s3Path), + serviceEndpoint: path.Join(minioReservedBucketPath, s3Path), serviceName: "S3", } } diff --git a/cmd/storage-rpc-client.go b/cmd/storage-rpc-client.go index 4020b6382..0064fe35d 100644 --- a/cmd/storage-rpc-client.go +++ b/cmd/storage-rpc-client.go @@ -34,7 +34,7 @@ type networkStorage struct { } const ( - storageRPCPath = reservedBucket + "/storage" + storageRPCPath = minioReservedBucketPath + "/storage" ) // Converts rpc.ServerError to underlying error. 
This function is diff --git a/cmd/storage-rpc-server.go b/cmd/storage-rpc-server.go index 6ad287868..c1e9ab787 100644 --- a/cmd/storage-rpc-server.go +++ b/cmd/storage-rpc-server.go @@ -233,7 +233,7 @@ func registerStorageRPCRouters(mux *router.Router, srvCmdConfig serverCmdConfig) return traceError(err) } // Add minio storage routes. - storageRouter := mux.PathPrefix(reservedBucket).Subrouter() + storageRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter() storageRouter.Path(path.Join("/storage", stServer.path)).Handler(storageRPCServer) } return nil diff --git a/cmd/tree-walk_test.go b/cmd/tree-walk_test.go index 9490c6c56..211630d05 100644 --- a/cmd/tree-walk_test.go +++ b/cmd/tree-walk_test.go @@ -21,7 +21,6 @@ import ( "io/ioutil" "reflect" "sort" - "strings" "testing" "time" ) @@ -187,7 +186,7 @@ func TestTreeWalk(t *testing.T) { } isLeaf := func(volume, prefix string) bool { - return !strings.HasSuffix(prefix, slashSeparator) + return !hasSuffix(prefix, slashSeparator) } listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk) // Simple test for prefix based walk. @@ -225,7 +224,7 @@ func TestTreeWalkTimeout(t *testing.T) { } isLeaf := func(volume, prefix string) bool { - return !strings.HasSuffix(prefix, slashSeparator) + return !hasSuffix(prefix, slashSeparator) } listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk) @@ -304,7 +303,7 @@ func TestListDir(t *testing.T) { // create listDir function. listDir := listDirFactory(func(volume, prefix string) bool { - return !strings.HasSuffix(prefix, slashSeparator) + return !hasSuffix(prefix, slashSeparator) }, xlTreeWalkIgnoredErrs, disk1, disk2) // Create file1 in fsDir1 and file2 in fsDir2. @@ -376,7 +375,7 @@ func TestRecursiveTreeWalk(t *testing.T) { // Simple isLeaf check, returns true if there is no trailing "/" isLeaf := func(volume, prefix string) bool { - return !strings.HasSuffix(prefix, slashSeparator) + return !hasSuffix(prefix, slashSeparator) } // Create listDir function. 
@@ -486,7 +485,7 @@ func TestSortedness(t *testing.T) { // Simple isLeaf check, returns true if there is no trailing "/" isLeaf := func(volume, prefix string) bool { - return !strings.HasSuffix(prefix, slashSeparator) + return !hasSuffix(prefix, slashSeparator) } // Create listDir function. listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk1) @@ -563,7 +562,7 @@ func TestTreeWalkIsEnd(t *testing.T) { } isLeaf := func(volume, prefix string) bool { - return !strings.HasSuffix(prefix, slashSeparator) + return !hasSuffix(prefix, slashSeparator) } // Create listDir function. listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk1) diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index c988ab835..3f092c6d4 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -161,10 +161,6 @@ func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, re return toJSONError(err) } for _, bucket := range buckets { - if bucket.Name == path.Base(reservedBucket) { - continue - } - reply.Buckets = append(reply.Buckets, WebBucketInfo{ Name: bucket.Name, CreationDate: bucket.Created, @@ -584,7 +580,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) { return objectAPI.GetObject(args.BucketName, objectName, 0, info.Size, writer) } - if !strings.HasSuffix(object, "/") { + if !hasSuffix(object, slashSeparator) { // If not a directory, compress the file and write it to response. err := zipit(pathJoin(args.Prefix, object)) if err != nil { diff --git a/cmd/web-router.go b/cmd/web-router.go index c4fdfbb2c..66505bdff 100644 --- a/cmd/web-router.go +++ b/cmd/web-router.go @@ -39,7 +39,7 @@ type indexHandler struct { } func (h indexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - r.URL.Path = reservedBucket + "/" + r.URL.Path = minioReservedBucketPath + "/" h.handler.ServeHTTP(w, r) } @@ -68,7 +68,7 @@ func registerWebRouter(mux *router.Router) error { codec := json2.NewCodec() // Minio browser router. 
- webBrowserRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter() + webBrowserRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter() // Initialize json rpc handlers. webRPC := jsonrpc.NewServer() @@ -87,13 +87,13 @@ func registerWebRouter(mux *router.Router) error { webBrowserRouter.Methods("GET").Path("/zip").Queries("token", "{token:.*}").HandlerFunc(web.DownloadZip) // Add compression for assets. - compressedAssets := handlers.CompressHandler(http.StripPrefix(reservedBucket, http.FileServer(assetFS()))) + compressedAssets := handlers.CompressHandler(http.StripPrefix(minioReservedBucketPath, http.FileServer(assetFS()))) // Serve javascript files and favicon from assets. webBrowserRouter.Path(fmt.Sprintf("/{assets:[^/]+.js|%s}", specialAssets)).Handler(compressedAssets) // Serve index.html for rest of the requests. - webBrowserRouter.Path("/{index:.*}").Handler(indexHandler{http.StripPrefix(reservedBucket, http.FileServer(assetFS()))}) + webBrowserRouter.Path("/{index:.*}").Handler(indexHandler{http.StripPrefix(minioReservedBucketPath, http.FileServer(assetFS()))}) return nil } diff --git a/cmd/xl-v1-bucket.go b/cmd/xl-v1-bucket.go index 4078b7292..f2b9a5af7 100644 --- a/cmd/xl-v1-bucket.go +++ b/cmd/xl-v1-bucket.go @@ -165,13 +165,7 @@ func (xl xlObjects) listBuckets() (bucketsInfo []BucketInfo, err error) { // should take care of this. var bucketsInfo []BucketInfo for _, volInfo := range volsInfo { - // StorageAPI can send volume names which are incompatible - // with buckets, handle it and skip them. - if !IsValidBucketName(volInfo.Name) { - continue - } - // Ignore the volume special bucket. 
- if volInfo.Name == minioMetaBucket { + if isReservedOrInvalidBucket(volInfo.Name) { continue } bucketsInfo = append(bucketsInfo, BucketInfo{ diff --git a/cmd/xl-v1-list-objects-heal.go b/cmd/xl-v1-list-objects-heal.go index 1fb6702ac..2fcb02c3c 100644 --- a/cmd/xl-v1-list-objects-heal.go +++ b/cmd/xl-v1-list-objects-heal.go @@ -114,7 +114,7 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma } entry := walkResult.entry var objInfo ObjectInfo - if strings.HasSuffix(entry, slashSeparator) { + if hasSuffix(entry, slashSeparator) { // Object name needs to be full path. objInfo.Bucket = bucket objInfo.Name = entry diff --git a/cmd/xl-v1-list-objects.go b/cmd/xl-v1-list-objects.go index b9fff36ee..480f5359c 100644 --- a/cmd/xl-v1-list-objects.go +++ b/cmd/xl-v1-list-objects.go @@ -16,8 +16,6 @@ package cmd -import "strings" - // Returns function "listDir" of the type listDirFunc. // isLeaf - is used by listDir function to check if an entry is a leaf or non-leaf entry. // disks - used for doing disk.ListDir(). FS passes single disk argument, XL passes a list of disks. @@ -83,7 +81,7 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey } entry := walkResult.entry var objInfo ObjectInfo - if strings.HasSuffix(entry, slashSeparator) { + if hasSuffix(entry, slashSeparator) { // Object name needs to be full path. objInfo.Bucket = bucket objInfo.Name = entry diff --git a/cmd/xl-v1-multipart.go b/cmd/xl-v1-multipart.go index 51624784c..491223f13 100644 --- a/cmd/xl-v1-multipart.go +++ b/cmd/xl-v1-multipart.go @@ -356,7 +356,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket)) // For an entry looking like a directory, store and // continue the loop not need to fetch uploads. 
- if strings.HasSuffix(walkResult.entry, slashSeparator) { + if hasSuffix(walkResult.entry, slashSeparator) { uploads = append(uploads, uploadMetadata{ Object: entry, }) @@ -409,7 +409,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark for _, upload := range uploads { var objectName string var uploadID string - if strings.HasSuffix(upload.Object, slashSeparator) { + if hasSuffix(upload.Object, slashSeparator) { // All directory entries are common prefixes. uploadID = "" // For common prefixes, upload ids are empty. objectName = upload.Object