Convert errors tracer into a separate package (#5221)

Harshavardhana 2017-11-25 11:58:29 -08:00 committed by GitHub
parent 6e6aeb6a9e
commit 8efa82126b
82 changed files with 1117 additions and 896 deletions
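The change is mechanical at each call site: the cmd-local helpers traceError, traceErrorf, errorCause and isErrIgnored move into the importable package github.com/minio/minio/pkg/errors as errors.Trace, errors.Tracef, errors.Cause and errors.IsErrIgnored. A minimal before/after sketch of the pattern repeated throughout the hunks below (readConfigBytes is a hypothetical helper, for illustration only):

	package cmd

	import "github.com/minio/minio/pkg/errors"

	func loadConfigBytes() ([]byte, error) {
		buf, err := readConfigBytes() // hypothetical stand-in for any failing call
		if err != nil {
			// Before this commit: return nil, traceError(err)
			return nil, errors.Trace(err)
		}
		return buf, nil
	}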


@ -33,6 +33,7 @@ import (
router "github.com/gorilla/mux"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
)
var configJSON = []byte(`{
@ -1033,7 +1034,7 @@ func buildAdminRequest(queryVal url.Values, opHdr, method string,
contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {
req, err := newTestRequest(method, "/?"+queryVal.Encode(), contentLength, bodySeeker)
if err != nil {
return nil, traceError(err)
return nil, errors.Trace(err)
}
req.Header.Set(minioAdminOpHeader, opHdr)
@ -1041,7 +1042,7 @@ func buildAdminRequest(queryVal url.Values, opHdr, method string,
cred := serverConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
return nil, traceError(err)
return nil, errors.Trace(err)
}
return req, nil


@ -18,7 +18,6 @@ package cmd
import (
"encoding/json"
"errors"
"fmt"
"net"
"os"
@ -30,6 +29,7 @@ import (
"time"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/pkg/errors"
)
const (
@ -159,7 +159,7 @@ func (rc remoteAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
// GetConfig - returns config.json of the local server.
func (lc localAdminClient) GetConfig() ([]byte, error) {
if serverConfig == nil {
return nil, errors.New("config not present")
return nil, fmt.Errorf("config not present")
}
return json.Marshal(serverConfig)
@ -483,7 +483,7 @@ func getPeerConfig(peers adminPeers) ([]byte, error) {
configJSON, err := getValidServerConfig(serverConfigs, errs)
if err != nil {
errorIf(err, "Unable to find a valid server config")
return nil, traceError(err)
return nil, errors.Trace(err)
}
// Return the config.json that was present quorum or more


@ -18,7 +18,6 @@ package cmd
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
@ -26,11 +25,12 @@ import (
"time"
router "github.com/gorilla/mux"
"github.com/minio/minio/pkg/errors"
)
const adminPath = "/admin"
var errUnsupportedBackend = errors.New("not supported for non erasure-code backend")
var errUnsupportedBackend = fmt.Errorf("not supported for non erasure-code backend")
// adminCmd - exports RPC methods for service status, stop and
// restart commands.
@ -166,7 +166,7 @@ func (s *adminCmd) GetConfig(args *AuthRPCArgs, reply *ConfigReply) error {
}
if serverConfig == nil {
return errors.New("config not present")
return fmt.Errorf("config not present")
}
jsonBytes, err := json.Marshal(serverConfig)
@ -238,7 +238,7 @@ func registerAdminRPCRouter(mux *router.Router) error {
adminRPCServer := newRPCServer()
err := adminRPCServer.RegisterName("Admin", adminRPCHandler)
if err != nil {
return traceError(err)
return errors.Trace(err)
}
adminRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
adminRouter.Path(adminPath).Handler(adminRPCServer)
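A side effect runs through the files above: once github.com/minio/minio/pkg/errors is imported under the name errors, the standard-library errors package no longer fits under its default name, so errors.New calls become fmt.Errorf here, while other files further down import the new package under an alias such as errors2. A minimal sketch of both resolutions, assuming nothing beyond what the hunks show:

	package cmd

	import (
		"fmt"

		errors2 "github.com/minio/minio/pkg/errors" // alias keeps the name "errors" free
	)

	// Plain sentinel: fmt.Errorf replaces the now-shadowed errors.New.
	var errUnsupportedBackend = fmt.Errorf("not supported for non erasure-code backend")

	// wrapErr shows the aliased form used where both packages are needed.
	func wrapErr(err error) error {
		return errors2.Trace(err)
	}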


@ -21,6 +21,7 @@ import (
"net/http"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -752,7 +753,7 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
return ErrNone
}
err = errorCause(err)
err = errors.Cause(err)
// Verify if the underlying error is signature mismatch.
switch err {
case errSignatureMismatch:


@ -18,6 +18,8 @@ package cmd
import (
router "github.com/gorilla/mux"
"github.com/minio/minio/pkg/errors"
)
// Set up an RPC endpoint that receives browser related calls. The
@ -40,7 +42,7 @@ func registerBrowserPeerRPCRouter(mux *router.Router) error {
bpRPCServer := newRPCServer()
err := bpRPCServer.RegisterName("BrowserPeer", bpHandlers)
if err != nil {
return traceError(err)
return errors.Trace(err)
}
bpRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()


@ -32,6 +32,7 @@ import (
mux "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -40,7 +41,7 @@ import (
func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, queryParams url.Values) (s3Error APIErrorCode) {
// Verify if bucket actually exists
if err := checkBucketExist(bucket, newObjectLayerFn()); err != nil {
err = errorCause(err)
err = errors.Cause(err)
switch err.(type) {
case BucketNameInvalid:
// Return error for invalid bucket name.
@ -328,7 +329,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deletedObjects = append(deletedObjects, object)
continue
}
if _, ok := errorCause(err).(ObjectNotFound); ok {
if _, ok := errors.Cause(err).(ObjectNotFound); ok {
// If the object is not found it should be
// accounted as deleted as per S3 spec.
deletedObjects = append(deletedObjects, object)


@ -28,6 +28,7 @@ import (
"time"
"github.com/gorilla/mux"
"github.com/minio/minio/pkg/errors"
)
const (
@ -65,13 +66,13 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
// Attempt to load the notification config.
nConfig, err := loadNotificationConfig(bucket, objAPI)
if err != nil && errorCause(err) != errNoSuchNotifications {
if err != nil && errors.Cause(err) != errNoSuchNotifications {
errorIf(err, "Unable to read notification configuration.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// For no notifications we write a dummy XML.
if errorCause(err) == errNoSuchNotifications {
if errors.Cause(err) == errNoSuchNotifications {
// Complies with the s3 behavior in this regard.
nConfig = &notificationConfig{}
}
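Because Trace wraps the original error in a new type, every comparison against a sentinel such as errNoSuchNotifications must unwrap with errors.Cause first; a traced error never compares equal to the sentinel directly. A small sketch of why the handler above unwraps before comparing (errNoSuchNotifications is the cmd sentinel from this diff):

	package cmd

	import "github.com/minio/minio/pkg/errors"

	// isNoSuchNotifications shows the unwrap-before-compare pattern:
	// err == errNoSuchNotifications would be false for a traced error.
	func isNoSuchNotifications(err error) bool {
		return errors.Cause(err) == errNoSuchNotifications
	}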


@ -24,6 +24,7 @@ import (
"sync"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -88,7 +89,7 @@ func loadAllBucketPolicies(objAPI ObjectLayer) (policies map[string]policy.Bucke
buckets, err := objAPI.ListBuckets()
if err != nil {
errorIf(err, "Unable to list buckets.")
return nil, errorCause(err)
return nil, errors.Cause(err)
}
policies = make(map[string]policy.BucketAccessPolicy)
@ -99,7 +100,7 @@ func loadAllBucketPolicies(objAPI ObjectLayer) (policies map[string]policy.Bucke
if pErr != nil {
// net.Dial fails for rpc client or any
// other unexpected errors during net.Dial.
if !isErrIgnored(pErr, errDiskNotFound) {
if !errors.IsErrIgnored(pErr, errDiskNotFound) {
if !isErrBucketPolicyNotFound(pErr) {
pErrs = append(pErrs, pErr)
}
@ -162,7 +163,7 @@ func readBucketPolicyJSON(bucket string, objAPI ObjectLayer) (bucketPolicyReader
return nil, BucketPolicyNotFound{Bucket: bucket}
}
errorIf(err, "Unable to load policy for the bucket %s.", bucket)
return nil, errorCause(err)
return nil, errors.Cause(err)
}
return &buffer, nil
@ -199,7 +200,7 @@ func removeBucketPolicy(bucket string, objAPI ObjectLayer) error {
err := objAPI.DeleteObject(minioMetaBucket, policyPath)
if err != nil {
errorIf(err, "Unable to remove bucket-policy on bucket %s.", bucket)
err = errorCause(err)
err = errors.Cause(err)
if _, ok := err.(ObjectNotFound); ok {
return BucketPolicyNotFound{Bucket: bucket}
}
@ -226,12 +227,12 @@ func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy policy.BucketAcces
hashReader, err := hash.NewReader(bytes.NewReader(buf), int64(len(buf)), "", getSHA256Hash(buf))
if err != nil {
errorIf(err, "Unable to set policy for the bucket %s", bucket)
return errorCause(err)
return errors.Cause(err)
}
if _, err = objAPI.PutObject(minioMetaBucket, policyPath, hashReader, nil); err != nil {
errorIf(err, "Unable to set policy for the bucket %s", bucket)
return errorCause(err)
return errors.Cause(err)
}
return nil
}


@ -16,11 +16,13 @@
package cmd
import "go/build"
// DO NOT EDIT THIS FILE DIRECTLY. These are build-time constants
// set through buildscripts/gen-ldflags.go.
var (
// GOPATH - GOPATH value at the time of build.
GOPATH = ""
GOPATH = build.Default.GOPATH
// Go get development tag.
goGetTag = "DEVELOPMENT.GOGET"
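The GOPATH constant above is meant to be overwritten at link time (the file header says these constants are set through buildscripts/gen-ldflags.go); defaulting to build.Default.GOPATH simply gives plain `go get` builds a sensible value. Assuming the standard Go linker mechanism for string variables, a release build would inject it roughly like this (hypothetical path; the actual flags live in gen-ldflags.go, not shown here):

	go build -ldflags "-X github.com/minio/minio/cmd.GOPATH=/home/builder/go" github.com/minio/minio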


@ -19,6 +19,8 @@ package cmd
import (
"hash"
"io"
"github.com/minio/minio/pkg/errors"
)
// CreateFile creates a new bitrot encoded file spread over all available disks. CreateFile will create
@ -26,14 +28,14 @@ import (
// be used to protect the erasure encoded file.
func (s *ErasureStorage) CreateFile(src io.Reader, volume, path string, buffer []byte, algorithm BitrotAlgorithm, writeQuorum int) (f ErasureFileInfo, err error) {
if !algorithm.Available() {
return f, traceError(errBitrotHashAlgoInvalid)
return f, errors.Trace(errBitrotHashAlgoInvalid)
}
f.Checksums = make([][]byte, len(s.disks))
hashers := make([]hash.Hash, len(s.disks))
for i := range hashers {
hashers[i] = algorithm.New()
}
errChans, errors := make([]chan error, len(s.disks)), make([]error, len(s.disks))
errChans, errs := make([]chan error, len(s.disks)), make([]error, len(s.disks))
for i := range errChans {
errChans[i] = make(chan error, 1) // create buffered channel to let finished go-routines die early
}
@ -53,19 +55,19 @@ func (s *ErasureStorage) CreateFile(src io.Reader, volume, path string, buffer [
return f, err
}
} else {
return f, traceError(err)
return f, errors.Trace(err)
}
for i := range errChans { // spawn workers
go erasureAppendFile(s.disks[i], volume, path, hashers[i], blocks[i], errChans[i])
}
for i := range errChans { // wait until all workers are finished
errors[i] = <-errChans[i]
errs[i] = <-errChans[i]
}
if err = reduceWriteQuorumErrs(errors, objectOpIgnoredErrs, writeQuorum); err != nil {
if err = reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, writeQuorum); err != nil {
return f, err
}
s.disks = evalDisks(s.disks, errors)
s.disks = evalDisks(s.disks, errs)
f.Size += int64(n)
}
@ -83,7 +85,7 @@ func (s *ErasureStorage) CreateFile(src io.Reader, volume, path string, buffer [
// the hash of the written data. It sends the write error (or nil) over the error channel.
func erasureAppendFile(disk StorageAPI, volume, path string, hash hash.Hash, buf []byte, errChan chan<- error) {
if disk == OfflineDisk {
errChan <- traceError(errDiskNotFound)
errChan <- errors.Trace(errDiskNotFound)
return
}
err := disk.AppendFile(volume, path, buf)
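The errors → errs rename in CreateFile above is forced by the same import: a local slice named errors would shadow the new package and make the errors.Trace calls unresolvable inside the function. A minimal illustration (firstError is a hypothetical helper):

	package cmd

	import "github.com/minio/minio/pkg/errors"

	// firstError returns the first traced error from a slice of per-disk errors.
	// Naming the parameter "errors" instead of "errs" would shadow the package
	// and break the errors.Trace call below.
	func firstError(errs []error) error {
		for _, err := range errs {
			if err != nil {
				return errors.Trace(err)
			}
		}
		return nil
	}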


@ -20,6 +20,8 @@ import (
"fmt"
"hash"
"strings"
"github.com/minio/minio/pkg/errors"
)
// HealFile tries to reconstruct an erasure-coded file spread over all
@ -48,7 +50,7 @@ func (s ErasureStorage) HealFile(staleDisks []StorageAPI, volume, path string, b
f ErasureFileInfo, err error) {
if !alg.Available() {
return f, traceError(errBitrotHashAlgoInvalid)
return f, errors.Trace(errBitrotHashAlgoInvalid)
}
// Initialization
@ -144,7 +146,7 @@ func (s ErasureStorage) HealFile(staleDisks []StorageAPI, volume, path string, b
// If all disks had write errors we quit.
if !writeSucceeded {
// build error from all write errors
return f, traceError(joinWriteErrors(writeErrors))
return f, errors.Trace(joinWriteErrors(writeErrors))
}
}


@ -18,6 +18,8 @@ package cmd
import (
"io"
"github.com/minio/minio/pkg/errors"
)
// ReadFile reads as much data as requested from the file under the given volume and path and writes the data to the provided writer.
@ -25,13 +27,13 @@ import (
// up to the given length. If parts of the file are corrupted ReadFile tries to reconstruct the data.
func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset, length int64, totalLength int64, checksums [][]byte, algorithm BitrotAlgorithm, blocksize int64) (f ErasureFileInfo, err error) {
if offset < 0 || length < 0 {
return f, traceError(errUnexpected)
return f, errors.Trace(errUnexpected)
}
if offset+length > totalLength {
return f, traceError(errUnexpected)
return f, errors.Trace(errUnexpected)
}
if !algorithm.Available() {
return f, traceError(errBitrotHashAlgoInvalid)
return f, errors.Trace(errBitrotHashAlgoInvalid)
}
f.Checksums = make([][]byte, len(s.disks))
@ -66,7 +68,7 @@ func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset,
}
err = s.readConcurrent(volume, path, blockOffset, blocks, verifiers, errChans)
if err != nil {
return f, traceError(errXLReadQuorum)
return f, errors.Trace(errXLReadQuorum)
}
writeLength := blocksize - startOffset
@ -150,7 +152,7 @@ func erasureReadBlocksConcurrent(disks []StorageAPI, volume, path string, offset
// It sends the returned error through the error channel.
func erasureReadFromFile(disk StorageAPI, volume, path string, offset int64, buffer []byte, verifier *BitrotVerifier, errChan chan<- error) {
if disk == OfflineDisk {
errChan <- traceError(errDiskNotFound)
errChan <- errors.Trace(errDiskNotFound)
return
}
_, err := disk.ReadFile(volume, path, offset, buffer, verifier)


@ -21,6 +21,7 @@ import (
"io"
"github.com/klauspost/reedsolomon"
"github.com/minio/minio/pkg/errors"
)
// getDataBlockLen - get length of data blocks from encoded blocks.
@ -38,17 +39,17 @@ func getDataBlockLen(enBlocks [][]byte, dataBlocks int) int {
func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset int64, length int64) (int64, error) {
// Offset and out size cannot be negative.
if offset < 0 || length < 0 {
return 0, traceError(errUnexpected)
return 0, errors.Trace(errUnexpected)
}
// Do we have enough blocks?
if len(enBlocks) < dataBlocks {
return 0, traceError(reedsolomon.ErrTooFewShards)
return 0, errors.Trace(reedsolomon.ErrTooFewShards)
}
// Do we have enough data?
if int64(getDataBlockLen(enBlocks, dataBlocks)) < length {
return 0, traceError(reedsolomon.ErrShortData)
return 0, errors.Trace(reedsolomon.ErrShortData)
}
// Counter to decrement total left to write.
@ -76,7 +77,7 @@ func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset in
if write < int64(len(block)) {
n, err := io.Copy(dst, bytes.NewReader(block[:write]))
if err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
totalWritten += n
break
@ -84,7 +85,7 @@ func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset in
// Copy the block.
n, err := io.Copy(dst, bytes.NewReader(block))
if err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
// Decrement output size.


@ -21,6 +21,7 @@ import (
"hash"
"github.com/klauspost/reedsolomon"
"github.com/minio/minio/pkg/errors"
)
// OfflineDisk represents an unavailable disk.
@ -46,7 +47,7 @@ type ErasureStorage struct {
func NewErasureStorage(disks []StorageAPI, dataBlocks, parityBlocks int) (s ErasureStorage, err error) {
erasure, err := reedsolomon.New(dataBlocks, parityBlocks)
if err != nil {
return s, traceErrorf("failed to create erasure coding: %v", err)
return s, errors.Tracef("failed to create erasure coding: %v", err)
}
s = ErasureStorage{
disks: make([]StorageAPI, len(disks)),
@ -63,10 +64,10 @@ func NewErasureStorage(disks []StorageAPI, dataBlocks, parityBlocks int) (s Eras
func (s *ErasureStorage) ErasureEncode(data []byte) ([][]byte, error) {
encoded, err := s.erasure.Split(data)
if err != nil {
return nil, traceErrorf("failed to split data: %v", err)
return nil, errors.Tracef("failed to split data: %v", err)
}
if err = s.erasure.Encode(encoded); err != nil {
return nil, traceErrorf("failed to encode data: %v", err)
return nil, errors.Tracef("failed to encode data: %v", err)
}
return encoded, nil
}
@ -76,7 +77,7 @@ func (s *ErasureStorage) ErasureEncode(data []byte) ([][]byte, error) {
// It returns an error if the decoding failed.
func (s *ErasureStorage) ErasureDecodeDataBlocks(data [][]byte) error {
if err := s.erasure.ReconstructData(data); err != nil {
return traceErrorf("failed to reconstruct data: %v", err)
return errors.Tracef("failed to reconstruct data: %v", err)
}
return nil
}
@ -85,7 +86,7 @@ func (s *ErasureStorage) ErasureDecodeDataBlocks(data [][]byte) error {
// It returns an error if the decoding failed.
func (s *ErasureStorage) ErasureDecodeDataAndParityBlocks(data [][]byte) error {
if err := s.erasure.Reconstruct(data); err != nil {
return traceErrorf("failed to reconstruct data: %v", err)
return errors.Tracef("failed to reconstruct data: %v", err)
}
return nil
}


@ -1,158 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
)
// Holds the current directory path. Used for trimming path in traceError()
var rootPath string
// Figure out the rootPath
func initError() {
// Root path is automatically determined from the calling function's source file location.
// Catch the calling function's source file path.
_, file, _, _ := runtime.Caller(1)
// Save the directory alone.
rootPath = filepath.Dir(file)
}
// Represents a stack frame in the stack trace.
type traceInfo struct {
file string // File where error occurred
line int // Line where error occurred
name string // Name of the function where error occurred
}
// Error - error type containing cause and the stack trace.
type Error struct {
e error // Holds the cause error
trace []traceInfo // stack trace
errs []error // Useful for XL to hold errors from all disks
}
// Implement error interface.
func (e Error) Error() string {
return e.e.Error()
}
// Trace - returns stack trace.
func (e Error) Trace() []string {
var traceArr []string
for _, info := range e.trace {
traceArr = append(traceArr, fmt.Sprintf("%s:%d:%s",
info.file, info.line, info.name))
}
return traceArr
}
// traceError - returns a new Error wrapping e with a stack trace.
func traceError(e error, errs ...error) error {
if e == nil {
return nil
}
err := &Error{}
err.e = e
err.errs = errs
stack := make([]uintptr, 40)
length := runtime.Callers(2, stack)
if length > len(stack) {
length = len(stack)
}
stack = stack[:length]
for _, pc := range stack {
pc = pc - 1
fn := runtime.FuncForPC(pc)
file, line := fn.FileLine(pc)
name := fn.Name()
if hasSuffix(name, "ServeHTTP") {
break
}
if hasSuffix(name, "runtime.") {
break
}
file = strings.TrimPrefix(file, rootPath+string(os.PathSeparator))
name = strings.TrimPrefix(name, "github.com/minio/minio/cmd.")
err.trace = append(err.trace, traceInfo{file, line, name})
}
return err
}
// Returns the underlying cause error.
func errorCause(err error) error {
if e, ok := err.(*Error); ok {
err = e.e
}
return err
}
// Returns slice of underlying cause error.
func errorsCause(errs []error) []error {
cerrs := make([]error, len(errs))
for i, err := range errs {
if err == nil {
continue
}
cerrs[i] = errorCause(err)
}
return cerrs
}
// Collection of basic errors.
var baseErrs = []error{
errDiskNotFound,
errFaultyDisk,
errFaultyRemoteDisk,
}
var baseIgnoredErrs = baseErrs
// isErrIgnored returns whether given error is ignored or not.
func isErrIgnored(err error, ignoredErrs ...error) bool {
err = errorCause(err)
for _, ignoredErr := range ignoredErrs {
if ignoredErr == err {
return true
}
}
return false
}
// isErr returns whether given error is exact error.
func isErr(err error, errs ...error) bool {
err = errorCause(err)
for _, exactErr := range errs {
if err == exactErr {
return true
}
}
return false
}
// traceErrorf behaves like fmt.Errorf but also traces the returned error.
func traceErrorf(format string, args ...interface{}) error {
return traceError(fmt.Errorf(format, args...))
}
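The file deleted above is the old cmd-local tracer. Its replacement in pkg/errors is not part of this excerpt, but the call sites in this commit pin down the exported surface: Trace, Tracef, Cause and IsErrIgnored, with the same semantics as the deleted helpers. A minimal re-sketch of that surface, reconstructed from the deleted code (the real package also records file/line/function for each stack frame, elided here):

	package errors

	import "fmt"

	// Error pairs a cause with a captured stack trace (trace capture elided).
	type Error struct {
		Cause error
	}

	func (e *Error) Error() string { return e.Cause.Error() }

	// Trace wraps err with stack information; a nil error stays nil.
	func Trace(err error) error {
		if err == nil {
			return nil
		}
		return &Error{Cause: err}
	}

	// Tracef formats like fmt.Errorf and traces the result.
	func Tracef(format string, args ...interface{}) error {
		return Trace(fmt.Errorf(format, args...))
	}

	// Cause returns the underlying error of a traced error, else err itself.
	func Cause(err error) error {
		if e, ok := err.(*Error); ok {
			return e.Cause
		}
		return err
	}

	// IsErrIgnored reports whether the cause of err matches any ignored error.
	func IsErrIgnored(err error, ignoredErrs ...error) bool {
		err = Cause(err)
		for _, ignored := range ignoredErrs {
			if err == ignored {
				return true
			}
		}
		return false
	}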


@ -27,6 +27,7 @@ import (
"sync"
"github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -378,7 +379,7 @@ func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationCon
// 'errNoSuchNotifications'. This is default when no
// bucket notifications are found on the bucket.
if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
return nil, traceError(errNoSuchNotifications)
return nil, errors.Trace(errNoSuchNotifications)
}
errorIf(err, "Unable to load bucket-notification for bucket %s", bucket)
// Returns error for other errors.
@ -387,7 +388,7 @@ func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationCon
// if `notifications.xml` is empty we should return NoSuchNotifications.
if buffer.Len() == 0 {
return nil, traceError(errNoSuchNotifications)
return nil, errors.Trace(errNoSuchNotifications)
}
// Unmarshal notification bytes.
@ -395,7 +396,7 @@ func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationCon
notificationCfg := &notificationConfig{}
// Unmarshal notification bytes only if we read data.
if err = xml.Unmarshal(notificationConfigBytes, notificationCfg); err != nil {
return nil, traceError(err)
return nil, errors.Trace(err)
}
// Return success.
@ -429,7 +430,7 @@ func loadListenerConfig(bucket string, objAPI ObjectLayer) ([]listenerConfig, er
// 'errNoSuchNotifications'. This is default when no
// bucket listeners are found on the bucket
if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
return nil, traceError(errNoSuchNotifications)
return nil, errors.Trace(errNoSuchNotifications)
}
errorIf(err, "Unable to load bucket-listeners for bucket %s", bucket)
// Returns error for other errors.
@ -438,14 +439,14 @@ func loadListenerConfig(bucket string, objAPI ObjectLayer) ([]listenerConfig, er
// if `listener.json` is empty we should return NoSuchNotifications.
if buffer.Len() == 0 {
return nil, traceError(errNoSuchNotifications)
return nil, errors.Trace(errNoSuchNotifications)
}
var lCfg []listenerConfig
lConfigBytes := buffer.Bytes()
if err = json.Unmarshal(lConfigBytes, &lCfg); err != nil {
errorIf(err, "Unable to unmarshal listener config from JSON.")
return nil, traceError(err)
return nil, errors.Trace(err)
}
// Return success.
@ -552,13 +553,13 @@ func removeListenerConfig(bucket string, objAPI ObjectLayer) error {
func loadNotificationAndListenerConfig(bucketName string, objAPI ObjectLayer) (nCfg *notificationConfig, lCfg []listenerConfig, err error) {
// Loads notification config if any.
nCfg, err = loadNotificationConfig(bucketName, objAPI)
if err != nil && !isErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
if err != nil && !errors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
return nil, nil, err
}
// Loads listener config if any.
lCfg, err = loadListenerConfig(bucketName, objAPI)
if err != nil && !isErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
if err != nil && !errors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
return nil, nil, err
}
return nCfg, lCfg, nil


@ -23,6 +23,8 @@ import (
"reflect"
"testing"
"time"
"github.com/minio/minio/pkg/errors"
)
// Test InitEventNotifier with faulty disks
@ -71,7 +73,7 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) {
}
// Test initEventNotifier() with faulty disks
for i := 1; i <= 3; i++ {
if err := initEventNotifier(xl); errorCause(err) != errFaultyDisk {
if err := initEventNotifier(xl); errors.Cause(err) != errFaultyDisk {
t.Fatal("Unexpected error:", err)
}
}


@ -25,6 +25,7 @@ import (
"reflect"
"sync"
errors2 "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/lock"
)
@ -123,10 +124,10 @@ func (f *formatConfigV1) CheckFS() error {
// if reading format.json fails with io.EOF.
func (f *formatConfigV1) LoadFormat(lk *lock.LockedFile) error {
_, err := f.ReadFrom(lk)
if errorCause(err) == io.EOF {
if errors2.Cause(err) == io.EOF {
// No data on disk `format.json` still empty
// treat it as unformatted disk.
return traceError(errUnformattedDisk)
return errors2.Trace(errUnformattedDisk)
}
return err
}
@ -136,14 +137,14 @@ func (f *formatConfigV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
var fbytes []byte
fbytes, err = json.Marshal(f)
if err != nil {
return 0, traceError(err)
return 0, errors2.Trace(err)
}
if err = lk.Truncate(0); err != nil {
return 0, traceError(err)
return 0, errors2.Trace(err)
}
_, err = lk.Write(fbytes)
if err != nil {
return 0, traceError(err)
return 0, errors2.Trace(err)
}
return int64(len(fbytes)), nil
}
@ -152,18 +153,18 @@ func (f *formatConfigV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
var fbytes []byte
fi, err := lk.Stat()
if err != nil {
return 0, traceError(err)
return 0, errors2.Trace(err)
}
fbytes, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
if err != nil {
return 0, traceError(err)
return 0, errors2.Trace(err)
}
if len(fbytes) == 0 {
return 0, traceError(io.EOF)
return 0, errors2.Trace(io.EOF)
}
// Decode `format.json`.
if err = json.Unmarshal(fbytes, f); err != nil {
return 0, traceError(err)
return 0, errors2.Trace(err)
}
return int64(len(fbytes)), nil
}


@ -23,6 +23,7 @@ import (
"path/filepath"
"testing"
errors2 "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock"
)
@ -756,7 +757,7 @@ func TestFSCheckFormatFSErr(t *testing.T) {
t.Errorf("Test %d: Should fail with expected %s, got nil", i+1, testCase.formatCheckErr)
}
if err != nil && !testCase.shouldPass {
if errorCause(err).Error() != testCase.formatCheckErr.Error() {
if errors2.Cause(err).Error() != testCase.formatCheckErr.Error() {
t.Errorf("Test %d: Should fail with expected %s, got %s", i+1, testCase.formatCheckErr, err)
}
}


@ -22,6 +22,8 @@ import (
"os"
pathutil "path"
"runtime"
"github.com/minio/minio/pkg/errors"
)
// Removes only the file at given path does not remove
@ -29,11 +31,11 @@ import (
// windows automatically.
func fsRemoveFile(filePath string) (err error) {
if filePath == "" {
return traceError(errInvalidArgument)
return errors.Trace(errInvalidArgument)
}
if err = checkPathLength(filePath); err != nil {
return traceError(err)
return errors.Trace(err)
}
if err = os.Remove((filePath)); err != nil {
@ -47,20 +49,20 @@ func fsRemoveFile(filePath string) (err error) {
// long paths for windows automatically.
func fsRemoveAll(dirPath string) (err error) {
if dirPath == "" {
return traceError(errInvalidArgument)
return errors.Trace(errInvalidArgument)
}
if err = checkPathLength(dirPath); err != nil {
return traceError(err)
return errors.Trace(err)
}
if err = os.RemoveAll(dirPath); err != nil {
if os.IsPermission(err) {
return traceError(errVolumeAccessDenied)
return errors.Trace(errVolumeAccessDenied)
} else if isSysErrNotEmpty(err) {
return traceError(errVolumeNotEmpty)
return errors.Trace(errVolumeNotEmpty)
}
return traceError(err)
return errors.Trace(err)
}
return nil
@ -70,20 +72,20 @@ func fsRemoveAll(dirPath string) (err error) {
// paths for windows automatically.
func fsRemoveDir(dirPath string) (err error) {
if dirPath == "" {
return traceError(errInvalidArgument)
return errors.Trace(errInvalidArgument)
}
if err = checkPathLength(dirPath); err != nil {
return traceError(err)
return errors.Trace(err)
}
if err = os.Remove((dirPath)); err != nil {
if os.IsNotExist(err) {
return traceError(errVolumeNotFound)
return errors.Trace(errVolumeNotFound)
} else if isSysErrNotEmpty(err) {
return traceError(errVolumeNotEmpty)
return errors.Trace(errVolumeNotEmpty)
}
return traceError(err)
return errors.Trace(err)
}
return nil
@ -93,15 +95,15 @@ func fsRemoveDir(dirPath string) (err error) {
// if it doesn't exist.
func fsMkdirAll(dirPath string) (err error) {
if dirPath == "" {
return traceError(errInvalidArgument)
return errors.Trace(errInvalidArgument)
}
if err = checkPathLength(dirPath); err != nil {
return traceError(err)
return errors.Trace(err)
}
if err = os.MkdirAll(dirPath, 0777); err != nil {
return traceError(err)
return errors.Trace(err)
}
return nil
@ -113,27 +115,27 @@ func fsMkdirAll(dirPath string) (err error) {
// are handled automatically.
func fsMkdir(dirPath string) (err error) {
if dirPath == "" {
return traceError(errInvalidArgument)
return errors.Trace(errInvalidArgument)
}
if err = checkPathLength(dirPath); err != nil {
return traceError(err)
return errors.Trace(err)
}
if err = os.Mkdir((dirPath), 0777); err != nil {
if os.IsExist(err) {
return traceError(errVolumeExists)
return errors.Trace(errVolumeExists)
} else if os.IsPermission(err) {
return traceError(errDiskAccessDenied)
return errors.Trace(errDiskAccessDenied)
} else if isSysErrNotDir(err) {
// File path cannot be verified since
// one of the parents is a file.
return traceError(errDiskAccessDenied)
return errors.Trace(errDiskAccessDenied)
} else if isSysErrPathNotFound(err) {
// Add specific case for windows.
return traceError(errDiskAccessDenied)
return errors.Trace(errDiskAccessDenied)
}
return traceError(err)
return errors.Trace(err)
}
return nil
@ -146,14 +148,14 @@ func fsMkdir(dirPath string) (err error) {
// fsStatFileDir, fsStatFile, fsStatDir.
func fsStat(statLoc string) (os.FileInfo, error) {
if statLoc == "" {
return nil, traceError(errInvalidArgument)
return nil, errors.Trace(errInvalidArgument)
}
if err := checkPathLength(statLoc); err != nil {
return nil, traceError(err)
return nil, errors.Trace(err)
}
fi, err := os.Stat((statLoc))
if err != nil {
return nil, traceError(err)
return nil, errors.Trace(err)
}
return fi, nil
@ -163,17 +165,17 @@ func fsStat(statLoc string) (os.FileInfo, error) {
func fsStatVolume(volume string) (os.FileInfo, error) {
fi, err := fsStat(volume)
if err != nil {
err = errorCause(err)
err = errors.Cause(err)
if os.IsNotExist(err) {
return nil, traceError(errVolumeNotFound)
return nil, errors.Trace(errVolumeNotFound)
} else if os.IsPermission(err) {
return nil, traceError(errVolumeAccessDenied)
return nil, errors.Trace(errVolumeAccessDenied)
}
return nil, traceError(err)
return nil, errors.Trace(err)
}
if !fi.IsDir() {
return nil, traceError(errVolumeAccessDenied)
return nil, errors.Trace(errVolumeAccessDenied)
}
return fi, nil
@ -187,18 +189,18 @@ func osErrToFSFileErr(err error) error {
if err == nil {
return nil
}
err = errorCause(err)
err = errors.Cause(err)
if os.IsNotExist(err) {
return traceError(errFileNotFound)
return errors.Trace(errFileNotFound)
}
if os.IsPermission(err) {
return traceError(errFileAccessDenied)
return errors.Trace(errFileAccessDenied)
}
if isSysErrNotDir(err) {
return traceError(errFileAccessDenied)
return errors.Trace(errFileAccessDenied)
}
if isSysErrPathNotFound(err) {
return traceError(errFileNotFound)
return errors.Trace(errFileNotFound)
}
return err
}
@ -210,7 +212,7 @@ func fsStatDir(statDir string) (os.FileInfo, error) {
return nil, osErrToFSFileErr(err)
}
if !fi.IsDir() {
return nil, traceError(errFileAccessDenied)
return nil, errors.Trace(errFileAccessDenied)
}
return fi, nil
}
@ -222,7 +224,7 @@ func fsStatFile(statFile string) (os.FileInfo, error) {
return nil, osErrToFSFileErr(err)
}
if fi.IsDir() {
return nil, traceError(errFileAccessDenied)
return nil, errors.Trace(errFileAccessDenied)
}
return fi, nil
}
@ -231,10 +233,10 @@ func fsStatFile(statFile string) (os.FileInfo, error) {
// a readable stream and the size of the readable stream.
func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
if readPath == "" || offset < 0 {
return nil, 0, traceError(errInvalidArgument)
return nil, 0, errors.Trace(errInvalidArgument)
}
if err := checkPathLength(readPath); err != nil {
return nil, 0, traceError(err)
return nil, 0, errors.Trace(err)
}
fr, err := os.Open((readPath))
@ -245,19 +247,19 @@ func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
// Stat to get the size of the file at path.
st, err := os.Stat((readPath))
if err != nil {
return nil, 0, traceError(err)
return nil, 0, errors.Trace(err)
}
// Verify it is a regular file, since a subsequent Seek is undefined otherwise.
if !st.Mode().IsRegular() {
return nil, 0, traceError(errIsNotRegular)
return nil, 0, errors.Trace(errIsNotRegular)
}
// Seek to the requested offset.
if offset > 0 {
_, err = fr.Seek(offset, os.SEEK_SET)
if err != nil {
return nil, 0, traceError(err)
return nil, 0, errors.Trace(err)
}
}
@ -268,19 +270,19 @@ func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
// Creates a file and copies data from incoming reader. Staging buffer is used by io.CopyBuffer.
func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int64) (int64, error) {
if filePath == "" || reader == nil {
return 0, traceError(errInvalidArgument)
return 0, errors.Trace(errInvalidArgument)
}
if err := checkPathLength(filePath); err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
if err := os.MkdirAll(pathutil.Dir(filePath), 0777); err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
if err := checkDiskFree(pathutil.Dir(filePath), fallocSize); err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
writer, err := os.OpenFile((filePath), os.O_CREATE|os.O_WRONLY, 0666)
@ -292,7 +294,7 @@ func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int6
// Fallocate only if the final object size is known.
if fallocSize > 0 {
if err = fsFAllocate(int(writer.Fd()), 0, fallocSize); err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
}
@ -300,12 +302,12 @@ func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int6
if buf != nil {
bytesWritten, err = io.CopyBuffer(writer, reader, buf)
if err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
} else {
bytesWritten, err = io.Copy(writer, reader)
if err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
}
return bytesWritten, nil
@ -314,26 +316,26 @@ func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int6
// Removes uploadID at destination path.
func fsRemoveUploadIDPath(basePath, uploadIDPath string) error {
if basePath == "" || uploadIDPath == "" {
return traceError(errInvalidArgument)
return errors.Trace(errInvalidArgument)
}
if err := checkPathLength(basePath); err != nil {
return traceError(err)
return errors.Trace(err)
}
if err := checkPathLength(uploadIDPath); err != nil {
return traceError(err)
return errors.Trace(err)
}
// List all the entries in uploadID.
entries, err := readDir(uploadIDPath)
if err != nil && err != errFileNotFound {
return traceError(err)
return errors.Trace(err)
}
// Delete all the entries obtained from previous readdir.
for _, entryPath := range entries {
err = fsDeleteFile(basePath, pathJoin(uploadIDPath, entryPath))
if err != nil && err != errFileNotFound {
return traceError(err)
return errors.Trace(err)
}
}
@ -367,23 +369,23 @@ func fsFAllocate(fd int, offset int64, len int64) (err error) {
// missing parents if they don't exist.
func fsRenameFile(sourcePath, destPath string) error {
if err := checkPathLength(sourcePath); err != nil {
return traceError(err)
return errors.Trace(err)
}
if err := checkPathLength(destPath); err != nil {
return traceError(err)
return errors.Trace(err)
}
// Verify if source path exists.
if _, err := os.Stat((sourcePath)); err != nil {
return osErrToFSFileErr(err)
}
if err := os.MkdirAll(pathutil.Dir(destPath), 0777); err != nil {
return traceError(err)
return errors.Trace(err)
}
if err := os.Rename((sourcePath), (destPath)); err != nil {
if isSysErrCrossDevice(err) {
return traceError(fmt.Errorf("%s (%s)->(%s)", errCrossDeviceLink, sourcePath, destPath))
return errors.Trace(fmt.Errorf("%s (%s)->(%s)", errCrossDeviceLink, sourcePath, destPath))
}
return traceError(err)
return errors.Trace(err)
}
return nil
}
@ -391,11 +393,11 @@ func fsRenameFile(sourcePath, destPath string) error {
// fsDeleteFile is a wrapper for deleteFile(), after checking the path length.
func fsDeleteFile(basePath, deletePath string) error {
if err := checkPathLength(basePath); err != nil {
return traceError(err)
return errors.Trace(err)
}
if err := checkPathLength(deletePath); err != nil {
return traceError(err)
return errors.Trace(err)
}
return deleteFile(basePath, deletePath)
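Every wrapper in the fs-helpers file above follows one contract: validate the arguments, map os-level failures to minio sentinels (errVolumeNotFound, errFileAccessDenied, ...), and attach the trace at the point of origin rather than in the caller. A hypothetical extra wrapper, just to spell the contract out (errInvalidArgument, checkPathLength and errFileAccessDenied are the cmd identifiers seen in the hunks):

	package cmd

	import (
		"os"

		"github.com/minio/minio/pkg/errors"
	)

	// fsTouchFile is a hypothetical wrapper illustrating the shared contract
	// of fsRemoveFile, fsMkdir, fsStat and friends.
	func fsTouchFile(filePath string) error {
		if filePath == "" {
			return errors.Trace(errInvalidArgument)
		}
		if err := checkPathLength(filePath); err != nil {
			return errors.Trace(err)
		}
		f, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY, 0666)
		if err != nil {
			if os.IsPermission(err) {
				return errors.Trace(errFileAccessDenied)
			}
			return errors.Trace(err)
		}
		return f.Close()
	}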


@ -24,6 +24,7 @@ import (
"path"
"testing"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/lock"
)
@ -36,11 +37,11 @@ func TestFSMkdirAll(t *testing.T) {
}
defer os.RemoveAll(path)
if err = fsMkdirAll(""); errorCause(err) != errInvalidArgument {
if err = fsMkdirAll(""); errors.Cause(err) != errInvalidArgument {
t.Fatal("Unexpected error", err)
}
if err = fsMkdirAll(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errorCause(err) != errFileNameTooLong {
if err = fsMkdirAll(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error", err)
}
@ -63,13 +64,13 @@ func TestFSRenameFile(t *testing.T) {
if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); err != nil {
t.Fatal(err)
}
if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); errorCause(err) != errFileNotFound {
if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); errors.Cause(err) != errFileNotFound {
t.Fatal(err)
}
if err = fsRenameFile(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), pathJoin(path, "testvolume2")); errorCause(err) != errFileNameTooLong {
if err = fsRenameFile(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), pathJoin(path, "testvolume2")); errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error", err)
}
if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errorCause(err) != errFileNameTooLong {
if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error", err)
}
}
@ -84,11 +85,11 @@ func TestFSStats(t *testing.T) {
// Setup test environment.
if err = fsMkdir(""); errorCause(err) != errInvalidArgument {
if err = fsMkdir(""); errors.Cause(err) != errInvalidArgument {
t.Fatal("Unexpected error", err)
}
if err = fsMkdir(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errorCause(err) != errFileNameTooLong {
if err = fsMkdir(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error", err)
}
@ -103,7 +104,7 @@ func TestFSStats(t *testing.T) {
// Seek back.
reader.Seek(0, 0)
if err = fsMkdir(pathJoin(path, "success-vol", "success-file")); errorCause(err) != errVolumeExists {
if err = fsMkdir(pathJoin(path, "success-vol", "success-file")); errors.Cause(err) != errVolumeExists {
t.Fatal("Unexpected error", err)
}
@ -191,11 +192,11 @@ func TestFSStats(t *testing.T) {
for i, testCase := range testCases {
if testCase.srcPath != "" {
if _, err := fsStatFile(pathJoin(testCase.srcFSPath, testCase.srcVol,
testCase.srcPath)); errorCause(err) != testCase.expectedErr {
testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
} else {
if _, err := fsStatVolume(pathJoin(testCase.srcFSPath, testCase.srcVol)); errorCause(err) != testCase.expectedErr {
if _, err := fsStatVolume(pathJoin(testCase.srcFSPath, testCase.srcVol)); errors.Cause(err) != testCase.expectedErr {
t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
}
@ -214,11 +215,11 @@ func TestFSCreateAndOpen(t *testing.T) {
t.Fatalf("Unable to create directory, %s", err)
}
if _, err = fsCreateFile("", nil, nil, 0); errorCause(err) != errInvalidArgument {
if _, err = fsCreateFile("", nil, nil, 0); errors.Cause(err) != errInvalidArgument {
t.Fatal("Unexpected error", err)
}
if _, _, err = fsOpenFile("", -1); errorCause(err) != errInvalidArgument {
if _, _, err = fsOpenFile("", -1); errors.Cause(err) != errInvalidArgument {
t.Fatal("Unexpected error", err)
}
@ -252,17 +253,17 @@ func TestFSCreateAndOpen(t *testing.T) {
for i, testCase := range testCases {
_, err = fsCreateFile(pathJoin(path, testCase.srcVol, testCase.srcPath), reader, nil, 0)
if errorCause(err) != testCase.expectedErr {
if errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
_, _, err = fsOpenFile(pathJoin(path, testCase.srcVol, testCase.srcPath), 0)
if errorCause(err) != testCase.expectedErr {
if errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
}
// Attempt to open a directory.
if _, _, err = fsOpenFile(pathJoin(path), 0); errorCause(err) != errIsNotRegular {
if _, _, err = fsOpenFile(pathJoin(path), 0); errors.Cause(err) != errIsNotRegular {
t.Fatal("Unexpected error", err)
}
}
@ -364,7 +365,7 @@ func TestFSDeletes(t *testing.T) {
}
for i, testCase := range testCases {
if err = fsDeleteFile(testCase.basePath, pathJoin(testCase.basePath, testCase.srcVol, testCase.srcPath)); errorCause(err) != testCase.expectedErr {
if err = fsDeleteFile(testCase.basePath, pathJoin(testCase.basePath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
}
@ -498,11 +499,11 @@ func TestFSRemoves(t *testing.T) {
for i, testCase := range testCases {
if testCase.srcPath != "" {
if err = fsRemoveFile(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errorCause(err) != testCase.expectedErr {
if err = fsRemoveFile(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
} else {
if err = fsRemoveDir(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errorCause(err) != testCase.expectedErr {
if err = fsRemoveDir(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Error(err)
}
}
@ -512,11 +513,11 @@ func TestFSRemoves(t *testing.T) {
t.Fatal(err)
}
if err = fsRemoveAll(""); errorCause(err) != errInvalidArgument {
if err = fsRemoveAll(""); errors.Cause(err) != errInvalidArgument {
t.Fatal(err)
}
if err = fsRemoveAll("my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); errorCause(err) != errFileNameTooLong {
if err = fsRemoveAll("my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); errors.Cause(err) != errFileNameTooLong {
t.Fatal(err)
}
}


@ -26,6 +26,7 @@ import (
"strings"
"time"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/lock"
"github.com/minio/minio/pkg/mimedb"
"github.com/tidwall/gjson"
@ -166,15 +167,15 @@ func (m *fsMetaV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
var metadataBytes []byte
metadataBytes, err = json.Marshal(m)
if err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
if err = lk.Truncate(0); err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
if _, err = lk.Write(metadataBytes); err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
// Success.
@ -222,16 +223,16 @@ func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
var fsMetaBuf []byte
fi, err := lk.Stat()
if err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
fsMetaBuf, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
if err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
if len(fsMetaBuf) == 0 {
return 0, traceError(io.EOF)
return 0, errors.Trace(io.EOF)
}
// obtain version.
@ -243,7 +244,7 @@ func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
// Verify if the format is valid, return corrupted format
// for unrecognized formats.
if !isFSMetaValid(m.Version, m.Format) {
return 0, traceError(errCorruptedFormat)
return 0, errors.Trace(errCorruptedFormat)
}
// obtain metadata.
@ -278,9 +279,9 @@ func checkLockedValidFormatFS(fsPath string) (*lock.RLockedFile, error) {
if os.IsNotExist(err) {
// If format.json not found then
// its an unformatted disk.
return nil, traceError(errUnformattedDisk)
return nil, errors.Trace(errUnformattedDisk)
}
return nil, traceError(err)
return nil, errors.Trace(err)
}
var format = &formatConfigV1{}
@ -296,7 +297,7 @@ func checkLockedValidFormatFS(fsPath string) (*lock.RLockedFile, error) {
}
// Always return read lock here and should be closed by the caller.
return rlk, traceError(err)
return rlk, errors.Trace(err)
}
// Creates a new format.json if unformatted.
@ -307,7 +308,7 @@ func createFormatFS(fsPath string) error {
// file stored in minioMetaBucket(.minio.sys) directory.
lk, err := lock.TryLockedOpenFile((fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return traceError(err)
return errors.Trace(err)
}
// Close the locked file upon return.
defer lk.Close()
@ -316,7 +317,7 @@ func createFormatFS(fsPath string) error {
// writes the new format.json
var format = &formatConfigV1{}
err = format.LoadFormat(lk)
if errorCause(err) == errUnformattedDisk {
if errors.Cause(err) == errUnformattedDisk {
_, err = newFSFormat().WriteTo(lk)
return err
}
@ -338,10 +339,10 @@ func initFormatFS(fsPath string) (rlk *lock.RLockedFile, err error) {
// is blocked if attempted in-turn avoiding corruption on
// the backend disk.
return rlk, nil
case errorCause(err) == errUnformattedDisk:
case errors.Cause(err) == errUnformattedDisk:
if err = createFormatFS(fsPath); err != nil {
// Existing write locks detected.
if errorCause(err) == lock.ErrAlreadyLocked {
if errors.Cause(err) == lock.ErrAlreadyLocked {
// Lock already present, sleep and attempt again.
time.Sleep(100 * time.Millisecond)
continue


@ -25,6 +25,7 @@ import (
"strings"
"time"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock"
)
@ -41,7 +42,7 @@ func (fs fsObjects) isMultipartUpload(bucket, prefix string) bool {
uploadsIDPath := pathJoin(fs.fsPath, bucket, prefix, uploadsJSONFile)
_, err := fsStatFile(uploadsIDPath)
if err != nil {
if errorCause(err) == errFileNotFound {
if errors.Cause(err) == errFileNotFound {
return false
}
errorIf(err, "Unable to access uploads.json "+uploadsIDPath)
@ -91,13 +92,13 @@ func (fs fsObjects) addUploadID(bucket, object, uploadID string, initiated time.
_, err := uploadIDs.ReadFrom(rwlk)
// For all unexpected errors, we return.
if err != nil && errorCause(err) != io.EOF {
if err != nil && errors.Cause(err) != io.EOF {
return err
}
// If we couldn't read anything, we assume a default
// (empty) upload info.
if errorCause(err) == io.EOF {
if errors.Cause(err) == io.EOF {
uploadIDs = newUploadsV1("fs")
}
@ -117,7 +118,7 @@ func (fs fsObjects) listMultipartUploadIDs(bucketName, objectName, uploadIDMarke
// do not leave a stale uploads.json behind.
objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucketName, objectName))
if err := objectMPartPathLock.GetRLock(globalListingTimeout); err != nil {
return nil, false, traceError(err)
return nil, false, errors.Trace(err)
}
defer objectMPartPathLock.RUnlock()
@ -127,7 +128,7 @@ func (fs fsObjects) listMultipartUploadIDs(bucketName, objectName, uploadIDMarke
if err == errFileNotFound || err == errFileAccessDenied {
return nil, true, nil
}
return nil, false, traceError(err)
return nil, false, errors.Trace(err)
}
defer fs.rwPool.Close(pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadsPath))
@ -235,7 +236,7 @@ func (fs fsObjects) listMultipartUploadsCleanup(bucket, prefix, keyMarker, uploa
// For any walk error return right away.
if walkResult.err != nil {
// File not found or Disk not found is a valid case.
if isErrIgnored(walkResult.err, fsTreeWalkIgnoredErrs...) {
if errors.IsErrIgnored(walkResult.err, fsTreeWalkIgnoredErrs...) {
eof = true
break
}
@ -372,7 +373,7 @@ func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[st
uploadsPath := pathJoin(bucket, object, uploadsJSONFile)
rwlk, err := fs.rwPool.Create(pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadsPath))
if err != nil {
return "", toObjectErr(traceError(err), bucket, object)
return "", toObjectErr(errors.Trace(err), bucket, object)
}
defer rwlk.Close()
@ -380,7 +381,7 @@ func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[st
fsMetaPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, fsMetaJSONFile)
metaFile, err := fs.rwPool.Create(fsMetaPath)
if err != nil {
return "", toObjectErr(traceError(err), bucket, object)
return "", toObjectErr(errors.Trace(err), bucket, object)
}
defer metaFile.Close()
@ -490,7 +491,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// Validate input data size and it can never be less than zero.
if data.Size() < 0 {
return pi, toObjectErr(traceError(errInvalidArgument))
return pi, toObjectErr(errors.Trace(errInvalidArgument))
}
// Hold the lock so that two parallel complete-multipart-uploads
@ -505,9 +506,9 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
uploadsPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object, uploadsJSONFile)
if _, err := fs.rwPool.Open(uploadsPath); err != nil {
if err == errFileNotFound || err == errFileAccessDenied {
return pi, traceError(InvalidUploadID{UploadID: uploadID})
return pi, errors.Trace(InvalidUploadID{UploadID: uploadID})
}
return pi, toObjectErr(traceError(err), bucket, object)
return pi, toObjectErr(errors.Trace(err), bucket, object)
}
defer fs.rwPool.Close(uploadsPath)
@ -518,9 +519,9 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
rwlk, err := fs.rwPool.Write(fsMetaPath)
if err != nil {
if err == errFileNotFound || err == errFileAccessDenied {
return pi, traceError(InvalidUploadID{UploadID: uploadID})
return pi, errors.Trace(InvalidUploadID{UploadID: uploadID})
}
return pi, toObjectErr(traceError(err), bucket, object)
return pi, toObjectErr(errors.Trace(err), bucket, object)
}
defer rwlk.Close()
@ -550,7 +551,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// bytes than specified in request header.
if bytesWritten < data.Size() {
fsRemoveFile(fsPartPath)
return pi, traceError(IncompleteBody{})
return pi, errors.Trace(IncompleteBody{})
}
// Delete temporary part in case of failure. If
@ -618,9 +619,9 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
if err != nil {
if err == errFileNotFound || err == errFileAccessDenied {
// On windows oddly this is returned.
return lpi, traceError(InvalidUploadID{UploadID: uploadID})
return lpi, errors.Trace(InvalidUploadID{UploadID: uploadID})
}
return lpi, toObjectErr(traceError(err), bucket, object)
return lpi, toObjectErr(errors.Trace(err), bucket, object)
}
defer fs.rwPool.Close(fsMetaPath)
@ -695,7 +696,7 @@ func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
// do not leave a stale uploads.json behind.
objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucket, object))
if err := objectMPartPathLock.GetRLock(globalListingTimeout); err != nil {
return lpi, traceError(err)
return lpi, errors.Trace(err)
}
defer objectMPartPathLock.RUnlock()
@ -720,7 +721,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
// Check if an object is present as one of the parent dir.
if fs.parentDirIsObject(bucket, pathutil.Dir(object)) {
return oi, toObjectErr(traceError(errFileAccessDenied), bucket, object)
return oi, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
}
if _, err := fs.statBucketDir(bucket); err != nil {
@ -747,7 +748,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if removeObjectDir {
basePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket)
derr := fsDeleteFile(basePath, pathJoin(basePath, object))
if derr = errorCause(derr); derr != nil {
if derr = errors.Cause(derr); derr != nil {
// In parallel execution, CompleteMultipartUpload could have deleted temporary
// state files/directory; it is safe to ignore errFileNotFound
if derr != errFileNotFound {
@ -762,9 +763,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
rlk, err := fs.rwPool.Open(fsMetaPathMultipart)
if err != nil {
if err == errFileNotFound || err == errFileAccessDenied {
return oi, traceError(InvalidUploadID{UploadID: uploadID})
return oi, errors.Trace(InvalidUploadID{UploadID: uploadID})
}
return oi, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(errors.Trace(err), bucket, object)
}
// Disallow any parallel abort or complete multipart operations.
@ -772,9 +773,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
if err == errFileNotFound || err == errFileAccessDenied {
return oi, traceError(InvalidUploadID{UploadID: uploadID})
return oi, errors.Trace(InvalidUploadID{UploadID: uploadID})
}
return oi, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(errors.Trace(err), bucket, object)
}
defer rwlk.Close()
@ -792,18 +793,18 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
partIdx := fsMeta.ObjectPartIndex(part.PartNumber)
if partIdx == -1 {
fs.rwPool.Close(fsMetaPathMultipart)
return oi, traceError(InvalidPart{})
return oi, errors.Trace(InvalidPart{})
}
if fsMeta.Parts[partIdx].ETag != part.ETag {
fs.rwPool.Close(fsMetaPathMultipart)
return oi, traceError(InvalidPart{})
return oi, errors.Trace(InvalidPart{})
}
// All parts except the last part have to be at least 5MB.
if (i < len(parts)-1) && !isMinAllowedPartSize(fsMeta.Parts[partIdx].Size) {
fs.rwPool.Close(fsMetaPathMultipart)
return oi, traceError(PartTooSmall{
return oi, errors.Trace(PartTooSmall{
PartNumber: part.PartNumber,
PartSize: fsMeta.Parts[partIdx].Size,
PartETag: part.ETag,
@ -821,7 +822,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
// renamed to the main name-space.
if (i < len(parts)-1) && partSize != fsMeta.Parts[partIdx].Size {
fs.rwPool.Close(fsMetaPathMultipart)
return oi, traceError(PartsSizeUnequal{})
return oi, errors.Trace(PartsSizeUnequal{})
}
}
@ -831,7 +832,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
metaFile, err := fs.rwPool.Create(fsMetaPath)
if err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
return oi, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(errors.Trace(err), bucket, object)
}
defer metaFile.Close()
@ -877,9 +878,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
if err == errFileNotFound {
return oi, traceError(InvalidPart{})
return oi, errors.Trace(InvalidPart{})
}
return oi, toObjectErr(traceError(err), minioMetaMultipartBucket, partSuffix)
return oi, toObjectErr(errors.Trace(err), minioMetaMultipartBucket, partSuffix)
}
// No need to hold a lock, this is a unique file and will only be written
@ -889,7 +890,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err != nil {
reader.Close()
fs.rwPool.Close(fsMetaPathMultipart)
return oi, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(errors.Trace(err), bucket, object)
}
_, err = io.CopyBuffer(wfile, reader, buf)
@ -897,7 +898,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
wfile.Close()
reader.Close()
fs.rwPool.Close(fsMetaPathMultipart)
return oi, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(errors.Trace(err), bucket, object)
}
wfile.Close()
@ -988,7 +989,7 @@ func (fs fsObjects) AbortMultipartUpload(bucket, object, uploadID string) error
if removeObjectDir {
basePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket)
derr := fsDeleteFile(basePath, pathJoin(basePath, object))
if derr = errorCause(derr); derr != nil {
if derr = errors.Cause(derr); derr != nil {
// In parallel execution, AbortMultipartUpload could have deleted temporary
// state files/directory; it is safe to ignore errFileNotFound
if derr != errFileNotFound {
@ -1002,9 +1003,9 @@ func (fs fsObjects) AbortMultipartUpload(bucket, object, uploadID string) error
fsMetaPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, fsMetaJSONFile)
if _, err := fs.rwPool.Open(fsMetaPath); err != nil {
if err == errFileNotFound || err == errFileAccessDenied {
return traceError(InvalidUploadID{UploadID: uploadID})
return errors.Trace(InvalidUploadID{UploadID: uploadID})
}
return toObjectErr(traceError(err), bucket, object)
return toObjectErr(errors.Trace(err), bucket, object)
}
uploadsPath := pathJoin(bucket, object, uploadsJSONFile)
@ -1012,9 +1013,9 @@ func (fs fsObjects) AbortMultipartUpload(bucket, object, uploadID string) error
if err != nil {
fs.rwPool.Close(fsMetaPath)
if err == errFileNotFound || err == errFileAccessDenied {
return traceError(InvalidUploadID{UploadID: uploadID})
return errors.Trace(InvalidUploadID{UploadID: uploadID})
}
return toObjectErr(traceError(err), bucket, object)
return toObjectErr(errors.Trace(err), bucket, object)
}
defer rwlk.Close()

View file

@ -22,6 +22,8 @@ import (
"path/filepath"
"testing"
"time"
"github.com/minio/minio/pkg/errors"
)
func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
@ -56,7 +58,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
// Check if upload id was already purged.
if err = obj.AbortMultipartUpload(bucketName, objectName, uploadID); err != nil {
err = errorCause(err)
err = errors.Cause(err)
if _, ok := err.(InvalidUploadID); !ok {
t.Fatal("Unexpected err: ", err)
}
@ -93,7 +95,7 @@ func TestFSCleanupMultipartUpload(t *testing.T) {
// Check if upload id was already purged.
if err = obj.AbortMultipartUpload(bucketName, objectName, uploadID); err != nil {
err = errorCause(err)
err = errors.Cause(err)
if _, ok := err.(InvalidUploadID); !ok {
t.Fatal("Unexpected err: ", err)
}
@ -122,7 +124,7 @@ func TestFSWriteUploadJSON(t *testing.T) {
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
_, err = obj.NewMultipartUpload(bucketName, objectName, nil)
if err != nil {
if _, ok := errorCause(err).(BucketNotFound); !ok {
if _, ok := errors.Cause(err).(BucketNotFound); !ok {
t.Fatal("Unexpected err: ", err)
}
}
@ -146,7 +148,7 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) {
// Test with disk removed.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if _, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}); err != nil {
if !isSameType(errorCause(err), BucketNotFound{}) {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}
}
@ -184,7 +186,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
_, err = fs.PutObjectPart(bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum))
if !isSameType(errorCause(err), BucketNotFound{}) {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}
}
@ -220,7 +222,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if _, err := fs.CompleteMultipartUpload(bucketName, objectName, uploadID, parts); err != nil {
if !isSameType(errorCause(err), BucketNotFound{}) {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}
}
@ -323,7 +325,7 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) {
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if _, err := fs.ListMultipartUploads(bucketName, objectName, "", "", "", 1000); err != nil {
if !isSameType(errorCause(err), BucketNotFound{}) {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}
}

View file

@ -27,6 +27,7 @@ import (
"sort"
"syscall"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock"
)
@ -190,7 +191,7 @@ func (fs fsObjects) StorageInfo() StorageInfo {
func (fs fsObjects) getBucketDir(bucket string) (string, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return "", traceError(BucketNameInvalid{Bucket: bucket})
return "", errors.Trace(BucketNameInvalid{Bucket: bucket})
}
bucketDir := pathJoin(fs.fsPath, bucket)
@ -242,12 +243,12 @@ func (fs fsObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
// ListBuckets - list all s3 compatible buckets (directories) at fsPath.
func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
if err := checkPathLength(fs.fsPath); err != nil {
return nil, traceError(err)
return nil, errors.Trace(err)
}
var bucketInfos []BucketInfo
entries, err := readDir((fs.fsPath))
if err != nil {
return nil, toObjectErr(traceError(errDiskNotFound))
return nil, toObjectErr(errors.Trace(errDiskNotFound))
}
for _, entry := range entries {
@ -330,7 +331,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
var wlk *lock.LockedFile
wlk, err = fs.rwPool.Write(fsMetaPath)
if err != nil {
return oi, toObjectErr(traceError(err), srcBucket, srcObject)
return oi, toObjectErr(errors.Trace(err), srcBucket, srcObject)
}
// This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close()
@ -395,25 +396,25 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
// Offset cannot be negative.
if offset < 0 {
return toObjectErr(traceError(errUnexpected), bucket, object)
return toObjectErr(errors.Trace(errUnexpected), bucket, object)
}
// Writer cannot be nil.
if writer == nil {
return toObjectErr(traceError(errUnexpected), bucket, object)
return toObjectErr(errors.Trace(errUnexpected), bucket, object)
}
// If it's a directory request, we return an empty body.
if hasSuffix(object, slashSeparator) {
_, err = writer.Write([]byte(""))
return toObjectErr(traceError(err), bucket, object)
return toObjectErr(errors.Trace(err), bucket, object)
}
if bucket != minioMetaBucket {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
_, err = fs.rwPool.Open(fsMetaPath)
if err != nil && err != errFileNotFound {
return toObjectErr(traceError(err), bucket, object)
return toObjectErr(errors.Trace(err), bucket, object)
}
defer fs.rwPool.Close(fsMetaPath)
}
@ -438,7 +439,7 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
// Reply back invalid range if the input offset and length fall out of range.
if offset > size || offset+length > size {
return traceError(InvalidRange{offset, length, size})
return errors.Trace(InvalidRange{offset, length, size})
}
// Allocate a staging buffer.
@ -446,14 +447,14 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
_, err = io.CopyBuffer(writer, io.LimitReader(reader, length), buf)
return toObjectErr(traceError(err), bucket, object)
return toObjectErr(errors.Trace(err), bucket, object)
}
// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
func (fs fsObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error) {
fsMeta := fsMetaV1{}
fi, err := fsStatDir(pathJoin(fs.fsPath, bucket, object))
if err != nil && errorCause(err) != errFileAccessDenied {
if err != nil && errors.Cause(err) != errFileAccessDenied {
return oi, toObjectErr(err, bucket, object)
}
if fi != nil {
@ -477,7 +478,7 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error
// `fs.json` can be empty due to previously failed
// PutObject() transaction, if we arrive at such
// a situation we just ignore and continue.
if errorCause(rerr) != io.EOF {
if errors.Cause(rerr) != io.EOF {
return oi, toObjectErr(rerr, bucket, object)
}
}
@ -485,7 +486,7 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error
// Ignore if `fs.json` is not available, this is true for pre-existing data.
if err != nil && err != errFileNotFound {
return oi, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(errors.Trace(err), bucket, object)
}
// Stat the file to get file size.
@ -501,14 +502,14 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error
func checkBucketAndObjectNamesFS(bucket, object string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket})
return errors.Trace(BucketNameInvalid{Bucket: bucket})
}
// Verify if object is valid.
if len(object) == 0 {
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
return errors.Trace(ObjectNameInvalid{Bucket: bucket, Object: object})
}
if !IsValidObjectPrefix(object) {
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
return errors.Trace(ObjectNameInvalid{Bucket: bucket, Object: object})
}
return nil
}
@ -572,7 +573,7 @@ func (fs fsObjects) PutObject(bucket string, object string, data *hash.Reader, m
if isObjectDir(object, data.Size()) {
// Check if an object is present as one of the parent directories.
if fs.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
return ObjectInfo{}, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
}
if err = fsMkdirAll(pathJoin(fs.fsPath, bucket, object)); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
@ -590,12 +591,12 @@ func (fs fsObjects) PutObject(bucket string, object string, data *hash.Reader, m
// Check if an object is present as one of the parent directories.
if fs.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
return ObjectInfo{}, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
}
// Validate the input data size; it can never be less than zero.
if data.Size() < 0 {
return ObjectInfo{}, traceError(errInvalidArgument)
return ObjectInfo{}, errors.Trace(errInvalidArgument)
}
var wlk *lock.LockedFile
@ -604,7 +605,7 @@ func (fs fsObjects) PutObject(bucket string, object string, data *hash.Reader, m
fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fsMetaJSONFile)
wlk, err = fs.rwPool.Create(fsMetaPath)
if err != nil {
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return ObjectInfo{}, toObjectErr(errors.Trace(err), bucket, object)
}
// This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close()
@ -643,7 +644,7 @@ func (fs fsObjects) PutObject(bucket string, object string, data *hash.Reader, m
// bytes than specified in request header.
if bytesWritten < data.Size() {
fsRemoveFile(fsTmpObjPath)
return ObjectInfo{}, traceError(IncompleteBody{})
return ObjectInfo{}, errors.Trace(IncompleteBody{})
}
// Delete the temporary object in the case of a
@ -694,7 +695,7 @@ func (fs fsObjects) DeleteObject(bucket, object string) error {
defer rwlk.Close()
}
if lerr != nil && lerr != errFileNotFound {
return toObjectErr(traceError(lerr), bucket, object)
return toObjectErr(errors.Trace(lerr), bucket, object)
}
}
@ -706,7 +707,7 @@ func (fs fsObjects) DeleteObject(bucket, object string) error {
if bucket != minioMetaBucket {
// Delete the metadata object.
err := fsDeleteFile(minioMetaBucketDir, fsMetaPath)
if err != nil && errorCause(err) != errFileNotFound {
if err != nil && errors.Cause(err) != errFileNotFound {
return toObjectErr(err, bucket, object)
}
}
@ -747,7 +748,7 @@ func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) {
rlk, err := fs.rwPool.Open(fsMetaPath)
// Ignore if `fs.json` is not available, this is true for pre-existing data.
if err != nil && err != errFileNotFound {
return "", toObjectErr(traceError(err), bucket, entry)
return "", toObjectErr(errors.Trace(err), bucket, entry)
}
// If file is not found, we don't need to proceed forward.
@ -761,7 +762,7 @@ func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) {
// Fetch the size of the underlying file.
fi, err := rlk.LockedFile.Stat()
if err != nil {
return "", toObjectErr(traceError(err), bucket, entry)
return "", toObjectErr(errors.Trace(err), bucket, entry)
}
// `fs.json` can be empty due to previously failed
@ -775,12 +776,12 @@ func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) {
// make sure the underlying offsets don't move.
fsMetaBuf, err := ioutil.ReadAll(io.NewSectionReader(rlk.LockedFile, 0, fi.Size()))
if err != nil {
return "", traceError(err)
return "", errors.Trace(err)
}
// Check if FS metadata is valid, if not return error.
if !isFSMetaValid(parseFSVersion(fsMetaBuf), parseFSFormat(fsMetaBuf)) {
return "", toObjectErr(traceError(errCorruptedFormat), bucket, entry)
return "", toObjectErr(errors.Trace(errCorruptedFormat), bucket, entry)
}
return extractETag(parseFSMetaMap(fsMetaBuf)), nil
@ -902,7 +903,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
// For any walk error, return right away.
if walkResult.err != nil {
// File not found is a valid case.
if errorCause(walkResult.err) == errFileNotFound {
if errors.Cause(walkResult.err) == errFileNotFound {
return loi, nil
}
return loi, toObjectErr(walkResult.err, bucket, prefix)
@ -943,25 +944,25 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
// HealObject - no-op for fs. Valid only for XL.
func (fs fsObjects) HealObject(bucket, object string) (int, int, error) {
return 0, 0, traceError(NotImplemented{})
return 0, 0, errors.Trace(NotImplemented{})
}
// HealBucket - no-op for fs. Valid only for XL.
func (fs fsObjects) HealBucket(bucket string) error {
return traceError(NotImplemented{})
return errors.Trace(NotImplemented{})
}
// ListObjectsHeal - list all objects to be healed. Valid only for XL
func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, traceError(NotImplemented{})
return loi, errors.Trace(NotImplemented{})
}
// ListBucketsHeal - list all buckets to be healed. Valid only for XL
func (fs fsObjects) ListBucketsHeal() ([]BucketInfo, error) {
return []BucketInfo{}, traceError(NotImplemented{})
return []BucketInfo{}, errors.Trace(NotImplemented{})
}
func (fs fsObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
return lmi, traceError(NotImplemented{})
return lmi, errors.Trace(NotImplemented{})
}

View file

@ -22,6 +22,8 @@ import (
"os"
"path/filepath"
"testing"
"github.com/minio/minio/pkg/errors"
)
// Tests for if parent directory is object
@ -178,7 +180,7 @@ func TestFSGetBucketInfo(t *testing.T) {
// Test with a nonexistent bucket
_, err = fs.GetBucketInfo("a")
if !isSameType(errorCause(err), BucketNameInvalid{}) {
if !isSameType(errors.Cause(err), BucketNameInvalid{}) {
t.Fatal("BucketNameInvalid error not returned")
}
@ -186,7 +188,7 @@ func TestFSGetBucketInfo(t *testing.T) {
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
_, err = fs.GetBucketInfo(bucketName)
if !isSameType(errorCause(err), BucketNotFound{}) {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("BucketNotFound error not returned")
}
}
@ -209,7 +211,7 @@ func TestFSPutObject(t *testing.T) {
if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist")
}
if _, ok := errorCause(err).(BucketNotFound); !ok {
if _, ok := errors.Cause(err).(BucketNotFound); !ok {
t.Fatalf("Expected error type BucketNotFound, got %#v", err)
}
@ -218,7 +220,7 @@ func TestFSPutObject(t *testing.T) {
if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist")
}
if _, ok := errorCause(err).(BucketNotFound); !ok {
if _, ok := errors.Cause(err).(BucketNotFound); !ok {
t.Fatalf("Expected error type BucketNotFound, got %#v", err)
}
@ -230,7 +232,7 @@ func TestFSPutObject(t *testing.T) {
if err == nil {
t.Fatal("Unexpected should fail here, backend corruption occurred")
}
if nerr, ok := errorCause(err).(PrefixAccessDenied); !ok {
if nerr, ok := errors.Cause(err).(PrefixAccessDenied); !ok {
t.Fatalf("Expected PrefixAccessDenied, got %#v", err)
} else {
if nerr.Bucket != "bucket" {
@ -245,7 +247,7 @@ func TestFSPutObject(t *testing.T) {
if err == nil {
t.Fatal("Unexpected should fail here, backned corruption occurred")
}
if nerr, ok := errorCause(err).(PrefixAccessDenied); !ok {
if nerr, ok := errors.Cause(err).(PrefixAccessDenied); !ok {
t.Fatalf("Expected PrefixAccessDenied, got %#v", err)
} else {
if nerr.Bucket != "bucket" {
@ -272,19 +274,19 @@ func TestFSDeleteObject(t *testing.T) {
obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
// Test with invalid bucket name
if err := fs.DeleteObject("fo", objectName); !isSameType(errorCause(err), BucketNameInvalid{}) {
if err := fs.DeleteObject("fo", objectName); !isSameType(errors.Cause(err), BucketNameInvalid{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with bucket does not exist
if err := fs.DeleteObject("foobucket", "fooobject"); !isSameType(errorCause(err), BucketNotFound{}) {
if err := fs.DeleteObject("foobucket", "fooobject"); !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with invalid object name
if err := fs.DeleteObject(bucketName, "\\"); !isSameType(errorCause(err), ObjectNameInvalid{}) {
if err := fs.DeleteObject(bucketName, "\\"); !isSameType(errors.Cause(err), ObjectNameInvalid{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with object does not exist.
if err := fs.DeleteObject(bucketName, "foooobject"); !isSameType(errorCause(err), ObjectNotFound{}) {
if err := fs.DeleteObject(bucketName, "foooobject"); !isSameType(errors.Cause(err), ObjectNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with valid condition
@ -295,7 +297,7 @@ func TestFSDeleteObject(t *testing.T) {
// Delete object should err disk not found.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if err := fs.DeleteObject(bucketName, objectName); err != nil {
if !isSameType(errorCause(err), BucketNotFound{}) {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
}
@ -318,11 +320,11 @@ func TestFSDeleteBucket(t *testing.T) {
}
// Test with an invalid bucket name
if err = fs.DeleteBucket("fo"); !isSameType(errorCause(err), BucketNameInvalid{}) {
if err = fs.DeleteBucket("fo"); !isSameType(errors.Cause(err), BucketNameInvalid{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with a nonexistent bucket
if err = fs.DeleteBucket("foobucket"); !isSameType(errorCause(err), BucketNotFound{}) {
if err = fs.DeleteBucket("foobucket"); !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with a valid case
@ -335,7 +337,7 @@ func TestFSDeleteBucket(t *testing.T) {
// Delete bucket should get error disk not found.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if err = fs.DeleteBucket(bucketName); err != nil {
if !isSameType(errorCause(err), BucketNotFound{}) {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
}
@ -378,7 +380,7 @@ func TestFSListBuckets(t *testing.T) {
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if _, err := fs.ListBuckets(); err != nil {
if errorCause(err) != errDiskNotFound {
if errors.Cause(err) != errDiskNotFound {
t.Fatal("Unexpected error: ", err)
}
}
@ -386,7 +388,7 @@ func TestFSListBuckets(t *testing.T) {
longPath := fmt.Sprintf("%0256d", 1)
fs.fsPath = longPath
if _, err := fs.ListBuckets(); err != nil {
if errorCause(err) != errFileNameTooLong {
if errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error: ", err)
}
}
@ -399,7 +401,7 @@ func TestFSHealObject(t *testing.T) {
obj := initFSObjects(disk, t)
_, _, err := obj.HealObject("bucket", "object")
if err == nil || !isSameType(errorCause(err), NotImplemented{}) {
if err == nil || !isSameType(errors.Cause(err), NotImplemented{}) {
t.Fatalf("Heal Object should return NotImplemented error ")
}
}
@ -411,7 +413,7 @@ func TestFSListObjectsHeal(t *testing.T) {
obj := initFSObjects(disk, t)
_, err := obj.ListObjectsHeal("bucket", "prefix", "marker", "delimiter", 1000)
if err == nil || !isSameType(errorCause(err), NotImplemented{}) {
if err == nil || !isSameType(errors.Cause(err), NotImplemented{}) {
t.Fatalf("Heal Object should return NotImplemented error ")
}
}
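After the migration, every assertion in these tests takes the same shape: unwrap with errors.Cause, then check the concrete API error type. A minimal sketch of that idiom (the test name is hypothetical; BucketNotFound is the cmd package's typed error):

func TestCauseUnwrapsTrace(t *testing.T) {
	// A typed error survives the Trace/Cause round trip intact.
	err := errors.Trace(BucketNotFound{Bucket: "bucket"})
	if _, ok := errors.Cause(err).(BucketNotFound); !ok {
		t.Fatalf("expected BucketNotFound, got %#v", err)
	}
}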

View file

@ -28,6 +28,7 @@ import (
"time"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/minio/minio/pkg/errors"
)
// Copied from github.com/Azure/azure-sdk-for-go/storage/container.go
@ -116,22 +117,22 @@ func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo,
blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
url, err := url.Parse(blobURL)
if err != nil {
return bucketInfo, azureToObjectError(traceError(err))
return bucketInfo, azureToObjectError(errors.Trace(err))
}
url.RawQuery = "restype=container"
resp, err := azureAnonRequest(httpHEAD, url.String(), nil)
if err != nil {
return bucketInfo, azureToObjectError(traceError(err), bucket)
return bucketInfo, azureToObjectError(errors.Trace(err), bucket)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return bucketInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket)), bucket)
return bucketInfo, azureToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket)), bucket)
}
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil {
return bucketInfo, traceError(err)
return bucketInfo, errors.Trace(err)
}
bucketInfo = BucketInfo{
@ -155,16 +156,16 @@ func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, l
blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL()
resp, err := azureAnonRequest(httpGET, blobURL, h)
if err != nil {
return azureToObjectError(traceError(err), bucket, object)
return azureToObjectError(errors.Trace(err), bucket, object)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
return azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
return azureToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
}
_, err = io.Copy(writer, resp.Body)
return traceError(err)
return errors.Trace(err)
}
// AnonGetObjectInfo - Send HEAD request without authentication and convert the
@ -173,12 +174,12 @@ func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectI
blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL()
resp, err := azureAnonRequest(httpHEAD, blobURL, nil)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return objInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
return objInfo, azureToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
}
var contentLength int64
@ -186,13 +187,13 @@ func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectI
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
if err != nil {
return objInfo, azureToObjectError(traceError(errUnexpected), bucket, object)
return objInfo, azureToObjectError(errors.Trace(errUnexpected), bucket, object)
}
}
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil {
return objInfo, traceError(err)
return objInfo, errors.Trace(err)
}
objInfo.ModTime = t
@ -225,13 +226,13 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string,
blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
url, err := url.Parse(blobURL)
if err != nil {
return result, azureToObjectError(traceError(err))
return result, azureToObjectError(errors.Trace(err))
}
url.RawQuery = q.Encode()
resp, err := azureAnonRequest(httpGET, url.String(), nil)
if err != nil {
return result, azureToObjectError(traceError(err))
return result, azureToObjectError(errors.Trace(err))
}
defer resp.Body.Close()
@ -239,11 +240,11 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string,
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, azureToObjectError(traceError(err))
return result, azureToObjectError(errors.Trace(err))
}
err = xml.Unmarshal(data, &listResp)
if err != nil {
return result, azureToObjectError(traceError(err))
return result, azureToObjectError(errors.Trace(err))
}
result.IsTruncated = listResp.NextMarker != ""
@ -279,13 +280,13 @@ func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken, deli
blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
url, err := url.Parse(blobURL)
if err != nil {
return result, azureToObjectError(traceError(err))
return result, azureToObjectError(errors.Trace(err))
}
url.RawQuery = q.Encode()
resp, err := http.Get(url.String())
if err != nil {
return result, azureToObjectError(traceError(err))
return result, azureToObjectError(errors.Trace(err))
}
defer resp.Body.Close()
@ -293,11 +294,11 @@ func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken, deli
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, azureToObjectError(traceError(err))
return result, azureToObjectError(errors.Trace(err))
}
err = xml.Unmarshal(data, &listResp)
if err != nil {
return result, azureToObjectError(traceError(err))
return result, azureToObjectError(errors.Trace(err))
}
// If NextMarker is not empty, this means response is truncated and NextContinuationToken should be set

View file

@ -23,7 +23,6 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@ -36,6 +35,7 @@ import (
humanize "github.com/dustin/go-humanize"
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
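Importing github.com/minio/minio/pkg/errors claims the errors package name, so the stdlib errors import is dropped and the errors.New call sites in these files become fmt.Errorf. A sketch of the resulting idiom under that import set:

import (
	"fmt"

	"github.com/minio/minio/pkg/errors" // shadows the stdlib errors package
)

func collisionErr() error {
	// errors.New is no longer reachable here, hence fmt.Errorf
	// for ad-hoc messages such as the upload-ID collision below.
	return errors.Trace(fmt.Errorf("Upload ID name collision"))
}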
@ -133,7 +133,7 @@ func s3MetaToAzureProperties(s3Metadata map[string]string) (storage.BlobMetadata
storage.BlobProperties, error) {
for k := range s3Metadata {
if strings.Contains(k, "--") {
return storage.BlobMetadata{}, storage.BlobProperties{}, traceError(UnsupportedMetadata{})
return storage.BlobMetadata{}, storage.BlobProperties{}, errors.Trace(UnsupportedMetadata{})
}
}
@ -248,15 +248,15 @@ func azureToObjectError(err error, params ...string) error {
return nil
}
e, ok := err.(*Error)
e, ok := err.(*errors.Error)
if !ok {
// Code should be fixed if this function is called without doing traceError()
// Code should be fixed if this function is called without doing errors.Trace()
// Else, handling the different situations in this function makes it complicated.
errorIf(err, "Expected type *Error")
return err
}
err = e.e
err = e.Cause
bucket := ""
object := ""
if len(params) >= 1 {
@ -294,7 +294,7 @@ func azureToObjectError(err error, params ...string) error {
err = BucketNameInvalid{Bucket: bucket}
}
}
e.e = err
e.Cause = err
return e
}
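azureToObjectError above type-asserts the *errors.Error wrapper, rewrites its exported Cause field to the matching S3-style error, and returns the same wrapper so the original trace survives. A condensed sketch of that unwrap-mutate-rewrap pattern, assuming only the Cause field shown in this diff (convertCause is hypothetical):

func convertCause(err error, bucket string) error {
	e, ok := err.(*errors.Error)
	if !ok {
		// Contract: callers must errors.Trace() before converting.
		return err
	}
	// Swap the cause for an S3-style error; the trace stays attached.
	e.Cause = BucketNotFound{Bucket: bucket}
	return e
}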
@ -316,11 +316,11 @@ func mustGetAzureUploadID() string {
// checkAzureUploadID - returns error in case of given string is upload ID.
func checkAzureUploadID(uploadID string) (err error) {
if len(uploadID) != 16 {
return traceError(MalformedUploadID{uploadID})
return errors.Trace(MalformedUploadID{uploadID})
}
if _, err = hex.DecodeString(uploadID); err != nil {
return traceError(MalformedUploadID{uploadID})
return errors.Trace(MalformedUploadID{uploadID})
}
return nil
@ -403,7 +403,7 @@ func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error {
err := container.Create(&storage.CreateContainerOptions{
Access: storage.ContainerAccessTypePrivate,
})
return azureToObjectError(traceError(err), bucket)
return azureToObjectError(errors.Trace(err), bucket)
}
// GetBucketInfo - Get bucket metadata..
@ -413,7 +413,7 @@ func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
// in Azure documentation, so we will simply use the same function here.
// Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata
if !IsValidBucketName(bucket) {
return bi, traceError(BucketNameInvalid{Bucket: bucket})
return bi, errors.Trace(BucketNameInvalid{Bucket: bucket})
}
// Azure does not have an equivalent call, hence use
@ -422,7 +422,7 @@ func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
Prefix: bucket,
})
if err != nil {
return bi, azureToObjectError(traceError(err), bucket)
return bi, azureToObjectError(errors.Trace(err), bucket)
}
for _, container := range resp.Containers {
if container.Name == bucket {
@ -435,19 +435,19 @@ func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
} // else continue
}
}
return bi, traceError(BucketNotFound{Bucket: bucket})
return bi, errors.Trace(BucketNotFound{Bucket: bucket})
}
// ListBuckets - Lists all Azure containers, uses the Azure equivalent ListContainers.
func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) {
resp, err := a.client.ListContainers(storage.ListContainersParameters{})
if err != nil {
return nil, azureToObjectError(traceError(err))
return nil, azureToObjectError(errors.Trace(err))
}
for _, container := range resp.Containers {
t, e := time.Parse(time.RFC1123, container.Properties.LastModified)
if e != nil {
return nil, traceError(e)
return nil, errors.Trace(e)
}
buckets = append(buckets, BucketInfo{
Name: container.Name,
@ -460,7 +460,7 @@ func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) {
// DeleteBucket - deletes a container on Azure, uses the Azure equivalent DeleteContainer.
func (a *azureObjects) DeleteBucket(bucket string) error {
container := a.client.GetContainerReference(bucket)
return azureToObjectError(traceError(container.Delete(nil)), bucket)
return azureToObjectError(errors.Trace(container.Delete(nil)), bucket)
}
// ListObjects - lists all blobs on Azure within a container filtered by prefix
@ -477,7 +477,7 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max
MaxResults: uint(maxKeys),
})
if err != nil {
return result, azureToObjectError(traceError(err), bucket, prefix)
return result, azureToObjectError(errors.Trace(err), bucket, prefix)
}
for _, object := range resp.Blobs {
@ -545,7 +545,7 @@ func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken, delimite
func (a *azureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
// startOffset cannot be negative.
if startOffset < 0 {
return toObjectErr(traceError(errUnexpected), bucket, object)
return toObjectErr(errors.Trace(errUnexpected), bucket, object)
}
blobRange := &storage.BlobRange{Start: uint64(startOffset)}
@ -564,11 +564,11 @@ func (a *azureObjects) GetObject(bucket, object string, startOffset int64, lengt
})
}
if err != nil {
return azureToObjectError(traceError(err), bucket, object)
return azureToObjectError(errors.Trace(err), bucket, object)
}
_, err = io.Copy(writer, rc)
rc.Close()
return traceError(err)
return errors.Trace(err)
}
// GetObjectInfo - reads blob metadata properties and replies back ObjectInfo,
@ -577,7 +577,7 @@ func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo,
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err = blob.GetProperties(nil)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
}
meta := azurePropertiesToS3Meta(blob.Metadata, blob.Properties)
@ -604,7 +604,7 @@ func (a *azureObjects) PutObject(bucket, object string, data *hash.Reader, metad
}
err = blob.CreateBlockBlobFromReader(data, nil)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
}
return a.GetObjectInfo(bucket, object)
}
@ -621,12 +621,12 @@ func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject s
destBlob.Metadata = azureMeta
err = destBlob.Copy(srcBlobURL, nil)
if err != nil {
return objInfo, azureToObjectError(traceError(err), srcBucket, srcObject)
return objInfo, azureToObjectError(errors.Trace(err), srcBucket, srcObject)
}
destBlob.Properties = props
err = destBlob.SetProperties(nil)
if err != nil {
return objInfo, azureToObjectError(traceError(err), srcBucket, srcObject)
return objInfo, azureToObjectError(errors.Trace(err), srcBucket, srcObject)
}
return a.GetObjectInfo(destBucket, destObject)
}
@ -637,7 +637,7 @@ func (a *azureObjects) DeleteObject(bucket, object string) error {
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err := blob.Delete(nil)
if err != nil {
return azureToObjectError(traceError(err), bucket, object)
return azureToObjectError(errors.Trace(err), bucket, object)
}
return nil
}
@ -661,10 +661,10 @@ func (a *azureObjects) checkUploadIDExists(bucketName, objectName, uploadID stri
blob := a.client.GetContainerReference(bucketName).GetBlobReference(
getAzureMetadataObjectName(objectName, uploadID))
err = blob.GetMetadata(nil)
err = azureToObjectError(traceError(err), bucketName, objectName)
err = azureToObjectError(errors.Trace(err), bucketName, objectName)
oerr := ObjectNotFound{bucketName, objectName}
if errorCause(err) == oerr {
err = traceError(InvalidUploadID{})
if errors.Cause(err) == oerr {
err = errors.Trace(InvalidUploadID{})
}
return err
}
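checkUploadIDExists above shows the value-comparison flavor of the pattern: the unwrapped cause is compared against a comparable struct error, and a more specific error is re-traced in its place. A hypothetical condensation:

// mapNotFound upgrades a traced ObjectNotFound into InvalidUploadID,
// mirroring the check in checkUploadIDExists.
func mapNotFound(err error, bucketName, objectName string) error {
	oerr := ObjectNotFound{bucketName, objectName}
	if errors.Cause(err) == oerr {
		return errors.Trace(InvalidUploadID{})
	}
	return err
}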
@ -673,19 +673,19 @@ func (a *azureObjects) checkUploadIDExists(bucketName, objectName, uploadID stri
func (a *azureObjects) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) {
uploadID = mustGetAzureUploadID()
if err = a.checkUploadIDExists(bucket, object, uploadID); err == nil {
return "", traceError(errors.New("Upload ID name collision"))
return "", errors.Trace(fmt.Errorf("Upload ID name collision"))
}
metadataObject := getAzureMetadataObjectName(object, uploadID)
var jsonData []byte
if jsonData, err = json.Marshal(azureMultipartMetadata{Name: object, Metadata: metadata}); err != nil {
return "", traceError(err)
return "", errors.Trace(err)
}
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
err = blob.CreateBlockBlobFromReader(bytes.NewBuffer(jsonData), nil)
if err != nil {
return "", azureToObjectError(traceError(err), bucket, metadataObject)
return "", azureToObjectError(errors.Trace(err), bucket, metadataObject)
}
return uploadID, nil
@ -721,7 +721,7 @@ func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err = blob.PutBlockWithLength(id, uint64(subPartSize), io.LimitReader(data, subPartSize), nil)
if err != nil {
return info, azureToObjectError(traceError(err), bucket, object)
return info, azureToObjectError(errors.Trace(err), bucket, object)
}
subPartNumber++
}
@ -747,7 +747,7 @@ func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumb
objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object)
resp, err := objBlob.GetBlockList(storage.BlockListTypeUncommitted, nil)
if err != nil {
return result, azureToObjectError(traceError(err), bucket, object)
return result, azureToObjectError(errors.Trace(err), bucket, object)
}
// Build a sorted list of parts and return the requested entries.
partsMap := make(map[int]PartInfo)
@ -756,7 +756,7 @@ func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumb
var parsedUploadID string
var md5Hex string
if partNumber, _, parsedUploadID, md5Hex, err = azureParseBlockID(block.Name); err != nil {
return result, azureToObjectError(traceError(errUnexpected), bucket, object)
return result, azureToObjectError(errors.Trace(errUnexpected), bucket, object)
}
if parsedUploadID != uploadID {
continue
@ -773,7 +773,7 @@ func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumb
if part.ETag != md5Hex {
// If two parts of the same partNumber were uploaded with different contents,
// return an error as we won't be able to decide which part is the latest.
return result, azureToObjectError(traceError(errUnexpected), bucket, object)
return result, azureToObjectError(errors.Trace(errUnexpected), bucket, object)
}
part.Size += block.Size
partsMap[partNumber] = part
@ -839,12 +839,12 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
var metadataReader io.Reader
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
if metadataReader, err = blob.Get(nil); err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, metadataObject)
return objInfo, azureToObjectError(errors.Trace(err), bucket, metadataObject)
}
var metadata azureMultipartMetadata
if err = json.NewDecoder(metadataReader).Decode(&metadata); err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, metadataObject)
return objInfo, azureToObjectError(errors.Trace(err), bucket, metadataObject)
}
defer func() {
@ -860,7 +860,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object)
resp, err := objBlob.GetBlockList(storage.BlockListTypeUncommitted, nil)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
}
getBlocks := func(partNumber int, etag string) (blocks []storage.Block, size int64, err error) {
@ -896,7 +896,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
var size int64
blocks, size, err = getBlocks(part.PartNumber, part.ETag)
if err != nil {
return objInfo, traceError(err)
return objInfo, errors.Trace(err)
}
allBlocks = append(allBlocks, blocks...)
@ -906,7 +906,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
// Error out if any part except the last is smaller than 5MiB.
for i, size := range partSizes[:len(partSizes)-1] {
if size < globalMinPartSize {
return objInfo, traceError(PartTooSmall{
return objInfo, errors.Trace(PartTooSmall{
PartNumber: uploadedParts[i].PartNumber,
PartSize: size,
PartETag: uploadedParts[i].ETag,
@ -916,7 +916,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
err = objBlob.PutBlockList(allBlocks, nil)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
}
if len(metadata.Metadata) > 0 {
objBlob.Metadata, objBlob.Properties, err = s3MetaToAzureProperties(metadata.Metadata)
@ -925,11 +925,11 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
}
err = objBlob.SetProperties(nil)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
}
err = objBlob.SetMetadata(nil)
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
}
}
return a.GetObjectInfo(bucket, object)
@ -952,13 +952,13 @@ func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.Bucket
}
prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 {
return traceError(NotImplemented{})
return errors.Trace(NotImplemented{})
}
if policies[0].Prefix != prefix {
return traceError(NotImplemented{})
return errors.Trace(NotImplemented{})
}
if policies[0].Policy != policy.BucketPolicyReadOnly {
return traceError(NotImplemented{})
return errors.Trace(NotImplemented{})
}
perm := storage.ContainerPermissions{
AccessType: storage.ContainerAccessTypeContainer,
@ -966,7 +966,7 @@ func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.Bucket
}
container := a.client.GetContainerReference(bucket)
err := container.SetPermissions(perm, nil)
return azureToObjectError(traceError(err), bucket)
return azureToObjectError(errors.Trace(err), bucket)
}
// GetBucketPolicies - Get the container ACL and convert it to canonical []bucketAccessPolicy
@ -975,15 +975,15 @@ func (a *azureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPoli
container := a.client.GetContainerReference(bucket)
perm, err := container.GetPermissions(nil)
if err != nil {
return policy.BucketAccessPolicy{}, azureToObjectError(traceError(err), bucket)
return policy.BucketAccessPolicy{}, azureToObjectError(errors.Trace(err), bucket)
}
switch perm.AccessType {
case storage.ContainerAccessTypePrivate:
return policy.BucketAccessPolicy{}, traceError(PolicyNotFound{Bucket: bucket})
return policy.BucketAccessPolicy{}, errors.Trace(PolicyNotFound{Bucket: bucket})
case storage.ContainerAccessTypeContainer:
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "")
default:
return policy.BucketAccessPolicy{}, azureToObjectError(traceError(NotImplemented{}))
return policy.BucketAccessPolicy{}, azureToObjectError(errors.Trace(NotImplemented{}))
}
return policyInfo, nil
}
@ -996,5 +996,5 @@ func (a *azureObjects) DeleteBucketPolicies(bucket string) error {
}
container := a.client.GetContainerReference(bucket)
err := container.SetPermissions(perm, nil)
return azureToObjectError(traceError(err))
return azureToObjectError(errors.Trace(err))
}

View file

@ -23,6 +23,7 @@ import (
"testing"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/minio/minio/pkg/errors"
)
// Test canonical metadata.
@ -60,7 +61,7 @@ func TestS3MetaToAzureProperties(t *testing.T) {
"invalid--meta": "value",
}
_, _, err = s3MetaToAzureProperties(headers)
if err = errorCause(err); err != nil {
if err = errors.Cause(err); err != nil {
if _, ok := err.(UnsupportedMetadata); !ok {
t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err)
}
@ -118,23 +119,23 @@ func TestAzureToObjectError(t *testing.T) {
nil, nil, "", "",
},
{
traceError(errUnexpected), errUnexpected, "", "",
errors.Trace(errUnexpected), errUnexpected, "", "",
},
{
traceError(errUnexpected), traceError(errUnexpected), "", "",
errors.Trace(errUnexpected), errors.Trace(errUnexpected), "", "",
},
{
traceError(storage.AzureStorageServiceError{
errors.Trace(storage.AzureStorageServiceError{
Code: "ContainerAlreadyExists",
}), BucketExists{Bucket: "bucket"}, "bucket", "",
},
{
traceError(storage.AzureStorageServiceError{
errors.Trace(storage.AzureStorageServiceError{
Code: "InvalidResourceName",
}), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
},
{
traceError(storage.AzureStorageServiceError{
errors.Trace(storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound,
}), ObjectNotFound{
Bucket: "bucket",
@ -142,12 +143,12 @@ func TestAzureToObjectError(t *testing.T) {
}, "bucket", "object",
},
{
traceError(storage.AzureStorageServiceError{
errors.Trace(storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound,
}), BucketNotFound{Bucket: "bucket"}, "bucket", "",
},
{
traceError(storage.AzureStorageServiceError{
errors.Trace(storage.AzureStorageServiceError{
StatusCode: http.StatusBadRequest,
}), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
},

View file

@ -17,7 +17,6 @@
package cmd
import (
"errors"
"fmt"
"io"
"net/http"
@ -25,6 +24,8 @@ import (
"strconv"
"strings"
"time"
"github.com/minio/minio/pkg/errors"
)
// mkRange converts offset, size into Range header equivalent.
@ -44,7 +45,7 @@ func (l *b2Objects) AnonGetObject(bucket string, object string, startOffset int6
uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object)
req, err := http.NewRequest("GET", uri, nil)
if err != nil {
return b2ToObjectError(traceError(err), bucket, object)
return b2ToObjectError(errors.Trace(err), bucket, object)
}
rng := mkRange(startOffset, length)
if rng != "" {
@ -52,14 +53,14 @@ func (l *b2Objects) AnonGetObject(bucket string, object string, startOffset int6
}
resp, err := l.anonClient.Do(req)
if err != nil {
return b2ToObjectError(traceError(err), bucket, object)
return b2ToObjectError(errors.Trace(err), bucket, object)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return b2ToObjectError(traceError(errors.New(resp.Status)), bucket, object)
return b2ToObjectError(errors.Trace(fmt.Errorf(resp.Status)), bucket, object)
}
_, err = io.Copy(writer, resp.Body)
return b2ToObjectError(traceError(err), bucket, object)
return b2ToObjectError(errors.Trace(err), bucket, object)
}
// Converts http Header into ObjectInfo. This function looks for all the
@ -73,13 +74,13 @@ func (l *b2Objects) AnonGetObject(bucket string, object string, startOffset int6
func headerToObjectInfo(bucket, object string, header http.Header) (objInfo ObjectInfo, err error) {
clen, err := strconv.ParseInt(header.Get("Content-Length"), 10, 64)
if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object)
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
}
// Converting upload timestamp in milliseconds to a time.Time value for ObjectInfo.ModTime.
timeStamp, err := strconv.ParseInt(header.Get("X-Bz-Upload-Timestamp"), 10, 64)
if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object)
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
}
// Populate user metadata by looking for all the X-Bz-Info-<name>
@ -91,12 +92,12 @@ func headerToObjectInfo(bucket, object string, header http.Header) (objInfo Obje
var name string
name, err = url.QueryUnescape(strings.TrimPrefix(key, "X-Bz-Info-"))
if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object)
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
}
var val string
val, err = url.QueryUnescape(header.Get(key))
if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object)
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
}
userMetadata[name] = val
}
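The loop above recovers user metadata from B2's X-Bz-Info-* response headers, URL-unescaping both the key suffix and the value and tracing any decode failure. A self-contained sketch of just that scan (userMetaFromHeader is a hypothetical helper):

func userMetaFromHeader(h http.Header) (map[string]string, error) {
	meta := make(map[string]string)
	for key := range h {
		if !strings.HasPrefix(key, "X-Bz-Info-") {
			continue
		}
		name, err := url.QueryUnescape(strings.TrimPrefix(key, "X-Bz-Info-"))
		if err != nil {
			return nil, errors.Trace(err)
		}
		val, err := url.QueryUnescape(h.Get(key))
		if err != nil {
			return nil, errors.Trace(err)
		}
		meta[name] = val
	}
	return meta, nil
}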
@ -119,15 +120,15 @@ func (l *b2Objects) AnonGetObjectInfo(bucket string, object string) (objInfo Obj
uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object)
req, err := http.NewRequest("HEAD", uri, nil)
if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object)
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
}
resp, err := l.anonClient.Do(req)
if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object)
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return objInfo, b2ToObjectError(traceError(errors.New(resp.Status)), bucket, object)
return objInfo, b2ToObjectError(errors.Trace(fmt.Errorf(resp.Status)), bucket, object)
}
return headerToObjectInfo(bucket, object, resp.Header)
}

View file

@ -32,6 +32,7 @@ import (
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
h2 "github.com/minio/minio/pkg/hash"
)
@ -134,15 +135,15 @@ func b2ToObjectError(err error, params ...string) error {
return nil
}
e, ok := err.(*Error)
e, ok := err.(*errors.Error)
if !ok {
// Code should be fixed if this function is called without doing traceError()
// Code should be fixed if this function is called without doing errors.Trace()
// Else, handling the different situations in this function makes it complicated.
errorIf(err, "Expected type *Error")
return err
}
err = e.e
err = e.Cause
bucket := ""
object := ""
uploadID := ""
@ -189,7 +190,7 @@ func b2ToObjectError(err error, params ...string) error {
err = InvalidUploadID{uploadID}
}
e.e = err
e.Cause = err
return e
}
@ -211,7 +212,7 @@ func (l *b2Objects) MakeBucketWithLocation(bucket, location string) error {
// All buckets are set to private by default.
_, err := l.b2Client.CreateBucket(l.ctx, bucket, bucketTypePrivate, nil, nil)
return b2ToObjectError(traceError(err), bucket)
return b2ToObjectError(errors.Trace(err), bucket)
}
func (l *b2Objects) reAuthorizeAccount() error {
@ -252,14 +253,14 @@ func (l *b2Objects) listBuckets(err error) ([]*b2.Bucket, error) {
func (l *b2Objects) Bucket(bucket string) (*b2.Bucket, error) {
bktList, err := l.listBuckets(nil)
if err != nil {
return nil, b2ToObjectError(traceError(err), bucket)
return nil, b2ToObjectError(errors.Trace(err), bucket)
}
for _, bkt := range bktList {
if bkt.Name == bucket {
return bkt, nil
}
}
return nil, traceError(BucketNotFound{Bucket: bucket})
return nil, errors.Trace(BucketNotFound{Bucket: bucket})
}
// GetBucketInfo gets bucket metadata..
@ -296,7 +297,7 @@ func (l *b2Objects) DeleteBucket(bucket string) error {
return err
}
err = bkt.DeleteBucket(l.ctx)
return b2ToObjectError(traceError(err), bucket)
return b2ToObjectError(errors.Trace(err), bucket)
}
// ListObjects lists all objects in a B2 bucket filtered by prefix; returns up to 1000 entries at a time.
@ -308,7 +309,7 @@ func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, del
loi = ListObjectsInfo{}
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
if lerr != nil {
return loi, b2ToObjectError(traceError(lerr), bucket)
return loi, b2ToObjectError(errors.Trace(lerr), bucket)
}
loi.IsTruncated = next != ""
loi.NextMarker = next
@ -342,7 +343,7 @@ func (l *b2Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter s
loi = ListObjectsV2Info{}
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, continuationToken, prefix, delimiter)
if lerr != nil {
return loi, b2ToObjectError(traceError(lerr), bucket)
return loi, b2ToObjectError(errors.Trace(lerr), bucket)
}
loi.IsTruncated = next != ""
loi.ContinuationToken = continuationToken
@ -379,11 +380,11 @@ func (l *b2Objects) GetObject(bucket string, object string, startOffset int64, l
}
reader, err := bkt.DownloadFileByName(l.ctx, object, startOffset, length)
if err != nil {
return b2ToObjectError(traceError(err), bucket, object)
return b2ToObjectError(errors.Trace(err), bucket, object)
}
defer reader.Close()
_, err = io.Copy(writer, reader)
return b2ToObjectError(traceError(err), bucket, object)
return b2ToObjectError(errors.Trace(err), bucket, object)
}
// GetObjectInfo reads object info and replies back ObjectInfo
@ -394,12 +395,12 @@ func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
}
f, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object)
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
}
f.Close()
fi, err := bkt.File(f.ID, object).GetFileInfo(l.ctx)
if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object)
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
}
objInfo = ObjectInfo{
Bucket: bucket,
@ -491,20 +492,20 @@ func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, met
var u *b2.URL
u, err = bkt.GetUploadURL(l.ctx)
if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object)
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
}
hr := newB2Reader(data, data.Size())
var f *b2.File
f, err = u.UploadFile(l.ctx, hr, int(hr.Size()), object, contentType, sha1AtEOF, metadata)
if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object)
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
}
var fi *b2.FileInfo
fi, err = f.GetFileInfo(l.ctx)
if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object)
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
}
return ObjectInfo{
@ -521,7 +522,7 @@ func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, met
// CopyObject copies a blob from source container to destination container.
func (l *b2Objects) CopyObject(srcBucket string, srcObject string, dstBucket string,
dstObject string, metadata map[string]string) (objInfo ObjectInfo, err error) {
return objInfo, traceError(NotImplemented{})
return objInfo, errors.Trace(NotImplemented{})
}
// DeleteObject deletes a blob in bucket
@ -532,12 +533,12 @@ func (l *b2Objects) DeleteObject(bucket string, object string) error {
}
reader, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
if err != nil {
return b2ToObjectError(traceError(err), bucket, object)
return b2ToObjectError(errors.Trace(err), bucket, object)
}
io.Copy(ioutil.Discard, reader)
reader.Close()
err = bkt.File(reader.ID, object).DeleteFileVersion(l.ctx)
return b2ToObjectError(traceError(err), bucket, object)
return b2ToObjectError(errors.Trace(err), bucket, object)
}
// ListMultipartUploads lists all multipart uploads.
@ -556,7 +557,7 @@ func (l *b2Objects) ListMultipartUploads(bucket string, prefix string, keyMarker
}
largeFiles, nextMarker, err := bkt.ListUnfinishedLargeFiles(l.ctx, uploadIDMarker, maxUploads)
if err != nil {
return lmi, b2ToObjectError(traceError(err), bucket)
return lmi, b2ToObjectError(errors.Trace(err), bucket)
}
lmi = ListMultipartsInfo{
MaxUploads: maxUploads,
@ -591,7 +592,7 @@ func (l *b2Objects) NewMultipartUpload(bucket string, object string, metadata ma
delete(metadata, "content-type")
lf, err := bkt.StartLargeFile(l.ctx, object, contentType, metadata)
if err != nil {
return uploadID, b2ToObjectError(traceError(err), bucket, object)
return uploadID, b2ToObjectError(errors.Trace(err), bucket, object)
}
return lf.ID, nil
@ -600,7 +601,7 @@ func (l *b2Objects) NewMultipartUpload(bucket string, object string, metadata ma
// CopyObjectPart copies a part of an object to another bucket and object.
func (l *b2Objects) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string,
uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
return PartInfo{}, traceError(NotImplemented{})
return PartInfo{}, errors.Trace(NotImplemented{})
}
// PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API.
@ -612,13 +613,13 @@ func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string,
fc, err := bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx)
if err != nil {
return pi, b2ToObjectError(traceError(err), bucket, object, uploadID)
return pi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
}
hr := newB2Reader(data, data.Size())
sha1, err := fc.UploadPart(l.ctx, hr, sha1AtEOF, int(hr.Size()), partID)
if err != nil {
return pi, b2ToObjectError(traceError(err), bucket, object, uploadID)
return pi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
}
return PartInfo{
@ -646,7 +647,7 @@ func (l *b2Objects) ListObjectParts(bucket string, object string, uploadID strin
partNumberMarker++
partsList, next, err := bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts)
if err != nil {
return lpi, b2ToObjectError(traceError(err), bucket, object, uploadID)
return lpi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
}
if next != 0 {
lpi.IsTruncated = true
@ -669,7 +670,7 @@ func (l *b2Objects) AbortMultipartUpload(bucket string, object string, uploadID
return err
}
err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx)
return b2ToObjectError(traceError(err), bucket, object, uploadID)
return b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API.
@ -683,7 +684,7 @@ func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, upload
// B2 requires contiguous part numbers starting with 1; it does not support
// hand-picking part numbers, so we return an S3-compatible error instead.
if i+1 != uploadedPart.PartNumber {
return oi, b2ToObjectError(traceError(InvalidPart{}), bucket, object, uploadID)
return oi, b2ToObjectError(errors.Trace(InvalidPart{}), bucket, object, uploadID)
}
// Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag.
@ -691,7 +692,7 @@ func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, upload
}
if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil {
return oi, b2ToObjectError(traceError(err), bucket, object, uploadID)
return oi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
}
return l.GetObjectInfo(bucket, object)
@ -712,13 +713,13 @@ func (l *b2Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAcc
}
prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 {
return traceError(NotImplemented{})
return errors.Trace(NotImplemented{})
}
if policies[0].Prefix != prefix {
return traceError(NotImplemented{})
return errors.Trace(NotImplemented{})
}
if policies[0].Policy != policy.BucketPolicyReadOnly {
return traceError(NotImplemented{})
return errors.Trace(NotImplemented{})
}
bkt, err := l.Bucket(bucket)
if err != nil {
@ -726,7 +727,7 @@ func (l *b2Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAcc
}
bkt.Type = bucketTypeReadOnly
_, err = bkt.Update(l.ctx)
return b2ToObjectError(traceError(err))
return b2ToObjectError(errors.Trace(err))
}
// GetBucketPolicies returns the current bucketType from the B2 backend and converts
@ -744,7 +745,7 @@ func (l *b2Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy,
// bkt.Type can also be snapshot, but it is only allowed through B2 browser console,
// just return back as policy not found for all cases.
// CreateBucket always sets the value to allPrivate by default.
return policy.BucketAccessPolicy{}, traceError(PolicyNotFound{Bucket: bucket})
return policy.BucketAccessPolicy{}, errors.Trace(PolicyNotFound{Bucket: bucket})
}
// DeleteBucketPolicies - resets the bucketType of bucket on B2 to 'allPrivate'.
@ -755,5 +756,5 @@ func (l *b2Objects) DeleteBucketPolicies(bucket string) error {
}
bkt.Type = bucketTypePrivate
_, err = bkt.Update(l.ctx)
return b2ToObjectError(traceError(err))
return b2ToObjectError(errors.Trace(err))
}
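Every b2 hunk above follows the same two-step shape: wrap the raw backend error with errors.Trace at the call site, then let b2ToObjectError rewrite the traced error's Cause into an object-layer error while keeping the captured stack. A minimal sketch of that shape, with a hypothetical backendNotFound type standing in for a real B2 failure (not part of this change):

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/errors"
)

// backendNotFound stands in for a raw B2 error; it is illustrative only.
type backendNotFound struct{}

func (backendNotFound) Error() string { return "backend: not found" }

// toObjectError mirrors the b2ToObjectError shape: unwrap the traced
// error, rewrite its Cause, and return the same *errors.Error so the
// stack captured by errors.Trace survives the translation.
func toObjectError(err error, bucket, object string) error {
	if err == nil {
		return nil
	}
	e, ok := err.(*errors.Error)
	if !ok {
		return err
	}
	if _, isNotFound := e.Cause.(backendNotFound); isNotFound {
		e.Cause = fmt.Errorf("object not found: %s/%s", bucket, object)
	}
	return e
}

func main() {
	err := toObjectError(errors.Trace(backendNotFound{}), "bucket", "object")
	fmt.Println(errors.Cause(err)) // object not found: bucket/object
}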

View file

@ -22,6 +22,8 @@ import (
"net/http"
"strconv"
"time"
"github.com/minio/minio/pkg/errors"
)
func toGCSPublicURL(bucket, object string) string {
@ -32,7 +34,7 @@ func toGCSPublicURL(bucket, object string) string {
func (l *gcsGateway) AnonGetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error {
req, err := http.NewRequest("GET", toGCSPublicURL(bucket, object), nil)
if err != nil {
return gcsToObjectError(traceError(err), bucket, object)
return gcsToObjectError(errors.Trace(err), bucket, object)
}
if length > 0 && startOffset > 0 {
@ -43,28 +45,28 @@ func (l *gcsGateway) AnonGetObject(bucket string, object string, startOffset int
resp, err := http.DefaultClient.Do(req)
if err != nil {
return gcsToObjectError(traceError(err), bucket, object)
return gcsToObjectError(errors.Trace(err), bucket, object)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
return gcsToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
return gcsToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
}
_, err = io.Copy(writer, resp.Body)
return gcsToObjectError(traceError(err), bucket, object)
return gcsToObjectError(errors.Trace(err), bucket, object)
}
// AnonGetObjectInfo - Get object info anonymously
func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
resp, err := http.Head(toGCSPublicURL(bucket, object))
if err != nil {
return objInfo, gcsToObjectError(traceError(err), bucket, object)
return objInfo, gcsToObjectError(errors.Trace(err), bucket, object)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return objInfo, gcsToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
return objInfo, gcsToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
}
var contentLength int64
@ -72,13 +74,13 @@ func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo Ob
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
if err != nil {
return objInfo, gcsToObjectError(traceError(errUnexpected), bucket, object)
return objInfo, gcsToObjectError(errors.Trace(errUnexpected), bucket, object)
}
}
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil {
return objInfo, traceError(err)
return objInfo, errors.Trace(err)
}
objInfo.ModTime = t
@ -99,7 +101,7 @@ func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo Ob
func (l *gcsGateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
return ListObjectsInfo{}, s3ToObjectError(errors.Trace(err), bucket)
}
return fromMinioClientListBucketResult(bucket, result), nil
@ -110,7 +112,7 @@ func (l *gcsGateway) AnonListObjectsV2(bucket, prefix, continuationToken, delimi
// Issue a V1 ListObjects request to the backend
result, err := l.anonClient.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys)
if err != nil {
return ListObjectsV2Info{}, s3ToObjectError(traceError(err), bucket)
return ListObjectsV2Info{}, s3ToObjectError(errors.Trace(err), bucket)
}
// translate V1 Result to V2Info
return fromMinioClientListBucketResultToV2Info(bucket, result), nil
@ -120,18 +122,18 @@ func (l *gcsGateway) AnonListObjectsV2(bucket, prefix, continuationToken, delimi
func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) {
resp, err := http.Head(toGCSPublicURL(bucket, ""))
if err != nil {
return bucketInfo, gcsToObjectError(traceError(err))
return bucketInfo, gcsToObjectError(errors.Trace(err))
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return bucketInfo, gcsToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket)), bucket)
return bucketInfo, gcsToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket)), bucket)
}
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil {
return bucketInfo, traceError(err)
return bucketInfo, errors.Trace(err)
}
// Last-Modified date being returned by GCS

View file

@ -38,6 +38,8 @@ import (
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
errors2 "github.com/minio/minio/pkg/errors"
)
var (
@ -179,15 +181,15 @@ func gcsToObjectError(err error, params ...string) error {
return nil
}
e, ok := err.(*Error)
e, ok := err.(*errors2.Error)
if !ok {
// Code should be fixed if this function is called without doing traceError()
// Code should be fixed if this function is called without doing errors2.Trace()
// Otherwise, handling all the different situations here would make this function complicated.
errorIf(err, "Expected type *Error")
return err
}
err = e.e
err = e.Cause
bucket := ""
object := ""
@ -208,7 +210,7 @@ func gcsToObjectError(err error, params ...string) error {
err = BucketNotFound{
Bucket: bucket,
}
e.e = err
e.Cause = err
return e
case "storage: object doesn't exist":
if uploadID != "" {
@ -221,7 +223,7 @@ func gcsToObjectError(err error, params ...string) error {
Object: object,
}
}
e.e = err
e.Cause = err
return e
}
@ -229,12 +231,12 @@ func gcsToObjectError(err error, params ...string) error {
if !ok {
// We don't interpret non-Minio errors, as Minio errors will
// have a StatusCode to help convert them to object errors.
e.e = err
e.Cause = err
return e
}
if len(googleAPIErr.Errors) == 0 {
e.e = err
e.Cause = err
return e
}
@ -279,7 +281,7 @@ func gcsToObjectError(err error, params ...string) error {
err = fmt.Errorf("Unsupported error reason: %s", reason)
}
e.e = err
e.Cause = err
return e
}
@ -424,14 +426,14 @@ func (l *gcsGateway) MakeBucketWithLocation(bucket, location string) error {
Location: location,
})
return gcsToObjectError(traceError(err), bucket)
return gcsToObjectError(errors2.Trace(err), bucket)
}
// GetBucketInfo - Get bucket metadata..
func (l *gcsGateway) GetBucketInfo(bucket string) (BucketInfo, error) {
attrs, err := l.client.Bucket(bucket).Attrs(l.ctx)
if err != nil {
return BucketInfo{}, gcsToObjectError(traceError(err), bucket)
return BucketInfo{}, gcsToObjectError(errors2.Trace(err), bucket)
}
return BucketInfo{
@ -452,7 +454,7 @@ func (l *gcsGateway) ListBuckets() (buckets []BucketInfo, err error) {
}
if ierr != nil {
return buckets, gcsToObjectError(traceError(ierr))
return buckets, gcsToObjectError(errors2.Trace(ierr))
}
buckets = append(buckets, BucketInfo{
@ -477,7 +479,7 @@ func (l *gcsGateway) DeleteBucket(bucket string) error {
break
}
if err != nil {
return gcsToObjectError(traceError(err))
return gcsToObjectError(errors2.Trace(err))
}
if objAttrs.Prefix == globalMinioSysTmp {
gcsMinioPathFound = true
@ -487,7 +489,7 @@ func (l *gcsGateway) DeleteBucket(bucket string) error {
break
}
if nonGCSMinioPathFound {
return gcsToObjectError(traceError(BucketNotEmpty{}))
return gcsToObjectError(errors2.Trace(BucketNotEmpty{}))
}
if gcsMinioPathFound {
// Remove minio.sys.tmp before deleting the bucket.
@ -498,16 +500,16 @@ func (l *gcsGateway) DeleteBucket(bucket string) error {
break
}
if err != nil {
return gcsToObjectError(traceError(err))
return gcsToObjectError(errors2.Trace(err))
}
err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(l.ctx)
if err != nil {
return gcsToObjectError(traceError(err))
return gcsToObjectError(errors2.Trace(err))
}
}
}
err := l.client.Bucket(bucket).Delete(l.ctx)
return gcsToObjectError(traceError(err), bucket)
return gcsToObjectError(errors2.Trace(err), bucket)
}
func toGCSPageToken(name string) string {
@ -589,7 +591,7 @@ func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, de
break
}
if err != nil {
return ListObjectsInfo{}, gcsToObjectError(traceError(err), bucket, prefix)
return ListObjectsInfo{}, gcsToObjectError(errors2.Trace(err), bucket, prefix)
}
nextMarker = toGCSPageToken(attrs.Name)
@ -672,7 +674,7 @@ func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken, delimiter
}
if err != nil {
return ListObjectsV2Info{}, gcsToObjectError(traceError(err), bucket, prefix)
return ListObjectsV2Info{}, gcsToObjectError(errors2.Trace(err), bucket, prefix)
}
if attrs.Prefix == globalMinioSysTmp {
@ -716,18 +718,18 @@ func (l *gcsGateway) GetObject(bucket string, key string, startOffset int64, len
// if we want to mimic S3 behavior exactly, we need to verify whether the bucket exists first,
// otherwise GCS will just report that the object doesn't exist for a non-existent bucket
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return gcsToObjectError(traceError(err), bucket)
return gcsToObjectError(errors2.Trace(err), bucket)
}
object := l.client.Bucket(bucket).Object(key)
r, err := object.NewRangeReader(l.ctx, startOffset, length)
if err != nil {
return gcsToObjectError(traceError(err), bucket, key)
return gcsToObjectError(errors2.Trace(err), bucket, key)
}
defer r.Close()
if _, err := io.Copy(writer, r); err != nil {
return gcsToObjectError(traceError(err), bucket, key)
return gcsToObjectError(errors2.Trace(err), bucket, key)
}
return nil
@ -776,12 +778,12 @@ func (l *gcsGateway) GetObjectInfo(bucket string, object string) (ObjectInfo, er
// if we want to mimic S3 behavior exactly, we need to verify whether the bucket exists first,
// otherwise GCS will just report that the object doesn't exist for a non-existent bucket
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket)
}
attrs, err := l.client.Bucket(bucket).Object(object).Attrs(l.ctx)
if err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, object)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, object)
}
return fromGCSAttrsToObjectInfo(attrs), nil
@ -792,7 +794,7 @@ func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, met
// if we want to mimic S3 behavior exactly, we need to verify whether the bucket exists first,
// otherwise GCS will just report that the object doesn't exist for a non-existent bucket
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket)
}
object := l.client.Bucket(bucket).Object(key)
@ -806,7 +808,7 @@ func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, met
if _, err := io.Copy(w, data); err != nil {
// Close the object writer upon error.
w.CloseWithError(err)
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
}
// Close the object writer upon success.
@ -814,7 +816,7 @@ func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, met
attrs, err := object.Attrs(l.ctx)
if err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
}
return fromGCSAttrsToObjectInfo(attrs), nil
@ -832,7 +834,7 @@ func (l *gcsGateway) CopyObject(srcBucket string, srcObject string, destBucket s
attrs, err := copier.Run(l.ctx)
if err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), destBucket, destObject)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), destBucket, destObject)
}
return fromGCSAttrsToObjectInfo(attrs), nil
@ -842,7 +844,7 @@ func (l *gcsGateway) CopyObject(srcBucket string, srcObject string, destBucket s
func (l *gcsGateway) DeleteObject(bucket string, object string) error {
err := l.client.Bucket(bucket).Object(object).Delete(l.ctx)
if err != nil {
return gcsToObjectError(traceError(err), bucket, object)
return gcsToObjectError(errors2.Trace(err), bucket, object)
}
return nil
@ -868,7 +870,7 @@ func (l *gcsGateway) NewMultipartUpload(bucket string, key string, metadata map[
bucket,
key,
}); err != nil {
return "", gcsToObjectError(traceError(err), bucket, key)
return "", gcsToObjectError(errors2.Trace(err), bucket, key)
}
return uploadID, nil
}
@ -888,7 +890,7 @@ func (l *gcsGateway) ListMultipartUploads(bucket string, prefix string, keyMarke
// an object layer compatible error upon any error.
func (l *gcsGateway) checkUploadIDExists(bucket string, key string, uploadID string) error {
_, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(l.ctx)
return gcsToObjectError(traceError(err), bucket, key, uploadID)
return gcsToObjectError(errors2.Trace(err), bucket, key, uploadID)
}
// PutObjectPart puts a part of object in bucket
@ -909,7 +911,7 @@ func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, p
if _, err := io.Copy(w, data); err != nil {
// Make sure to close object writer upon error.
w.Close()
return PartInfo{}, gcsToObjectError(traceError(err), bucket, key)
return PartInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
}
// Make sure to close the object writer upon success.
w.Close()
@ -940,7 +942,7 @@ func (l *gcsGateway) cleanupMultipartUpload(bucket, key, uploadID string) error
break
}
if err != nil {
return gcsToObjectError(traceError(err), bucket, key)
return gcsToObjectError(errors2.Trace(err), bucket, key)
}
object := l.client.Bucket(bucket).Object(attrs.Name)
@ -973,23 +975,23 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
partZeroAttrs, err := object.Attrs(l.ctx)
if err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key, uploadID)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key, uploadID)
}
r, err := object.NewReader(l.ctx)
if err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
}
defer r.Close()
// Check version compatibility of the meta file before compose()
multipartMeta := gcsMultipartMetaV1{}
if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
}
if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion {
return ObjectInfo{}, gcsToObjectError(traceError(errFormatNotSupported), bucket, key)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(errFormatNotSupported), bucket, key)
}
// Validate if the gcs.json stores valid entries for the bucket and key.
@ -1006,7 +1008,7 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
uploadedPart.PartNumber, uploadedPart.ETag)))
partAttr, pErr := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, uploadedPart.PartNumber, uploadedPart.ETag)).Attrs(l.ctx)
if pErr != nil {
return ObjectInfo{}, gcsToObjectError(traceError(pErr), bucket, key, uploadID)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(pErr), bucket, key, uploadID)
}
partSizes[i] = partAttr.Size
}
@ -1014,7 +1016,7 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
// Error out if any part except the last is smaller than 5MiB.
for i, size := range partSizes[:len(partSizes)-1] {
if size < globalMinPartSize {
return ObjectInfo{}, traceError(PartTooSmall{
return ObjectInfo{}, errors2.Trace(PartTooSmall{
PartNumber: uploadedParts[i].PartNumber,
PartSize: size,
PartETag: uploadedParts[i].ETag,
@ -1045,7 +1047,7 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
composer.Metadata = partZeroAttrs.Metadata
if _, err = composer.Run(l.ctx); err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
}
}
@ -1058,10 +1060,10 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
composer.Metadata = partZeroAttrs.Metadata
attrs, err := composer.Run(l.ctx)
if err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
}
if err = l.cleanupMultipartUpload(bucket, key, uploadID); err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
}
return fromGCSAttrsToObjectInfo(attrs), nil
}
@ -1080,16 +1082,16 @@ func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAc
prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 {
return traceError(NotImplemented{})
return errors2.Trace(NotImplemented{})
}
if policies[0].Prefix != prefix {
return traceError(NotImplemented{})
return errors2.Trace(NotImplemented{})
}
acl := l.client.Bucket(bucket).ACL()
if policies[0].Policy == policy.BucketPolicyNone {
if err := acl.Delete(l.ctx, storage.AllUsers); err != nil {
return gcsToObjectError(traceError(err), bucket)
return gcsToObjectError(errors2.Trace(err), bucket)
}
return nil
}
@ -1101,11 +1103,11 @@ func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAc
case policy.BucketPolicyWriteOnly:
role = storage.RoleWriter
default:
return traceError(NotImplemented{})
return errors2.Trace(NotImplemented{})
}
if err := acl.Set(l.ctx, storage.AllUsers, role); err != nil {
return gcsToObjectError(traceError(err), bucket)
return gcsToObjectError(errors2.Trace(err), bucket)
}
return nil
@ -1115,7 +1117,7 @@ func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAc
func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
rules, err := l.client.Bucket(bucket).ACL().List(l.ctx)
if err != nil {
return policy.BucketAccessPolicy{}, gcsToObjectError(traceError(err), bucket)
return policy.BucketAccessPolicy{}, gcsToObjectError(errors2.Trace(err), bucket)
}
policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
for _, r := range rules {
@ -1131,7 +1133,7 @@ func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy
}
// Return NoSuchBucketPolicy error, when policy is not set
if len(policyInfo.Statements) == 0 {
return policy.BucketAccessPolicy{}, gcsToObjectError(traceError(PolicyNotFound{}), bucket)
return policy.BucketAccessPolicy{}, gcsToObjectError(errors2.Trace(PolicyNotFound{}), bucket)
}
return policyInfo, nil
}
@ -1140,7 +1142,7 @@ func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy
func (l *gcsGateway) DeleteBucketPolicies(bucket string) error {
// This only removes the storage.AllUsers policies
if err := l.client.Bucket(bucket).ACL().Delete(l.ctx, storage.AllUsers); err != nil {
return gcsToObjectError(traceError(err), bucket)
return gcsToObjectError(errors2.Trace(err), bucket)
}
return nil
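Note that the gcs file imports the package under the alias errors2, presumably to avoid a name collision within that file; the API is unchanged. A short sketch of how the alias reads in practice, assuming only the Trace/Cause behavior visible in this diff (Trace passes nil through, Cause returns the wrapped error):

package main

import (
	"fmt"

	errors2 "github.com/minio/minio/pkg/errors"
)

func main() {
	cause := fmt.Errorf("storage: bucket doesn't exist")
	traced := errors2.Trace(cause) // *errors2.Error carrying cause and stack

	// gcsToObjectError switches on the cause text, never on the wrapper.
	if errors2.Cause(traced).Error() == "storage: bucket doesn't exist" {
		fmt.Println("translate to BucketNotFound here")
	}

	// Trace is nil-safe, which is why call sites can wrap unconditionally.
	fmt.Println(errors2.Trace(nil) == nil) // true
}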

View file

@ -17,7 +17,6 @@
package cmd
import (
"errors"
"fmt"
"net/url"
"os"
@ -28,6 +27,7 @@ import (
"github.com/gorilla/mux"
"github.com/minio/cli"
"github.com/minio/minio/pkg/errors"
miniohttp "github.com/minio/minio/pkg/http"
)
@ -115,7 +115,7 @@ func validateGatewayArguments(serverAddr, endpointAddr string) error {
return err
}
if sameTarget {
return errors.New("endpoint points to the local gateway")
return fmt.Errorf("endpoint points to the local gateway")
}
}
return nil
@ -144,7 +144,7 @@ func startGateway(ctx *cli.Context, gw Gateway) {
// Validate if we have access, secret set through environment.
gatewayName := gw.Name()
if !globalIsEnvCreds {
errorIf(errors.New("Access and secret keys not set"), "Access and Secret keys should be set through ENVs for backend [%s]", gatewayName)
errorIf(fmt.Errorf("Access and secret keys not set"), "Access and Secret keys should be set through ENVs for backend [%s]", gatewayName)
cli.ShowCommandHelpAndExit(ctx, gatewayName, 1)
}
@ -158,7 +158,7 @@ func startGateway(ctx *cli.Context, gw Gateway) {
enableLoggers()
// Init the error tracing module.
initError()
errors.Init(GOPATH, "github.com/minio/minio")
// Check and load SSL certificates.
var err error
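Two side effects of the new package show up in this file: the stdlib "errors" import has to go because pkg/errors now claims that name (hence errors.New call sites becoming fmt.Errorf), and the old initError() bootstrap becomes errors.Init, which from its arguments registers GOPATH and the repository prefix, presumably so recorded stack frames can be reported with trimmed paths. A sketch of the bootstrap under those assumptions:

package main

import (
	"fmt"
	"os"

	"github.com/minio/minio/pkg/errors"
)

func main() {
	// Register GOPATH and the repo prefix once at startup; assumed to
	// control how file paths in recorded stack frames are shortened.
	// os.Getenv stands in for the GOPATH global used in the diff.
	errors.Init(os.Getenv("GOPATH"), "github.com/minio/minio")

	err := errors.Trace(fmt.Errorf("boom"))
	if e, ok := err.(*errors.Error); ok {
		// Stack() returning []string matches the logger hunk later
		// in this diff (strings.Join(terr.Stack(), " ")).
		fmt.Println(e.Cause, e.Stack())
	}
}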

View file

@ -20,6 +20,7 @@ import (
"io"
minio "github.com/minio/minio-go"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -27,7 +28,7 @@ import (
func (l *s3Objects) AnonPutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, e error) {
oi, err := l.anonClient.PutObject(bucket, object, data, data.Size(), data.MD5(), data.SHA256(), toMinioClientMetadata(metadata))
if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(errors.Trace(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
@ -37,17 +38,17 @@ func (l *s3Objects) AnonPutObject(bucket string, object string, data *hash.Reade
func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
opts := minio.GetObjectOptions{}
if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
return s3ToObjectError(traceError(err), bucket, key)
return s3ToObjectError(errors.Trace(err), bucket, key)
}
object, _, err := l.anonClient.GetObject(bucket, key, opts)
if err != nil {
return s3ToObjectError(traceError(err), bucket, key)
return s3ToObjectError(errors.Trace(err), bucket, key)
}
defer object.Close()
if _, err := io.CopyN(writer, object, length); err != nil {
return s3ToObjectError(traceError(err), bucket, key)
return s3ToObjectError(errors.Trace(err), bucket, key)
}
return nil
@ -57,7 +58,7 @@ func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64,
func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, e error) {
oi, err := l.anonClient.StatObject(bucket, object, minio.StatObjectOptions{})
if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(errors.Trace(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
@ -67,7 +68,7 @@ func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo Obj
func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return loi, s3ToObjectError(traceError(err), bucket)
return loi, s3ToObjectError(errors.Trace(err), bucket)
}
return fromMinioClientListBucketResult(bucket, result), nil
@ -77,7 +78,7 @@ func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string,
func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, e error) {
result, err := l.anonClient.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
if err != nil {
return loi, s3ToObjectError(traceError(err), bucket)
return loi, s3ToObjectError(errors.Trace(err), bucket)
}
return fromMinioClientListBucketV2Result(bucket, result), nil
@ -86,14 +87,14 @@ func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken, delimit
// AnonGetBucketInfo - Get bucket metadata anonymously.
func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi BucketInfo, e error) {
if exists, err := l.anonClient.BucketExists(bucket); err != nil {
return bi, s3ToObjectError(traceError(err), bucket)
return bi, s3ToObjectError(errors.Trace(err), bucket)
} else if !exists {
return bi, traceError(BucketNotFound{Bucket: bucket})
return bi, errors.Trace(BucketNotFound{Bucket: bucket})
}
buckets, err := l.anonClient.ListBuckets()
if err != nil {
return bi, s3ToObjectError(traceError(err), bucket)
return bi, s3ToObjectError(errors.Trace(err), bucket)
}
for _, bi := range buckets {
@ -107,5 +108,5 @@ func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi BucketInfo, e error) {
}, nil
}
return bi, traceError(BucketNotFound{Bucket: bucket})
return bi, errors.Trace(BucketNotFound{Bucket: bucket})
}

View file

@ -24,6 +24,7 @@ import (
minio "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -105,15 +106,15 @@ func s3ToObjectError(err error, params ...string) error {
return nil
}
e, ok := err.(*Error)
e, ok := err.(*errors.Error)
if !ok {
// Code should be fixed if this function is called without doing traceError()
// Code should be fixed if this function is called without doing errors.Trace()
// Otherwise, handling all the different situations here would make this function complicated.
errorIf(err, "Expected type *Error")
return err
}
err = e.e
err = e.Cause
bucket := ""
object := ""
@ -163,7 +164,7 @@ func s3ToObjectError(err error, params ...string) error {
err = PartTooSmall{}
}
e.e = err
e.Cause = err
return e
}
@ -230,7 +231,7 @@ func (l *s3Objects) StorageInfo() (si StorageInfo) {
func (l *s3Objects) MakeBucketWithLocation(bucket, location string) error {
err := l.Client.MakeBucket(bucket, location)
if err != nil {
return s3ToObjectError(traceError(err), bucket)
return s3ToObjectError(errors.Trace(err), bucket)
}
return err
}
@ -245,12 +246,12 @@ func (l *s3Objects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
// access to these buckets.
// Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
if s3utils.CheckValidBucketName(bucket) != nil {
return bi, traceError(BucketNameInvalid{Bucket: bucket})
return bi, errors.Trace(BucketNameInvalid{Bucket: bucket})
}
buckets, err := l.Client.ListBuckets()
if err != nil {
return bi, s3ToObjectError(traceError(err), bucket)
return bi, s3ToObjectError(errors.Trace(err), bucket)
}
for _, bi := range buckets {
@ -264,14 +265,14 @@ func (l *s3Objects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
}, nil
}
return bi, traceError(BucketNotFound{Bucket: bucket})
return bi, errors.Trace(BucketNotFound{Bucket: bucket})
}
// ListBuckets lists all S3 buckets
func (l *s3Objects) ListBuckets() ([]BucketInfo, error) {
buckets, err := l.Client.ListBuckets()
if err != nil {
return nil, s3ToObjectError(traceError(err))
return nil, s3ToObjectError(errors.Trace(err))
}
b := make([]BucketInfo, len(buckets))
@ -289,7 +290,7 @@ func (l *s3Objects) ListBuckets() ([]BucketInfo, error) {
func (l *s3Objects) DeleteBucket(bucket string) error {
err := l.Client.RemoveBucket(bucket)
if err != nil {
return s3ToObjectError(traceError(err), bucket)
return s3ToObjectError(errors.Trace(err), bucket)
}
return nil
}
@ -298,7 +299,7 @@ func (l *s3Objects) DeleteBucket(bucket string) error {
func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return loi, s3ToObjectError(traceError(err), bucket)
return loi, s3ToObjectError(errors.Trace(err), bucket)
}
return fromMinioClientListBucketResult(bucket, result), nil
@ -308,7 +309,7 @@ func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, del
func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, e error) {
result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
if err != nil {
return loi, s3ToObjectError(traceError(err), bucket)
return loi, s3ToObjectError(errors.Trace(err), bucket)
}
return fromMinioClientListBucketV2Result(bucket, result), nil
@ -366,23 +367,23 @@ func fromMinioClientListBucketResult(bucket string, result minio.ListBucketResul
// length indicates the total length of the object.
func (l *s3Objects) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
if length < 0 && length != -1 {
return s3ToObjectError(traceError(errInvalidArgument), bucket, key)
return s3ToObjectError(errors.Trace(errInvalidArgument), bucket, key)
}
opts := minio.GetObjectOptions{}
if startOffset >= 0 && length >= 0 {
if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
return s3ToObjectError(traceError(err), bucket, key)
return s3ToObjectError(errors.Trace(err), bucket, key)
}
}
object, _, err := l.Client.GetObject(bucket, key, opts)
if err != nil {
return s3ToObjectError(traceError(err), bucket, key)
return s3ToObjectError(errors.Trace(err), bucket, key)
}
defer object.Close()
if _, err := io.Copy(writer, object); err != nil {
return s3ToObjectError(traceError(err), bucket, key)
return s3ToObjectError(errors.Trace(err), bucket, key)
}
return nil
}
@ -408,7 +409,7 @@ func fromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo {
func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
oi, err := l.Client.StatObject(bucket, object, minio.StatObjectOptions{})
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return ObjectInfo{}, s3ToObjectError(errors.Trace(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
@ -418,7 +419,7 @@ func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
func (l *s3Objects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5(), data.SHA256(), toMinioClientMetadata(metadata))
if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(errors.Trace(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
@ -432,7 +433,7 @@ func (l *s3Objects) CopyObject(srcBucket string, srcObject string, dstBucket str
// So preserve it by adding "REPLACE" directive to save all the metadata set by CopyObject API.
metadata["x-amz-metadata-directive"] = "REPLACE"
if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, metadata); err != nil {
return objInfo, s3ToObjectError(traceError(err), srcBucket, srcObject)
return objInfo, s3ToObjectError(errors.Trace(err), srcBucket, srcObject)
}
return l.GetObjectInfo(dstBucket, dstObject)
}
@ -441,7 +442,7 @@ func (l *s3Objects) CopyObject(srcBucket string, srcObject string, dstBucket str
func (l *s3Objects) DeleteObject(bucket string, object string) error {
err := l.Client.RemoveObject(bucket, object)
if err != nil {
return s3ToObjectError(traceError(err), bucket, object)
return s3ToObjectError(errors.Trace(err), bucket, object)
}
return nil
@ -519,7 +520,7 @@ func (l *s3Objects) NewMultipartUpload(bucket string, object string, metadata ma
opts := minio.PutObjectOptions{UserMetadata: metadata}
uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts)
if err != nil {
return uploadID, s3ToObjectError(traceError(err), bucket, object)
return uploadID, s3ToObjectError(errors.Trace(err), bucket, object)
}
return uploadID, nil
}
@ -538,7 +539,7 @@ func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5(), data.SHA256())
if err != nil {
return pi, s3ToObjectError(traceError(err), bucket, object)
return pi, s3ToObjectError(errors.Trace(err), bucket, object)
}
return fromMinioClientObjectPart(info), nil
@ -582,7 +583,7 @@ func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID strin
// AbortMultipartUpload aborts an ongoing multipart upload
func (l *s3Objects) AbortMultipartUpload(bucket string, object string, uploadID string) error {
err := l.Client.AbortMultipartUpload(bucket, object, uploadID)
return s3ToObjectError(traceError(err), bucket, object)
return s3ToObjectError(errors.Trace(err), bucket, object)
}
// toMinioClientCompletePart converts CompletePart to minio CompletePart
@ -606,7 +607,7 @@ func toMinioClientCompleteParts(parts []CompletePart) []minio.CompletePart {
func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, e error) {
err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, toMinioClientCompleteParts(uploadedParts))
if err != nil {
return oi, s3ToObjectError(traceError(err), bucket, object)
return oi, s3ToObjectError(errors.Trace(err), bucket, object)
}
return l.GetObjectInfo(bucket, object)
@ -615,7 +616,7 @@ func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, upload
// SetBucketPolicies sets policy on bucket
func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
if err := l.Client.PutBucketPolicy(bucket, policyInfo); err != nil {
return s3ToObjectError(traceError(err), bucket, "")
return s3ToObjectError(errors.Trace(err), bucket, "")
}
return nil
@ -625,7 +626,7 @@ func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAcc
func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
policyInfo, err := l.Client.GetBucketPolicy(bucket)
if err != nil {
return policy.BucketAccessPolicy{}, s3ToObjectError(traceError(err), bucket, "")
return policy.BucketAccessPolicy{}, s3ToObjectError(errors.Trace(err), bucket, "")
}
return policyInfo, nil
}
@ -633,7 +634,7 @@ func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy,
// DeleteBucketPolicies deletes all policies on bucket
func (l *s3Objects) DeleteBucketPolicies(bucket string) error {
if err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{}); err != nil {
return s3ToObjectError(traceError(err), bucket, "")
return s3ToObjectError(errors.Trace(err), bucket, "")
}
return nil
}

View file

@ -21,6 +21,7 @@ import (
"testing"
minio "github.com/minio/minio-go"
errors2 "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -114,8 +115,8 @@ func TestS3ToObjectError(t *testing.T) {
for i, tc := range testCases {
actualErr := s3ToObjectError(tc.inputErr, tc.bucket, tc.object)
if e, ok := actualErr.(*Error); ok && e.e != tc.expectedErr {
t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, e.e)
if e, ok := actualErr.(*errors2.Error); ok && e.Cause != tc.expectedErr {
t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, e.Cause)
}
}
}

View file

@ -18,7 +18,6 @@ package cmd
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@ -31,6 +30,7 @@ import (
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -152,7 +152,7 @@ func (s SiaMethodNotSupported) Error() string {
func apiGet(addr, call, apiPassword string) (*http.Response, error) {
req, err := http.NewRequest("GET", "http://"+addr+call, nil)
if err != nil {
return nil, traceError(err)
return nil, errors.Trace(err)
}
req.Header.Set("User-Agent", "Sia-Agent")
if apiPassword != "" {
@ -160,7 +160,7 @@ func apiGet(addr, call, apiPassword string) (*http.Response, error) {
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, traceError(err)
return nil, errors.Trace(err)
}
if resp.StatusCode == http.StatusNotFound {
resp.Body.Close()
@ -225,7 +225,7 @@ func list(addr string, apiPassword string, obj *renterFiles) error {
defer resp.Body.Close()
if resp.StatusCode == http.StatusNoContent {
return errors.New("Expecting a response, but API returned status code 204 No Content")
return fmt.Errorf("Expecting a response, but API returned %s", resp.Status)
}
return json.NewDecoder(resp.Body).Decode(obj)
@ -369,7 +369,7 @@ func (s *siaObjects) ListObjects(bucket string, prefix string, marker string, de
func (s *siaObjects) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error {
if !isValidObjectName(object) {
return traceError(ObjectNameInvalid{bucket, object})
return errors.Trace(ObjectNameInvalid{bucket, object})
}
dstFile := pathJoin(s.TempDir, mustGetUUID())
@ -398,7 +398,7 @@ func (s *siaObjects) GetObject(bucket string, object string, startOffset int64,
// Reply with InvalidRange if the input offset and length fall out of range.
if startOffset > size || startOffset+length > size {
return traceError(InvalidRange{startOffset, length, size})
return errors.Trace(InvalidRange{startOffset, length, size})
}
// Allocate a staging buffer.
@ -430,14 +430,14 @@ func (s *siaObjects) GetObjectInfo(bucket string, object string) (objInfo Object
}
}
return objInfo, traceError(ObjectNotFound{bucket, object})
return objInfo, errors.Trace(ObjectNotFound{bucket, object})
}
// PutObject creates a new object with the incoming data.
func (s *siaObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
// Check the object's name first
if !isValidObjectName(object) {
return objInfo, traceError(ObjectNameInvalid{bucket, object})
return objInfo, errors.Trace(ObjectNameInvalid{bucket, object})
}
bufSize := int64(readSizeV1)

View file

@ -20,6 +20,7 @@ import (
"io"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -27,119 +28,120 @@ type gatewayUnsupported struct{}
// ListMultipartUploads lists all multipart uploads.
func (a gatewayUnsupported) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) {
return lmi, traceError(NotImplemented{})
return lmi, errors.Trace(NotImplemented{})
}
// NewMultipartUpload uploads an object in multiple parts
func (a gatewayUnsupported) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) {
return "", traceError(NotImplemented{})
}
// CopyObjectPart copy part of object to other bucket and object
func (a gatewayUnsupported) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string, uploadID string, partID int, startOffset int64, length int64) (pi PartInfo, err error) {
return pi, traceError(NotImplemented{})
return "", errors.Trace(NotImplemented{})
}
// PutObjectPart puts a part of object in bucket
func (a gatewayUnsupported) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, err error) {
return pi, traceError(NotImplemented{})
return pi, errors.Trace(NotImplemented{})
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (a gatewayUnsupported) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) {
return lpi, traceError(NotImplemented{})
return lpi, errors.Trace(NotImplemented{})
}
// AbortMultipartUpload aborts an ongoing multipart upload
func (a gatewayUnsupported) AbortMultipartUpload(bucket string, object string, uploadID string) error {
return traceError(NotImplemented{})
return errors.Trace(NotImplemented{})
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (a gatewayUnsupported) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, err error) {
return oi, traceError(NotImplemented{})
return oi, errors.Trace(NotImplemented{})
}
// SetBucketPolicies sets policy on bucket
func (a gatewayUnsupported) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
return traceError(NotImplemented{})
return errors.Trace(NotImplemented{})
}
// GetBucketPolicies will get policy on bucket
func (a gatewayUnsupported) GetBucketPolicies(bucket string) (bal policy.BucketAccessPolicy, err error) {
return bal, traceError(NotImplemented{})
return bal, errors.Trace(NotImplemented{})
}
// DeleteBucketPolicies deletes all policies on bucket
func (a gatewayUnsupported) DeleteBucketPolicies(bucket string) error {
return traceError(NotImplemented{})
return errors.Trace(NotImplemented{})
}
// CopyObjectPart - Not implemented.
func (a gatewayUnsupported) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string,
partID int, startOffset int64, length int64) (info PartInfo, err error) {
return info, errors.Trace(NotImplemented{})
}
// HealBucket - Not relevant.
func (a gatewayUnsupported) HealBucket(bucket string) error {
return traceError(NotImplemented{})
return errors.Trace(NotImplemented{})
}
// ListBucketsHeal - Not relevant.
func (a gatewayUnsupported) ListBucketsHeal() (buckets []BucketInfo, err error) {
return nil, traceError(NotImplemented{})
return nil, errors.Trace(NotImplemented{})
}
// HealObject - Not relevant.
func (a gatewayUnsupported) HealObject(bucket, object string) (int, int, error) {
return 0, 0, traceError(NotImplemented{})
return 0, 0, errors.Trace(NotImplemented{})
}
func (a gatewayUnsupported) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
return result, traceError(NotImplemented{})
return result, errors.Trace(NotImplemented{})
}
// ListObjectsHeal - Not relevant.
func (a gatewayUnsupported) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, traceError(NotImplemented{})
return loi, errors.Trace(NotImplemented{})
}
// ListUploadsHeal - Not relevant.
func (a gatewayUnsupported) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
return lmi, traceError(NotImplemented{})
return lmi, errors.Trace(NotImplemented{})
}
// AnonListObjects - List objects anonymously
func (a gatewayUnsupported) AnonListObjects(bucket string, prefix string, marker string, delimiter string,
maxKeys int) (loi ListObjectsInfo, err error) {
return loi, traceError(NotImplemented{})
return loi, errors.Trace(NotImplemented{})
}
// AnonListObjectsV2 - List objects in V2 mode, anonymously
func (a gatewayUnsupported) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int,
fetchOwner bool, startAfter string) (loi ListObjectsV2Info, err error) {
return loi, traceError(NotImplemented{})
return loi, errors.Trace(NotImplemented{})
}
// AnonGetBucketInfo - Get bucket metadata anonymously.
func (a gatewayUnsupported) AnonGetBucketInfo(bucket string) (bi BucketInfo, err error) {
return bi, traceError(NotImplemented{})
return bi, errors.Trace(NotImplemented{})
}
// AnonPutObject creates a new object anonymously with the incoming data.
func (a gatewayUnsupported) AnonPutObject(bucket, object string, data *hash.Reader,
metadata map[string]string) (ObjectInfo, error) {
return ObjectInfo{}, traceError(NotImplemented{})
return ObjectInfo{}, errors.Trace(NotImplemented{})
}
// AnonGetObject downloads object anonymously.
func (a gatewayUnsupported) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) {
return traceError(NotImplemented{})
return errors.Trace(NotImplemented{})
}
// AnonGetObjectInfo returns stat information about an object anonymously.
func (a gatewayUnsupported) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
return objInfo, traceError(NotImplemented{})
return objInfo, errors.Trace(NotImplemented{})
}
// CopyObject copies a blob from the source bucket to the destination bucket.
func (a gatewayUnsupported) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string,
metadata map[string]string) (objInfo ObjectInfo, err error) {
return objInfo, traceError(NotImplemented{})
return objInfo, errors.Trace(NotImplemented{})
}
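All of the stubs above return errors.Trace(NotImplemented{}), so anything that wants to detect an unsupported call has to look at the cause rather than the wrapper, the same way toAPIErrorCode switches on errors.Cause. A minimal sketch of that detection, with a local NotImplemented type mirroring the cmd package's error of the same name:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/errors"
)

// NotImplemented mirrors the cmd package's error type of the same name.
type NotImplemented struct{}

func (NotImplemented) Error() string { return "Not Implemented" }

func listBucketsHeal() ([]string, error) {
	return nil, errors.Trace(NotImplemented{})
}

func main() {
	_, err := listBucketsHeal()
	// Detection always goes through the cause, never the wrapper.
	if _, ok := errors.Cause(err).(NotImplemented); ok {
		fmt.Println("backend does not support healing")
	}
}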

View file

@ -25,6 +25,7 @@ import (
"os"
"strings"
"github.com/minio/minio/pkg/errors"
httptracer "github.com/minio/minio/pkg/handlers"
)
@ -112,7 +113,7 @@ var userMetadataKeyPrefixes = []string{
// extractMetadataFromHeader extracts metadata from HTTP header.
func extractMetadataFromHeader(header http.Header) (map[string]string, error) {
if header == nil {
return nil, traceError(errInvalidArgument)
return nil, errors.Trace(errInvalidArgument)
}
metadata := make(map[string]string)
// Save standard supported headers.
@ -129,7 +130,7 @@ func extractMetadataFromHeader(header http.Header) (map[string]string, error) {
// Go through all other headers for any additional ones that need to be saved.
for key := range header {
if key != http.CanonicalHeaderKey(key) {
return nil, traceError(errInvalidArgument)
return nil, errors.Trace(errInvalidArgument)
}
for _, prefix := range userMetadataKeyPrefixes {
if strings.HasPrefix(key, prefix) {
@ -187,7 +188,7 @@ func validateFormFieldSize(formValues http.Header) error {
for k := range formValues {
// Check if value's field exceeds S3 limit
if int64(len(formValues.Get(k))) > maxFormFieldSize {
return traceError(errSizeUnexpected)
return errors.Trace(errSizeUnexpected)
}
}
@ -216,7 +217,7 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser,
canonicalFormName := http.CanonicalHeaderKey(k)
if canonicalFormName == "File" {
if len(v) == 0 {
return nil, "", 0, nil, traceError(errInvalidArgument)
return nil, "", 0, nil, errors.Trace(errInvalidArgument)
}
// Fetch fileHeader which has the uploaded file information
fileHeader := v[0]
@ -225,17 +226,17 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser,
// Open the uploaded part
filePart, err = fileHeader.Open()
if err != nil {
return nil, "", 0, nil, traceError(err)
return nil, "", 0, nil, errors.Trace(err)
}
// Compute file size
fileSize, err = filePart.(io.Seeker).Seek(0, 2)
if err != nil {
return nil, "", 0, nil, traceError(err)
return nil, "", 0, nil, errors.Trace(err)
}
// Reset Seek to the beginning
_, err = filePart.(io.Seeker).Seek(0, 0)
if err != nil {
return nil, "", 0, nil, traceError(err)
return nil, "", 0, nil, errors.Trace(err)
}
// File found and ready for reading
break
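extractPostPolicyFormValues computes the upload size by seeking to the end of the part and rewinding, without buffering it. The same trick as a small helper, using the named io.SeekEnd/io.SeekStart constants instead of the bare 2 and 0; fileSize is a hypothetical helper, not part of this change:

package cmd

import (
	"io"

	"github.com/minio/minio/pkg/errors"
)

// fileSize reports the size of an uploaded part by seeking to the end
// and rewinding, the same approach extractPostPolicyFormValues takes.
func fileSize(f io.Seeker) (int64, error) {
	size, err := f.Seek(0, io.SeekEnd)
	if err != nil {
		return 0, errors.Trace(err)
	}
	if _, err = f.Seek(0, io.SeekStart); err != nil {
		return 0, errors.Trace(err)
	}
	return size, nil
}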

View file

@ -25,6 +25,8 @@ import (
"reflect"
"strings"
"testing"
"github.com/minio/minio/pkg/errors"
)
// Tests validate bucket LocationConstraint.
@ -114,7 +116,7 @@ func TestValidateFormFieldSize(t *testing.T) {
for i, testCase := range testCases {
err := validateFormFieldSize(testCase.header)
if err != nil {
if errorCause(err).Error() != testCase.err.Error() {
if errors.Cause(err).Error() != testCase.err.Error() {
t.Errorf("Test %d: Expected error %s, got %s", i+1, testCase.err, err)
}
}
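The test assertions now compare through errors.Cause, since the traced wrapper would never compare equal to the expected error. A small helper in the same spirit (hypothetical, shown only to make the pattern explicit):

package cmd

import (
	"testing"

	"github.com/minio/minio/pkg/errors"
)

// assertCause compares causes only, so the stack attached by
// errors.Trace never makes two otherwise-equal errors compare unequal.
func assertCause(t *testing.T, got, want error) {
	t.Helper()
	if errors.Cause(got).Error() != want.Error() {
		t.Errorf("expected error %v, got %v", want, got)
	}
}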

View file

@ -19,6 +19,8 @@ package cmd
import (
"fmt"
"time"
"github.com/minio/minio/pkg/errors"
)
type statusType string
@ -116,23 +118,23 @@ func (n *nsLockMap) statusBlockedToRunning(param nsParam, lockSource, opsID stri
// Check whether the lock info entry for <volume, path> pair already exists.
_, ok := n.debugLockMap[param]
if !ok {
return traceError(LockInfoVolPathMissing{param.volume, param.path})
return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
}
// Check whether lock info entry for the given `opsID` exists.
lockInfo, ok := n.debugLockMap[param].lockInfo[opsID]
if !ok {
return traceError(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
return errors.Trace(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
}
// Check whether the lockSource is the same.
if lockInfo.lockSource != lockSource {
return traceError(LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
return errors.Trace(LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
}
// Status of the lock should be set to "Blocked".
if lockInfo.status != blockedStatus {
return traceError(LockInfoStateNotBlocked{param.volume, param.path, opsID})
return errors.Trace(LockInfoStateNotBlocked{param.volume, param.path, opsID})
}
// Change lock status to running and update the time.
n.debugLockMap[param].lockInfo[opsID] = newDebugLockInfo(lockSource, runningStatus, readLock)
@ -181,23 +183,23 @@ func (n *nsLockMap) statusNoneToBlocked(param nsParam, lockSource, opsID string,
func (n *nsLockMap) statusBlockedToNone(param nsParam, lockSource, opsID string, readLock bool) error {
_, ok := n.debugLockMap[param]
if !ok {
return traceError(LockInfoVolPathMissing{param.volume, param.path})
return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
}
// Check whether lock info entry for the given `opsID` exists.
lockInfo, ok := n.debugLockMap[param].lockInfo[opsID]
if !ok {
return traceError(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
return errors.Trace(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
}
// Check whether the lockSource is the same.
if lockInfo.lockSource != lockSource {
return traceError(LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
return errors.Trace(LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
}
// Status of the lock should be set to "Blocked".
if lockInfo.status != blockedStatus {
return traceError(LockInfoStateNotBlocked{param.volume, param.path, opsID})
return errors.Trace(LockInfoStateNotBlocked{param.volume, param.path, opsID})
}
// Clear the status by removing the entry for the given `opsID`.
delete(n.debugLockMap[param].lockInfo, opsID)
@ -214,7 +216,7 @@ func (n *nsLockMap) statusBlockedToNone(param nsParam, lockSource, opsID string,
func (n *nsLockMap) deleteLockInfoEntryForVolumePath(param nsParam) error {
// delete the lock info for the given <volume, path> pair.
if _, found := n.debugLockMap[param]; !found {
return traceError(LockInfoVolPathMissing{param.volume, param.path})
return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
}
// The following stats update is relevant only in case of a
@ -238,14 +240,14 @@ func (n *nsLockMap) deleteLockInfoEntryForOps(param nsParam, opsID string) error
// delete the lock info for the given operation.
infoMap, found := n.debugLockMap[param]
if !found {
return traceError(LockInfoVolPathMissing{param.volume, param.path})
return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
}
// The operation has finished holding the lock on the resource; remove
// the entry for the operation with the given operation ID.
opsIDLock, foundInfo := infoMap.lockInfo[opsID]
if !foundInfo {
// Unlock request with invalid operation ID not accepted.
return traceError(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
return errors.Trace(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
}
// Update global and (volume, path) lock status.
granted := opsIDLock.status == runningStatus

View file

@ -16,7 +16,11 @@
package cmd
import "testing"
import (
"testing"
"github.com/minio/minio/pkg/errors"
)
type lockStateCase struct {
volume string
@ -278,7 +282,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
testCases[0].opsID, testCases[0].readLock)
expectedErr := LockInfoVolPathMissing{testCases[0].volume, testCases[0].path}
if errorCause(actualErr) != expectedErr {
if errors.Cause(actualErr) != expectedErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedErr, actualErr)
}
@ -298,7 +302,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
testCases[0].opsID, testCases[0].readLock)
expectedOpsErr := LockInfoOpsIDNotFound{testCases[0].volume, testCases[0].path, testCases[0].opsID}
if errorCause(actualErr) != expectedOpsErr {
if errors.Cause(actualErr) != expectedOpsErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedOpsErr, actualErr)
}
@ -321,7 +325,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
testCases[0].opsID, testCases[0].readLock)
expectedBlockErr := LockInfoStateNotBlocked{testCases[0].volume, testCases[0].path, testCases[0].opsID}
if errorCause(actualErr) != expectedBlockErr {
if errors.Cause(actualErr) != expectedBlockErr {
t.Fatalf("Errors mismatch: Expected: \"%s\", got: \"%s\"", expectedBlockErr, actualErr)
}
@ -342,7 +346,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
}
// invoking the method under test.
actualErr = globalNSMutex.statusBlockedToRunning(param, testCase.lockSource, testCase.opsID, testCase.readLock)
if errorCause(actualErr) != testCase.expectedErr {
if errors.Cause(actualErr) != testCase.expectedErr {
t.Fatalf("Test %d: Errors mismatch: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, actualErr)
}
// In case of no error proceed with validating the lock state information.
@ -461,7 +465,7 @@ func TestNsLockMapStatusNoneToBlocked(t *testing.T) {
testCases[0].opsID, testCases[0].readLock)
expectedErr := LockInfoVolPathMissing{testCases[0].volume, testCases[0].path}
if errorCause(actualErr) != expectedErr {
if errors.Cause(actualErr) != expectedErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedErr, actualErr)
}
@ -505,7 +509,7 @@ func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {
actualErr := globalNSMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
expectedErr := LockInfoVolPathMissing{testCases[0].volume, testCases[0].path}
if errorCause(actualErr) != expectedErr {
if errors.Cause(actualErr) != expectedErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedErr, actualErr)
}
@ -524,7 +528,7 @@ func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {
actualErr = globalNSMutex.deleteLockInfoEntryForOps(param, "non-existent-OpsID")
expectedOpsIDErr := LockInfoOpsIDNotFound{param.volume, param.path, "non-existent-OpsID"}
if errorCause(actualErr) != expectedOpsIDErr {
if errors.Cause(actualErr) != expectedOpsIDErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedOpsIDErr, actualErr)
}
// case - 4.
@ -588,7 +592,7 @@ func TestNsLockMapDeleteLockInfoEntryForVolumePath(t *testing.T) {
param := nsParam{testCases[0].volume, testCases[0].path}
actualErr := globalNSMutex.deleteLockInfoEntryForVolumePath(param)
expectedNilErr := LockInfoVolPathMissing{param.volume, param.path}
if errorCause(actualErr) != expectedNilErr {
if errors.Cause(actualErr) != expectedNilErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
}

View file

@ -25,6 +25,7 @@ import (
router "github.com/gorilla/mux"
"github.com/minio/dsync"
"github.com/minio/minio/pkg/errors"
)
const (
@ -100,7 +101,7 @@ func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error
for _, lockServer := range lockServers {
lockRPCServer := newRPCServer()
if err := lockRPCServer.RegisterName(lockServiceName, lockServer); err != nil {
return traceError(err)
return errors.Trace(err)
}
lockRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()
lockRouter.Path(path.Join(lockServicePath, lockServer.ll.serviceEndpoint)).Handler(lockRPCServer)

View file

@ -17,7 +17,6 @@
package cmd
import (
"errors"
"fmt"
"io/ioutil"
"path"
@ -27,6 +26,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/minio/mc/pkg/console"
"github.com/minio/minio/pkg/errors"
)
var log = NewLogger()
@ -42,7 +42,7 @@ func (l *loggers) Validate() (err error) {
if l != nil {
fileLogger := l.GetFile()
if fileLogger.Enable && fileLogger.Filename == "" {
err = errors.New("Missing filename for enabled file logger")
err = fmt.Errorf("Missing filename for enabled file logger")
}
}
@ -186,7 +186,7 @@ func getSource() string {
func logIf(level logrus.Level, source string, err error, msg string, data ...interface{}) {
isErrIgnored := func(err error) (ok bool) {
err = errorCause(err)
err = errors.Cause(err)
switch err.(type) {
case BucketNotFound, BucketNotEmpty, BucketExists:
ok = true
@ -207,8 +207,8 @@ func logIf(level logrus.Level, source string, err error, msg string, data ...int
"cause": err.Error(),
}
if terr, ok := err.(*Error); ok {
fields["stack"] = strings.Join(terr.Trace(), " ")
if terr, ok := err.(*errors.Error); ok {
fields["stack"] = strings.Join(terr.Stack(), " ")
}
switch level {
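
In the hunk above, the type assertion moves from the old package-local *Error to the exported *errors.Error, and the accessor for captured frames is renamed from Trace() to Stack(). A sketch of the resulting logging helper, assuming the imports this file now uses (logFields is a hypothetical name, not part of the commit):

    package cmd

    import (
        "strings"

        "github.com/Sirupsen/logrus"
        "github.com/minio/minio/pkg/errors"
    )

    // logFields builds the structured fields logIf attaches: only a
    // traced *errors.Error carries frames; a plain error logs its
    // cause alone.
    func logFields(err error) logrus.Fields {
        fields := logrus.Fields{"cause": err.Error()}
        if terr, ok := err.(*errors.Error); ok {
            fields["stack"] = strings.Join(terr.Stack(), " ")
        }
        return fields
    }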

View file

@ -20,6 +20,7 @@ import (
"sync"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/errors"
)
const (
@ -107,7 +108,7 @@ func houseKeeping(storageDisks []StorageAPI) error {
// Cleanup all temp entries upon start.
err := cleanupDir(disk, minioMetaTmpBucket, "")
if err != nil {
if !isErrIgnored(errorCause(err), errDiskNotFound, errVolumeNotFound, errFileNotFound) {
if !errors.IsErrIgnored(errors.Cause(err), errDiskNotFound, errVolumeNotFound, errFileNotFound) {
errs[index] = err
}
}
@ -164,21 +165,21 @@ func initMetaVolume(storageDisks []StorageAPI) error {
// Attempt to create `.minio.sys`.
err := disk.MakeVol(minioMetaBucket)
if err != nil {
if !isErrIgnored(err, initMetaVolIgnoredErrs...) {
if !errors.IsErrIgnored(err, initMetaVolIgnoredErrs...) {
errs[index] = err
return
}
}
err = disk.MakeVol(minioMetaTmpBucket)
if err != nil {
if !isErrIgnored(err, initMetaVolIgnoredErrs...) {
if !errors.IsErrIgnored(err, initMetaVolIgnoredErrs...) {
errs[index] = err
return
}
}
err = disk.MakeVol(minioMetaMultipartBucket)
if err != nil {
if !isErrIgnored(err, initMetaVolIgnoredErrs...) {
if !errors.IsErrIgnored(err, initMetaVolIgnoredErrs...) {
errs[index] = err
return
}
@ -208,7 +209,7 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error {
delFunc = func(entryPath string) error {
if !hasSuffix(entryPath, slashSeparator) {
// Delete the file entry.
return traceError(storage.DeleteFile(volume, entryPath))
return errors.Trace(storage.DeleteFile(volume, entryPath))
}
// If it's a directory, list and call delFunc() for each entry.
@ -217,7 +218,7 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error {
if err == errFileNotFound {
return nil
} else if err != nil { // For any other errors fail.
return traceError(err)
return errors.Trace(err)
} // else on success..
// Recurse and delete all other entries.
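
errors.IsErrIgnored replaces the old package-local isErrIgnored with the same call shape. Continuing the pkg/errors sketch from earlier, it is presumably a simple membership test on the unwrapped cause (exact nil handling is an assumption); errors.IsErr, which appears in a later healing hunk, would be the underlying helper:

    package errors

    // IsErr reports whether err (unwrapped, if traced) equals any of errs.
    func IsErr(err error, errs ...error) bool {
        if e, ok := err.(*Error); ok { // unwrap a traced error first
            err = e.Cause
        }
        for _, exactErr := range errs {
            if err == exactErr {
                return true
            }
        }
        return false
    }

    // IsErrIgnored reports whether err belongs to the ignored list.
    func IsErrIgnored(err error, ignoredErrs ...error) bool {
        return IsErr(err, ignoredErrs...)
    }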

View file

@ -19,6 +19,8 @@ package cmd
import (
"sync"
"testing"
"github.com/minio/minio/pkg/errors"
)
func TestHouseKeeping(t *testing.T) {
@ -90,7 +92,7 @@ func TestHouseKeeping(t *testing.T) {
{nilDiskStorage, nil},
}
for i, test := range testCases {
actualErr := errorCause(houseKeeping(test.store))
actualErr := errors.Cause(houseKeeping(test.store))
if actualErr != test.expectedErr {
t.Errorf("Test %d - actual error is %#v, expected error was %#v",
i+1, actualErr, test.expectedErr)

View file

@ -19,15 +19,17 @@ package cmd
import (
"fmt"
"io"
"github.com/minio/minio/pkg/errors"
)
// Converts underlying storage error. Convenience function written to
// handle all cases where we have known types of errors returned by
// the underlying storage layer.
func toObjectErr(err error, params ...string) error {
e, ok := err.(*Error)
e, ok := err.(*errors.Error)
if ok {
err = e.e
err = e.Cause
}
switch err {
@ -95,7 +97,7 @@ func toObjectErr(err error, params ...string) error {
err = IncompleteBody{}
}
if ok {
e.e = err
e.Cause = err
return e
}
return err
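
Note the unwrap-mutate-rewrap pattern above: toObjectErr swaps the exported Cause field in place (the old code mutated the unexported e.e), so the stack captured at the storage layer survives the translation into an object-layer error. A hedged fragment, assuming the elided middle of the switch still maps errFileNotFound to ObjectNotFound and using hypothetical bucket/object names:

    // err is traced deep in the storage layer.
    err := errors.Trace(errFileNotFound)
    // toObjectErr rewrites the cause but keeps the original frames.
    err = toObjectErr(err, "mybucket", "myobject")
    if _, ok := errors.Cause(err).(ObjectNotFound); ok {
        // Handlers branch on the translated cause, while log sites
        // can still render the stack captured at the Trace call.
    }
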
@ -377,7 +379,7 @@ func (e UnsupportedMetadata) Error() string {
// isErrIncompleteBody - Check if error type is IncompleteBody.
func isErrIncompleteBody(err error) bool {
err = errorCause(err)
err = errors.Cause(err)
switch err.(type) {
case IncompleteBody:
return true
@ -387,7 +389,7 @@ func isErrIncompleteBody(err error) bool {
// isErrBucketPolicyNotFound - Check if error type is BucketPolicyNotFound.
func isErrBucketPolicyNotFound(err error) bool {
err = errorCause(err)
err = errors.Cause(err)
switch err.(type) {
case BucketPolicyNotFound:
return true
@ -397,7 +399,7 @@ func isErrBucketPolicyNotFound(err error) bool {
// isErrObjectNotFound - Check if error type is ObjectNotFound.
func isErrObjectNotFound(err error) bool {
err = errorCause(err)
err = errors.Cause(err)
switch err.(type) {
case ObjectNotFound:
return true

View file

@ -16,7 +16,10 @@
package cmd
import "github.com/skyrings/skyring-common/tools/uuid"
import (
"github.com/minio/minio/pkg/errors"
"github.com/skyrings/skyring-common/tools/uuid"
)
// Checks on GetObject arguments, bucket and object.
func checkGetObjArgs(bucket, object string) error {
@ -32,15 +35,15 @@ func checkDelObjArgs(bucket, object string) error {
func checkBucketAndObjectNames(bucket, object string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket})
return errors.Trace(BucketNameInvalid{Bucket: bucket})
}
// Verify if object is valid.
if !IsValidObjectName(object) {
// Objects with "/" are invalid, verify to return a different error.
if hasSuffix(object, slashSeparator) || hasPrefix(object, slashSeparator) {
return traceError(ObjectNotFound{Bucket: bucket, Object: object})
return errors.Trace(ObjectNotFound{Bucket: bucket, Object: object})
}
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
return errors.Trace(ObjectNameInvalid{Bucket: bucket, Object: object})
}
return nil
}
@ -53,24 +56,24 @@ func checkListObjsArgs(bucket, prefix, marker, delimiter string, obj ObjectLayer
// happen before we return an error for invalid object name.
// FIXME: should be moved to handler layer.
if err := checkBucketExist(bucket, obj); err != nil {
return traceError(err)
return errors.Trace(err)
}
// Validates object prefix validity after bucket exists.
if !IsValidObjectPrefix(prefix) {
return traceError(ObjectNameInvalid{
return errors.Trace(ObjectNameInvalid{
Bucket: bucket,
Object: prefix,
})
}
// Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator {
return traceError(UnsupportedDelimiter{
return errors.Trace(UnsupportedDelimiter{
Delimiter: delimiter,
})
}
// Verify if marker has prefix.
if marker != "" && !hasPrefix(marker, prefix) {
return traceError(InvalidMarkerPrefixCombination{
return errors.Trace(InvalidMarkerPrefixCombination{
Marker: marker,
Prefix: prefix,
})
@ -85,17 +88,17 @@ func checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter
}
if uploadIDMarker != "" {
if hasSuffix(keyMarker, slashSeparator) {
return traceError(InvalidUploadIDKeyCombination{
return errors.Trace(InvalidUploadIDKeyCombination{
UploadIDMarker: uploadIDMarker,
KeyMarker: keyMarker,
})
}
id, err := uuid.Parse(uploadIDMarker)
if err != nil {
return traceError(err)
return errors.Trace(err)
}
if id.IsZero() {
return traceError(MalformedUploadID{
return errors.Trace(MalformedUploadID{
UploadID: uploadIDMarker,
})
}
@ -136,11 +139,11 @@ func checkPutObjectArgs(bucket, object string, obj ObjectLayer) error {
// happen before we return an error for invalid object name.
// FIXME: should be moved to handler layer.
if err := checkBucketExist(bucket, obj); err != nil {
return traceError(err)
return errors.Trace(err)
}
// Validates object name validity after bucket exists.
if !IsValidObjectName(object) {
return traceError(ObjectNameInvalid{
return errors.Trace(ObjectNameInvalid{
Bucket: bucket,
Object: object,
})
@ -155,7 +158,7 @@ func checkBucketExist(bucket string, obj ObjectLayer) error {
}
_, err := obj.GetBucketInfo(bucket)
if err != nil {
return errorCause(err)
return errors.Cause(err)
}
return nil
}

View file

@ -24,6 +24,7 @@ import (
"sort"
"time"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/lock"
)
@ -80,14 +81,14 @@ func (u *uploadsV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
var uplBytes []byte
uplBytes, err = json.Marshal(u)
if err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
if err = lk.Truncate(0); err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
_, err = lk.Write(uplBytes)
if err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
return int64(len(uplBytes)), nil
}
@ -96,18 +97,18 @@ func (u *uploadsV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
var uploadIDBytes []byte
fi, err := lk.Stat()
if err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
uploadIDBytes, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
if err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
if len(uploadIDBytes) == 0 {
return 0, traceError(io.EOF)
return 0, errors.Trace(io.EOF)
}
// Decode `uploads.json`.
if err = json.Unmarshal(uploadIDBytes, u); err != nil {
return 0, traceError(err)
return 0, errors.Trace(err)
}
return int64(len(uploadIDBytes)), nil
}
@ -118,12 +119,12 @@ func readUploadsJSON(bucket, object string, disk StorageAPI) (uploadIDs uploadsV
// Reads entire `uploads.json`.
buf, err := disk.ReadAll(minioMetaMultipartBucket, uploadJSONPath)
if err != nil {
return uploadsV1{}, traceError(err)
return uploadsV1{}, errors.Trace(err)
}
// Decode `uploads.json`.
if err = json.Unmarshal(buf, &uploadIDs); err != nil {
return uploadsV1{}, traceError(err)
return uploadsV1{}, errors.Trace(err)
}
// Success.
@ -142,20 +143,20 @@ func writeUploadJSON(u *uploadsV1, uploadsPath, tmpPath string, disk StorageAPI)
// Serialize to prepare to write to disk.
uplBytes, wErr := json.Marshal(&u)
if wErr != nil {
return traceError(wErr)
return errors.Trace(wErr)
}
// Write `uploads.json` to disk. First to tmp location and then rename.
if wErr = disk.AppendFile(minioMetaTmpBucket, tmpPath, uplBytes); wErr != nil {
return traceError(wErr)
return errors.Trace(wErr)
}
wErr = disk.RenameFile(minioMetaTmpBucket, tmpPath, minioMetaMultipartBucket, uploadsPath)
if wErr != nil {
if dErr := disk.DeleteFile(minioMetaTmpBucket, tmpPath); dErr != nil {
// we return the most recent error.
return traceError(dErr)
return errors.Trace(dErr)
}
return traceError(wErr)
return errors.Trace(wErr)
}
return nil
}

View file

@ -24,6 +24,7 @@ import (
"testing"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -122,7 +123,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
if testCase.expectedErrType == nil && err != nil {
t.Errorf("Test %d, unexpected err is received: %v, expected:%v\n", i+1, err, testCase.expectedErrType)
}
if testCase.expectedErrType != nil && !isSameType(errorCause(err), testCase.expectedErrType) {
if testCase.expectedErrType != nil && !isSameType(errors.Cause(err), testCase.expectedErrType) {
t.Errorf("Test %d, unexpected err is received: %v, expected:%v\n", i+1, err, testCase.expectedErrType)
}
}
@ -151,7 +152,7 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE
}
err = obj.AbortMultipartUpload(bucket, object, "abc")
err = errorCause(err)
err = errors.Cause(err)
switch err.(type) {
case InvalidUploadID:
default:

View file

@ -26,6 +26,7 @@ import (
"testing"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -162,7 +163,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta)
actualErr = errorCause(actualErr)
actualErr = errors.Cause(actualErr)
if actualErr != nil && testCase.expectedError == nil {
t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error())
}
@ -236,7 +237,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
sha256sum := ""
for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errorCause(actualErr)
actualErr = errors.Cause(actualErr)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
}
@ -286,7 +287,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
}
_, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errorCause(actualErr)
actualErr = errors.Cause(actualErr)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error())
}

View file

@ -24,6 +24,7 @@ import (
"strings"
"unicode/utf8"
"github.com/minio/minio/pkg/errors"
"github.com/skyrings/skyring-common/tools/uuid"
)
@ -178,7 +179,7 @@ func getCompleteMultipartMD5(parts []CompletePart) (string, error) {
for _, part := range parts {
md5Bytes, err := hex.DecodeString(part.ETag)
if err != nil {
return "", traceError(err)
return "", errors.Trace(err)
}
finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
}

View file

@ -28,6 +28,7 @@ import (
"strconv"
mux "github.com/gorilla/mux"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/ioutil"
)
@ -1065,7 +1066,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
objInfo, err := objectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts)
if err != nil {
errorIf(err, "Unable to complete multipart upload.")
err = errorCause(err)
err = errors.Cause(err)
switch oErr := err.(type) {
case PartTooSmall:
// Write part too small error.

View file

@ -24,6 +24,7 @@ import (
"testing"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/errors"
)
// Return pointer to testOneByteReadEOF{}
@ -754,7 +755,7 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string,
for i, testCase := range testCases {
_, expectedErr := obj.GetObjectInfo(bucketName, testCase.dir)
if expectedErr != nil {
expectedErr = errorCause(expectedErr)
expectedErr = errors.Cause(expectedErr)
if expectedErr.Error() != testCase.err.Error() {
t.Errorf("Test %d, %s: Expected error %s, got %s", i+1, instanceType, testCase.err, expectedErr)
}

View file

@ -17,11 +17,11 @@
package cmd
import (
"errors"
"fmt"
"time"
"github.com/minio/mc/pkg/console"
"github.com/minio/minio/pkg/errors"
)
/*
@ -140,7 +140,7 @@ func prepForInitXL(firstDisk bool, sErrs []error, diskCount int) InitActions {
// Count errors by error value.
errMap := make(map[error]int)
for _, err := range sErrs {
errMap[errorCause(err)]++
errMap[errors.Cause(err)]++
}
// Validates and converts specific config errors into WaitForConfig.
@ -296,7 +296,7 @@ func retryFormattingXLDisks(firstDisk bool, endpoints EndpointList, storageDisks
console.Printf("Initializing data volume for first time. Waiting for first server to come online (elapsed %s)\n", getElapsedTime())
}
case <-globalServiceDoneCh:
return errors.New("Initializing data volumes gracefully stopped")
return fmt.Errorf("Initializing data volumes gracefully stopped")
}
}
}

View file

@ -18,6 +18,7 @@ package cmd
import (
router "github.com/gorilla/mux"
"github.com/minio/minio/pkg/errors"
)
const (
@ -40,7 +41,7 @@ func registerS3PeerRPCRouter(mux *router.Router) error {
s3PeerRPCServer := newRPCServer()
err := s3PeerRPCServer.RegisterName("S3", s3PeerHandlers)
if err != nil {
return traceError(err)
return errors.Trace(err)
}
s3PeerRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()

View file

@ -25,6 +25,7 @@ import (
"github.com/minio/cli"
"github.com/minio/dsync"
"github.com/minio/minio/pkg/errors"
miniohttp "github.com/minio/minio/pkg/http"
)
@ -150,7 +151,7 @@ func serverMain(ctx *cli.Context) {
enableLoggers()
// Init the error tracing module.
initError()
errors.Init(GOPATH, "github.com/minio/minio")
// Check and load SSL certificates.
var err error
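
errors.Init(GOPATH, ...) replaces the old initError() call. The implementation is not shown in this patch; presumably it records trim prefixes so rendered stack frames print repo-relative paths rather than absolute GOPATH paths, roughly:

    package errors

    import (
        "path/filepath"
        "strings"
    )

    var trimPrefix string

    // Init records the prefix to strip from frame file paths (assumption).
    func Init(gopath, rootPkg string) {
        trimPrefix = filepath.Join(gopath, "src", rootPkg) + string(filepath.Separator)
    }

    // trimFrame would turn $GOPATH/src/github.com/minio/minio/cmd/server-main.go:154
    // into cmd/server-main.go:154 in logged stacks (hypothetical helper).
    func trimFrame(file string) string {
        return strings.TrimPrefix(file, trimPrefix)
    }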

View file

@ -99,3 +99,12 @@ func (h hashMismatchError) Error() string {
"Bitrot verification mismatch - expected %v, received %v",
h.expected, h.computed)
}
// Collection of basic errors.
var baseErrs = []error{
errDiskNotFound,
errFaultyDisk,
errFaultyRemoteDisk,
}
var baseIgnoredErrs = baseErrs
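
These new base lists feed the errors.IsErrIgnored call sites elsewhere in the commit. Per-subsystem lists are built by appending to them, as the bucket variants in a later hunk do (errDiskAccessDenied here is illustrative, not shown in these hunks):

    // Sketch of how subsystem-specific ignored-error lists extend the base:
    var bucketOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied)
    var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)

    // A retry loop then skips transient disk-level failures:
    //   if errors.IsErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
    //       continue
    //   }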

View file

@ -23,6 +23,7 @@ import (
router "github.com/gorilla/mux"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/errors"
)
// Storage server implements rpc primitives to facilitate exporting a
@ -223,7 +224,7 @@ func registerStorageRPCRouters(mux *router.Router, endpoints EndpointList) error
// Initialize storage rpc servers for every disk that is hosted on this node.
storageRPCs, err := newStorageRPCServer(endpoints)
if err != nil {
return traceError(err)
return errors.Trace(err)
}
// Create a unique route for each disk exported from this node.
@ -231,7 +232,7 @@ func registerStorageRPCRouters(mux *router.Router, endpoints EndpointList) error
storageRPCServer := newRPCServer()
err = storageRPCServer.RegisterName("Storage", stServer)
if err != nil {
return traceError(err)
return errors.Trace(err)
}
// Add minio storage routes.
storageRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()

View file

@ -21,6 +21,7 @@ import (
"testing"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/errors"
)
type testStorageRPCServer struct {
@ -68,7 +69,7 @@ func createTestStorageServer(t *testing.T) *testStorageRPCServer {
}
func errorIfInvalidToken(t *testing.T, err error) {
realErr := errorCause(err)
realErr := errors.Cause(err)
if realErr != errInvalidToken {
t.Errorf("Expected to fail with %s but failed with %s", errInvalidToken, realErr)
}

View file

@ -19,6 +19,8 @@ package cmd
import (
"sort"
"strings"
"github.com/minio/minio/pkg/errors"
)
// Tree walk result carries results of tree walking.
@ -141,7 +143,7 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
if err != nil {
select {
case <-endWalkCh:
return traceError(errWalkAbort)
return errors.Trace(errWalkAbort)
case resultCh <- treeWalkResult{err: err}:
return err
}
@ -203,7 +205,7 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
isEOF := ((i == len(entries)-1) && isEnd)
select {
case <-endWalkCh:
return traceError(errWalkAbort)
return errors.Trace(errWalkAbort)
case resultCh <- treeWalkResult{entry: pathJoin(prefixDir, entry), end: isEOF}:
}
}

View file

@ -19,7 +19,6 @@ package cmd
import (
"archive/zip"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@ -37,6 +36,7 @@ import (
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/browser"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -433,7 +433,7 @@ func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *Se
// Since the error message may be too long to display
// in the browser, we tell the user to check the
// server logs.
return toJSONError(errors.New("unexpected error(s) occurred - please check minio server logs"))
return toJSONError(fmt.Errorf("unexpected error(s) occurred - please check minio server logs"))
}
// As we have updated access/secret key, generate new auth token.
@ -748,7 +748,7 @@ func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolic
var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName)
if err != nil {
_, ok := errorCause(err).(PolicyNotFound)
_, ok := errors.Cause(err).(PolicyNotFound)
if !ok {
return toJSONError(err, args.BucketName)
}
@ -790,7 +790,7 @@ func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllB
var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName)
if err != nil {
_, ok := errorCause(err).(PolicyNotFound)
_, ok := errors.Cause(err).(PolicyNotFound)
if !ok {
return toJSONError(err, args.BucketName)
}
@ -834,7 +834,7 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName)
if err != nil {
if _, ok := errorCause(err).(PolicyNotFound); !ok {
if _, ok := errors.Cause(err).(PolicyNotFound); !ok {
return toJSONError(err, args.BucketName)
}
policyInfo = policy.BucketAccessPolicy{Version: "2012-10-17"}
@ -878,7 +878,7 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
if apiErr.Code == "XMinioPolicyNesting" {
err = PolicyNesting{}
} else {
err = errors.New(apiErr.Description)
err = fmt.Errorf(apiErr.Description)
}
return toJSONError(err, args.BucketName)
}
@ -1004,7 +1004,7 @@ func toJSONError(err error, params ...string) (jerr *json2.Error) {
// toWebAPIError - convert into error into APIError.
func toWebAPIError(err error) APIError {
err = errorCause(err)
err = errors.Cause(err)
if err == errAuthentication {
return APIError{
Code: "AccessDenied",

View file

@ -19,6 +19,8 @@ package cmd
import (
"sort"
"sync"
"github.com/minio/minio/pkg/errors"
)
// list of all errors that can be ignored in a bucket operation.
@ -33,7 +35,7 @@ var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)
func (xl xlObjects) MakeBucketWithLocation(bucket, location string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket})
return errors.Trace(BucketNameInvalid{Bucket: bucket})
}
// Initialize sync waitgroup.
@ -45,7 +47,7 @@ func (xl xlObjects) MakeBucketWithLocation(bucket, location string) error {
// Make a volume entry on all underlying storage disks.
for index, disk := range xl.storageDisks {
if disk == nil {
dErrs[index] = traceError(errDiskNotFound)
dErrs[index] = errors.Trace(errDiskNotFound)
continue
}
wg.Add(1)
@ -54,7 +56,7 @@ func (xl xlObjects) MakeBucketWithLocation(bucket, location string) error {
defer wg.Done()
err := disk.MakeVol(bucket)
if err != nil {
dErrs[index] = traceError(err)
dErrs[index] = errors.Trace(err)
}
}(index, disk)
}
@ -63,7 +65,7 @@ func (xl xlObjects) MakeBucketWithLocation(bucket, location string) error {
wg.Wait()
err := reduceWriteQuorumErrs(dErrs, bucketOpIgnoredErrs, xl.writeQuorum)
if errorCause(err) == errXLWriteQuorum {
if errors.Cause(err) == errXLWriteQuorum {
// Purge successfully created buckets if we don't have writeQuorum.
undoMakeBucket(xl.storageDisks, bucket)
}
@ -127,9 +129,9 @@ func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err
}
return bucketInfo, nil
}
err = traceError(serr)
err = errors.Trace(serr)
// If for any reason the disk went offline, continue and pick the next one.
if isErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
if errors.IsErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
bucketErrs = append(bucketErrs, err)
continue
}
@ -187,9 +189,9 @@ func (xl xlObjects) listBuckets() (bucketsInfo []BucketInfo, err error) {
}
return bucketsInfo, nil
}
err = traceError(err)
err = errors.Trace(err)
// Ignore any disks not found.
if isErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
if errors.IsErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
continue
}
break
@ -222,7 +224,7 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
// Remove a volume entry on all underlying storage disks.
for index, disk := range xl.storageDisks {
if disk == nil {
dErrs[index] = traceError(errDiskNotFound)
dErrs[index] = errors.Trace(errDiskNotFound)
continue
}
wg.Add(1)
@ -232,13 +234,13 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
// Attempt to delete bucket.
err := disk.DeleteVol(bucket)
if err != nil {
dErrs[index] = traceError(err)
dErrs[index] = errors.Trace(err)
return
}
// Cleanup all the previously incomplete multiparts.
err = cleanupDir(disk, minioMetaMultipartBucket, bucket)
if err != nil {
if errorCause(err) == errVolumeNotFound {
if errors.Cause(err) == errVolumeNotFound {
return
}
dErrs[index] = err
@ -250,7 +252,7 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
wg.Wait()
err := reduceWriteQuorumErrs(dErrs, bucketOpIgnoredErrs, xl.writeQuorum)
if errorCause(err) == errXLWriteQuorum {
if errors.Cause(err) == errXLWriteQuorum {
xl.undoDeleteBucket(bucket)
}
return toObjectErr(err, bucket)

View file

@ -18,6 +18,8 @@ package cmd
import (
"path"
"github.com/minio/minio/pkg/errors"
)
// getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice.
@ -61,7 +63,7 @@ func (xl xlObjects) isObject(bucket, prefix string) (ok bool) {
return true
}
// Ignore for file not found, disk not found or faulty disk.
if isErrIgnored(err, xlTreeWalkIgnoredErrs...) {
if errors.IsErrIgnored(err, xlTreeWalkIgnoredErrs...) {
continue
}
errorIf(err, "Unable to stat a file %s/%s/%s", bucket, prefix, xlMetaJSONFile)

View file

@ -19,6 +19,8 @@ package cmd
import (
"path/filepath"
"time"
"github.com/minio/minio/pkg/errors"
)
// commonTime returns a maximally occurring time from a list of time.
@ -130,7 +132,7 @@ func outDatedDisks(disks, latestDisks []StorageAPI, errs []error, partsMetadata
continue
}
// disk either has an older xl.json or doesn't have one.
switch errorCause(errs[index]) {
switch errors.Cause(errs[index]) {
case nil, errFileNotFound:
outDatedDisks[index] = disks[index]
}
@ -210,7 +212,7 @@ func xlHealStat(xl xlObjects, partsMetadata []xlMetaV1, errs []error) HealObject
// xl.json is not found, which implies the erasure
// coded blocks are unavailable in the corresponding disk.
// First half of the disks are data and the rest are parity.
switch realErr := errorCause(err); realErr {
switch realErr := errors.Cause(err); realErr {
case errDiskNotFound:
disksMissing = true
fallthrough
@ -280,7 +282,7 @@ func disksWithAllParts(onlineDisks []StorageAPI, partsMetadata []xlMetaV1, errs
availableDisks[i] = OfflineDisk
break
}
return nil, nil, traceError(hErr)
return nil, nil, errors.Trace(hErr)
}
}

View file

@ -21,6 +21,8 @@ import (
"path"
"sort"
"sync"
"github.com/minio/minio/pkg/errors"
)
// healFormatXL - heals missing `format.json` on freshly or corrupted
@ -105,7 +107,7 @@ func healBucket(storageDisks []StorageAPI, bucket string, writeQuorum int) error
// Make a volume entry on all underlying storage disks.
for index, disk := range storageDisks {
if disk == nil {
dErrs[index] = traceError(errDiskNotFound)
dErrs[index] = errors.Trace(errDiskNotFound)
continue
}
wg.Add(1)
@ -114,11 +116,11 @@ func healBucket(storageDisks []StorageAPI, bucket string, writeQuorum int) error
defer wg.Done()
if _, err := disk.StatVol(bucket); err != nil {
if err != errVolumeNotFound {
dErrs[index] = traceError(err)
dErrs[index] = errors.Trace(err)
return
}
if err = disk.MakeVol(bucket); err != nil {
dErrs[index] = traceError(err)
dErrs[index] = errors.Trace(err)
}
}
}(index, disk)
@ -128,7 +130,7 @@ func healBucket(storageDisks []StorageAPI, bucket string, writeQuorum int) error
wg.Wait()
reducedErr := reduceWriteQuorumErrs(dErrs, bucketOpIgnoredErrs, writeQuorum)
if errorCause(reducedErr) == errXLWriteQuorum {
if errors.Cause(reducedErr) == errXLWriteQuorum {
// Purge successfully created buckets if we don't have writeQuorum.
undoMakeBucket(storageDisks, bucket)
}
@ -198,7 +200,7 @@ func listAllBuckets(storageDisks []StorageAPI) (buckets map[string]VolInfo, buck
continue
}
// Ignore any disks not found.
if isErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
if errors.IsErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
continue
}
break
@ -416,7 +418,7 @@ func healObject(storageDisks []StorageAPI, bucket, object string, quorum int) (i
// may have object parts still present in the object
// directory. These need to be deleted for the object to
// be healed successfully.
if errs[index] != nil && !isErr(errs[index], errFileNotFound) {
if errs[index] != nil && !errors.IsErr(errs[index], errFileNotFound) {
continue
}
@ -522,7 +524,7 @@ func healObject(storageDisks []StorageAPI, bucket, object string, quorum int) (i
aErr = disk.RenameFile(minioMetaTmpBucket, retainSlash(tmpID), bucket,
retainSlash(object))
if aErr != nil {
return 0, 0, toObjectErr(traceError(aErr), bucket, object)
return 0, 0, toObjectErr(errors.Trace(aErr), bucket, object)
}
}
return numOfflineDisks, numHealedDisks, nil

View file

@ -22,6 +22,8 @@ import (
"os"
"path/filepath"
"testing"
"github.com/minio/minio/pkg/errors"
)
// Tests healing of format XL.
@ -289,7 +291,7 @@ func TestUndoMakeBucket(t *testing.T) {
// Validate if bucket was deleted properly.
_, err = obj.GetBucketInfo(bucketName)
if err != nil {
err = errorCause(err)
err = errors.Cause(err)
switch err.(type) {
case BucketNotFound:
default:
@ -531,7 +533,7 @@ func TestHealObjectXL(t *testing.T) {
// Try healing now, expect to receive errDiskNotFound.
_, _, err = obj.HealObject(bucket, object)
if errorCause(err) != errDiskNotFound {
if errors.Cause(err) != errDiskNotFound {
t.Errorf("Expected %v but received %v", errDiskNotFound, err)
}
}

View file

@ -20,6 +20,8 @@ import (
"path/filepath"
"sort"
"strings"
"github.com/minio/minio/pkg/errors"
)
func listDirHealFactory(isLeaf isLeafFunc, disks ...StorageAPI) listDirFunc {
@ -112,7 +114,7 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma
objInfo, err = xl.getObjectInfo(bucket, entry)
if err != nil {
// Ignore errFileNotFound
if errorCause(err) == errFileNotFound {
if errors.Cause(err) == errFileNotFound {
continue
}
return loi, toObjectErr(err, bucket, prefix)
@ -238,7 +240,7 @@ func fetchMultipartUploadIDs(bucket, keyMarker, uploadIDMarker string,
uploads, end, err = listMultipartUploadIDs(bucket, keyMarker,
uploadIDMarker, maxUploads, disk)
if err == nil ||
!isErrIgnored(err, objMetadataOpIgnoredErrs...) {
!errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
break
}
}

View file

@ -16,6 +16,8 @@
package cmd
import "github.com/minio/minio/pkg/errors"
// Returns function "listDir" of the type listDirFunc.
// isLeaf - is used by listDir function to check if an entry is a leaf or non-leaf entry.
// disks - used for doing disk.ListDir(). FS passes single disk argument, XL passes a list of disks.
@ -30,10 +32,10 @@ func listDirFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, disks ...Sto
if err != nil {
// For any reason disk was deleted or goes offline, continue
// and list from other disks if possible.
if isErrIgnored(err, treeWalkIgnoredErrs...) {
if errors.IsErrIgnored(err, treeWalkIgnoredErrs...) {
continue
}
return nil, false, traceError(err)
return nil, false, errors.Trace(err)
}
entries, delayIsLeaf = filterListEntries(bucket, prefixDir, entries, prefixEntry, isLeaf)
@ -89,7 +91,7 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey
objInfo, err = xl.getObjectInfo(bucket, entry)
if err != nil {
// Ignore errFileNotFound
if errorCause(err) == errFileNotFound {
if errors.Cause(err) == errFileNotFound {
continue
}
return loi, toObjectErr(err, bucket, prefix)

View file

@ -21,7 +21,6 @@ import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"hash"
"path"
@ -30,6 +29,7 @@ import (
"sync"
"time"
"github.com/minio/minio/pkg/errors"
"golang.org/x/crypto/blake2b"
)
@ -354,7 +354,7 @@ func (m xlMetaV1) ObjectToPartOffset(offset int64) (partIndex int, partOffset in
partOffset -= part.Size
}
// Offset beyond the size of the object return InvalidRange.
return 0, 0, traceError(InvalidRange{})
return 0, 0, errors.Trace(InvalidRange{})
}
// pickValidXLMeta - picks one valid xlMeta content and returns from a
@ -367,7 +367,7 @@ func pickValidXLMeta(metaArr []xlMetaV1, modTime time.Time) (xmv xlMetaV1, e err
return meta, nil
}
}
return xmv, traceError(errors.New("No valid xl.json present"))
return xmv, errors.Trace(fmt.Errorf("No valid xl.json present"))
}
// list of all errors that can be ignored in a metadata operation.
@ -387,7 +387,7 @@ func (xl xlObjects) readXLMetaParts(bucket, object string) (xlMetaParts []object
}
// For any reason disk or bucket is not available continue
// and read from other disks.
if isErrIgnored(err, objMetadataOpIgnoredErrs...) {
if errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
ignoredErrs = append(ignoredErrs, err)
continue
}
@ -414,7 +414,7 @@ func (xl xlObjects) readXLMetaStat(bucket, object string) (xlStat statInfo, xlMe
}
// For any reason disk or bucket is not available continue
// and read from other disks.
if isErrIgnored(err, objMetadataOpIgnoredErrs...) {
if errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
ignoredErrs = append(ignoredErrs, err)
continue
}
@ -429,7 +429,7 @@ func (xl xlObjects) readXLMetaStat(bucket, object string) (xlStat statInfo, xlMe
// deleteXLMetadata - deletes `xl.json` on a single disk.
func deleteXLMetdata(disk StorageAPI, bucket, prefix string) error {
jsonFile := path.Join(prefix, xlMetaJSONFile)
return traceError(disk.DeleteFile(bucket, jsonFile))
return errors.Trace(disk.DeleteFile(bucket, jsonFile))
}
// writeXLMetadata - writes `xl.json` to a single disk.
@ -439,10 +439,10 @@ func writeXLMetadata(disk StorageAPI, bucket, prefix string, xlMeta xlMetaV1) er
// Marshal json.
metadataBytes, err := json.Marshal(&xlMeta)
if err != nil {
return traceError(err)
return errors.Trace(err)
}
// Persist marshalled data.
return traceError(disk.AppendFile(bucket, jsonFile, metadataBytes))
return errors.Trace(disk.AppendFile(bucket, jsonFile, metadataBytes))
}
// deleteAllXLMetadata - deletes all partially written `xl.json` depending on errs.
@ -482,7 +482,7 @@ func writeUniqueXLMetadata(disks []StorageAPI, bucket, prefix string, xlMetas []
// Start writing `xl.json` to all disks in parallel.
for index, disk := range disks {
if disk == nil {
mErrs[index] = traceError(errDiskNotFound)
mErrs[index] = errors.Trace(errDiskNotFound)
continue
}
wg.Add(1)
@ -505,7 +505,7 @@ func writeUniqueXLMetadata(disks []StorageAPI, bucket, prefix string, xlMetas []
wg.Wait()
err := reduceWriteQuorumErrs(mErrs, objectOpIgnoredErrs, quorum)
if errorCause(err) == errXLWriteQuorum {
if errors.Cause(err) == errXLWriteQuorum {
// Delete all `xl.json` successfully renamed.
deleteAllXLMetadata(disks, bucket, prefix, mErrs)
}
@ -520,7 +520,7 @@ func writeSameXLMetadata(disks []StorageAPI, bucket, prefix string, xlMeta xlMet
// Start writing `xl.json` to all disks in parallel.
for index, disk := range disks {
if disk == nil {
mErrs[index] = traceError(errDiskNotFound)
mErrs[index] = errors.Trace(errDiskNotFound)
continue
}
wg.Add(1)
@ -543,7 +543,7 @@ func writeSameXLMetadata(disks []StorageAPI, bucket, prefix string, xlMeta xlMet
wg.Wait()
err := reduceWriteQuorumErrs(mErrs, objectOpIgnoredErrs, writeQuorum)
if errorCause(err) == errXLWriteQuorum {
if errors.Cause(err) == errXLWriteQuorum {
// Delete all `xl.json` successfully renamed.
deleteAllXLMetadata(disks, bucket, prefix, mErrs)
}

View file

@ -26,6 +26,7 @@ import (
"time"
humanize "github.com/dustin/go-humanize"
errors2 "github.com/minio/minio/pkg/errors"
)
// Tests for reading XL object info.
@ -93,7 +94,7 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *tes
}
_, _, err = obj.(*xlObjects).readXLMetaStat(bucketName, objectName)
if errorCause(err) != errVolumeNotFound {
if errors2.Cause(err) != errVolumeNotFound {
t.Fatal(err)
}
}
@ -178,7 +179,7 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
}
_, err = obj.(*xlObjects).readXLMetaParts(minioMetaMultipartBucket, uploadIDPath)
if errorCause(err) != errFileNotFound {
if errors2.Cause(err) != errFileNotFound {
t.Fatal(err)
}
}
@ -297,7 +298,7 @@ func TestObjectToPartOffset(t *testing.T) {
// Test them.
for _, testCase := range testCases {
index, offset, err := xlMeta.ObjectToPartOffset(testCase.offset)
err = errorCause(err)
err = errors2.Cause(err)
if err != testCase.expectedErr {
t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err)
}
@ -355,7 +356,7 @@ func TestPickValidXLMeta(t *testing.T) {
for i, test := range testCases {
xlMeta, err := pickValidXLMeta(test.metaArr, test.modTime)
if test.expectedErr != nil {
if errorCause(err).Error() != test.expectedErr.Error() {
if errors2.Cause(err).Error() != test.expectedErr.Error() {
t.Errorf("Test %d: Expected to fail with %v but received %v",
i+1, test.expectedErr, err)
}

View file

@ -26,6 +26,7 @@ import (
"sync"
"time"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/mimedb"
)
@ -43,7 +44,7 @@ func (xl xlObjects) updateUploadJSON(bucket, object, uploadID string, initiated
wg := sync.WaitGroup{}
for index, disk := range xl.storageDisks {
if disk == nil {
errs[index] = traceError(errDiskNotFound)
errs[index] = errors.Trace(errDiskNotFound)
continue
}
// Update `uploads.json` in a go routine.
@ -53,7 +54,7 @@ func (xl xlObjects) updateUploadJSON(bucket, object, uploadID string, initiated
// read and parse uploads.json on this disk
uploadsJSON, err := readUploadsJSON(bucket, object, disk)
if errorCause(err) == errFileNotFound {
if errors.Cause(err) == errFileNotFound {
// If file is not found, we assume an
// default (empty) upload info.
uploadsJSON, err = newUploadsV1("xl"), nil
@ -84,7 +85,7 @@ func (xl xlObjects) updateUploadJSON(bucket, object, uploadID string, initiated
} else {
wErr := disk.RenameFile(minioMetaMultipartBucket, uploadsPath, minioMetaTmpBucket, tmpUploadsPath)
if wErr != nil {
errs[index] = traceError(wErr)
errs[index] = errors.Trace(wErr)
}
}
@ -95,7 +96,7 @@ func (xl xlObjects) updateUploadJSON(bucket, object, uploadID string, initiated
wg.Wait()
err := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum)
if errorCause(err) == errXLWriteQuorum {
if errors.Cause(err) == errXLWriteQuorum {
// No quorum. Perform cleanup on the minority of disks
// on which the operation succeeded.
@ -170,7 +171,7 @@ func (xl xlObjects) isMultipartUpload(bucket, prefix string) bool {
return true
}
// For any reason disk was deleted or goes offline, continue
if isErrIgnored(err, objMetadataOpIgnoredErrs...) {
if errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
continue
}
break
@ -218,12 +219,12 @@ func (xl xlObjects) statPart(bucket, object, uploadID, partName string) (fileInf
return fileInfo, nil
}
// If for any reason the disk was deleted or goes offline, we continue to the next disk.
if isErrIgnored(err, objMetadataOpIgnoredErrs...) {
if errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
ignoredErrs = append(ignoredErrs, err)
continue
}
// Error is not ignored, return right here.
return FileInfo{}, traceError(err)
return FileInfo{}, errors.Trace(err)
}
// If all errors were ignored, reduce to maximal occurrence
// based on the read quorum.
@ -241,7 +242,7 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr
// Rename `xl.json` to all disks in parallel.
for index, disk := range disks {
if disk == nil {
mErrs[index] = traceError(errDiskNotFound)
mErrs[index] = errors.Trace(errDiskNotFound)
continue
}
wg.Add(1)
@ -254,7 +255,7 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr
// Renames `xl.json` from source prefix to destination prefix.
rErr := disk.RenameFile(srcBucket, srcJSONFile, dstBucket, dstJSONFile)
if rErr != nil {
mErrs[index] = traceError(rErr)
mErrs[index] = errors.Trace(rErr)
return
}
mErrs[index] = nil
@ -264,7 +265,7 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr
wg.Wait()
err := reduceWriteQuorumErrs(mErrs, objectOpIgnoredErrs, quorum)
if errorCause(err) == errXLWriteQuorum {
if errors.Cause(err) == errXLWriteQuorum {
// Delete all `xl.json` successfully renamed.
deleteAllXLMetadata(disks, dstBucket, dstPrefix, mErrs)
}
@ -317,7 +318,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
if err == nil {
break
}
if isErrIgnored(err, objMetadataOpIgnoredErrs...) {
if errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
continue
}
break
@ -386,14 +387,14 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
if err == nil {
break
}
if isErrIgnored(err, objMetadataOpIgnoredErrs...) {
if errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
continue
}
break
}
entryLock.RUnlock()
if err != nil {
if isErrIgnored(err, xlTreeWalkIgnoredErrs...) {
if errors.IsErrIgnored(err, xlTreeWalkIgnoredErrs...) {
continue
}
return lmi, err
@ -585,7 +586,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// Validate input data size and it can never be less than zero.
if data.Size() < 0 {
return pi, toObjectErr(traceError(errInvalidArgument))
return pi, toObjectErr(errors.Trace(errInvalidArgument))
}
var partsMetadata []xlMetaV1
@ -601,14 +602,14 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// Validates if upload ID exists.
if !xl.isUploadIDExists(bucket, object, uploadID) {
preUploadIDLock.RUnlock()
return pi, traceError(InvalidUploadID{UploadID: uploadID})
return pi, errors.Trace(InvalidUploadID{UploadID: uploadID})
}
// Read metadata associated with the object from all disks.
partsMetadata, errs = readAllXLMetadata(xl.storageDisks, minioMetaMultipartBucket,
uploadIDPath)
reducedErr := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum)
if errorCause(reducedErr) == errXLWriteQuorum {
if errors.Cause(reducedErr) == errXLWriteQuorum {
preUploadIDLock.RUnlock()
return pi, toObjectErr(reducedErr, bucket, object)
}
@ -654,7 +655,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// Should return IncompleteBody{} error when reader has fewer bytes
// than specified in request header.
if file.Size < data.Size() {
return pi, traceError(IncompleteBody{})
return pi, errors.Trace(IncompleteBody{})
}
// post-upload check (write) lock
@ -666,7 +667,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// Validate again if upload ID still exists.
if !xl.isUploadIDExists(bucket, object, uploadID) {
return pi, traceError(InvalidUploadID{UploadID: uploadID})
return pi, errors.Trace(InvalidUploadID{UploadID: uploadID})
}
// Rename temporary part file to its final location.
@ -679,7 +680,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// Read metadata again because it might be updated with parallel upload of another part.
partsMetadata, errs = readAllXLMetadata(onlineDisks, minioMetaMultipartBucket, uploadIDPath)
reducedErr = reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum)
if errorCause(reducedErr) == errXLWriteQuorum {
if errors.Cause(reducedErr) == errXLWriteQuorum {
return pi, toObjectErr(reducedErr, bucket, object)
}
@ -820,7 +821,7 @@ func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
defer uploadIDLock.Unlock()
if !xl.isUploadIDExists(bucket, object, uploadID) {
return lpi, traceError(InvalidUploadID{UploadID: uploadID})
return lpi, errors.Trace(InvalidUploadID{UploadID: uploadID})
}
result, err := xl.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
return result, err
@ -851,13 +852,13 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
defer uploadIDLock.Unlock()
if !xl.isUploadIDExists(bucket, object, uploadID) {
return oi, traceError(InvalidUploadID{UploadID: uploadID})
return oi, errors.Trace(InvalidUploadID{UploadID: uploadID})
}
// Check if an object is present as one of the parent dir.
// -- FIXME. (needs a new kind of lock).
if xl.parentDirIsObject(bucket, path.Dir(object)) {
return oi, toObjectErr(traceError(errFileAccessDenied), bucket, object)
return oi, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
}
// Calculate s3 compatible md5sum for complete multipart.
@ -871,7 +872,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
// Read metadata associated with the object from all disks.
partsMetadata, errs := readAllXLMetadata(xl.storageDisks, minioMetaMultipartBucket, uploadIDPath)
reducedErr := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum)
if errorCause(reducedErr) == errXLWriteQuorum {
if errors.Cause(reducedErr) == errXLWriteQuorum {
return oi, toObjectErr(reducedErr, bucket, object)
}
@ -903,17 +904,17 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
partIdx := objectPartIndex(currentXLMeta.Parts, part.PartNumber)
// All parts should have same part number.
if partIdx == -1 {
return oi, traceError(InvalidPart{})
return oi, errors.Trace(InvalidPart{})
}
// All parts should have same ETag as previously generated.
if currentXLMeta.Parts[partIdx].ETag != part.ETag {
return oi, traceError(InvalidPart{})
return oi, errors.Trace(InvalidPart{})
}
// All parts except the last part have to be at least 5MB.
if (i < len(parts)-1) && !isMinAllowedPartSize(currentXLMeta.Parts[partIdx].Size) {
return oi, traceError(PartTooSmall{
return oi, errors.Trace(PartTooSmall{
PartNumber: part.PartNumber,
PartSize: currentXLMeta.Parts[partIdx].Size,
PartETag: part.ETag,
@ -1057,7 +1058,7 @@ func (xl xlObjects) cleanupUploadedParts(bucket, object, uploadID string) error
// Cleanup uploadID for all disks.
for index, disk := range xl.storageDisks {
if disk == nil {
errs[index] = traceError(errDiskNotFound)
errs[index] = errors.Trace(errDiskNotFound)
continue
}
wg.Add(1)
@ -1131,7 +1132,7 @@ func (xl xlObjects) AbortMultipartUpload(bucket, object, uploadID string) error
defer uploadIDLock.Unlock()
if !xl.isUploadIDExists(bucket, object, uploadID) {
return traceError(InvalidUploadID{UploadID: uploadID})
return errors.Trace(InvalidUploadID{UploadID: uploadID})
}
return xl.abortMultipartUpload(bucket, object, uploadID)
}
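
Because every return above is wrapped by errors.Trace, callers that branch on the concrete error type must unwrap first; the handler-layer pattern, as the CompleteMultipartUploadHandler hunk earlier in this patch shows, looks like this (variable names hypothetical):

    if err := xl.AbortMultipartUpload(bucket, object, uploadID); err != nil {
        switch errors.Cause(err).(type) {
        case InvalidUploadID:
            // Known condition: surface a NoSuchUpload-style API error.
        default:
            // Unexpected: log err, whose trace still carries the stack.
        }
    }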

View file

@ -24,6 +24,7 @@ import (
"strings"
"sync"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/mimedb"
"github.com/minio/minio/pkg/objcache"
@ -116,7 +117,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
hashReader, err := hash.NewReader(pipeReader, length, "", "")
if err != nil {
return oi, toObjectErr(traceError(err), dstBucket, dstObject)
return oi, toObjectErr(errors.Trace(err), dstBucket, dstObject)
}
objInfo, err := xl.PutObject(dstBucket, dstObject, hashReader, metadata)
@ -143,12 +144,12 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
// Start offset cannot be negative.
if startOffset < 0 {
return traceError(errUnexpected)
return errors.Trace(errUnexpected)
}
// Writer cannot be nil.
if writer == nil {
return traceError(errUnexpected)
return errors.Trace(errUnexpected)
}
// Read metadata associated with the object from all disks.
@ -179,13 +180,13 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
// Reply back invalid range if the input offset and length fall out of range.
if startOffset > xlMeta.Stat.Size || startOffset+length > xlMeta.Stat.Size {
return traceError(InvalidRange{startOffset, length, xlMeta.Stat.Size})
return errors.Trace(InvalidRange{startOffset, length, xlMeta.Stat.Size})
}
// Get start part index and offset.
partIndex, partOffset, err := xlMeta.ObjectToPartOffset(startOffset)
if err != nil {
return traceError(InvalidRange{startOffset, length, xlMeta.Stat.Size})
return errors.Trace(InvalidRange{startOffset, length, xlMeta.Stat.Size})
}
// Calculate endOffset according to length
@ -197,7 +198,7 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
// Get last part index to read given length.
lastPartIndex, _, err := xlMeta.ObjectToPartOffset(endOffset)
if err != nil {
return traceError(InvalidRange{startOffset, length, xlMeta.Stat.Size})
return errors.Trace(InvalidRange{startOffset, length, xlMeta.Stat.Size})
}
// Save the writer.
@ -214,7 +215,7 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
// Copy the data out.
if _, err = io.Copy(writer, reader); err != nil {
return traceError(err)
return errors.Trace(err)
}
// Success.
@ -224,7 +225,7 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
// For unknown error, return and error out.
if err != objcache.ErrKeyNotFoundInCache {
return traceError(err)
return errors.Trace(err)
} // Cache has not been found, fill the cache.
// Cache is only set if whole object is being read.
@ -241,7 +242,7 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
// Ignore error if cache is full, proceed to write the object.
if err != nil && err != objcache.ErrCacheFull {
// For any other error return here.
return toObjectErr(traceError(err), bucket, object)
return toObjectErr(errors.Trace(err), bucket, object)
}
}
}
@ -390,7 +391,7 @@ func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string,
defer wg.Done()
err := disk.RenameFile(srcBucket, srcEntry, dstBucket, dstEntry)
if err != nil && err != errFileNotFound {
errs[index] = traceError(err)
errs[index] = errors.Trace(err)
}
}(index, disk)
}
@ -401,7 +402,7 @@ func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string,
// We can safely allow RenameFile errors up to len(xl.storageDisks) - xl.writeQuorum
// otherwise return failure. Cleanup successful renames.
err := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, quorum)
if errorCause(err) == errXLWriteQuorum {
if errors.Cause(err) == errXLWriteQuorum {
// Undo all the partial rename operations.
undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isDir, errs)
}
@ -439,7 +440,7 @@ func (xl xlObjects) PutObject(bucket string, object string, data *hash.Reader, m
// -- FIXME. (needs a new kind of lock).
// -- FIXME (this also causes performance issue when disks are down).
if xl.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
return ObjectInfo{}, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
}
return dirObjectInfo(bucket, object, data.Size(), metadata), nil
}
@ -451,14 +452,14 @@ func (xl xlObjects) PutObject(bucket string, object string, data *hash.Reader, m
// Validate input data size and it can never be less than zero.
if data.Size() < 0 {
return ObjectInfo{}, toObjectErr(traceError(errInvalidArgument))
return ObjectInfo{}, toObjectErr(errors.Trace(errInvalidArgument))
}
// Check if an object is present as one of the parent dir.
// -- FIXME. (needs a new kind of lock).
// -- FIXME (this also causes performance issue when disks are down).
if xl.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
return ObjectInfo{}, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
}
// No metadata is set, allocate a new one.
@ -488,7 +489,7 @@ func (xl xlObjects) PutObject(bucket string, object string, data *hash.Reader, m
} else {
// Return errors other than ErrCacheFull
if err != objcache.ErrCacheFull {
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return ObjectInfo{}, toObjectErr(errors.Trace(err), bucket, object)
}
}
}
@ -561,7 +562,7 @@ func (xl xlObjects) PutObject(bucket string, object string, data *hash.Reader, m
// Should return IncompleteBody{} error when reader has fewer bytes
// than specified in request header.
if file.Size < curPartSize {
return ObjectInfo{}, traceError(IncompleteBody{})
return ObjectInfo{}, errors.Trace(IncompleteBody{})
}
// Update the total written size
@ -663,14 +664,14 @@ func (xl xlObjects) deleteObject(bucket, object string) error {
for index, disk := range xl.storageDisks {
if disk == nil {
dErrs[index] = traceError(errDiskNotFound)
dErrs[index] = errors.Trace(errDiskNotFound)
continue
}
wg.Add(1)
go func(index int, disk StorageAPI) {
defer wg.Done()
err := cleanupDir(disk, bucket, object)
if err != nil && errorCause(err) != errVolumeNotFound {
if err != nil && errors.Cause(err) != errVolumeNotFound {
dErrs[index] = err
}
}(index, disk)
@ -692,7 +693,7 @@ func (xl xlObjects) DeleteObject(bucket, object string) (err error) {
// Validate object exists.
if !xl.isObject(bucket, object) {
return traceError(ObjectNotFound{bucket, object})
return errors.Trace(ObjectNotFound{bucket, object})
} // else proceed to delete the object.
// Delete the object on all disks.

View file

@ -27,6 +27,7 @@ import (
"time"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/errors"
)
func TestRepeatPutObjectPart(t *testing.T) {
@ -98,7 +99,7 @@ func TestXLDeleteObjectBasic(t *testing.T) {
}
for i, test := range testCases {
actualErr := xl.DeleteObject(test.bucket, test.object)
actualErr = errorCause(actualErr)
actualErr = errors.Cause(actualErr)
if test.expectedErr != nil && actualErr != test.expectedErr {
t.Errorf("Test %d: Expected to fail with %s, but failed with %s", i+1, test.expectedErr, actualErr)
}
@ -152,7 +153,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
xl.storageDisks[7] = nil
xl.storageDisks[8] = nil
err = obj.DeleteObject(bucket, object)
err = errorCause(err)
err = errors.Cause(err)
if err != toObjectErr(errXLWriteQuorum, bucket, object) {
t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
}
@ -203,7 +204,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
}
// Fetch object from store.
err = xl.GetObject(bucket, object, 0, int64(len("abcd")), ioutil.Discard)
err = errorCause(err)
err = errors.Cause(err)
if err != toObjectErr(errXLReadQuorum, bucket, object) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
}
@ -254,7 +255,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
}
// Upload new content to same object "object"
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
err = errorCause(err)
err = errors.Cause(err)
if err != toObjectErr(errXLWriteQuorum, bucket, object) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
}

View file

@ -24,6 +24,7 @@ import (
"sync"
"time"
errors2 "github.com/minio/minio/pkg/errors"
"github.com/tidwall/gjson"
)
@ -35,9 +36,9 @@ import (
// maximal values would occur quorum or more times.
func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error) {
errorCounts := make(map[error]int)
errs = errorsCause(errs)
errs = errors2.Causes(errs)
for _, err := range errs {
if isErrIgnored(err, ignoredErrs...) {
if errors2.IsErrIgnored(err, ignoredErrs...) {
continue
}
errorCounts[err]++
@ -72,10 +73,10 @@ func reduceQuorumErrs(errs []error, ignoredErrs []error, quorum int, quorumErr e
}
if maxErr != nil && maxCount >= quorum {
// Errors in quorum.
return traceError(maxErr, errs...)
return errors2.Trace(maxErr, errs...)
}
// No quorum satisfied.
maxErr = traceError(quorumErr, errs...)
maxErr = errors2.Trace(quorumErr, errs...)
return
}
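
The quorum reducer now feeds the whole per-disk slice into the trace (errors2.Trace(maxErr, errs...)), so a single traced value records both the winning error and its siblings. A small usage sketch derived from the code above:

    // Three of five disks agree on errDiskNotFound; quorum is three.
    errs := []error{
        errors2.Trace(errDiskNotFound),
        errors2.Trace(errDiskNotFound),
        errDiskNotFound, // plain values are fine: reduceErrs unwraps via Causes
        nil,
        errFaultyDisk,
    }
    err := reduceQuorumErrs(errs, nil, 3, errXLWriteQuorum)
    // errors2.Cause(err) == errDiskNotFound, since it reached quorum.
    // With quorum 4, err would instead wrap errXLWriteQuorum.
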
@ -174,11 +175,11 @@ func parseXLErasureInfo(xlMetaBuf []byte) (ErasureInfo, error) {
for i, v := range checkSumsResult {
algorithm := BitrotAlgorithmFromString(v.Get("algorithm").String())
if !algorithm.Available() {
return erasure, traceError(errBitrotHashAlgoInvalid)
return erasure, errors2.Trace(errBitrotHashAlgoInvalid)
}
hash, err := hex.DecodeString(v.Get("hash").String())
if err != nil {
return erasure, traceError(err)
return erasure, errors2.Trace(err)
}
checkSums[i] = ChecksumInfo{Name: v.Get("name").String(), Algorithm: algorithm, Hash: hash}
}
@ -245,7 +246,7 @@ func readXLMetaParts(disk StorageAPI, bucket string, object string) ([]objectPar
// Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil {
return nil, traceError(err)
return nil, errors2.Trace(err)
}
// obtain xlMetaV1{}.Parts using `github.com/tidwall/gjson`.
xlMetaParts := parseXLParts(xlMetaBuf)
@ -258,7 +259,7 @@ func readXLMetaStat(disk StorageAPI, bucket string, object string) (si statInfo,
// Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil {
return si, nil, traceError(err)
return si, nil, errors2.Trace(err)
}
// obtain version.
@ -270,7 +271,7 @@ func readXLMetaStat(disk StorageAPI, bucket string, object string) (si statInfo,
// Validate if the xl.json we read is sane, return corrupted format.
if !isXLMetaValid(xlVersion, xlFormat) {
// For version mismatches and unrecognized formats, return corrupted format.
return si, nil, traceError(errCorruptedFormat)
return si, nil, errors2.Trace(errCorruptedFormat)
}
// obtain xlMetaV1{}.Meta using `github.com/tidwall/gjson`.
@ -279,7 +280,7 @@ func readXLMetaStat(disk StorageAPI, bucket string, object string) (si statInfo,
// obtain xlMetaV1{}.Stat using `github.com/tidwall/gjson`.
xlStat, err := parseXLStat(xlMetaBuf)
if err != nil {
return si, nil, traceError(err)
return si, nil, errors2.Trace(err)
}
// Return structured `xl.json`.
@ -291,12 +292,12 @@ func readXLMeta(disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1,
// Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil {
return xlMetaV1{}, traceError(err)
return xlMetaV1{}, errors2.Trace(err)
}
// obtain xlMetaV1{} using `github.com/tidwall/gjson`.
xlMeta, err = xlMetaV1UnmarshalJSON(xlMetaBuf)
if err != nil {
return xlMetaV1{}, traceError(err)
return xlMetaV1{}, errors2.Trace(err)
}
// Return structured `xl.json`.
return xlMeta, nil
@ -392,13 +393,13 @@ var (
// returns an error if totalSize is negative, partSize is 0, or partIndex is less than 1.
func calculatePartSizeFromIdx(totalSize int64, partSize int64, partIndex int) (currPartSize int64, err error) {
if totalSize < 0 {
return 0, traceError(errInvalidArgument)
return 0, errors2.Trace(errInvalidArgument)
}
if partSize == 0 {
return 0, traceError(errPartSizeZero)
return 0, errors2.Trace(errPartSizeZero)
}
if partIndex < 1 {
return 0, traceError(errPartSizeIndex)
return 0, errors2.Trace(errPartSizeIndex)
}
if totalSize > 0 {
// Compute the total count of parts

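The truncated hunk above computes per-part sizes; for intuition, here is a hedged sketch of the arithmetic, assumed from the checks shown rather than the exact body: every part except possibly the last is partSize bytes, and the last part carries the remainder.

package main

import "fmt"

// partSizeFromIdx sketches the assumed arithmetic: ceiling-divide to get
// the part count, return partSize for all but the last part, and the
// remainder for the last one.
func partSizeFromIdx(totalSize, partSize, partIndex int64) int64 {
	if totalSize == 0 {
		return 0
	}
	totalParts := (totalSize + partSize - 1) / partSize // ceiling division
	switch {
	case partIndex < totalParts:
		return partSize
	case partIndex == totalParts:
		return totalSize - partSize*(totalParts-1) // remainder in the last part
	default:
		return 0
	}
}

func main() {
	fmt.Println(partSizeFromIdx(10, 4, 1)) // 4
	fmt.Println(partSizeFromIdx(10, 4, 3)) // 2 (last part holds the remainder)
}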
View file

@ -25,6 +25,7 @@ import (
"testing"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/errors"
)
// Tests calculating disk count.
@ -91,11 +92,11 @@ func TestReduceErrs(t *testing.T) {
// Validates list of all the testcases for returning valid errors.
for i, testCase := range testCases {
gotErr := reduceReadQuorumErrs(testCase.errs, testCase.ignoredErrs, 5)
if errorCause(gotErr) != testCase.err {
if errors.Cause(gotErr) != testCase.err {
t.Errorf("Test %d : expected %s, got %s", i+1, testCase.err, gotErr)
}
gotNewErr := reduceWriteQuorumErrs(testCase.errs, testCase.ignoredErrs, 6)
if errorCause(gotNewErr) != errXLWriteQuorum {
if errors.Cause(gotNewErr) != errXLWriteQuorum {
t.Errorf("Test %d : expected %s, got %s", i+1, errXLWriteQuorum, gotErr)
}
}
@ -382,8 +383,8 @@ func TestGetPartSizeFromIdx(t *testing.T) {
if err == nil {
t.Errorf("Test %d: Expected to failed but passed. %s", i+1, err)
}
if err != nil && errorCause(err) != testCaseFailure.err {
t.Errorf("Test %d: Expected err %s, but got %s", i+1, testCaseFailure.err, errorCause(err))
if err != nil && errors.Cause(err) != testCaseFailure.err {
t.Errorf("Test %d: Expected err %s, but got %s", i+1, testCaseFailure.err, errors.Cause(err))
}
}
}

View file

@ -24,6 +24,7 @@ import (
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/objcache"
)
@ -197,7 +198,7 @@ func getDisksInfo(disks []StorageAPI) (disksInfo []disk.Info, onlineDisks int, o
info, err := storageDisk.DiskInfo()
if err != nil {
errorIf(err, "Unable to fetch disk info for %#v", storageDisk)
if isErr(err, baseErrs...) {
if errors.IsErr(err, baseErrs...) {
offlineDisks++
continue
}

View file

@ -84,7 +84,7 @@ GetObject() holds a read lock on `fs.json`.
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
rlk, err := fs.rwPool.Open(fsMetaPath)
if err != nil {
return toObjectErr(traceError(err), bucket, object)
return toObjectErr(errors.Trace(err), bucket, object)
}
defer rlk.Close()

View file

@ -88,7 +88,7 @@ GetObject() holds a read lock on `fs.json`.
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
rlk, err := fs.rwPool.Open(fsMetaPath)
if err != nil {
return toObjectErr(traceError(err), bucket, object)
return toObjectErr(errors.Trace(err), bucket, object)
}
defer rlk.Close()

154
pkg/errors/errors.go Normal file
View file

@ -0,0 +1,154 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package errors
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
)
var (
// Package path of the project.
pkgPath string
)
// Init - initialize package path.
func Init(gopath string, p string) {
pkgPath = filepath.Join(gopath, "src", p) + string(os.PathSeparator)
}
// stackInfo - Represents a stack frame in the stack trace.
type stackInfo struct {
Filename string `json:"fileName"` // File where error occurred
Line int `json:"line"` // Line where error occurred
Name string `json:"name"` // Name of the function where error occurred
}
// Error - error type containing cause and the stack trace.
type Error struct {
Cause error // Holds the cause error
stack []stackInfo // Stack trace info.
errs []error // Useful for XL to hold errors from all disks
}
// Implement error interface.
func (e Error) Error() string {
return e.Cause.Error()
}
// Stack - returns slice of stack trace.
func (e Error) Stack() []string {
var stack []string
for _, info := range e.stack {
stack = append(stack, fmt.Sprintf("%s:%d:%s()", info.Filename, info.Line, info.Name))
}
return stack
}
// Trace - return new Error type.
func Trace(e error, errs ...error) error {
// Error is nil, nothing to do, return nil.
if e == nil {
return nil
}
// Already a traced error, return it as is.
if _, ok := e.(*Error); ok {
return e
}
err := &Error{}
err.Cause = e
err.errs = errs
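// Capture up to 40 program counters, skipping runtime.Callers and Trace itself.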
stack := make([]uintptr, 40)
length := runtime.Callers(2, stack)
if length > len(stack) {
length = len(stack)
}
stack = stack[:length]
for _, pc := range stack {
pc = pc - 1
fn := runtime.FuncForPC(pc)
file, line := fn.FileLine(pc)
var prefixFound bool
for _, ignorePrefix := range []string{
"runtime.",
"testing.",
} {
if strings.HasPrefix(fn.Name(), ignorePrefix) {
prefixFound = true
break
}
}
if prefixFound {
continue
}
_, name := filepath.Split(fn.Name())
name = strings.SplitN(name, ".", 2)[1]
file = filepath.FromSlash(strings.TrimPrefix(filepath.ToSlash(file), filepath.ToSlash(pkgPath)))
err.stack = append(err.stack, stackInfo{
Filename: file,
Line: line,
Name: name,
})
}
return err
}
// Cause - Returns the underlying cause error.
func Cause(err error) error {
if e, ok := err.(*Error); ok {
err = e.Cause
}
return err
}
// Causes - Returns the slice of underlying cause errors.
func Causes(errs []error) (cerrs []error) {
for _, err := range errs {
cerrs = append(cerrs, Cause(err))
}
return cerrs
}
// IsErrIgnored returns whether the given error is one of the ignored errors.
func IsErrIgnored(err error, ignoredErrs ...error) bool {
return IsErr(err, ignoredErrs...)
}
// IsErr returns whether the given error matches any of the listed exact errors.
func IsErr(err error, errs ...error) bool {
err = Cause(err)
for _, exactErr := range errs {
if err == exactErr {
return true
}
}
return false
}
// Tracef behaves like fmt.Errorf but adds traces to the returned error.
func Tracef(format string, args ...interface{}) error {
return Trace(fmt.Errorf(format, args...))
}
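
Taken together, call sites migrate from the old cmd-local traceError/errorCause helpers to this package. A minimal usage sketch follows; the errDiskFull sentinel is illustrative, and the Init call mirrors the one in TestStack below:

package main

import (
	"fmt"
	"go/build"

	"github.com/minio/minio/pkg/errors"
)

var errDiskFull = fmt.Errorf("disk full")

func main() {
	// Trim GOPATH/src/github.com/minio/minio from stack-trace file names.
	errors.Init(build.Default.GOPATH, "github.com/minio/minio")

	err := errors.Trace(errDiskFull) // wrap the cause with a stack trace
	fmt.Println(err)                 // Error() delegates to the cause: "disk full"

	if terr, ok := err.(*errors.Error); ok {
		fmt.Println(terr.Stack()) // e.g. ["main.go:17:main()"]
	}

	fmt.Println(errors.Cause(err) == errDiskFull)           // true: unwraps to the cause
	fmt.Println(errors.IsErr(err, errDiskFull))             // true: compares after unwrapping
	fmt.Println(errors.Tracef("open %s failed", "xl.json")) // fmt.Errorf plus Trace
}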

120
pkg/errors/errors_test.go Normal file
View file

@ -0,0 +1,120 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package errors
import (
"fmt"
"go/build"
"path/filepath"
"reflect"
"strings"
"testing"
)
// Test trace errors.
func TestTrace(t *testing.T) {
var errExpectedCause = fmt.Errorf("traceable error")
var testCases = []struct {
expectedCauseErr error
}{
{
expectedCauseErr: nil,
},
{
expectedCauseErr: errExpectedCause,
},
{
expectedCauseErr: Trace(errExpectedCause),
},
}
for i, testCase := range testCases {
if err := Trace(testCase.expectedCauseErr); err != nil {
if errGotCause := Cause(err); errGotCause != Cause(testCase.expectedCauseErr) {
t.Errorf("Test: %d Expected %s, got %s", i+1, testCase.expectedCauseErr, errGotCause)
}
}
}
}
// Test if IsErrIgnored works correctly.
func TestIsErrIgnored(t *testing.T) {
var errIgnored = fmt.Errorf("ignored error")
var testCases = []struct {
err error
ignored bool
}{
{
err: nil,
ignored: false,
},
{
err: errIgnored,
ignored: true,
},
{
err: Trace(errIgnored),
ignored: true,
},
}
for i, testCase := range testCases {
if ok := IsErrIgnored(testCase.err, errIgnored); ok != testCase.ignored {
t.Errorf("Test: %d, Expected %t, got %t", i+1, testCase.ignored, ok)
}
}
}
// Tests if pkgPath is set properly in Init.
func TestInit(t *testing.T) {
Init("/home/test/go", "test")
if filepath.ToSlash(pkgPath) != "/home/test/go/src/test/" {
t.Fatalf("Expected pkgPath to be \"/home/test/go/src/test/\", found %s", pkgPath)
}
}
// Tests stack output.
func TestStack(t *testing.T) {
Init(build.Default.GOPATH, "github.com/minio/minio")
err := Trace(fmt.Errorf("traceable error"))
if terr, ok := err.(*Error); ok {
if !strings.HasSuffix(terr.Stack()[0], "TestStack()") {
t.Errorf("Expected suffix \"TestStack()\", got %s", terr.Stack()[0])
}
}
// Test if the cause error is returned properly with the underlying string.
if err.Error() != "traceable error" {
t.Errorf("Expected \"traceable error\", got %s", err.Error())
}
}
// Tests converting error causes.
func TestErrCauses(t *testing.T) {
errTraceableError := fmt.Errorf("traceable error")
var errs = []error{
errTraceableError,
errTraceableError,
errTraceableError,
}
var terrs []error
for _, err := range errs {
terrs = append(terrs, Trace(err))
}
cerrs := Causes(terrs)
if !reflect.DeepEqual(errs, cerrs) {
t.Errorf("Expected %#v, got %#v", errs, cerrs)
}
}