Create logger package and rename errorIf to LogIf (#5678)

Removing message from error logging
Replace errors.Trace with LogIf
kannappanr 2018-04-05 15:04:40 -07:00 committed by GitHub
parent 91fd8ffeb7
commit f8a3fd0c2a
119 changed files with 2608 additions and 1860 deletions

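A note on the pattern this commit applies at every call site below: the printf-style message formerly passed to errorIf is dropped, and any contextual detail instead travels as tags on a logger.ReqInfo attached to a context.Context, which logger.LogIf reads when logging the error. A minimal before/after sketch, assuming only the logger API shapes visible in the hunks below (the peer address is a hypothetical value):

package main

import (
    "context"
    "errors"

    "github.com/minio/minio/cmd/logger"
)

func main() {
    err := errors.New("connection refused")

    // Before: message and arguments formatted at the call site.
    //   errorIf(err, "Unable to get server info from %s.", peerAddr)

    // After: structured details ride on the context; LogIf logs the
    // error together with whatever ReqInfo is attached to ctx.
    reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", "10.0.0.2:9000")
    ctx := logger.SetReqInfo(context.Background(), reqInfo)
    logger.LogIf(ctx, err)
}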
@ -18,7 +18,9 @@ package cmd
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@ -27,6 +29,7 @@ import (
"time"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/madmin"
@ -71,7 +74,7 @@ func (a adminAPIHandlers) VersionHandler(w http.ResponseWriter, r *http.Request)
jsonBytes, err := json.Marshal(adminAPIVersionInfo)
if err != nil {
writeErrorResponseJSON(w, ErrInternalError, r.URL)
errorIf(err, "Failed to marshal Admin API Version to JSON.")
logger.LogIf(context.Background(), err)
return
}
@ -99,7 +102,7 @@ func (a adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r *http.Re
uptime, err := getPeerUptimes(globalAdminPeers)
if err != nil {
writeErrorResponseJSON(w, toAPIErrorCode(err), r.URL)
errorIf(err, "Possibly failed to get uptime from majority of servers.")
logger.LogIf(context.Background(), err)
return
}
@ -113,7 +116,7 @@ func (a adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r *http.Re
jsonBytes, err := json.Marshal(serverStatus)
if err != nil {
writeErrorResponseJSON(w, ErrInternalError, r.URL)
errorIf(err, "Failed to marshal storage info into json.")
logger.LogIf(context.Background(), err)
return
}
// Reply with storage information (across nodes in a
@ -136,7 +139,7 @@ func (a adminAPIHandlers) ServiceStopNRestartHandler(w http.ResponseWriter, r *h
var sa madmin.ServiceAction
err := json.NewDecoder(r.Body).Decode(&sa)
if err != nil {
errorIf(err, "Error parsing body JSON")
logger.LogIf(context.Background(), err)
writeErrorResponseJSON(w, ErrRequestBodyParse, r.URL)
return
}
@ -149,7 +152,7 @@ func (a adminAPIHandlers) ServiceStopNRestartHandler(w http.ResponseWriter, r *h
serviceSig = serviceStop
default:
writeErrorResponseJSON(w, ErrMalformedPOSTRequest, r.URL)
errorIf(err, "Invalid service action received")
logger.LogIf(context.Background(), errors.New("Invalid service action received"))
return
}
@ -243,7 +246,9 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
serverInfoData, err := peer.cmdRunner.ServerInfoData()
if err != nil {
errorIf(err, "Unable to get server info from %s.", peer.addr)
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", peer.addr)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
reply[idx].Error = err.Error()
return
}
@ -258,7 +263,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
jsonBytes, err := json.Marshal(reply)
if err != nil {
writeErrorResponseJSON(w, ErrInternalError, r.URL)
errorIf(err, "Failed to marshal storage info into json.")
logger.LogIf(context.Background(), err)
return
}
@ -292,7 +297,7 @@ func validateLockQueryParams(vars url.Values) (string, string, time.Duration,
}
duration, err := time.ParseDuration(olderThanStr)
if err != nil {
errorIf(err, "Failed to parse duration passed as query value.")
logger.LogIf(context.Background(), err)
return "", "", time.Duration(0), ErrInvalidDuration
}
@ -325,7 +330,7 @@ func (a adminAPIHandlers) ListLocksHandler(w http.ResponseWriter, r *http.Reques
duration)
if err != nil {
writeErrorResponseJSON(w, ErrInternalError, r.URL)
errorIf(err, "Failed to fetch lock information from remote nodes.")
logger.LogIf(context.Background(), err)
return
}
@ -333,7 +338,7 @@ func (a adminAPIHandlers) ListLocksHandler(w http.ResponseWriter, r *http.Reques
jsonBytes, err := json.Marshal(volLocks)
if err != nil {
writeErrorResponseJSON(w, ErrInternalError, r.URL)
errorIf(err, "Failed to marshal lock information into json.")
logger.LogIf(context.Background(), err)
return
}
@ -369,7 +374,7 @@ func (a adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *http.Reque
duration)
if err != nil {
writeErrorResponseJSON(w, ErrInternalError, r.URL)
errorIf(err, "Failed to fetch lock information from remote nodes.")
logger.LogIf(ctx, err)
return
}
@ -377,7 +382,7 @@ func (a adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *http.Reque
jsonBytes, err := json.Marshal(volLocks)
if err != nil {
writeErrorResponseJSON(w, ErrInternalError, r.URL)
errorIf(err, "Failed to marshal lock information into json.")
logger.LogIf(ctx, err)
return
}
@ -425,7 +430,7 @@ func extractHealInitParams(r *http.Request) (bucket, objPrefix string,
if clientToken == "" {
jerr := json.NewDecoder(r.Body).Decode(&hs)
if jerr != nil {
errorIf(jerr, "Error parsing body JSON")
logger.LogIf(context.Background(), jerr)
err = ErrRequestBodyParse
return
}
@ -583,7 +588,7 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
// occurring on a quorum of the servers is returned.
configBytes, err := getPeerConfig(globalAdminPeers)
if err != nil {
errorIf(err, "Failed to get config from peers")
logger.LogIf(context.Background(), err)
writeErrorResponseJSON(w, toAdminAPIErrCode(err), r.URL)
return
}
@ -655,6 +660,7 @@ func writeSetConfigResponse(w http.ResponseWriter, peers adminPeers,
// SetConfigHandler - PUT /minio/admin/v1/config
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
@ -678,7 +684,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
return
}
if err != io.ErrUnexpectedEOF {
errorIf(err, "Failed to read config from request body.")
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
@ -688,7 +694,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
// Validate JSON provided in the request body: check the
// client has not sent JSON objects with duplicate keys.
if err = checkDupJSONKeys(string(configBytes)); err != nil {
errorIf(err, "config contains duplicate JSON entries.")
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrAdminConfigBadJSON, r.URL)
return
}
@ -696,7 +702,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
var config serverConfig
err = json.Unmarshal(configBytes, &config)
if err != nil {
errorIf(err, "Failed to unmarshal JSON configuration", err)
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
@ -718,7 +724,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
errs := writeTmpConfigPeers(globalAdminPeers, tmpFileName, configBytes)
// Check if the operation succeeded in quorum or more nodes.
rErr := reduceWriteQuorumErrs(errs, nil, len(globalAdminPeers)/2+1)
rErr := reduceWriteQuorumErrs(ctx, errs, nil, len(globalAdminPeers)/2+1)
if rErr != nil {
writeSetConfigResponse(w, globalAdminPeers, errs, false, r.URL)
return
@ -736,7 +742,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
// Rename the temporary config file to config.json
errs = commitConfigPeers(globalAdminPeers, tmpFileName)
rErr = reduceWriteQuorumErrs(errs, nil, len(globalAdminPeers)/2+1)
rErr = reduceWriteQuorumErrs(ctx, errs, nil, len(globalAdminPeers)/2+1)
if rErr != nil {
writeSetConfigResponse(w, globalAdminPeers, errs, false, r.URL)
return
@ -777,7 +783,7 @@ func (a adminAPIHandlers) UpdateCredentialsHandler(w http.ResponseWriter,
var req madmin.SetCredsReq
err := json.NewDecoder(r.Body).Decode(&req)
if err != nil {
errorIf(err, "Error parsing body JSON")
logger.LogIf(context.Background(), err)
writeErrorResponseJSON(w, ErrRequestBodyParse, r.URL)
return
}
@ -804,7 +810,9 @@ func (a adminAPIHandlers) UpdateCredentialsHandler(w http.ResponseWriter,
// Notify all other Minio peers to update credentials
updateErrs := updateCredsOnPeers(creds)
for peer, err := range updateErrs {
errorIf(err, "Unable to update credentials on peer %s.", peer)
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", peer)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
}
// Update local credentials in memory.

@ -33,7 +33,6 @@ import (
router "github.com/gorilla/mux"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/madmin"
)
@ -264,7 +263,7 @@ func initTestXLObjLayer() (ObjectLayer, []string, error) {
return nil, nil, err
}
endpoints := mustGetNewEndpointList(xlDirs...)
format, err := waitForFormatXL(true, endpoints, 1, 16)
format, err := waitForFormatXL(context.Background(), true, endpoints, 1, 16)
if err != nil {
removeRoots(xlDirs)
return nil, nil, err
@ -762,13 +761,13 @@ func buildAdminRequest(queryVal url.Values, method, path string,
"/minio/admin/v1"+path+"?"+queryVal.Encode(),
contentLength, bodySeeker)
if err != nil {
return nil, errors.Trace(err)
return nil, err
}
cred := globalServerConfig.GetCredential()
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
return nil, errors.Trace(err)
return nil, err
}
return req, nil

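The test hunk above shows the other half of the commit message in action: errors.Trace wrappers are removed and errors are returned bare, with logging moved to the point of failure where it is still wanted. A sketch of the resulting convention, assuming nothing beyond what the surrounding hunks show (doWork and its error are hypothetical stand-ins):

package main

import (
    "context"
    "errors"

    "github.com/minio/minio/cmd/logger"
)

func doWork() error { return errors.New("rpc: service already defined") }

func register() error {
    if err := doWork(); err != nil {
        // Before: return errors.Trace(err)
        // After: log at the source, return the error unwrapped.
        logger.LogIf(context.Background(), err)
        return err
    }
    return nil
}

func main() { _ = register() }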
@ -222,7 +222,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
StartTime: h.startTime,
})
if err != nil {
errorIf(err, "Failed to marshal heal result into json.")
logger.LogIf(context.Background(), err)
return nil, ErrInternalError, ""
}
return b, ErrNone, ""
@ -270,7 +270,7 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
jbytes, err := json.Marshal(h.currentStatus)
if err != nil {
errorIf(err, "Failed to marshal heal result into json.")
logger.LogIf(context.Background(), err)
return nil, ErrInternalError
}
@ -321,7 +321,9 @@ type healSequence struct {
func newHealSequence(bucket, objPrefix, clientAddr string,
numDisks int, hs madmin.HealOpts, forceStart bool) *healSequence {
ctx := logger.SetContext(context.Background(), &logger.ReqInfo{clientAddr, "", "", "Heal", bucket, objPrefix, nil})
reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
reqInfo.AppendTags("prefix", objPrefix)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
return &healSequence{
bucket: bucket,

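The hunk above also replaces a positional logger.ReqInfo literal with named fields plus tags, so the literal stays valid if the struct grows. A sketch using the field names shown in the new code (the concrete values are hypothetical):

package main

import (
    "context"

    "github.com/minio/minio/cmd/logger"
)

func main() {
    // Named fields instead of a positional literal; free-form details
    // such as the object prefix go into tags.
    reqInfo := &logger.ReqInfo{
        RemoteHost: "198.51.100.7",
        API:        "Heal",
        BucketName: "testbucket",
    }
    reqInfo.AppendTags("prefix", "photos/2018/")
    ctx := logger.SetReqInfo(context.Background(), reqInfo)
    _ = ctx // handed to the heal sequence in the real code
}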
@ -29,7 +29,7 @@ import (
"time"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
const (
@ -201,7 +201,7 @@ func (rc remoteAdminClient) WriteTmpConfig(tmpFileName string, configBytes []byt
err := rc.Call(writeTmpConfigRPC, &wArgs, &WriteConfigReply{})
if err != nil {
errorIf(err, "Failed to write temporary config file.")
logger.LogIf(context.Background(), err)
return err
}
@ -215,7 +215,10 @@ func (lc localAdminClient) CommitConfig(tmpFileName string) error {
tmpConfigFile := filepath.Join(getConfigDir(), tmpFileName)
err := os.Rename(tmpConfigFile, configFile)
errorIf(err, fmt.Sprintf("Failed to rename %s to %s", tmpConfigFile, configFile))
reqInfo := (&logger.ReqInfo{}).AppendTags("tmpConfigFile", tmpConfigFile)
reqInfo.AppendTags("configFile", configFile)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return err
}
@ -228,7 +231,7 @@ func (rc remoteAdminClient) CommitConfig(tmpFileName string) error {
cReply := CommitConfigReply{}
err := rc.Call(commitConfigRPC, &cArgs, &cReply)
if err != nil {
errorIf(err, "Failed to rename config file.")
logger.LogIf(context.Background(), err)
return err
}
@ -436,7 +439,7 @@ func getPeerUptimes(peers adminPeers) (time.Duration, error) {
latestUptime := time.Duration(0)
for _, uptime := range uptimes {
if uptime.err != nil {
errorIf(uptime.err, "Unable to fetch uptime")
logger.LogIf(context.Background(), uptime.err)
continue
}
@ -489,15 +492,17 @@ func getPeerConfig(peers adminPeers) ([]byte, error) {
// Unmarshal the received config files.
err := json.Unmarshal(configBytes, &serverConfigs[i])
if err != nil {
errorIf(err, "Failed to unmarshal serverConfig from ", peers[i].addr)
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", peers[i].addr)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return nil, err
}
}
configJSON, err := getValidServerConfig(serverConfigs, errs)
if err != nil {
errorIf(err, "Unable to find a valid server config")
return nil, errors.Trace(err)
logger.LogIf(context.Background(), err)
return nil, err
}
// Return the config.json that was present quorum or more

@ -26,7 +26,7 @@ import (
"time"
router "github.com/gorilla/mux"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
const adminPath = "/admin"
@ -176,7 +176,9 @@ type WriteConfigReply struct {
func writeTmpConfigCommon(tmpFileName string, configBytes []byte) error {
tmpConfigFile := filepath.Join(getConfigDir(), tmpFileName)
err := ioutil.WriteFile(tmpConfigFile, configBytes, 0666)
errorIf(err, fmt.Sprintf("Failed to write to temporary config file %s", tmpConfigFile))
reqInfo := (&logger.ReqInfo{}).AppendTags("tmpConfigFile", tmpConfigFile)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return err
}
@ -209,7 +211,10 @@ func (s *adminCmd) CommitConfig(cArgs *CommitConfigArgs, cReply *CommitConfigRep
tmpConfigFile := filepath.Join(getConfigDir(), cArgs.FileName)
err := os.Rename(tmpConfigFile, configFile)
errorIf(err, fmt.Sprintf("Failed to rename %s to %s", tmpConfigFile, configFile))
reqInfo := (&logger.ReqInfo{}).AppendTags("tmpConfigFile", tmpConfigFile)
reqInfo.AppendTags("configFile", configFile)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return err
}
@ -220,7 +225,8 @@ func registerAdminRPCRouter(mux *router.Router) error {
adminRPCServer := newRPCServer()
err := adminRPCServer.RegisterName("Admin", adminRPCHandler)
if err != nil {
return errors.Trace(err)
logger.LogIf(context.Background(), err)
return err
}
adminRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
adminRouter.Path(adminPath).Handler(adminRPCServer)

@ -793,7 +793,7 @@ var errorCodeResponse = map[APIErrorCode]APIError{
HTTPStatusCode: http.StatusBadRequest,
},
// Generic Invalid-Request error. Should be used for response errors only for unlikely
// corner case errors for which introducing new APIErrorCode is not worth it. errorIf()
// corner case errors for which introducing new APIErrorCode is not worth it. LogIf()
// should be used to log the error at the source of the error for debugging purposes.
ErrInvalidRequest: {
Code: "InvalidRequest",

@ -20,6 +20,7 @@ import (
"net/http"
router "github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
)
// objectAPIHandler implements and provides http handlers for S3 API.
@ -35,7 +36,7 @@ func registerAPIRouter(mux *router.Router) {
if len(cacheConfig.Drives) > 0 {
// initialize the new disk cache objects.
globalCacheObjectAPI, err = newServerCacheObjects(cacheConfig)
fatalIf(err, "Unable to initialize disk caching")
logger.FatalIf(err, "Unable to initialize disk caching")
}
// Initialize API.

@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"encoding/base64"
"encoding/hex"
"errors"
@ -25,6 +26,7 @@ import (
"net/http"
"strings"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/handlers"
)
@ -114,12 +116,14 @@ func checkAdminRequestAuthType(r *http.Request, region string) APIErrorCode {
s3Err = isReqAuthenticated(r, region)
}
if s3Err != ErrNone {
errorIf(errors.New(getAPIError(s3Err).Description), "%s", dumpRequest(r))
reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description))
}
return s3Err
}
func checkRequestAuthType(r *http.Request, bucket, policyAction, region string) APIErrorCode {
func checkRequestAuthType(ctx context.Context, r *http.Request, bucket, policyAction, region string) APIErrorCode {
reqAuthType := getRequestAuthType(r)
switch reqAuthType {
@ -136,7 +140,7 @@ func checkRequestAuthType(r *http.Request, bucket, policyAction, region string)
if err != nil {
return ErrInternalError
}
return enforceBucketPolicy(bucket, policyAction, resource,
return enforceBucketPolicy(ctx, bucket, policyAction, resource,
r.Referer(), handlers.GetSourceIP(r), r.URL.Query())
}
@ -176,7 +180,7 @@ func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) {
payload, err := ioutil.ReadAll(r.Body)
if err != nil {
errorIf(err, "Unable to read request body for signature verification")
logger.LogIf(context.Background(), err)
return ErrInternalError
}

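checkRequestAuthType now takes a context.Context as its first parameter (as does enforceBucketPolicy further down), so the ReqInfo built by a handler is visible to every LogIf call beneath it. A minimal sketch of that call chain; checkAuth and its failure are hypothetical stand-ins, and only the logger calls mirror the diff:

package main

import (
    "context"
    "errors"

    "github.com/minio/minio/cmd/logger"
)

// checkAuth stands in for checkRequestAuthType: helpers receive ctx and
// log through it instead of formatting their own messages.
func checkAuth(ctx context.Context, action string) error {
    err := errors.New("signature does not match")
    logger.LogIf(ctx, err) // logged with the handler's ReqInfo attached
    return err
}

func main() {
    reqInfo := &logger.ReqInfo{API: "ListObjectsV2", BucketName: "testbucket"}
    ctx := logger.SetReqInfo(context.Background(), reqInfo)
    _ = checkAuth(ctx, "s3:ListBucket")
}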
@ -18,6 +18,7 @@ package cmd
import (
"bufio"
"context"
"crypto/tls"
"crypto/x509"
"errors"
@ -29,6 +30,8 @@ import (
"strings"
"sync"
"time"
"github.com/minio/minio/cmd/logger"
)
// Attempt to retry only this many number of times before
@ -264,7 +267,9 @@ func rpcDial(serverAddr, serviceEndpoint string, secureConn bool) (netRPCClient
// Print RPC connection errors that are worthy to display in log.
switch err.(type) {
case x509.HostnameError:
errorIf(err, "Unable to establish secure connection to %s", serverAddr)
reqInfo := (&logger.ReqInfo{}).AppendTags("serverAddr", serverAddr)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
}
return nil, &net.OpError{

@ -17,11 +17,13 @@
package cmd
import (
"context"
"fmt"
"path"
"sync"
"time"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
)
@ -62,7 +64,7 @@ func (br *browserPeerAPIHandlers) SetAuthPeer(args SetAuthPeerArgs, reply *AuthR
// Save the current creds when failed to update.
globalServerConfig.SetCredential(prevCred)
errorIf(err, "Unable to update the config with new credentials sent from browser RPC.")
logger.LogIf(context.Background(), err)
return err
}

@ -17,9 +17,10 @@
package cmd
import (
router "github.com/gorilla/mux"
"context"
"github.com/minio/minio/pkg/errors"
router "github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
)
// Set up an RPC endpoint that receives browser related calls. The
@ -42,7 +43,8 @@ func registerBrowserPeerRPCRouter(mux *router.Router) error {
bpRPCServer := newRPCServer()
err := bpRPCServer.RegisterName("BrowserPeer", bpHandlers)
if err != nil {
return errors.Trace(err)
logger.LogIf(context.Background(), err)
return err
}
bpRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()

@ -64,7 +64,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
return
}
if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -134,7 +134,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
return
}
if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}

@ -33,6 +33,7 @@ import (
mux "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/hash"
@ -40,10 +41,10 @@ import (
// http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
// Enforces bucket policies for a bucket for a given action.
func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, queryParams url.Values) (s3Error APIErrorCode) {
func enforceBucketPolicy(ctx context.Context, bucket, action, resource, referer, sourceIP string, queryParams url.Values) (s3Error APIErrorCode) {
// Verify if bucket actually exists
objAPI := newObjectLayerFn()
if err := checkBucketExist(bucket, objAPI); err != nil {
if err := checkBucketExist(ctx, bucket, objAPI); err != nil {
err = errors.Cause(err)
switch err.(type) {
case BucketNameInvalid:
@ -53,13 +54,12 @@ func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, que
// For no bucket found we return NoSuchBucket instead.
return ErrNoSuchBucket
}
errorIf(err, "Unable to read bucket policy.")
// Return internal error for any other errors so that we can investigate.
return ErrInternalError
}
// Fetch bucket policy, if policy is not set return access denied.
p, err := objAPI.GetBucketPolicy(context.Background(), bucket)
p, err := objAPI.GetBucketPolicy(ctx, bucket)
if err != nil {
return ErrAccessDenied
}
@ -92,7 +92,10 @@ func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, que
// Check if the action is allowed on the bucket/prefix.
func isBucketActionAllowed(action, bucket, prefix string, objectAPI ObjectLayer) bool {
bp, err := objectAPI.GetBucketPolicy(context.Background(), bucket)
reqInfo := &logger.ReqInfo{BucketName: bucket}
reqInfo.AppendTags("prefix", prefix)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
bp, err := objectAPI.GetBucketPolicy(ctx, bucket)
if err != nil {
return false
}
@ -120,10 +123,10 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
return
}
s3Error := checkRequestAuthType(r, bucket, "s3:GetBucketLocation", globalMinioDefaultRegion)
s3Error := checkRequestAuthType(ctx, r, bucket, "s3:GetBucketLocation", globalMinioDefaultRegion)
if s3Error == ErrInvalidRegion {
// Clients like boto3 send getBucketLocation() call signed with region that is configured.
s3Error = checkRequestAuthType(r, "", "s3:GetBucketLocation", globalServerConfig.GetRegion())
s3Error = checkRequestAuthType(ctx, r, "", "s3:GetBucketLocation", globalServerConfig.GetRegion())
}
if s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
@ -179,7 +182,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
return
}
if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucketMultipartUploads", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListBucketMultipartUploads", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -228,10 +231,10 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
listBuckets = api.CacheAPI().ListBuckets
}
// ListBuckets does not have any bucket action.
s3Error := checkRequestAuthType(r, "", "", globalMinioDefaultRegion)
s3Error := checkRequestAuthType(ctx, r, "", "", globalMinioDefaultRegion)
if s3Error == ErrInvalidRegion {
// Clients like boto3 send listBuckets() call signed with region that is configured.
s3Error = checkRequestAuthType(r, "", "", globalServerConfig.GetRegion())
s3Error = checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion())
}
if s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
@ -266,7 +269,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
var authError APIErrorCode
if authError = checkRequestAuthType(r, bucket, "s3:DeleteObject", globalServerConfig.GetRegion()); authError != ErrNone {
if authError = checkRequestAuthType(ctx, r, bucket, "s3:DeleteObject", globalServerConfig.GetRegion()); authError != ErrNone {
// In the event access is denied, a 200 response should still be returned
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if authError != ErrAccessDenied {
@ -294,7 +297,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Read incoming body XML bytes.
if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
errorIf(err, "Unable to read HTTP body.")
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
@ -302,7 +305,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Unmarshal list of keys to be deleted.
deleteObjects := &DeleteObjectsRequest{}
if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
errorIf(err, "Unable to unmarshal delete objects request XML.")
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedXML, r.URL)
return
}
@ -411,7 +414,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
}
// PutBucket does not have any bucket action.
s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion())
s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion())
if s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
@ -490,7 +493,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// be loaded in memory, the remaining being put in temporary files.
reader, err := r.MultipartReader()
if err != nil {
errorIf(err, "Unable to initialize multipart reader.")
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
return
}
@ -498,7 +501,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Read multipart data and save in memory and in the disk if needed
form, err := reader.ReadForm(maxFormMemory)
if err != nil {
errorIf(err, "Unable to initialize multipart reader.")
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
return
}
@ -507,9 +510,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
defer form.RemoveAll()
// Extract all form fields
fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(form)
fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(ctx, form)
if err != nil {
errorIf(err, "Unable to parse form values.")
logger.LogIf(ctx, err)
writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
return
}
@ -584,16 +587,15 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
// Extract metadata to be saved from received Form.
metadata, err := extractMetadataFromHeader(formValues)
metadata, err := extractMetadataFromHeader(ctx, formValues)
if err != nil {
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
hashReader, err := hash.NewReader(fileBody, fileSize, "", "")
if err != nil {
errorIf(err, "Unable to initialize hashReader.")
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
@ -690,7 +692,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
return
}
if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponseHeadersOnly(w, s3Error)
return
}
@ -717,7 +719,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
}
// DeleteBucket does not have any bucket action.
if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -736,12 +738,14 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
// Notify all peers (including self) to update in-memory state
for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) {
errorIf(err, "unable to update policy change in remote peer %v", addr)
logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name)
logger.LogIf(ctx, err)
}
globalNotificationSys.RemoveNotification(bucket)
for addr, err := range globalNotificationSys.DeleteBucket(bucket) {
errorIf(err, "unable to delete bucket in remote peer %v", addr)
logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name)
logger.LogIf(ctx, err)
}
// Write success response.

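In loops over per-peer results, the new code mutates the ReqInfo already attached to the context via logger.GetReqInfo instead of building a fresh one per iteration. A sketch of that loop shape; the peer map is a hypothetical stand-in for the notification-system calls in the hunks above:

package main

import (
    "context"
    "errors"

    "github.com/minio/minio/cmd/logger"
)

func main() {
    ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{API: "DeleteBucket"})

    // Hypothetical per-peer results, e.g. from UpdateBucketPolicy.
    updateErrs := map[string]error{"peer-1:9000": errors.New("connection reset")}

    for addr, err := range updateErrs {
        // Tag the existing ReqInfo in place, then log with it.
        logger.GetReqInfo(ctx).AppendTags("remotePeer", addr)
        logger.LogIf(ctx, err)
    }
}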
@ -23,6 +23,7 @@ import (
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
xerrors "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/event/target"
@ -53,7 +54,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
writeErrorResponse(w, ErrNotImplemented, r.URL)
return
}
if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -63,17 +64,15 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
_, err := objAPI.GetBucketInfo(ctx, bucketName)
if err != nil {
errorIf(err, "Unable to find bucket info.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Attempt to successfully load notification config.
nConfig, err := readNotificationConfig(objAPI, bucketName)
nConfig, err := readNotificationConfig(ctx, objAPI, bucketName)
if err != nil {
// Ignore errNoSuchNotifications to comply with AWS S3.
if xerrors.Cause(err) != errNoSuchNotifications {
errorIf(err, "Unable to read notification configuration.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
@ -83,7 +82,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
notificationBytes, err := xml.Marshal(nConfig)
if err != nil {
errorIf(err, "Unable to marshal notification configuration into XML.", err)
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
@ -106,7 +105,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
writeErrorResponse(w, ErrNotImplemented, r.URL)
return
}
if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -146,7 +145,8 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
rulesMap := config.ToRulesMap()
globalNotificationSys.AddRulesMap(bucketName, rulesMap)
for addr, err := range globalNotificationSys.PutBucketNotification(bucketName, rulesMap) {
errorIf(err, "unable to put bucket notification to remote peer %v", addr)
logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name)
logger.LogIf(ctx, err)
}
writeSuccessResponseHeadersOnly(w)
@ -167,7 +167,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
writeErrorResponse(w, ErrNotImplemented, r.URL)
return
}
if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -217,7 +217,6 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
}
if _, err := objAPI.GetBucketInfo(ctx, bucketName); err != nil {
errorIf(err, "Unable to get bucket info.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
@ -227,7 +226,8 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
rulesMap := event.NewRulesMap(eventNames, pattern, target.ID())
if err := globalNotificationSys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
errorIf(err, "Unable to add httpclient target %v to globalNotificationSys.targetList.", target)
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
@ -236,20 +236,23 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
thisAddr := xnet.MustParseHost(GetLocalPeer(globalEndpoints))
if err := SaveListener(objAPI, bucketName, eventNames, pattern, target.ID(), *thisAddr); err != nil {
errorIf(err, "Unable to save HTTP listener %v", target)
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
errors := globalNotificationSys.ListenBucketNotification(bucketName, eventNames, pattern, target.ID(), *thisAddr)
for addr, err := range errors {
errorIf(err, "unable to call listen bucket notification to remote peer %v", addr)
logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name)
logger.LogIf(ctx, err)
}
<-target.DoneCh
if err := RemoveListener(objAPI, bucketName, target.ID(), *thisAddr); err != nil {
errorIf(err, "Unable to save HTTP listener %v", target)
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}

@ -28,6 +28,7 @@ import (
humanize "github.com/dustin/go-humanize"
mux "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/wildcard"
)
@ -228,7 +229,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
return
}
if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -260,7 +261,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
// bucket policies are limited to 20KB in size, using a limit reader.
policyBytes, err := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize))
if err != nil {
errorIf(err, "Unable to read from client.")
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
@ -288,7 +289,8 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
}
for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) {
errorIf(err, "unable to update policy change in remote peer %v", addr)
logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name)
logger.LogIf(ctx, err)
}
// Success.
@ -308,7 +310,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
return
}
if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -331,7 +333,8 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
}
for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) {
errorIf(err, "unable to update policy change in remote peer %v", addr)
logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name)
logger.LogIf(ctx, err)
}
// Success.
@ -351,7 +354,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
return
}
if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -375,7 +378,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
policyBytes, err := json.Marshal(&policy)
if err != nil {
errorIf(err, "Unable to marshal bucket policy.")
logger.LogIf(ctx, err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}

@ -25,6 +25,7 @@ import (
"sync"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -119,12 +120,12 @@ func readBucketPolicyJSON(bucket string, objAPI ObjectLayer) (bucketPolicyReader
policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig)
var buffer bytes.Buffer
err = objAPI.GetObject(context.Background(), minioMetaBucket, policyPath, 0, -1, &buffer, "")
ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{BucketName: bucket})
err = objAPI.GetObject(ctx, minioMetaBucket, policyPath, 0, -1, &buffer, "")
if err != nil {
if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
return nil, PolicyNotFound{Bucket: bucket}
}
errorIf(err, "Unable to load policy for the bucket %s.", bucket)
return nil, errors.Cause(err)
}
@ -151,9 +152,9 @@ func ReadBucketPolicy(bucket string, objAPI ObjectLayer) (policy.BucketAccessPol
// removeBucketPolicy - removes any previously written bucket policy. Returns BucketPolicyNotFound
// if no policies are found.
func removeBucketPolicy(bucket string, objAPI ObjectLayer) error {
func removeBucketPolicy(ctx context.Context, bucket string, objAPI ObjectLayer) error {
policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig)
err := objAPI.DeleteObject(context.Background(), minioMetaBucket, policyPath)
err := objAPI.DeleteObject(ctx, minioMetaBucket, policyPath)
if err != nil {
err = errors.Cause(err)
if _, ok := err.(ObjectNotFound); ok {
@ -165,21 +166,21 @@ func removeBucketPolicy(bucket string, objAPI ObjectLayer) error {
}
// writeBucketPolicy - save a bucket policy that is assumed to be validated.
func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy policy.BucketAccessPolicy) error {
func writeBucketPolicy(ctx context.Context, bucket string, objAPI ObjectLayer, bpy policy.BucketAccessPolicy) error {
buf, err := json.Marshal(bpy)
if err != nil {
errorIf(err, "Unable to marshal bucket policy '%#v' to JSON", bpy)
logger.LogIf(ctx, err)
return err
}
policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig)
hashReader, err := hash.NewReader(bytes.NewReader(buf), int64(len(buf)), "", getSHA256Hash(buf))
if err != nil {
errorIf(err, "Unable to set policy for the bucket %s", bucket)
logger.LogIf(ctx, err)
return errors.Cause(err)
}
if _, err = objAPI.PutObject(context.Background(), minioMetaBucket, policyPath, hashReader, nil); err != nil {
errorIf(err, "Unable to set policy for the bucket %s", bucket)
if _, err = objAPI.PutObject(ctx, minioMetaBucket, policyPath, hashReader, nil); err != nil {
return errors.Cause(err)
}
return nil
@ -188,9 +189,9 @@ func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy policy.BucketAcces
// persistAndNotifyBucketPolicyChange - takes a policyChange argument,
// persists it to storage, and notify nodes in the cluster about the
// change. In-memory state is updated in response to the notification.
func persistAndNotifyBucketPolicyChange(bucket string, isRemove bool, bktPolicy policy.BucketAccessPolicy, objAPI ObjectLayer) error {
func persistAndNotifyBucketPolicyChange(ctx context.Context, bucket string, isRemove bool, bktPolicy policy.BucketAccessPolicy, objAPI ObjectLayer) error {
if isRemove {
err := removeBucketPolicy(bucket, objAPI)
err := removeBucketPolicy(ctx, bucket, objAPI)
if err != nil {
return err
}
@ -198,7 +199,7 @@ func persistAndNotifyBucketPolicyChange(bucket string, isRemove bool, bktPolicy
if reflect.DeepEqual(bktPolicy, emptyBucketPolicy) {
return errInvalidArgument
}
if err := writeBucketPolicy(bucket, objAPI, bktPolicy); err != nil {
if err := writeBucketPolicy(ctx, bucket, objAPI, bktPolicy); err != nil {
return err
}
}

@ -25,6 +25,7 @@ import (
"time"
"github.com/minio/cli"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
)
@ -33,9 +34,9 @@ func checkUpdate(mode string) {
// It's OK to ignore any errors during doUpdate() here.
if updateMsg, _, currentReleaseTime, latestReleaseTime, err := getUpdateInfo(2*time.Second, mode); err == nil {
if globalInplaceUpdateDisabled {
log.Println(updateMsg)
logger.Println(updateMsg)
} else {
log.Println(prepareUpdateMessage("Run `minio update`", latestReleaseTime.Sub(currentReleaseTime)))
logger.Println(prepareUpdateMessage("Run `minio update`", latestReleaseTime.Sub(currentReleaseTime)))
}
}
}
@ -43,11 +44,11 @@ func checkUpdate(mode string) {
func initConfig() {
// Config file does not exist, we create it fresh and return upon success.
if isFile(getConfigFile()) {
fatalIf(migrateConfig(), "Config migration failed.")
fatalIf(loadConfig(), "Unable to load config version: '%s'.", serverConfigVersion)
logger.FatalIf(migrateConfig(), "Config migration failed.")
logger.FatalIf(loadConfig(), "Unable to load config version: '%s'.", serverConfigVersion)
} else {
fatalIf(newConfig(), "Unable to initialize minio config for the first time.")
log.Println("Created minio configuration file successfully at " + getConfigDir())
logger.FatalIf(newConfig(), "Unable to initialize minio config for the first time.")
logger.Println("Created minio configuration file successfully at " + getConfigDir())
}
}
@ -70,17 +71,17 @@ func handleCommonCmdArgs(ctx *cli.Context) {
// default config directory.
configDir = getConfigDir()
if configDir == "" {
fatalIf(errors.New("missing option"), "config-dir option must be provided.")
logger.FatalIf(errors.New("missing option"), "config-dir option must be provided.")
}
}
if configDir == "" {
fatalIf(errors.New("empty directory"), "Configuration directory cannot be empty.")
logger.FatalIf(errors.New("empty directory"), "Configuration directory cannot be empty.")
}
// Disallow relative paths, figure out absolute paths.
configDirAbs, err := filepath.Abs(configDir)
fatalIf(err, "Unable to fetch absolute path for config directory %s", configDir)
logger.FatalIf(err, "Unable to fetch absolute path for config directory %s", configDir)
setConfigDir(configDirAbs)
}
@ -94,7 +95,7 @@ func handleCommonEnvVars() {
secretKey := os.Getenv("MINIO_SECRET_KEY")
if accessKey != "" && secretKey != "" {
cred, err := auth.CreateCredentials(accessKey, secretKey)
fatalIf(err, "Invalid access/secret Key set in environment.")
logger.FatalIf(err, "Invalid access/secret Key set in environment.")
// credential Envs are set globally.
globalIsEnvCreds = true
@ -104,7 +105,7 @@ func handleCommonEnvVars() {
if browser := os.Getenv("MINIO_BROWSER"); browser != "" {
browserFlag, err := ParseBrowserFlag(browser)
if err != nil {
fatalIf(errors.New("invalid value"), "Unknown value %s in MINIO_BROWSER environment variable.", browser)
logger.FatalIf(errors.New("invalid value"), "Unknown value %s in MINIO_BROWSER environment variable.", browser)
}
// browser Envs are set globally, this does not represent
@ -117,7 +118,7 @@ func handleCommonEnvVars() {
if traceFile != "" {
var err error
globalHTTPTraceFile, err = os.OpenFile(traceFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0660)
fatalIf(err, "error opening file %s", traceFile)
logger.FatalIf(err, "error opening file %s", traceFile)
}
globalDomainName = os.Getenv("MINIO_DOMAIN")
@ -127,18 +128,18 @@ func handleCommonEnvVars() {
if drives := os.Getenv("MINIO_CACHE_DRIVES"); drives != "" {
driveList, err := parseCacheDrives(strings.Split(drives, cacheEnvDelimiter))
fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_DRIVES %s.", drives)
logger.FatalIf(err, "Invalid value set in environment variable MINIO_CACHE_DRIVES %s.", drives)
globalCacheDrives = driveList
globalIsDiskCacheEnabled = true
}
if excludes := os.Getenv("MINIO_CACHE_EXCLUDE"); excludes != "" {
excludeList, err := parseCacheExcludes(strings.Split(excludes, cacheEnvDelimiter))
fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXCLUDE %s.", excludes)
logger.FatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXCLUDE %s.", excludes)
globalCacheExcludes = excludeList
}
if expiryStr := os.Getenv("MINIO_CACHE_EXPIRY"); expiryStr != "" {
expiry, err := strconv.Atoi(expiryStr)
fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXPIRY %s.", expiryStr)
logger.FatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXPIRY %s.", expiryStr)
globalCacheExpiry = expiry
}
@ -154,25 +155,25 @@ func handleCommonEnvVars() {
// Check for environment variables and parse into storageClass struct
if ssc := os.Getenv(standardStorageClassEnv); ssc != "" {
globalStandardStorageClass, err = parseStorageClass(ssc)
fatalIf(err, "Invalid value set in environment variable %s.", standardStorageClassEnv)
logger.FatalIf(err, "Invalid value set in environment variable %s.", standardStorageClassEnv)
}
if rrsc := os.Getenv(reducedRedundancyStorageClassEnv); rrsc != "" {
globalRRStorageClass, err = parseStorageClass(rrsc)
fatalIf(err, "Invalid value set in environment variable %s.", reducedRedundancyStorageClassEnv)
logger.FatalIf(err, "Invalid value set in environment variable %s.", reducedRedundancyStorageClassEnv)
}
// Validation is done after parsing both the storage classes. This is needed because we need one
// storage class value to deduce the correct value of the other storage class.
if globalRRStorageClass.Scheme != "" {
err = validateParity(globalStandardStorageClass.Parity, globalRRStorageClass.Parity)
fatalIf(err, "Invalid value set in environment variable %s.", reducedRedundancyStorageClassEnv)
logger.FatalIf(err, "Invalid value set in environment variable %s.", reducedRedundancyStorageClassEnv)
globalIsStorageClass = true
}
if globalStandardStorageClass.Scheme != "" {
err = validateParity(globalStandardStorageClass.Parity, globalRRStorageClass.Parity)
fatalIf(err, "Invalid value set in environment variable %s.", standardStorageClassEnv)
logger.FatalIf(err, "Invalid value set in environment variable %s.", standardStorageClassEnv)
globalIsStorageClass = true
}
}

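Startup-time helpers move into the logger package as well: fatalIf becomes logger.FatalIf, and log.Println/log.Printf become logger.Println/logger.Printf in the config-migration hunks below. A sketch of the startup pattern, assuming the variadic printf-style signature the call sites show (loadConfig and the paths are hypothetical):

package main

import "github.com/minio/minio/cmd/logger"

func loadConfig() error { return nil }

func main() {
    // FatalIf logs the formatted message and exits when err != nil
    // (behavior assumed from its usage above).
    logger.FatalIf(loadConfig(), "Unable to load config version: '%s'.", "22")

    logger.Println("Created minio configuration file successfully at /tmp/minio")
}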
@ -21,6 +21,7 @@ import (
"os"
"path/filepath"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/event/target"
@ -194,7 +195,7 @@ func purgeV1() error {
}
os.RemoveAll(configFile)
log.Println("Removed unsupported config version 1.")
logger.Println("Removed unsupported config version 1.")
return nil
}
@ -252,7 +253,7 @@ func migrateV2ToV3() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv2.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv2.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv2.Version, srvConfig.Version)
return nil
}
@ -290,7 +291,7 @@ func migrateV3ToV4() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv3.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv3.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv3.Version, srvConfig.Version)
return nil
}
@ -331,7 +332,7 @@ func migrateV4ToV5() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv4.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv4.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv4.Version, srvConfig.Version)
return nil
}
@ -420,7 +421,7 @@ func migrateV5ToV6() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv5.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv5.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv5.Version, srvConfig.Version)
return nil
}
@ -476,7 +477,7 @@ func migrateV6ToV7() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv6.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv6.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv6.Version, srvConfig.Version)
return nil
}
@ -539,7 +540,7 @@ func migrateV7ToV8() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv7.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv7.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv7.Version, srvConfig.Version)
return nil
}
@ -609,7 +610,7 @@ func migrateV8ToV9() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv8.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv8.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv8.Version, srvConfig.Version)
return nil
}
@ -677,7 +678,7 @@ func migrateV9ToV10() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv9.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv9.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv9.Version, srvConfig.Version)
return nil
}
@ -748,7 +749,7 @@ func migrateV10ToV11() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv10.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv10.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv10.Version, srvConfig.Version)
return nil
}
@ -846,7 +847,7 @@ func migrateV11ToV12() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv11.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv11.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv11.Version, srvConfig.Version)
return nil
}
@ -926,7 +927,7 @@ func migrateV12ToV13() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv12.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv12.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv12.Version, srvConfig.Version)
return nil
}
@ -1011,7 +1012,7 @@ func migrateV13ToV14() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv13.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv13.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv13.Version, srvConfig.Version)
return nil
}
@ -1100,7 +1101,7 @@ func migrateV14ToV15() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv14.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv14.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv14.Version, srvConfig.Version)
return nil
}
@ -1190,7 +1191,7 @@ func migrateV15ToV16() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv15.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv15.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv15.Version, srvConfig.Version)
return nil
}
@ -1311,7 +1312,7 @@ func migrateV16ToV17() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv16.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv16.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv16.Version, srvConfig.Version)
return nil
}
@ -1415,7 +1416,7 @@ func migrateV17ToV18() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv17.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv17.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv17.Version, srvConfig.Version)
return nil
}
@ -1521,7 +1522,7 @@ func migrateV18ToV19() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv18.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv18.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv18.Version, srvConfig.Version)
return nil
}
@ -1626,7 +1627,7 @@ func migrateV19ToV20() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv19.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv19.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv19.Version, srvConfig.Version)
return nil
}
@ -1730,7 +1731,7 @@ func migrateV20ToV21() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv20.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv20.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv20.Version, srvConfig.Version)
return nil
}
@ -1834,7 +1835,7 @@ func migrateV21ToV22() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv21.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv21.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv21.Version, srvConfig.Version)
return nil
}
@ -1947,6 +1948,6 @@ func migrateV22ToV23() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv22.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv22.Version, srvConfig.Version)
logger.Printf(configMigrateMSGTemplate, configFile, cv22.Version, srvConfig.Version)
return nil
}

@ -28,6 +28,7 @@ import (
"sync"
"time"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/disk"
errors2 "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
@ -93,7 +94,7 @@ func newCacheFSObjects(dir string, expiry int, maxDiskUsagePct int) (*cacheFSObj
appendFileMap: make(map[string]*fsAppendFile),
}
go fsObjects.cleanupStaleMultipartUploads(globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
go fsObjects.cleanupStaleMultipartUploads(context.Background(), globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
cacheFS := cacheFSObjects{
FSObjects: fsObjects,
@ -116,7 +117,9 @@ func (cfs *cacheFSObjects) diskUsageLow() bool {
minUsage := cfs.maxDiskUsagePct * 80 / 100
di, err := disk.GetInfo(cfs.dir)
if err != nil {
errorIf(err, "Error getting disk information on %s", cfs.dir)
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", cfs.dir)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return false
}
usedPercent := (di.Total - di.Free) * 100 / di.Total
@ -128,7 +131,9 @@ func (cfs *cacheFSObjects) diskUsageLow() bool {
func (cfs *cacheFSObjects) diskUsageHigh() bool {
di, err := disk.GetInfo(cfs.dir)
if err != nil {
errorIf(err, "Error getting disk information on %s", cfs.dir)
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", cfs.dir)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return true
}
usedPercent := (di.Total - di.Free) * 100 / di.Total
@ -140,7 +145,9 @@ func (cfs *cacheFSObjects) diskUsageHigh() bool {
func (cfs *cacheFSObjects) diskAvailable(size int64) bool {
di, err := disk.GetInfo(cfs.dir)
if err != nil {
errorIf(err, "Error getting disk information on %s", cfs.dir)
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", cfs.dir)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return false
}
usedPercent := (di.Total - (di.Free - uint64(size))) * 100 / di.Total
@ -163,14 +170,15 @@ func (cfs *cacheFSObjects) purgeTrash() {
return
}
for _, entry := range entries {
fi, err := fsStatVolume(pathJoin(trashPath, entry))
ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{})
fi, err := fsStatVolume(ctx, pathJoin(trashPath, entry))
if err != nil {
continue
}
dir := path.Join(trashPath, fi.Name())
// Delete all expired cache content.
fsRemoveAll(dir)
fsRemoveAll(ctx, dir)
}
}
}
@ -193,7 +201,7 @@ func (cfs *cacheFSObjects) purge() {
deletedCount := 0
buckets, err := cfs.ListBuckets(ctx)
if err != nil {
errorIf(err, "Unable to list buckets.")
logger.LogIf(ctx, err)
}
// Reset cache online status if drive was offline earlier.
if !cfs.IsOnline() {
@ -221,7 +229,7 @@ func (cfs *cacheFSObjects) purge() {
continue
}
if err = cfs.DeleteObject(ctx, bucket.Name, object.Name); err != nil {
errorIf(err, "Unable to remove cache entry in dir %s/%s", bucket.Name, object.Name)
logger.LogIf(ctx, err)
continue
}
deletedCount++
@ -313,7 +321,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
var err error
// Validate if bucket name is valid and exists.
if _, err = fs.statBucketDir(bucket); err != nil {
if _, err = fs.statBucketDir(ctx, bucket); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket)
}
@ -325,31 +333,32 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
// and return success.
if isObjectDir(object, data.Size()) {
// Check if an object is present as one of the parent dir.
if fs.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errors2.Trace(errFileAccessDenied), bucket, object)
if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
}
if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
var fi os.FileInfo
if fi, err = fsStatDir(pathJoin(fs.fsPath, bucket, object)); err != nil {
if fi, err = fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object)); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
return fsMeta.ToObjectInfo(bucket, object, fi), nil
}
if err = checkPutObjectArgs(bucket, object, fs, data.Size()); err != nil {
if err = checkPutObjectArgs(ctx, bucket, object, fs, data.Size()); err != nil {
return ObjectInfo{}, err
}
// Check if an object is present as one of the parent dir.
if fs.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errors2.Trace(errFileAccessDenied), bucket, object)
if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
}
// Validate input data size and it can never be less than zero.
if data.Size() < 0 {
return ObjectInfo{}, errors2.Trace(errInvalidArgument)
logger.LogIf(ctx, errInvalidArgument)
return ObjectInfo{}, errInvalidArgument
}
var wlk *lock.LockedFile
@ -359,7 +368,8 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
wlk, err = fs.rwPool.Create(fsMetaPath)
if err != nil {
return ObjectInfo{}, toObjectErr(errors2.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close()
@ -367,7 +377,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
// Remove meta file when PutObject encounters any error
if retErr != nil {
tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)
fsRemoveMeta(bucketMetaDir, fsMetaPath, tmpDir)
fsRemoveMeta(ctx, bucketMetaDir, fsMetaPath, tmpDir)
}
}()
}
@ -385,10 +395,9 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
buf := make([]byte, int(bufSize))
fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj)
bytesWritten, err := fsCreateFile(fsTmpObjPath, data, buf, data.Size())
bytesWritten, err := fsCreateFile(ctx, fsTmpObjPath, data, buf, data.Size())
if err != nil {
fsRemoveFile(fsTmpObjPath)
errorIf(err, "Failed to create object %s/%s", bucket, object)
fsRemoveFile(ctx, fsTmpObjPath)
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
if fsMeta.Meta["etag"] == "" {
@ -397,18 +406,18 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
// Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header.
if bytesWritten < data.Size() {
fsRemoveFile(fsTmpObjPath)
return ObjectInfo{}, errors2.Trace(IncompleteBody{})
fsRemoveFile(ctx, fsTmpObjPath)
return ObjectInfo{}, IncompleteBody{}
}
// Delete the temporary object in the case of a
// failure. If PutObject succeeds, then there would be
// nothing to delete.
defer fsRemoveFile(fsTmpObjPath)
defer fsRemoveFile(ctx, fsTmpObjPath)
// Entire object was written to the temp location, now it's safe to rename it to the actual location.
fsNSObjPath := pathJoin(fs.fsPath, bucket, object)
if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil {
if err = fsRenameFile(ctx, fsTmpObjPath, fsNSObjPath); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
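One detail above deserves emphasis: when the reader delivers fewer bytes than the declared size, the temp file is removed and IncompleteBody is returned before any rename can happen. A self-contained sketch of the same short-read guard (copyExactly and its error message are illustrative, not the commit's API):

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

// copyExactly mimics the PutObject short-read guard: copy from the reader
// and fail when fewer than size bytes arrive.
func copyExactly(dst io.Writer, src io.Reader, size int64) error {
	n, err := io.Copy(dst, src)
	if err != nil {
		return err
	}
	if n < size {
		return errors.New("incomplete body")
	}
	return nil
}

func main() {
	var buf bytes.Buffer
	// Declared size 5, but only 3 bytes arrive.
	fmt.Println(copyExactly(&buf, bytes.NewReader([]byte("abc")), 5))
}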
@ -420,7 +429,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
}
// Stat the file to fetch timestamp, size.
fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object))
fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
@ -447,11 +456,11 @@ func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, objec
}
}
fs := cfs.FSObjects
if err := checkNewMultipartArgs(bucket, object, fs); err != nil {
if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil {
return "", toObjectErr(err, bucket)
}
if _, err := fs.statBucketDir(bucket); err != nil {
if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return "", toObjectErr(err, bucket)
}
@ -459,7 +468,8 @@ func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, objec
err := mkdirAll(uploadIDDir, 0755)
if err != nil {
return "", errors2.Trace(err)
logger.LogIf(ctx, err)
return "", err
}
// Initialize fs.json values.
@ -468,11 +478,13 @@ func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, objec
fsMetaBytes, err := json.Marshal(fsMeta)
if err != nil {
return "", errors2.Trace(err)
logger.LogIf(ctx, err)
return "", err
}
if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0644); err != nil {
return "", errors2.Trace(err)
logger.LogIf(ctx, err)
return "", err
}
return uploadID, nil
}
@ -485,7 +497,7 @@ func (cfs *cacheFSObjects) moveBucketToTrash(ctx context.Context, bucket string)
return err
}
defer bucketLock.Unlock()
bucketDir, err := fs.getBucketDir(bucket)
bucketDir, err := fs.getBucketDir(ctx, bucket)
if err != nil {
return toObjectErr(err, bucket)
}
@ -493,12 +505,13 @@ func (cfs *cacheFSObjects) moveBucketToTrash(ctx context.Context, bucket string)
expiredDir := path.Join(trashPath, bucket)
// Attempt to move regular bucket to expired directory.
if err = fsRenameDir(bucketDir, expiredDir); err != nil {
logger.LogIf(ctx, err)
return toObjectErr(err, bucket)
}
// Cleanup all the bucket metadata.
ominioMetadataBucketDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket)
nminioMetadataBucketDir := pathJoin(trashPath, MustGetUUID())
_ = fsRenameDir(ominioMetadataBucketDir, nminioMetadataBucketDir)
logger.LogIf(ctx, fsRenameDir(ominioMetadataBucketDir, nminioMetadataBucketDir))
return nil
}
@ -506,22 +519,22 @@ func (cfs *cacheFSObjects) moveBucketToTrash(ctx context.Context, bucket string)
// paths for windows automatically.
func fsRenameDir(dirPath, newPath string) (err error) {
if dirPath == "" || newPath == "" {
return errors2.Trace(errInvalidArgument)
return errInvalidArgument
}
if err = checkPathLength(dirPath); err != nil {
return errors2.Trace(err)
return err
}
if err = checkPathLength(newPath); err != nil {
return errors2.Trace(err)
return err
}
if err = os.Rename(dirPath, newPath); err != nil {
if os.IsNotExist(err) {
return errors2.Trace(errVolumeNotFound)
return errVolumeNotFound
} else if isSysErrNotEmpty(err) {
return errors2.Trace(errVolumeNotEmpty)
return errVolumeNotEmpty
}
return errors2.Trace(err)
return err
}
return nil
}
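fsRenameDir collapses platform-specific rename failures into the package's canonical volume errors, so callers such as moveBucketToTrash can branch on stable sentinels instead of inspecting syscall errors. A reduced sketch of that mapping for the not-exist case only (renameDir and its error value are stand-ins):

package main

import (
	"errors"
	"fmt"
	"os"
)

var errVolumeNotFound = errors.New("volume not found")

// renameDir mirrors the not-exist branch of fsRenameDir: a missing source
// directory surfaces as the canonical errVolumeNotFound.
func renameDir(dirPath, newPath string) error {
	if err := os.Rename(dirPath, newPath); err != nil {
		if os.IsNotExist(err) {
			return errVolumeNotFound
		}
		return err
	}
	return nil
}

func main() {
	fmt.Println(renameDir("/definitely/missing/dir", os.TempDir()+"/dst"))
}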

View file

@ -31,6 +31,7 @@ import (
"github.com/djherbis/atime"
"github.com/minio/minio/cmd/logger"
errors2 "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/wildcard"
@ -345,12 +346,12 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark
if err != nil {
return false
}
_, err = fs.getObjectInfo(bucket, object)
_, err = fs.getObjectInfo(ctx, bucket, object)
return err == nil
}
listDir := listDirCacheFactory(isLeaf, cacheTreeWalkIgnoredErrs, c.cache.cfs)
walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
}
for i := 0; i < maxKeys; {
@ -383,7 +384,7 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark
}
return result, toObjectErr(err, bucket, prefix)
}
objInfo, err = fs.getObjectInfo(bucket, entry)
objInfo, err = fs.getObjectInfo(ctx, bucket, entry)
if err != nil {
// Ignore errFileNotFound
if errors2.Cause(err) == errFileNotFound {
@ -754,7 +755,8 @@ func (c cacheObjects) StorageInfo(ctx context.Context) (storageInfo StorageInfo)
continue
}
info, err := getDiskInfo((cfs.fsPath))
errorIf(err, "Unable to get disk info %#v", cfs.fsPath)
logger.GetReqInfo(ctx).AppendTags("cachePath", cfs.fsPath)
logger.LogIf(ctx, err)
total += info.Total
free += info.Free
}
@ -791,7 +793,8 @@ func (c cacheObjects) DeleteBucket(ctx context.Context, bucket string) (err erro
// or the global env overrides.
func newCache(config CacheConfig) (*diskCache, error) {
var cfsObjects []*cacheFSObjects
formats, err := loadAndValidateCacheFormat(config.Drives)
ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{})
formats, err := loadAndValidateCacheFormat(ctx, config.Drives)
if err != nil {
return nil, err
}

View file

@ -27,6 +27,7 @@ import (
"strings"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/mountinfo"
)
@ -399,7 +400,7 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
}
ipList, err := getHostIP4(host)
fatalIf(err, "unexpected error when resolving host '%s'", host)
logger.FatalIf(err, "unexpected error when resolving host '%s'", host)
// Filter ipList by IPs that start with '127.'.
loopBackIPs := ipList.FuncMatch(func(ip string, matchString string) bool {

View file

@ -17,18 +17,20 @@
package cmd
import (
"context"
"hash"
"io"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
// CreateFile creates a new bitrot encoded file spread over all available disks. CreateFile will create
// the file at the given volume and path. It will read from src until an io.EOF occurs. The given algorithm will
// be used to protect the erasure encoded file.
func (s *ErasureStorage) CreateFile(src io.Reader, volume, path string, buffer []byte, algorithm BitrotAlgorithm, writeQuorum int) (f ErasureFileInfo, err error) {
func (s *ErasureStorage) CreateFile(ctx context.Context, src io.Reader, volume, path string, buffer []byte, algorithm BitrotAlgorithm, writeQuorum int) (f ErasureFileInfo, err error) {
if !algorithm.Available() {
return f, errors.Trace(errBitrotHashAlgoInvalid)
logger.LogIf(ctx, errBitrotHashAlgoInvalid)
return f, errBitrotHashAlgoInvalid
}
f.Checksums = make([][]byte, len(s.disks))
hashers := make([]hash.Hash, len(s.disks))
@ -50,21 +52,22 @@ func (s *ErasureStorage) CreateFile(src io.Reader, volume, path string, buffer [
}
blocks = make([][]byte, len(s.disks)) // write empty block
} else if err == nil || (n > 0 && err == io.ErrUnexpectedEOF) {
blocks, err = s.ErasureEncode(buffer[:n])
blocks, err = s.ErasureEncode(ctx, buffer[:n])
if err != nil {
return f, err
}
} else {
return f, errors.Trace(err)
logger.LogIf(ctx, err)
return f, err
}
for i := range errChans { // spawn workers
go erasureAppendFile(s.disks[i], volume, path, hashers[i], blocks[i], errChans[i])
go erasureAppendFile(ctx, s.disks[i], volume, path, hashers[i], blocks[i], errChans[i])
}
for i := range errChans { // wait until all workers are finished
errs[i] = <-errChans[i]
}
if err = reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, writeQuorum); err != nil {
if err = reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum); err != nil {
return f, err
}
s.disks = evalDisks(s.disks, errs)
@ -83,9 +86,10 @@ func (s *ErasureStorage) CreateFile(src io.Reader, volume, path string, buffer [
// erasureAppendFile appends the content of buf to the file on the given disk and computes
// the hash of the written data. It sends the write error (or nil) over the error channel.
func erasureAppendFile(disk StorageAPI, volume, path string, hash hash.Hash, buf []byte, errChan chan<- error) {
func erasureAppendFile(ctx context.Context, disk StorageAPI, volume, path string, hash hash.Hash, buf []byte, errChan chan<- error) {
if disk == OfflineDisk {
errChan <- errors.Trace(errDiskNotFound)
logger.LogIf(ctx, errDiskNotFound)
errChan <- errDiskNotFound
return
}
err := disk.AppendFile(volume, path, buf)
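CreateFile fans out one goroutine per disk through errChans, waits for every result, and only then reduces the collected errors against the write quorum, so a few failed disks do not fail the write. A self-contained sketch of that fan-out/fan-in shape (reduceQuorumErrs is a simplified stand-in for reduceWriteQuorumErrs):

package main

import (
	"errors"
	"fmt"
)

// reduceQuorumErrs succeeds when at least quorum workers reported nil.
func reduceQuorumErrs(errs []error, quorum int) error {
	nilCount := 0
	for _, err := range errs {
		if err == nil {
			nilCount++
		}
	}
	if nilCount < quorum {
		return errors.New("write quorum not met")
	}
	return nil
}

func main() {
	write := func(i int) error {
		if i == 2 {
			return errors.New("disk offline") // simulate one failed disk
		}
		return nil
	}
	errChans := make([]chan error, 4)
	for i := range errChans { // spawn workers
		errChans[i] = make(chan error, 1)
		go func(i int) { errChans[i] <- write(i) }(i)
	}
	errs := make([]error, len(errChans))
	for i := range errChans { // wait until all workers are finished
		errs[i] = <-errChans[i]
	}
	fmt.Println(reduceQuorumErrs(errs, 3)) // 3 of 4 succeeded -> <nil>
}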

View file

@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"crypto/rand"
"io"
"testing"
@ -70,7 +71,7 @@ func TestErasureCreateFile(t *testing.T) {
if err != nil {
t.Fatalf("Test %d: failed to create test setup: %v", i, err)
}
storage, err := NewErasureStorage(setup.disks, test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
storage, err := NewErasureStorage(context.Background(), setup.disks, test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil {
setup.Remove()
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
@ -82,7 +83,7 @@ func TestErasureCreateFile(t *testing.T) {
setup.Remove()
t.Fatalf("Test %d: failed to generate random test data: %v", i, err)
}
file, err := storage.CreateFile(bytes.NewReader(data[test.offset:]), "testbucket", "object", buffer, test.algorithm, test.dataBlocks+1)
file, err := storage.CreateFile(context.Background(), bytes.NewReader(data[test.offset:]), "testbucket", "object", buffer, test.algorithm, test.dataBlocks+1)
if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
}
@ -100,7 +101,7 @@ func TestErasureCreateFile(t *testing.T) {
if test.offDisks > 0 {
storage.disks[0] = OfflineDisk
}
file, err = storage.CreateFile(bytes.NewReader(data[test.offset:]), "testbucket", "object2", buffer, test.algorithm, test.dataBlocks+1)
file, err = storage.CreateFile(context.Background(), bytes.NewReader(data[test.offset:]), "testbucket", "object2", buffer, test.algorithm, test.dataBlocks+1)
if err != nil && !test.shouldFailQuorum {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
}
@ -125,7 +126,7 @@ func benchmarkErasureWrite(data, parity, dataDown, parityDown int, size int64, b
b.Fatalf("failed to create test setup: %v", err)
}
defer setup.Remove()
storage, err := NewErasureStorage(setup.disks, data, parity, blockSizeV1)
storage, err := NewErasureStorage(context.Background(), setup.disks, data, parity, blockSizeV1)
if err != nil {
b.Fatalf("failed to create ErasureStorage: %v", err)
}
@ -143,7 +144,7 @@ func benchmarkErasureWrite(data, parity, dataDown, parityDown int, size int64, b
b.SetBytes(size)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
_, err := storage.CreateFile(bytes.NewReader(content), "testbucket", "object", buffer, DefaultBitrotAlgorithm, data+1)
_, err := storage.CreateFile(context.Background(), bytes.NewReader(content), "testbucket", "object", buffer, DefaultBitrotAlgorithm, data+1)
if err != nil {
panic(err)
}

View file

@ -17,11 +17,12 @@
package cmd
import (
"context"
"fmt"
"hash"
"strings"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
// HealFile tries to reconstruct an erasure-coded file spread over all
@ -45,12 +46,13 @@ import (
//
// It returns bitrot checksums for the non-nil staleDisks on which
// healing succeeded.
func (s ErasureStorage) HealFile(staleDisks []StorageAPI, volume, path string, blocksize int64,
func (s ErasureStorage) HealFile(ctx context.Context, staleDisks []StorageAPI, volume, path string, blocksize int64,
dstVol, dstPath string, size int64, alg BitrotAlgorithm, checksums [][]byte) (
f ErasureFileInfo, err error) {
if !alg.Available() {
return f, errors.Trace(errBitrotHashAlgoInvalid)
logger.LogIf(ctx, errBitrotHashAlgoInvalid)
return f, errBitrotHashAlgoInvalid
}
// Initialization
@ -84,7 +86,7 @@ func (s ErasureStorage) HealFile(staleDisks []StorageAPI, volume, path string, b
}
readLen += lastChunkSize
var buffers [][]byte
buffers, _, err = s.readConcurrent(volume, path, 0, readLen, verifiers)
buffers, _, err = s.readConcurrent(ctx, volume, path, 0, readLen, verifiers)
if err != nil {
return f, err
}
@ -131,7 +133,7 @@ func (s ErasureStorage) HealFile(staleDisks []StorageAPI, volume, path string, b
}
buffOffset += csize
if err = s.ErasureDecodeDataAndParityBlocks(blocks); err != nil {
if err = s.ErasureDecodeDataAndParityBlocks(ctx, blocks); err != nil {
return f, err
}
@ -155,7 +157,9 @@ func (s ErasureStorage) HealFile(staleDisks []StorageAPI, volume, path string, b
// If all disks had write errors we quit.
if !writeSucceeded {
// build error from all write errors
return f, errors.Trace(joinWriteErrors(writeErrors))
err := joinWriteErrors(writeErrors)
logger.LogIf(ctx, err)
return f, err
}
}

View file

@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"crypto/rand"
"io"
"reflect"
@ -74,7 +75,7 @@ func TestErasureHealFile(t *testing.T) {
if err != nil {
t.Fatalf("Test %d: failed to setup XL environment: %v", i, err)
}
storage, err := NewErasureStorage(setup.disks, test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
storage, err := NewErasureStorage(context.Background(), setup.disks, test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
if err != nil {
setup.Remove()
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
@ -89,7 +90,7 @@ func TestErasureHealFile(t *testing.T) {
algorithm = DefaultBitrotAlgorithm
}
buffer := make([]byte, test.blocksize, 2*test.blocksize)
file, err := storage.CreateFile(bytes.NewReader(data), "testbucket", "testobject", buffer, algorithm, test.dataBlocks+1)
file, err := storage.CreateFile(context.Background(), bytes.NewReader(data), "testbucket", "testobject", buffer, algorithm, test.dataBlocks+1)
if err != nil {
setup.Remove()
t.Fatalf("Test %d: failed to create random test data: %v", i, err)
@ -113,7 +114,7 @@ func TestErasureHealFile(t *testing.T) {
}
// test case setup is complete - now call HealFile()
info, err := storage.HealFile(staleDisks, "testbucket", "testobject", test.blocksize, "testbucket", "healedobject", test.size, test.algorithm, file.Checksums)
info, err := storage.HealFile(context.Background(), staleDisks, "testbucket", "testobject", test.blocksize, "testbucket", "healedobject", test.size, test.algorithm, file.Checksums)
if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but it failed with: %v", i, err)
}

View file

@ -17,9 +17,10 @@
package cmd
import (
"context"
"io"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
type errIdx struct {
@ -27,7 +28,7 @@ type errIdx struct {
err error
}
func (s ErasureStorage) readConcurrent(volume, path string, offset, length int64,
func (s ErasureStorage) readConcurrent(ctx context.Context, volume, path string, offset, length int64,
verifiers []*BitrotVerifier) (buffers [][]byte, needsReconstruction bool,
err error) {
@ -39,7 +40,8 @@ func (s ErasureStorage) readConcurrent(volume, path string, offset, length int64
stageBuffers[i] = make([]byte, length)
disk := s.disks[i]
if disk == OfflineDisk {
errChan <- errIdx{i, errors.Trace(errDiskNotFound)}
logger.LogIf(ctx, errDiskNotFound)
errChan <- errIdx{i, errDiskNotFound}
return
}
_, rerr := disk.ReadFile(volume, path, offset, stageBuffers[i], verifiers[i])
@ -75,7 +77,8 @@ func (s ErasureStorage) readConcurrent(volume, path string, offset, length int64
}
if successCount != s.dataBlocks {
// Not enough disks returned data.
err = errors.Trace(errXLReadQuorum)
err = errXLReadQuorum
logger.LogIf(ctx, err)
}
return
}
@ -86,18 +89,21 @@ func (s ErasureStorage) readConcurrent(volume, path string, offset, length int64
// integrity of the given file. ReadFile will read data from the given
// offset up to the given length. If parts of the file are corrupted
// ReadFile tries to reconstruct the data.
func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset,
func (s ErasureStorage) ReadFile(ctx context.Context, writer io.Writer, volume, path string, offset,
length, totalLength int64, checksums [][]byte, algorithm BitrotAlgorithm,
blocksize int64) (f ErasureFileInfo, err error) {
if offset < 0 || length < 0 {
return f, errors.Trace(errUnexpected)
logger.LogIf(ctx, errUnexpected)
return f, errUnexpected
}
if offset+length > totalLength {
return f, errors.Trace(errUnexpected)
logger.LogIf(ctx, errUnexpected)
return f, errUnexpected
}
if !algorithm.Available() {
return f, errors.Trace(errBitrotHashAlgoInvalid)
logger.LogIf(ctx, errBitrotHashAlgoInvalid)
return f, errBitrotHashAlgoInvalid
}
f.Checksums = make([][]byte, len(s.disks))
@ -145,7 +151,7 @@ func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset,
var buffers [][]byte
var needsReconstruction bool
buffers, needsReconstruction, err = s.readConcurrent(volume, path,
buffers, needsReconstruction, err = s.readConcurrent(ctx, volume, path,
partDataStartIndex, partDataLength, verifiers)
if err != nil {
// Could not read enough disks.
@ -194,7 +200,8 @@ func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset,
if needsReconstruction {
if err = s.ErasureDecodeDataBlocks(blocks); err != nil {
return f, errors.Trace(err)
logger.LogIf(ctx, err)
return f, err
}
}
@ -210,7 +217,7 @@ func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset,
writeLength = lastBlockLength - writeStart
}
}
n, err := writeDataBlocks(writer, blocks, s.dataBlocks, writeStart, writeLength)
n, err := writeDataBlocks(ctx, writer, blocks, s.dataBlocks, writeStart, writeLength)
if err != nil {
return f, err
}

View file

@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
crand "crypto/rand"
"io"
"math/rand"
@ -86,7 +87,7 @@ func TestErasureReadFile(t *testing.T) {
if err != nil {
t.Fatalf("Test %d: failed to create test setup: %v", i, err)
}
storage, err := NewErasureStorage(setup.disks, test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
storage, err := NewErasureStorage(context.Background(), setup.disks, test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil {
setup.Remove()
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
@ -102,13 +103,13 @@ func TestErasureReadFile(t *testing.T) {
writeAlgorithm = DefaultBitrotAlgorithm
}
buffer := make([]byte, test.blocksize, 2*test.blocksize)
file, err := storage.CreateFile(bytes.NewReader(data[:]), "testbucket", "object", buffer, writeAlgorithm, test.dataBlocks+1)
file, err := storage.CreateFile(context.Background(), bytes.NewReader(data[:]), "testbucket", "object", buffer, writeAlgorithm, test.dataBlocks+1)
if err != nil {
setup.Remove()
t.Fatalf("Test %d: failed to create erasure test file: %v", i, err)
}
writer := bytes.NewBuffer(nil)
readInfo, err := storage.ReadFile(writer, "testbucket", "object", test.offset, test.length, test.data, file.Checksums, test.algorithm, test.blocksize)
readInfo, err := storage.ReadFile(context.Background(), writer, "testbucket", "object", test.offset, test.length, test.data, file.Checksums, test.algorithm, test.blocksize)
if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
}
@ -134,7 +135,7 @@ func TestErasureReadFile(t *testing.T) {
if test.offDisks > 0 {
storage.disks[0] = OfflineDisk
}
readInfo, err = storage.ReadFile(writer, "testbucket", "object", test.offset, test.length, test.data, file.Checksums, test.algorithm, test.blocksize)
readInfo, err = storage.ReadFile(context.Background(), writer, "testbucket", "object", test.offset, test.length, test.data, file.Checksums, test.algorithm, test.blocksize)
if err != nil && !test.shouldFailQuorum {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
}
@ -174,7 +175,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
}
defer setup.Remove()
storage, err := NewErasureStorage(setup.disks, dataBlocks, parityBlocks, blockSize)
storage, err := NewErasureStorage(context.Background(), setup.disks, dataBlocks, parityBlocks, blockSize)
if err != nil {
t.Fatalf("failed to create ErasureStorage: %v", err)
}
@ -191,7 +192,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
// Create a test file to read from.
buffer := make([]byte, blockSize, 2*blockSize)
file, err := storage.CreateFile(bytes.NewReader(data), "testbucket", "testobject", buffer, DefaultBitrotAlgorithm, dataBlocks+1)
file, err := storage.CreateFile(context.Background(), bytes.NewReader(data), "testbucket", "testobject", buffer, DefaultBitrotAlgorithm, dataBlocks+1)
if err != nil {
t.Fatal(err)
}
@ -211,7 +212,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
expected := data[offset : offset+readLen]
_, err = storage.ReadFile(buf, "testbucket", "testobject", offset, readLen, length, file.Checksums, DefaultBitrotAlgorithm, blockSize)
_, err = storage.ReadFile(context.Background(), buf, "testbucket", "testobject", offset, readLen, length, file.Checksums, DefaultBitrotAlgorithm, blockSize)
if err != nil {
t.Fatal(err, offset, readLen)
}
@ -231,14 +232,14 @@ func benchmarkErasureRead(data, parity, dataDown, parityDown int, size int64, b
b.Fatalf("failed to create test setup: %v", err)
}
defer setup.Remove()
storage, err := NewErasureStorage(setup.disks, data, parity, blockSizeV1)
storage, err := NewErasureStorage(context.Background(), setup.disks, data, parity, blockSizeV1)
if err != nil {
b.Fatalf("failed to create ErasureStorage: %v", err)
}
content := make([]byte, size)
buffer := make([]byte, blockSizeV1, 2*blockSizeV1)
file, err := storage.CreateFile(bytes.NewReader(content), "testbucket", "object", buffer, DefaultBitrotAlgorithm, data+1)
file, err := storage.CreateFile(context.Background(), bytes.NewReader(content), "testbucket", "object", buffer, DefaultBitrotAlgorithm, data+1)
if err != nil {
b.Fatalf("failed to create erasure test file: %v", err)
}
@ -255,7 +256,7 @@ func benchmarkErasureRead(data, parity, dataDown, parityDown int, size int64, b
b.SetBytes(size)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
if file, err = storage.ReadFile(bytes.NewBuffer(content[:0]), "testbucket", "object", 0, size, size, checksums, DefaultBitrotAlgorithm, blockSizeV1); err != nil {
if file, err = storage.ReadFile(context.Background(), bytes.NewBuffer(content[:0]), "testbucket", "object", 0, size, size, checksums, DefaultBitrotAlgorithm, blockSizeV1); err != nil {
panic(err)
}
}

View file

@ -18,10 +18,11 @@ package cmd
import (
"bytes"
"context"
"io"
"github.com/klauspost/reedsolomon"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
// getDataBlockLen - get length of data blocks from encoded blocks.
@ -36,20 +37,23 @@ func getDataBlockLen(enBlocks [][]byte, dataBlocks int) int {
// Writes all the data blocks from encoded blocks up to the requested
// length. Provides a way to skip bytes until the given offset.
func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset int64, length int64) (int64, error) {
func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, dataBlocks int, offset int64, length int64) (int64, error) {
// Offset and out size cannot be negative.
if offset < 0 || length < 0 {
return 0, errors.Trace(errUnexpected)
logger.LogIf(ctx, errUnexpected)
return 0, errUnexpected
}
// Do we have enough blocks?
if len(enBlocks) < dataBlocks {
return 0, errors.Trace(reedsolomon.ErrTooFewShards)
logger.LogIf(ctx, reedsolomon.ErrTooFewShards)
return 0, reedsolomon.ErrTooFewShards
}
// Do we have enough data?
if int64(getDataBlockLen(enBlocks, dataBlocks)) < length {
return 0, errors.Trace(reedsolomon.ErrShortData)
logger.LogIf(ctx, reedsolomon.ErrShortData)
return 0, reedsolomon.ErrShortData
}
// Counter to decrement total left to write.
@ -77,7 +81,8 @@ func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset in
if write < int64(len(block)) {
n, err := io.Copy(dst, bytes.NewReader(block[:write]))
if err != nil {
return 0, errors.Trace(err)
logger.LogIf(ctx, err)
return 0, err
}
totalWritten += n
break
@ -85,7 +90,8 @@ func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset in
// Copy the block.
n, err := io.Copy(dst, bytes.NewReader(block))
if err != nil {
return 0, errors.Trace(err)
logger.LogIf(ctx, err)
return 0, err
}
// Decrement output size.
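The offset/length walk above is the subtle part of writeDataBlocks, so here is a runnable sketch of the same skip-and-trim logic over plain byte slices (writeBlocks is illustrative and omits the error paths shown in the diff):

package main

import (
	"bytes"
	"fmt"
)

// writeBlocks copies length bytes starting at offset from the concatenation
// of the data blocks into dst - the same walk writeDataBlocks performs.
func writeBlocks(dst *bytes.Buffer, blocks [][]byte, offset, length int64) int64 {
	var written int64
	for _, block := range blocks {
		// Skip whole blocks (and a partial prefix) until offset is consumed.
		if offset >= int64(len(block)) {
			offset -= int64(len(block))
			continue
		}
		block = block[offset:]
		offset = 0
		// Trim the final block to the remaining length.
		if remaining := length - written; remaining < int64(len(block)) {
			block = block[:remaining]
		}
		dst.Write(block)
		written += int64(len(block))
		if written == length {
			break
		}
	}
	return written
}

func main() {
	blocks := [][]byte{[]byte("abcd"), []byte("efgh"), []byte("ijkl")}
	var out bytes.Buffer
	n := writeBlocks(&out, blocks, 2, 6)
	fmt.Println(n, out.String()) // 6 cdefgh
}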

View file

@ -17,11 +17,12 @@
package cmd
import (
"context"
"crypto/subtle"
"hash"
"github.com/klauspost/reedsolomon"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
// OfflineDisk represents an unavailable disk.
@ -44,11 +45,12 @@ type ErasureStorage struct {
// NewErasureStorage creates a new ErasureStorage. The storage erasure codes and protects all data written to
// the disks.
func NewErasureStorage(disks []StorageAPI, dataBlocks, parityBlocks int, blockSize int64) (s ErasureStorage, err error) {
func NewErasureStorage(ctx context.Context, disks []StorageAPI, dataBlocks, parityBlocks int, blockSize int64) (s ErasureStorage, err error) {
shardsize := (int(blockSize) + dataBlocks - 1) / dataBlocks
erasure, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(shardsize))
if err != nil {
return s, errors.Tracef("failed to create erasure coding: %v", err)
logger.LogIf(ctx, err)
return s, err
}
s = ErasureStorage{
disks: make([]StorageAPI, len(disks)),
@ -62,13 +64,15 @@ func NewErasureStorage(disks []StorageAPI, dataBlocks, parityBlocks int, blockSi
// ErasureEncode encodes the given data and returns the erasure-coded data.
// It returns an error if the erasure coding failed.
func (s *ErasureStorage) ErasureEncode(data []byte) ([][]byte, error) {
func (s *ErasureStorage) ErasureEncode(ctx context.Context, data []byte) ([][]byte, error) {
encoded, err := s.erasure.Split(data)
if err != nil {
return nil, errors.Tracef("failed to split data: %v", err)
logger.LogIf(ctx, err)
return nil, err
}
if err = s.erasure.Encode(encoded); err != nil {
return nil, errors.Tracef("failed to encode data: %v", err)
logger.LogIf(ctx, err)
return nil, err
}
return encoded, nil
}
@ -78,16 +82,17 @@ func (s *ErasureStorage) ErasureEncode(data []byte) ([][]byte, error) {
// It returns an error if the decoding failed.
func (s *ErasureStorage) ErasureDecodeDataBlocks(data [][]byte) error {
if err := s.erasure.ReconstructData(data); err != nil {
return errors.Tracef("failed to reconstruct data: %v", err)
return err
}
return nil
}
// ErasureDecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it.
// It returns an error if the decoding failed.
func (s *ErasureStorage) ErasureDecodeDataAndParityBlocks(data [][]byte) error {
func (s *ErasureStorage) ErasureDecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error {
if err := s.erasure.Reconstruct(data); err != nil {
return errors.Tracef("failed to reconstruct data: %v", err)
logger.LogIf(ctx, err)
return err
}
return nil
}
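ErasureStorage is a thin wrapper over klauspost/reedsolomon: Split and Encode produce the shards, Reconstruct repairs missing ones. A standalone round trip against that library directly, assuming 4 data and 2 parity shards (the shard counts and payload are illustrative):

package main

import (
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// 4 data + 2 parity shards, mirroring how ErasureStorage wraps the codec.
	enc, err := reedsolomon.New(4, 2)
	if err != nil {
		panic(err)
	}
	shards, err := enc.Split([]byte("some object data to protect!"))
	if err != nil {
		panic(err)
	}
	if err = enc.Encode(shards); err != nil {
		panic(err)
	}
	shards[0], shards[5] = nil, nil // lose one data and one parity shard
	if err = enc.Reconstruct(shards); err != nil {
		panic(err)
	}
	ok, err := enc.Verify(shards)
	fmt.Println(ok, err) // true <nil>
}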

View file

@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"crypto/rand"
"io"
"os"
@ -52,11 +53,11 @@ func TestErasureDecode(t *testing.T) {
copy(buffer, data)
disks := make([]StorageAPI, test.dataBlocks+test.parityBlocks)
storage, err := NewErasureStorage(disks, test.dataBlocks, test.parityBlocks, blockSizeV1)
storage, err := NewErasureStorage(context.Background(), disks, test.dataBlocks, test.parityBlocks, blockSizeV1)
if err != nil {
t.Fatalf("Test %d: failed to create erasure storage: %v", i, err)
}
encoded, err := storage.ErasureEncode(buffer)
encoded, err := storage.ErasureEncode(context.Background(), buffer)
if err != nil {
t.Fatalf("Test %d: failed to encode data: %v", i, err)
}
@ -69,7 +70,7 @@ func TestErasureDecode(t *testing.T) {
}
if test.reconstructParity {
err = storage.ErasureDecodeDataAndParityBlocks(encoded)
err = storage.ErasureDecodeDataAndParityBlocks(context.Background(), encoded)
} else {
err = storage.ErasureDecodeDataBlocks(encoded)
}
@ -98,7 +99,7 @@ func TestErasureDecode(t *testing.T) {
}
decodedData := new(bytes.Buffer)
if _, err = writeDataBlocks(decodedData, decoded, test.dataBlocks, 0, int64(len(data))); err != nil {
if _, err = writeDataBlocks(context.Background(), decodedData, decoded, test.dataBlocks, 0, int64(len(data))); err != nil {
t.Errorf("Test %d: failed to write data blocks: %v", i, err)
}
if !bytes.Equal(decodedData.Bytes(), data) {

View file

@ -17,13 +17,14 @@
package cmd
import (
"context"
"errors"
"fmt"
"io"
"os"
"reflect"
errors2 "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
const (
@ -100,14 +101,14 @@ func createFormatCache(fsFormatPath string, format *formatCacheV1) error {
// open file using READ & WRITE permission
var file, err = os.OpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return errors2.Trace(err)
return err
}
// Close the locked file upon return.
defer file.Close()
fi, err := file.Stat()
if err != nil {
return errors2.Trace(err)
return err
}
if fi.Size() != 0 {
// format.json already got created because of another minio process's createFormatCache()
@ -118,7 +119,7 @@ func createFormatCache(fsFormatPath string, format *formatCacheV1) error {
// This function creates a cache format file on disk and returns a slice
// of format cache configs.
func initFormatCache(drives []string) (formats []*formatCacheV1, err error) {
func initFormatCache(ctx context.Context, drives []string) (formats []*formatCacheV1, err error) {
nformats := newFormatCacheV1(drives)
for _, drive := range drives {
_, err = os.Stat(drive)
@ -126,28 +127,36 @@ func initFormatCache(drives []string) (formats []*formatCacheV1, err error) {
continue
}
if !os.IsNotExist(err) {
logger.GetReqInfo(ctx).AppendTags("drive", drive)
logger.LogIf(ctx, err)
return nil, err
}
if err = os.Mkdir(drive, 0777); err != nil {
logger.GetReqInfo(ctx).AppendTags("drive", drive)
logger.LogIf(ctx, err)
return nil, err
}
}
for i, drive := range drives {
if err = os.Mkdir(pathJoin(drive, minioMetaBucket), 0777); err != nil {
if !os.IsExist(err) {
logger.GetReqInfo(ctx).AppendTags("drive", drive)
logger.LogIf(ctx, err)
return nil, err
}
}
cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile)
// Fresh disk - create format.json for this cfs
if err = createFormatCache(cacheFormatPath, nformats[i]); err != nil {
logger.GetReqInfo(ctx).AppendTags("drive", drive)
logger.LogIf(ctx, err)
return nil, err
}
}
return nformats, nil
}
func loadFormatCache(drives []string) ([]*formatCacheV1, error) {
func loadFormatCache(ctx context.Context, drives []string) ([]*formatCacheV1, error) {
formats := make([]*formatCacheV1, len(drives))
for i, drive := range drives {
cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile)
@ -156,6 +165,7 @@ func loadFormatCache(drives []string) ([]*formatCacheV1, error) {
if os.IsNotExist(err) {
continue
}
logger.LogIf(ctx, err)
return nil, err
}
defer f.Close()
@ -268,7 +278,7 @@ func findCacheDiskIndex(disk string, disks []string) int {
}
// validate whether cache drives order has changed
func validateCacheFormats(formats []*formatCacheV1) error {
func validateCacheFormats(ctx context.Context, formats []*formatCacheV1) error {
count := 0
for _, format := range formats {
if format == nil {
@ -279,12 +289,16 @@ func validateCacheFormats(formats []*formatCacheV1) error {
return errors.New("Cache format files missing on all drives")
}
if _, err := checkFormatCacheValues(formats); err != nil {
logger.LogIf(ctx, err)
return err
}
if err := checkCacheDisksSliceConsistency(formats); err != nil {
logger.LogIf(ctx, err)
return err
}
return checkCacheDiskConsistency(formats)
err := checkCacheDiskConsistency(formats)
logger.LogIf(ctx, err)
return err
}
// return true if all of the list of cache drives are
@ -303,16 +317,16 @@ func cacheDrivesUnformatted(drives []string) bool {
// Create format.json for each cache drive if the disk is fresh, or load the format from disk.
// Then validate the formats for all drives in the cache to ensure the order
// of cache drives has not changed.
func loadAndValidateCacheFormat(drives []string) (formats []*formatCacheV1, err error) {
func loadAndValidateCacheFormat(ctx context.Context, drives []string) (formats []*formatCacheV1, err error) {
if cacheDrivesUnformatted(drives) {
formats, err = initFormatCache(drives)
formats, err = initFormatCache(ctx, drives)
} else {
formats, err = loadFormatCache(drives)
formats, err = loadFormatCache(ctx, drives)
}
if err != nil {
return nil, err
}
if err = validateCacheFormats(formats); err != nil {
if err = validateCacheFormats(ctx, formats); err != nil {
return nil, err
}
return formats, nil
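Putting the cache-format pieces together: all-unformatted drives go through initFormatCache, anything else through loadFormatCache, and both paths end in validateCacheFormats so a reordering of cache drives is caught at startup. A hypothetical bootstrap sketch (setupCacheFormats is illustrative, not part of the commit):

package cmd

import (
	"context"

	"github.com/minio/minio/cmd/logger"
)

// setupCacheFormats is a hypothetical caller: nil entries in the returned
// slice mark drives whose format.json was missing during load.
func setupCacheFormats(drives []string) ([]*formatCacheV1, error) {
	ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{})
	return loadAndValidateCacheFormat(ctx, drives)
}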

View file

@ -17,18 +17,20 @@
package cmd
import (
"context"
"os"
"testing"
)
// TestDiskCacheFormat - tests initFormatCache, formatMetaGetFormatBackendCache, formatCacheGetVersion.
func TestDiskCacheFormat(t *testing.T) {
ctx := context.Background()
fsDirs, err := getRandomDisks(1)
if err != nil {
t.Fatal(err)
}
_, err = initFormatCache(fsDirs)
_, err = initFormatCache(ctx, fsDirs)
if err != nil {
t.Fatal(err)
}
@ -56,7 +58,7 @@ func TestDiskCacheFormat(t *testing.T) {
t.Fatal(err)
}
if _, err = loadAndValidateCacheFormat(fsDirs); err == nil {
if _, err = loadAndValidateCacheFormat(context.Background(), fsDirs); err == nil {
t.Fatal("expected to fail")
}
@ -69,7 +71,7 @@ func TestDiskCacheFormat(t *testing.T) {
t.Fatal(err)
}
if _, err = loadAndValidateCacheFormat(fsDirs); err == nil {
if _, err = loadAndValidateCacheFormat(context.Background(), fsDirs); err == nil {
t.Fatal("expected to fail")
}
}
@ -307,7 +309,7 @@ func TestFormatCache(t *testing.T) {
}
for i, testCase := range testCases {
err := validateCacheFormats(testCase.formatConfigs)
err := validateCacheFormats(context.Background(), testCase.formatConfigs)
if err != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass but failed with %s", i+1, err)
}

View file

@ -17,13 +17,14 @@
package cmd
import (
"context"
"fmt"
"io"
"os"
"path"
"time"
errors2 "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/lock"
)
@ -97,7 +98,7 @@ func formatFSGetVersion(r io.ReadSeeker) (string, error) {
// Migrate from V1 to V2. V2 implements new backend format for multipart
// uploads. Delete the previous multipart directory.
func formatFSMigrateV1ToV2(wlk *lock.LockedFile, fsPath string) error {
func formatFSMigrateV1ToV2(ctx context.Context, wlk *lock.LockedFile, fsPath string) error {
version, err := formatFSGetVersion(wlk)
if err != nil {
return err
@ -107,11 +108,12 @@ func formatFSMigrateV1ToV2(wlk *lock.LockedFile, fsPath string) error {
return fmt.Errorf(`format.json version expected %s, found %s`, formatFSVersionV1, version)
}
if err = fsRemoveAll(path.Join(fsPath, minioMetaMultipartBucket)); err != nil {
if err = fsRemoveAll(ctx, path.Join(fsPath, minioMetaMultipartBucket)); err != nil {
return err
}
if err = os.MkdirAll(path.Join(fsPath, minioMetaMultipartBucket), 0755); err != nil {
logger.LogIf(ctx, err)
return err
}
@ -122,7 +124,7 @@ func formatFSMigrateV1ToV2(wlk *lock.LockedFile, fsPath string) error {
// Migration should happen when formatFSV1.FS.Version changes. This version
// can change when there is a change to the struct formatFSV1.FS or if there
// is any change in the backend file system tree structure.
func formatFSMigrate(wlk *lock.LockedFile, fsPath string) error {
func formatFSMigrate(ctx context.Context, wlk *lock.LockedFile, fsPath string) error {
// Add any migration code here in case we bump format.FS.Version
version, err := formatFSGetVersion(wlk)
if err != nil {
@ -131,7 +133,7 @@ func formatFSMigrate(wlk *lock.LockedFile, fsPath string) error {
switch version {
case formatFSVersionV1:
if err = formatFSMigrateV1ToV2(wlk, fsPath); err != nil {
if err = formatFSMigrateV1ToV2(ctx, wlk, fsPath); err != nil {
return err
}
fallthrough
@ -151,19 +153,21 @@ func formatFSMigrate(wlk *lock.LockedFile, fsPath string) error {
}
// Creates a new format.json if unformatted.
func createFormatFS(fsFormatPath string) error {
func createFormatFS(ctx context.Context, fsFormatPath string) error {
// Attempt a write lock on formatConfigFile `format.json`
// file stored in minioMetaBucket(.minio.sys) directory.
lk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return errors2.Trace(err)
logger.LogIf(ctx, err)
return err
}
// Close the locked file upon return.
defer lk.Close()
fi, err := lk.Stat()
if err != nil {
return errors2.Trace(err)
logger.LogIf(ctx, err)
return err
}
if fi.Size() != 0 {
// format.json already got created because of another minio process's createFormatFS()
@ -177,7 +181,7 @@ func createFormatFS(fsFormatPath string) error {
// The file descriptor should be kept open throughout the life
// of the process so that another minio process does not try to
// migrate the backend when we are actively working on the backend.
func initFormatFS(fsPath string) (rlk *lock.RLockedFile, err error) {
func initFormatFS(ctx context.Context, fsPath string) (rlk *lock.RLockedFile, err error) {
fsFormatPath := pathJoin(fsPath, minioMetaBucket, formatConfigFile)
// Any read on format.json should be done with read-lock.
// Any write on format.json should be done with write-lock.
@ -191,7 +195,8 @@ func initFormatFS(fsPath string) (rlk *lock.RLockedFile, err error) {
var fi os.FileInfo
fi, err = rlk.Stat()
if err != nil {
return nil, errors2.Trace(err)
logger.LogIf(ctx, err)
return nil, err
}
isEmpty = fi.Size() == 0
}
@ -200,7 +205,7 @@ func initFormatFS(fsPath string) (rlk *lock.RLockedFile, err error) {
rlk.Close()
}
// Fresh disk - create format.json
err = createFormatFS(fsFormatPath)
err = createFormatFS(ctx, fsFormatPath)
if err == lock.ErrAlreadyLocked {
// Lock already present, sleep and attempt again.
// Can happen in a rare situation when a parallel minio process
@ -209,19 +214,22 @@ func initFormatFS(fsPath string) (rlk *lock.RLockedFile, err error) {
continue
}
if err != nil {
return nil, errors2.Trace(err)
logger.LogIf(ctx, err)
return nil, err
}
// After successfully creating format.json try to hold a read-lock on
// the file.
continue
}
if err != nil {
return nil, errors2.Trace(err)
logger.LogIf(ctx, err)
return nil, err
}
formatBackend, err := formatMetaGetFormatBackendFS(rlk)
if err != nil {
return nil, errors2.Trace(err)
logger.LogIf(ctx, err)
return nil, err
}
if formatBackend != formatBackendFS {
return nil, fmt.Errorf(`%s file: expected format-type: %s, found: %s`, formatConfigFile, formatBackendFS, formatBackend)
@ -244,7 +252,7 @@ func initFormatFS(fsPath string) (rlk *lock.RLockedFile, err error) {
if err != nil {
return nil, err
}
err = formatFSMigrate(wlk, fsPath)
err = formatFSMigrate(ctx, wlk, fsPath)
wlk.Close()
if err != nil {
// Migration failed, bail out so that the user can observe what happened.
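initFormatFS deliberately loops: when createFormatFS returns lock.ErrAlreadyLocked, another minio process is mid-format, so the loop sleeps and retries instead of failing. A generic, runnable sketch of that retry shape (acquireWithRetry, the sentinel error, and the 100ms delay are all illustrative):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errAlreadyLocked = errors.New("already locked")

// acquireWithRetry sketches the initFormatFS retry loop: when another
// process holds the lock, sleep briefly and try again instead of failing.
func acquireWithRetry(try func() error, attempts int) error {
	for i := 0; i < attempts; i++ {
		err := try()
		if errors.Is(err, errAlreadyLocked) {
			time.Sleep(100 * time.Millisecond) // a parallel process won the race
			continue
		}
		return err
	}
	return errAlreadyLocked
}

func main() {
	calls := 0
	err := acquireWithRetry(func() error {
		calls++
		if calls < 3 {
			return errAlreadyLocked
		}
		return nil
	}, 10)
	fmt.Println(calls, err) // 3 <nil>
}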

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"os"
"path/filepath"
"testing"
@ -38,7 +39,7 @@ func TestFSFormatFS(t *testing.T) {
t.Fatal(err)
}
rlk, err := initFormatFS(disk)
rlk, err := initFormatFS(context.Background(), disk)
if err != nil {
t.Fatal(err)
}
@ -81,7 +82,7 @@ func TestFSFormatFS(t *testing.T) {
if _, err = formatFSGetVersion(rlk); err == nil {
t.Fatal("expected to fail")
}
if _, err = initFormatFS(disk); err == nil {
if _, err = initFormatFS(context.Background(), disk); err == nil {
t.Fatal("expected to fail")
}
@ -96,7 +97,7 @@ func TestFSFormatFS(t *testing.T) {
if _, err = formatMetaGetFormatBackendFS(f); err == nil {
t.Fatal("expected to fail")
}
if _, err = initFormatFS(disk); err == nil {
if _, err = initFormatFS(context.Background(), disk); err == nil {
t.Fatal("expected to fail")
}
}

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@ -520,7 +521,7 @@ func formatXLV3Check(reference *formatXLV3, format *formatXLV3) error {
}
// saveFormatXLAll - populates `format.json` on all disks, in order.
func saveFormatXLAll(storageDisks []StorageAPI, formats []*formatXLV3) error {
func saveFormatXLAll(ctx context.Context, storageDisks []StorageAPI, formats []*formatXLV3) error {
var errs = make([]error, len(storageDisks))
var wg = &sync.WaitGroup{}
@ -542,7 +543,7 @@ func saveFormatXLAll(storageDisks []StorageAPI, formats []*formatXLV3) error {
wg.Wait()
writeQuorum := len(storageDisks)/2 + 1
return reduceWriteQuorumErrs(errs, nil, writeQuorum)
return reduceWriteQuorumErrs(ctx, errs, nil, writeQuorum)
}
// relinquishes the underlying connection for all storage disks.
@ -614,7 +615,7 @@ func fixFormatXLV3(storageDisks []StorageAPI, endpoints EndpointList, formats []
}
// initFormatXL - save XL format configuration on all disks.
func initFormatXL(storageDisks []StorageAPI, setCount, disksPerSet int) (format *formatXLV3, err error) {
func initFormatXL(ctx context.Context, storageDisks []StorageAPI, setCount, disksPerSet int) (format *formatXLV3, err error) {
format = newFormatXLV3(setCount, disksPerSet)
formats := make([]*formatXLV3, len(storageDisks))
@ -632,7 +633,7 @@ func initFormatXL(storageDisks []StorageAPI, setCount, disksPerSet int) (format
}
// Save formats `format.json` across all disks.
if err = saveFormatXLAll(storageDisks, formats); err != nil {
if err = saveFormatXLAll(ctx, storageDisks, formats); err != nil {
return nil, err
}

View file

@ -17,11 +17,13 @@
package cmd
import (
"context"
"io"
"os"
pathutil "path"
"runtime"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/lock"
)
@ -29,17 +31,23 @@ import (
// Removes only the file at the given path; does not remove
// any parent directories. Handles long paths for
// windows automatically.
func fsRemoveFile(filePath string) (err error) {
func fsRemoveFile(ctx context.Context, filePath string) (err error) {
if filePath == "" {
return errors.Trace(errInvalidArgument)
logger.LogIf(ctx, errInvalidArgument)
return errInvalidArgument
}
if err = checkPathLength(filePath); err != nil {
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
if err = os.Remove((filePath)); err != nil {
return osErrToFSFileErr(err)
fsErr := osErrToFSFileErr(err)
if fsErr != errFileNotFound {
logger.LogIf(ctx, err)
}
return fsErr
}
return nil
@ -47,22 +55,27 @@ func fsRemoveFile(filePath string) (err error) {
// Removes all files and folders at a given path, handles
// long paths for windows automatically.
func fsRemoveAll(dirPath string) (err error) {
func fsRemoveAll(ctx context.Context, dirPath string) (err error) {
if dirPath == "" {
return errors.Trace(errInvalidArgument)
logger.LogIf(ctx, errInvalidArgument)
return errInvalidArgument
}
if err = checkPathLength(dirPath); err != nil {
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
if err = os.RemoveAll(dirPath); err != nil {
if os.IsPermission(err) {
return errors.Trace(errVolumeAccessDenied)
logger.LogIf(ctx, errVolumeAccessDenied)
return errVolumeAccessDenied
} else if isSysErrNotEmpty(err) {
return errors.Trace(errVolumeNotEmpty)
logger.LogIf(ctx, errVolumeNotEmpty)
return errVolumeNotEmpty
}
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
return nil
@ -70,22 +83,27 @@ func fsRemoveAll(dirPath string) (err error) {
// Removes a directory only if it is empty; handles long
// paths for windows automatically.
func fsRemoveDir(dirPath string) (err error) {
func fsRemoveDir(ctx context.Context, dirPath string) (err error) {
if dirPath == "" {
return errors.Trace(errInvalidArgument)
logger.LogIf(ctx, errInvalidArgument)
return errInvalidArgument
}
if err = checkPathLength(dirPath); err != nil {
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
if err = os.Remove((dirPath)); err != nil {
if os.IsNotExist(err) {
return errors.Trace(errVolumeNotFound)
logger.LogIf(ctx, errVolumeNotFound)
return errVolumeNotFound
} else if isSysErrNotEmpty(err) {
return errors.Trace(errVolumeNotEmpty)
logger.LogIf(ctx, errVolumeNotEmpty)
return errVolumeNotEmpty
}
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
return nil
@ -95,29 +113,36 @@ func fsRemoveDir(dirPath string) (err error) {
// otherwise returns an error. If directory already
// exists, returns an error. Windows long paths
// are handled automatically.
func fsMkdir(dirPath string) (err error) {
func fsMkdir(ctx context.Context, dirPath string) (err error) {
if dirPath == "" {
return errors.Trace(errInvalidArgument)
logger.LogIf(ctx, errInvalidArgument)
return errInvalidArgument
}
if err = checkPathLength(dirPath); err != nil {
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
if err = os.Mkdir((dirPath), 0777); err != nil {
if os.IsExist(err) {
return errors.Trace(errVolumeExists)
logger.LogIf(ctx, errVolumeExists)
return errVolumeExists
} else if os.IsPermission(err) {
return errors.Trace(errDiskAccessDenied)
logger.LogIf(ctx, errDiskAccessDenied)
return errDiskAccessDenied
} else if isSysErrNotDir(err) {
// File path cannot be verified since
// one of the parents is a file.
return errors.Trace(errDiskAccessDenied)
logger.LogIf(ctx, errDiskAccessDenied)
return errDiskAccessDenied
} else if isSysErrPathNotFound(err) {
// Add specific case for windows.
return errors.Trace(errDiskAccessDenied)
logger.LogIf(ctx, errDiskAccessDenied)
return errDiskAccessDenied
}
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
return nil
@ -128,36 +153,40 @@ func fsMkdir(dirPath string) (err error) {
// not perform any higher layer interpretation of files v/s
// directories. For higher level interpretation look at
// fsStatFileDir, fsStatFile, fsStatDir.
func fsStat(statLoc string) (os.FileInfo, error) {
func fsStat(ctx context.Context, statLoc string) (os.FileInfo, error) {
if statLoc == "" {
return nil, errors.Trace(errInvalidArgument)
logger.LogIf(ctx, errInvalidArgument)
return nil, errInvalidArgument
}
if err := checkPathLength(statLoc); err != nil {
return nil, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, err
}
fi, err := os.Stat((statLoc))
if err != nil {
return nil, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, err
}
return fi, nil
}
// Lookup if volume exists, returns volume attributes upon success.
func fsStatVolume(volume string) (os.FileInfo, error) {
fi, err := fsStat(volume)
func fsStatVolume(ctx context.Context, volume string) (os.FileInfo, error) {
fi, err := fsStat(ctx, volume)
if err != nil {
err = errors.Cause(err)
if os.IsNotExist(err) {
return nil, errors.Trace(errVolumeNotFound)
return nil, errVolumeNotFound
} else if os.IsPermission(err) {
return nil, errors.Trace(errVolumeAccessDenied)
return nil, errVolumeAccessDenied
}
return nil, errors.Trace(err)
return nil, err
}
if !fi.IsDir() {
return nil, errors.Trace(errVolumeAccessDenied)
logger.LogIf(ctx, errVolumeAccessDenied)
return nil, errVolumeAccessDenied
}
return fi, nil
@ -173,52 +202,55 @@ func osErrToFSFileErr(err error) error {
}
err = errors.Cause(err)
if os.IsNotExist(err) {
return errors.Trace(errFileNotFound)
return errFileNotFound
}
if os.IsPermission(err) {
return errors.Trace(errFileAccessDenied)
return errFileAccessDenied
}
if isSysErrNotDir(err) {
return errors.Trace(errFileAccessDenied)
return errFileAccessDenied
}
if isSysErrPathNotFound(err) {
return errors.Trace(errFileNotFound)
return errFileNotFound
}
return err
}
// Lookup if directory exists, returns directory attributes upon success.
func fsStatDir(statDir string) (os.FileInfo, error) {
fi, err := fsStat(statDir)
func fsStatDir(ctx context.Context, statDir string) (os.FileInfo, error) {
fi, err := fsStat(ctx, statDir)
if err != nil {
return nil, osErrToFSFileErr(err)
}
if !fi.IsDir() {
return nil, errors.Trace(errFileAccessDenied)
return nil, errFileAccessDenied
}
return fi, nil
}
// Lookup if file exists, returns file attributes upon success.
func fsStatFile(statFile string) (os.FileInfo, error) {
fi, err := fsStat(statFile)
func fsStatFile(ctx context.Context, statFile string) (os.FileInfo, error) {
fi, err := fsStat(ctx, statFile)
if err != nil {
return nil, osErrToFSFileErr(err)
}
if fi.IsDir() {
return nil, errors.Trace(errFileAccessDenied)
logger.LogIf(ctx, errFileAccessDenied)
return nil, errFileAccessDenied
}
return fi, nil
}
// Opens the file at given path, optionally from an offset. Upon success returns
// a readable stream and the size of the readable stream.
func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadCloser, int64, error) {
if readPath == "" || offset < 0 {
return nil, 0, errors.Trace(errInvalidArgument)
logger.LogIf(ctx, errInvalidArgument)
return nil, 0, errInvalidArgument
}
if err := checkPathLength(readPath); err != nil {
return nil, 0, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, 0, err
}
fr, err := os.Open((readPath))
@ -229,19 +261,22 @@ func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
// Stat to get the size of the file at path.
st, err := os.Stat((readPath))
if err != nil {
return nil, 0, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, 0, err
}
// Verify that this is a regular file, since subsequent Seek is undefined otherwise.
if !st.Mode().IsRegular() {
return nil, 0, errors.Trace(errIsNotRegular)
logger.LogIf(ctx, errIsNotRegular)
return nil, 0, errIsNotRegular
}
// Seek to the requested offset.
if offset > 0 {
_, err = fr.Seek(offset, os.SEEK_SET)
if err != nil {
return nil, 0, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, 0, err
}
}
@ -250,21 +285,25 @@ func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
}
// Creates a file and copies data from incoming reader. Staging buffer is used by io.CopyBuffer.
func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int64) (int64, error) {
func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, buf []byte, fallocSize int64) (int64, error) {
if filePath == "" || reader == nil {
return 0, errors.Trace(errInvalidArgument)
logger.LogIf(ctx, errInvalidArgument)
return 0, errInvalidArgument
}
if err := checkPathLength(filePath); err != nil {
return 0, errors.Trace(err)
logger.LogIf(ctx, err)
return 0, err
}
if err := mkdirAll(pathutil.Dir(filePath), 0777); err != nil {
return 0, errors.Trace(err)
logger.LogIf(ctx, err)
return 0, err
}
if err := checkDiskFree(pathutil.Dir(filePath), fallocSize); err != nil {
return 0, errors.Trace(err)
logger.LogIf(ctx, err)
return 0, err
}
writer, err := lock.Open(filePath, os.O_CREATE|os.O_WRONLY, 0666)
@ -276,7 +315,8 @@ func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int6
// Fallocate only if the size of the final object is known.
if fallocSize > 0 {
if err = fsFAllocate(int(writer.Fd()), 0, fallocSize); err != nil {
return 0, errors.Trace(err)
logger.LogIf(ctx, err)
return 0, err
}
}
@ -284,12 +324,14 @@ func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int6
if buf != nil {
bytesWritten, err = io.CopyBuffer(writer, reader, buf)
if err != nil {
return 0, errors.Trace(err)
logger.LogIf(ctx, err)
return 0, err
}
} else {
bytesWritten, err = io.Copy(writer, reader)
if err != nil {
return 0, errors.Trace(err)
logger.LogIf(ctx, err)
return 0, err
}
}
@ -320,12 +362,14 @@ func fsFAllocate(fd int, offset int64, len int64) (err error) {
// Renames source path to destination path, creates all the
// missing parents if they don't exist.
func fsRenameFile(sourcePath, destPath string) error {
func fsRenameFile(ctx context.Context, sourcePath, destPath string) error {
if err := checkPathLength(sourcePath); err != nil {
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
if err := checkPathLength(destPath); err != nil {
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
// Verify if source path exists.
@ -334,27 +378,34 @@ func fsRenameFile(sourcePath, destPath string) error {
}
if err := renameAll(sourcePath, destPath); err != nil {
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
return nil
}
// fsDeleteFile is a wrapper for deleteFile(), after checking the path length.
func fsDeleteFile(basePath, deletePath string) error {
func fsDeleteFile(ctx context.Context, basePath, deletePath string) error {
if err := checkPathLength(basePath); err != nil {
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
if err := checkPathLength(deletePath); err != nil {
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
return deleteFile(basePath, deletePath)
if err := deleteFile(basePath, deletePath); err != nil {
logger.LogIf(ctx, err)
return err
}
return nil
}
// fsRemoveMeta safely removes a locked file and takes care of Windows special case
func fsRemoveMeta(basePath, deletePath, tmpDir string) error {
func fsRemoveMeta(ctx context.Context, basePath, deletePath, tmpDir string) error {
// Special case for windows please read through.
if runtime.GOOS == globalWindowsOSName {
// Ordinarily windows does not permit deletion or renaming of files still
@ -388,13 +439,13 @@ func fsRemoveMeta(basePath, deletePath, tmpDir string) error {
tmpPath := pathJoin(tmpDir, mustGetUUID())
fsRenameFile(deletePath, tmpPath)
fsRenameFile(ctx, deletePath, tmpPath)
// Proceed to deleting the directory if empty
fsDeleteFile(basePath, pathutil.Dir(deletePath))
fsDeleteFile(ctx, basePath, pathutil.Dir(deletePath))
// Finally delete the renamed file.
return fsDeleteFile(tmpDir, tmpPath)
return fsDeleteFile(ctx, tmpDir, tmpPath)
}
return fsDeleteFile(basePath, deletePath)
return fsDeleteFile(ctx, basePath, deletePath)
}
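The Windows branch above never deletes the locked meta file in place: it renames it into the tmp directory first, which frees the original path immediately, and removes the renamed copy afterwards. A reduced sketch of that rename-then-delete sequence (removeBusyFile is illustrative and assumes the file was opened with share-delete semantics, as the surrounding comment describes):

package main

import (
	"os"
	"path/filepath"
)

// removeBusyFile frees filePath immediately by renaming the (possibly still
// open) file into tmpDir, then deleting the renamed copy.
func removeBusyFile(filePath, tmpDir string) error {
	tmpPath := filepath.Join(tmpDir, "pending-delete")
	if err := os.Rename(filePath, tmpPath); err != nil {
		return err
	}
	return os.Remove(tmpPath)
}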

View file

@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"io"
"io/ioutil"
"os"
@ -36,19 +37,19 @@ func TestFSRenameFile(t *testing.T) {
}
defer os.RemoveAll(path)
if err = fsMkdir(pathJoin(path, "testvolume1")); err != nil {
if err = fsMkdir(context.Background(), pathJoin(path, "testvolume1")); err != nil {
t.Fatal(err)
}
if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); err != nil {
if err = fsRenameFile(context.Background(), pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); err != nil {
t.Fatal(err)
}
if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); errors.Cause(err) != errFileNotFound {
if err = fsRenameFile(context.Background(), pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); errors.Cause(err) != errFileNotFound {
t.Fatal(err)
}
if err = fsRenameFile(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), pathJoin(path, "testvolume2")); errors.Cause(err) != errFileNameTooLong {
if err = fsRenameFile(context.Background(), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), pathJoin(path, "testvolume2")); errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error", err)
}
if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong {
if err = fsRenameFile(context.Background(), pathJoin(path, "testvolume1"), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error", err)
}
}
@ -63,30 +64,30 @@ func TestFSStats(t *testing.T) {
// Setup test environment.
if err = fsMkdir(""); errors.Cause(err) != errInvalidArgument {
if err = fsMkdir(context.Background(), ""); errors.Cause(err) != errInvalidArgument {
t.Fatal("Unexpected error", err)
}
if err = fsMkdir(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong {
if err = fsMkdir(context.Background(), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error", err)
}
if err = fsMkdir(pathJoin(path, "success-vol")); err != nil {
if err = fsMkdir(context.Background(), pathJoin(path, "success-vol")); err != nil {
t.Fatalf("Unable to create volume, %s", err)
}
var reader = bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
if _, err = fsCreateFile(context.Background(), pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
// Seek back.
reader.Seek(0, 0)
if err = fsMkdir(pathJoin(path, "success-vol", "success-file")); errors.Cause(err) != errVolumeExists {
if err = fsMkdir(context.Background(), pathJoin(path, "success-vol", "success-file")); errors.Cause(err) != errVolumeExists {
t.Fatal("Unexpected error", err)
}
if _, err = fsCreateFile(pathJoin(path, "success-vol", "path/to/success-file"), reader, nil, 0); err != nil {
if _, err = fsCreateFile(context.Background(), pathJoin(path, "success-vol", "path/to/success-file"), reader, nil, 0); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
// Seek back.
@ -169,12 +170,12 @@ func TestFSStats(t *testing.T) {
for i, testCase := range testCases {
if testCase.srcPath != "" {
if _, err := fsStatFile(pathJoin(testCase.srcFSPath, testCase.srcVol,
if _, err := fsStatFile(context.Background(), pathJoin(testCase.srcFSPath, testCase.srcVol,
testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
} else {
if _, err := fsStatVolume(pathJoin(testCase.srcFSPath, testCase.srcVol)); errors.Cause(err) != testCase.expectedErr {
if _, err := fsStatVolume(context.Background(), pathJoin(testCase.srcFSPath, testCase.srcVol)); errors.Cause(err) != testCase.expectedErr {
t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
}
@ -189,20 +190,20 @@ func TestFSCreateAndOpen(t *testing.T) {
}
defer os.RemoveAll(path)
if err = fsMkdir(pathJoin(path, "success-vol")); err != nil {
if err = fsMkdir(context.Background(), pathJoin(path, "success-vol")); err != nil {
t.Fatalf("Unable to create directory, %s", err)
}
if _, err = fsCreateFile("", nil, nil, 0); errors.Cause(err) != errInvalidArgument {
if _, err = fsCreateFile(context.Background(), "", nil, nil, 0); errors.Cause(err) != errInvalidArgument {
t.Fatal("Unexpected error", err)
}
if _, _, err = fsOpenFile("", -1); errors.Cause(err) != errInvalidArgument {
if _, _, err = fsOpenFile(context.Background(), "", -1); errors.Cause(err) != errInvalidArgument {
t.Fatal("Unexpected error", err)
}
var reader = bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
if _, err = fsCreateFile(context.Background(), pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
// Seek back.
@ -230,18 +231,18 @@ func TestFSCreateAndOpen(t *testing.T) {
}
for i, testCase := range testCases {
_, err = fsCreateFile(pathJoin(path, testCase.srcVol, testCase.srcPath), reader, nil, 0)
_, err = fsCreateFile(context.Background(), pathJoin(path, testCase.srcVol, testCase.srcPath), reader, nil, 0)
if errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
_, _, err = fsOpenFile(pathJoin(path, testCase.srcVol, testCase.srcPath), 0)
_, _, err = fsOpenFile(context.Background(), pathJoin(path, testCase.srcVol, testCase.srcPath), 0)
if errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
}
// Attempt to open a directory.
if _, _, err = fsOpenFile(pathJoin(path), 0); errors.Cause(err) != errIsNotRegular {
if _, _, err = fsOpenFile(context.Background(), pathJoin(path), 0); errors.Cause(err) != errIsNotRegular {
t.Fatal("Unexpected error", err)
}
}
@ -255,20 +256,20 @@ func TestFSDeletes(t *testing.T) {
defer os.RemoveAll(path)
// Setup test environment.
if err = fsMkdir(pathJoin(path, "success-vol")); err != nil {
if err = fsMkdir(context.Background(), pathJoin(path, "success-vol")); err != nil {
t.Fatalf("Unable to create directory, %s", err)
}
var buf = make([]byte, 4096)
var reader = bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil {
if _, err = fsCreateFile(context.Background(), pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
// Seek back.
reader.Seek(0, io.SeekStart)
// folder is not empty
err = fsMkdir(pathJoin(path, "success-vol", "not-empty"))
err = fsMkdir(context.Background(), pathJoin(path, "success-vol", "not-empty"))
if err != nil {
t.Fatal(err)
}
@ -278,10 +279,10 @@ func TestFSDeletes(t *testing.T) {
}
// recursive
if err = fsMkdir(pathJoin(path, "success-vol", "parent")); err != nil {
if err = fsMkdir(context.Background(), pathJoin(path, "success-vol", "parent")); err != nil {
t.Fatal(err)
}
if err = fsMkdir(pathJoin(path, "success-vol", "parent", "dir")); err != nil {
if err = fsMkdir(context.Background(), pathJoin(path, "success-vol", "parent", "dir")); err != nil {
t.Fatal(err)
}
@ -343,7 +344,7 @@ func TestFSDeletes(t *testing.T) {
}
for i, testCase := range testCases {
if err = fsDeleteFile(testCase.basePath, pathJoin(testCase.basePath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
if err = fsDeleteFile(context.Background(), testCase.basePath, pathJoin(testCase.basePath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
}
@ -358,7 +359,7 @@ func BenchmarkFSDeleteFile(b *testing.B) {
defer os.RemoveAll(path)
// Setup test environment.
if err = fsMkdir(pathJoin(path, "benchmark")); err != nil {
if err = fsMkdir(context.Background(), pathJoin(path, "benchmark")); err != nil {
b.Fatalf("Unable to create directory, %s", err)
}
@ -375,7 +376,7 @@ func BenchmarkFSDeleteFile(b *testing.B) {
}
b.StartTimer()
err = fsDeleteFile(benchDir, filename)
err = fsDeleteFile(context.Background(), benchDir, filename)
if err != nil {
b.Fatal(err)
}
@ -392,18 +393,18 @@ func TestFSRemoves(t *testing.T) {
defer os.RemoveAll(path)
// Setup test environment.
if err = fsMkdir(pathJoin(path, "success-vol")); err != nil {
if err = fsMkdir(context.Background(), pathJoin(path, "success-vol")); err != nil {
t.Fatalf("Unable to create directory, %s", err)
}
var reader = bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
if _, err = fsCreateFile(context.Background(), pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
// Seek back.
reader.Seek(0, 0)
if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file-new"), reader, nil, 0); err != nil {
if _, err = fsCreateFile(context.Background(), pathJoin(path, "success-vol", "success-file-new"), reader, nil, 0); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
// Seek back.
@ -477,25 +478,25 @@ func TestFSRemoves(t *testing.T) {
for i, testCase := range testCases {
if testCase.srcPath != "" {
if err = fsRemoveFile(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
if err = fsRemoveFile(context.Background(), pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
} else {
if err = fsRemoveDir(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
if err = fsRemoveDir(context.Background(), pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Error(err)
}
}
}
if err = fsRemoveAll(pathJoin(path, "success-vol")); err != nil {
if err = fsRemoveAll(context.Background(), pathJoin(path, "success-vol")); err != nil {
t.Fatal(err)
}
if err = fsRemoveAll(""); errors.Cause(err) != errInvalidArgument {
if err = fsRemoveAll(context.Background(), ""); errors.Cause(err) != errInvalidArgument {
t.Fatal(err)
}
if err = fsRemoveAll("my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); errors.Cause(err) != errFileNameTooLong {
if err = fsRemoveAll(context.Background(), "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); errors.Cause(err) != errFileNameTooLong {
t.Fatal(err)
}
}
@ -509,14 +510,14 @@ func TestFSRemoveMeta(t *testing.T) {
defer os.RemoveAll(fsPath)
// Setup test environment.
if err = fsMkdir(pathJoin(fsPath, "success-vol")); err != nil {
if err = fsMkdir(context.Background(), pathJoin(fsPath, "success-vol")); err != nil {
t.Fatalf("Unable to create directory, %s", err)
}
filePath := pathJoin(fsPath, "success-vol", "success-file")
var reader = bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(filePath, reader, nil, 0); err != nil {
if _, err = fsCreateFile(context.Background(), filePath, reader, nil, 0); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
@ -535,7 +536,7 @@ func TestFSRemoveMeta(t *testing.T) {
t.Fatal(tmpErr)
}
if err := fsRemoveMeta(fsPath, filePath, tmpDir); err != nil {
if err := fsRemoveMeta(context.Background(), fsPath, filePath, tmpDir); err != nil {
t.Fatalf("Unable to remove file, %s", err)
}

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"encoding/hex"
"encoding/json"
"io"
@ -25,7 +26,7 @@ import (
pathutil "path"
"strings"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/lock"
"github.com/minio/minio/pkg/mimedb"
"github.com/tidwall/gjson"
@ -237,20 +238,23 @@ func parseFSPartsArray(fsMetaBuf []byte) []objectPartInfo {
return partsArray
}
func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
func (m *fsMetaV1) ReadFrom(ctx context.Context, lk *lock.LockedFile) (n int64, err error) {
var fsMetaBuf []byte
fi, err := lk.Stat()
if err != nil {
return 0, errors.Trace(err)
logger.LogIf(ctx, err)
return 0, err
}
fsMetaBuf, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
if err != nil {
return 0, errors.Trace(err)
logger.LogIf(ctx, err)
return 0, err
}
if len(fsMetaBuf) == 0 {
return 0, errors.Trace(io.EOF)
logger.LogIf(ctx, io.EOF)
return 0, io.EOF
}
// obtain version.
@ -259,7 +263,9 @@ func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
// Verify if the format is valid, return corrupted format
// for unrecognized formats.
if !isFSMetaValid(m.Version) {
return 0, errors.Trace(errCorruptedFormat)
logger.GetReqInfo(ctx).AppendTags("file", lk.Name())
logger.LogIf(ctx, errCorruptedFormat)
return 0, errCorruptedFormat
}
// obtain parts information
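
A runnable sketch of the read pattern ReadFrom uses above, with a plain *os.File standing in for lock.LockedFile: stat the open handle for its size, read exactly that section, and surface an empty file as io.EOF so callers can treat it like an aborted write. readAllAt is a hypothetical helper.

package main

import (
	"errors"
	"fmt"
	"io"
	"os"
)

// readAllAt stats the open handle for its size, reads exactly that
// section, and reports an empty file as io.EOF, mirroring ReadFrom.
func readAllAt(f *os.File) ([]byte, error) {
	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	buf, err := io.ReadAll(io.NewSectionReader(f, 0, fi.Size()))
	if err != nil {
		return nil, err
	}
	if len(buf) == 0 {
		return nil, io.EOF
	}
	return buf, nil
}

func main() {
	f, _ := os.CreateTemp("", "fsjson")
	defer os.Remove(f.Name())
	_, err := readAllAt(f)
	fmt.Println(errors.Is(err, io.EOF)) // true: empty file behaves like io.EOF
}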

View file

@ -70,7 +70,7 @@ func TestReadFSMetadata(t *testing.T) {
// Regular fs metadata reading, no errors expected
fsMeta := fsMetaV1{}
if _, err = fsMeta.ReadFrom(rlk.LockedFile); err != nil {
if _, err = fsMeta.ReadFrom(context.Background(), rlk.LockedFile); err != nil {
t.Fatal("Unexpected error ", err)
}
}
@ -105,7 +105,7 @@ func TestWriteFSMetadata(t *testing.T) {
// FS metadata reading, no errors expected (healthy disk)
fsMeta := fsMetaV1{}
_, err = fsMeta.ReadFrom(rlk.LockedFile)
_, err = fsMeta.ReadFrom(context.Background(), rlk.LockedFile)
if err != nil {
t.Fatal("Unexpected error ", err)
}

View file

@ -29,6 +29,7 @@ import (
"strings"
"time"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors"
mioutil "github.com/minio/minio/pkg/ioutil"
@ -64,8 +65,9 @@ func (fs *FSObjects) decodePartFile(name string) (partNumber int, etag string, e
}
// Appends parts to an appendFile sequentially.
func (fs *FSObjects) backgroundAppend(bucket, object, uploadID string) {
func (fs *FSObjects) backgroundAppend(ctx context.Context, bucket, object, uploadID string) {
fs.appendFileMapMu.Lock()
logger.GetReqInfo(ctx).AppendTags("uploadID", uploadID)
file := fs.appendFileMap[uploadID]
if file == nil {
file = &fsAppendFile{
@ -84,7 +86,8 @@ func (fs *FSObjects) backgroundAppend(bucket, object, uploadID string) {
entries, err := readDir(uploadIDDir)
if err != nil {
errorIf(err, "error reading directory %s", uploadIDDir)
logger.GetReqInfo(ctx).AppendTags("uploadIDDir", uploadIDDir)
logger.LogIf(ctx, err)
return
}
sort.Strings(entries)
@ -95,7 +98,8 @@ func (fs *FSObjects) backgroundAppend(bucket, object, uploadID string) {
}
partNumber, etag, err := fs.decodePartFile(entry)
if err != nil {
errorIf(err, "unable to split the file name into partNumber and etag: %s", entry)
logger.GetReqInfo(ctx).AppendTags("entry", entry)
logger.LogIf(ctx, err)
return
}
if partNumber < nextPartNumber {
@ -110,7 +114,9 @@ func (fs *FSObjects) backgroundAppend(bucket, object, uploadID string) {
partPath := pathJoin(uploadIDDir, entry)
err = mioutil.AppendFile(file.filePath, partPath)
if err != nil {
errorIf(err, "Unable to append %s to %s", partPath, file.filePath)
reqInfo := logger.GetReqInfo(ctx).AppendTags("partPath", partPath)
reqInfo.AppendTags("filepath", file.filePath)
logger.LogIf(ctx, err)
return
}
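
The part files that backgroundAppend walks are named so a part number and ETag can be recovered from the entry alone. A sketch of one plausible "<partNumber>.<etag>" scheme follows; the exact encoding behind fs.encodePartFile/fs.decodePartFile is assumed here, not quoted from the source.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// encodePartFile/decodePartFile sketch a "<partNumber>.<etag>" naming
// convention; the real fs backend's format may differ in detail.
func encodePartFile(partNumber int, etag string) string {
	return fmt.Sprintf("%d.%s", partNumber, etag)
}

func decodePartFile(name string) (int, string, error) {
	i := strings.Index(name, ".")
	if i < 0 {
		return 0, "", fmt.Errorf("malformed part file %q", name)
	}
	n, err := strconv.Atoi(name[:i])
	if err != nil {
		return 0, "", err
	}
	return n, name[i+1:], nil
}

func main() {
	name := encodePartFile(7, "d41d8cd98f00b204e9800998ecf8427e")
	n, etag, _ := decodePartFile(name)
	fmt.Println(n, etag)
}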
@ -122,12 +128,12 @@ func (fs *FSObjects) backgroundAppend(bucket, object, uploadID string) {
// ListMultipartUploads - lists all the uploadIDs for the specified object.
// We do not support prefix-based listing.
func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) {
if err := checkListMultipartArgs(bucket, object, keyMarker, uploadIDMarker, delimiter, fs); err != nil {
return result, toObjectErr(errors.Trace(err))
if err := checkListMultipartArgs(ctx, bucket, object, keyMarker, uploadIDMarker, delimiter, fs); err != nil {
return result, toObjectErr(err)
}
if _, err := fs.statBucketDir(bucket); err != nil {
return result, toObjectErr(errors.Trace(err), bucket)
if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return result, toObjectErr(err, bucket)
}
result.MaxUploads = maxUploads
@ -143,7 +149,8 @@ func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, k
result.IsTruncated = false
return result, nil
}
return result, toObjectErr(errors.Trace(err))
logger.LogIf(ctx, err)
return result, toObjectErr(err)
}
// S3 spec says uploadIDs should be sorted based on initiated time. ModTime of fs.json
@ -151,7 +158,7 @@ func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, k
var uploads []MultipartInfo
for _, uploadID := range uploadIDs {
metaFilePath := pathJoin(fs.getMultipartSHADir(bucket, object), uploadID, fs.metaJSONFile)
fi, err := fsStatFile(metaFilePath)
fi, err := fsStatFile(ctx, metaFilePath)
if err != nil {
return result, toObjectErr(err, bucket, object)
}
@ -204,11 +211,11 @@ func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, k
//
// Implements S3 compatible initiate multipart API.
func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string) (string, error) {
if err := checkNewMultipartArgs(bucket, object, fs); err != nil {
if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil {
return "", toObjectErr(err, bucket)
}
if _, err := fs.statBucketDir(bucket); err != nil {
if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return "", toObjectErr(err, bucket)
}
@ -217,7 +224,8 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
err := mkdirAll(uploadIDDir, 0755)
if err != nil {
return "", errors.Trace(err)
logger.LogIf(ctx, err)
return "", err
}
// Initialize fs.json values.
@ -226,11 +234,13 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
fsMetaBytes, err := json.Marshal(fsMeta)
if err != nil {
return "", errors.Trace(err)
logger.LogIf(ctx, err)
return "", err
}
if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0644); err != nil {
return "", errors.Trace(err)
logger.LogIf(ctx, err)
return "", err
}
return uploadID, nil
@ -242,22 +252,22 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
startOffset int64, length int64, srcInfo ObjectInfo) (pi PartInfo, e error) {
if err := checkNewMultipartArgs(srcBucket, srcObject, fs); err != nil {
return pi, toObjectErr(errors.Trace(err))
if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, fs); err != nil {
return pi, toObjectErr(err)
}
// Initialize pipe.
go func() {
if gerr := fs.GetObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
if gerr = srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject)
logger.LogIf(ctx, gerr)
return
}
return
}
// Close writer explicitly signalling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject)
logger.LogIf(ctx, gerr)
return
}
}()
@ -275,26 +285,27 @@ func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
// written to '.minio.sys/tmp' location and safely renamed to
// '.minio.sys/multipart' for each part.
func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
if err := checkPutObjectPartArgs(bucket, object, fs); err != nil {
return pi, toObjectErr(errors.Trace(err), bucket)
if err := checkPutObjectPartArgs(ctx, bucket, object, fs); err != nil {
return pi, toObjectErr(err, bucket)
}
if _, err := fs.statBucketDir(bucket); err != nil {
return pi, toObjectErr(errors.Trace(err), bucket)
if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return pi, toObjectErr(err, bucket)
}
// Validate input data size and it can never be less than zero.
if data.Size() < 0 {
return pi, toObjectErr(errors.Trace(errInvalidArgument))
logger.LogIf(ctx, errInvalidArgument)
return pi, toObjectErr(errInvalidArgument)
}
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
// Just check if the uploadID exists to avoid copy if it doesn't.
_, err := fsStatFile(pathJoin(uploadIDDir, fs.metaJSONFile))
_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return pi, errors.Trace(InvalidUploadID{UploadID: uploadID})
return pi, InvalidUploadID{UploadID: uploadID}
}
return pi, toObjectErr(err, bucket, object)
}
@ -306,23 +317,23 @@ func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
buf := make([]byte, bufSize)
tmpPartPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID+"."+mustGetUUID()+"."+strconv.Itoa(partID))
bytesWritten, err := fsCreateFile(tmpPartPath, data, buf, data.Size())
bytesWritten, err := fsCreateFile(ctx, tmpPartPath, data, buf, data.Size())
if err != nil {
fsRemoveFile(tmpPartPath)
fsRemoveFile(ctx, tmpPartPath)
return pi, toObjectErr(err, minioMetaTmpBucket, tmpPartPath)
}
// Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header.
if bytesWritten < data.Size() {
fsRemoveFile(tmpPartPath)
return pi, errors.Trace(IncompleteBody{})
fsRemoveFile(ctx, tmpPartPath)
return pi, IncompleteBody{}
}
// Delete temporary part in case of failure. If
// PutObjectPart succeeds then there would be nothing to
// delete in which case we just ignore the error.
defer fsRemoveFile(tmpPartPath)
defer fsRemoveFile(ctx, tmpPartPath)
etag := hex.EncodeToString(data.MD5Current())
if etag == "" {
@ -330,13 +341,13 @@ func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
}
partPath := pathJoin(uploadIDDir, fs.encodePartFile(partID, etag))
if err = fsRenameFile(tmpPartPath, partPath); err != nil {
if err = fsRenameFile(ctx, tmpPartPath, partPath); err != nil {
return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
}
go fs.backgroundAppend(bucket, object, uploadID)
go fs.backgroundAppend(ctx, bucket, object, uploadID)
fi, err := fsStatFile(partPath)
fi, err := fsStatFile(ctx, partPath)
if err != nil {
return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
}
@ -356,8 +367,8 @@ func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
// ListPartsInfo structure is marshalled directly into XML and
// replied back to the client.
func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) {
if err := checkListPartsArgs(bucket, object, fs); err != nil {
return result, toObjectErr(errors.Trace(err))
if err := checkListPartsArgs(ctx, bucket, object, fs); err != nil {
return result, toObjectErr(err)
}
result.Bucket = bucket
result.Object = object
@ -366,22 +377,23 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
result.PartNumberMarker = partNumberMarker
// Check if bucket exists
if _, err := fs.statBucketDir(bucket); err != nil {
return result, toObjectErr(errors.Trace(err), bucket)
if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return result, toObjectErr(err, bucket)
}
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
_, err := fsStatFile(pathJoin(uploadIDDir, fs.metaJSONFile))
_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return result, errors.Trace(InvalidUploadID{UploadID: uploadID})
return result, InvalidUploadID{UploadID: uploadID}
}
return result, toObjectErr(errors.Trace(err), bucket, object)
return result, toObjectErr(err, bucket, object)
}
entries, err := readDir(uploadIDDir)
if err != nil {
return result, toObjectErr(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return result, toObjectErr(err, bucket)
}
partsMap := make(map[int]string)
@ -391,20 +403,21 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
}
partNumber, etag1, derr := fs.decodePartFile(entry)
if derr != nil {
return result, toObjectErr(errors.Trace(derr))
logger.LogIf(ctx, derr)
return result, toObjectErr(derr)
}
etag2, ok := partsMap[partNumber]
if !ok {
partsMap[partNumber] = etag1
continue
}
stat1, serr := fsStatFile(pathJoin(uploadIDDir, fs.encodePartFile(partNumber, etag1)))
stat1, serr := fsStatFile(ctx, pathJoin(uploadIDDir, fs.encodePartFile(partNumber, etag1)))
if serr != nil {
return result, toObjectErr(errors.Trace(serr))
return result, toObjectErr(serr)
}
stat2, serr := fsStatFile(pathJoin(uploadIDDir, fs.encodePartFile(partNumber, etag2)))
stat2, serr := fsStatFile(ctx, pathJoin(uploadIDDir, fs.encodePartFile(partNumber, etag2)))
if serr != nil {
return result, toObjectErr(errors.Trace(serr))
return result, toObjectErr(serr)
}
if stat1.ModTime().After(stat2.ModTime()) {
partsMap[partNumber] = etag1
@ -443,9 +456,9 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
}
for i, part := range result.Parts {
var stat os.FileInfo
stat, err = fsStatFile(pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag)))
stat, err = fsStatFile(ctx, pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag)))
if err != nil {
return result, toObjectErr(errors.Trace(err))
return result, toObjectErr(err)
}
result.Parts[i].LastModified = stat.ModTime()
result.Parts[i].Size = stat.Size()
@ -453,7 +466,8 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
fsMetaBytes, err := ioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil {
return result, errors.Trace(err)
logger.LogIf(ctx, err)
return result, err
}
result.UserDefined = parseFSMetaMap(fsMetaBytes)
@ -467,31 +481,31 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
//
// Implements S3 compatible Complete multipart API.
func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart) (oi ObjectInfo, e error) {
if err := checkCompleteMultipartArgs(bucket, object, fs); err != nil {
if err := checkCompleteMultipartArgs(ctx, bucket, object, fs); err != nil {
return oi, toObjectErr(err)
}
// Check if an object is present as one of the parent dirs.
if fs.parentDirIsObject(bucket, pathutil.Dir(object)) {
return oi, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
if fs.parentDirIsObject(ctx, bucket, pathutil.Dir(object)) {
return oi, toObjectErr(errFileAccessDenied, bucket, object)
}
if _, err := fs.statBucketDir(bucket); err != nil {
if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return oi, toObjectErr(err, bucket)
}
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
// Just check if the uploadID exists to avoid copy if it doesn't.
_, err := fsStatFile(pathJoin(uploadIDDir, fs.metaJSONFile))
_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return oi, errors.Trace(InvalidUploadID{UploadID: uploadID})
return oi, InvalidUploadID{UploadID: uploadID}
}
return oi, toObjectErr(err, bucket, object)
}
// Calculate s3 compatible md5sum for complete multipart.
s3MD5, err := getCompleteMultipartMD5(parts)
s3MD5, err := getCompleteMultipartMD5(ctx, parts)
if err != nil {
return oi, err
}
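
getCompleteMultipartMD5 above computes the S3-style completed-upload ETag. A hedged sketch of that convention, assuming the usual S3 rule (MD5 of the concatenated per-part MD5 bytes, suffixed with the part count); completeMultipartMD5 is illustrative.

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strings"
)

// completeMultipartMD5 concatenates the raw MD5 bytes of every part ETag,
// hashes the result, and appends "-<partCount>".
func completeMultipartMD5(partETags []string) (string, error) {
	var all []byte
	for _, etag := range partETags {
		b, err := hex.DecodeString(strings.Trim(etag, "\""))
		if err != nil {
			return "", err
		}
		all = append(all, b...)
	}
	sum := md5.Sum(all)
	return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(partETags)), nil
}

func main() {
	etag, _ := completeMultipartMD5([]string{
		"d41d8cd98f00b204e9800998ecf8427e",
		"d41d8cd98f00b204e9800998ecf8427e",
	})
	fmt.Println(etag) // md5-of-md5s with "-2" suffix
}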
@ -507,12 +521,12 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
for i, part := range parts {
partPath := pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag))
var fi os.FileInfo
fi, err = fsStatFile(partPath)
fi, err = fsStatFile(ctx, partPath)
if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return oi, errors.Trace(InvalidPart{})
return oi, InvalidPart{}
}
return oi, errors.Trace(err)
return oi, err
}
if partSize == -1 {
partSize = fi.Size()
@ -530,11 +544,13 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
// All parts except the last part have to be at least 5MB.
if !isMinAllowedPartSize(fi.Size()) {
return oi, errors.Trace(PartTooSmall{
err = PartTooSmall{
PartNumber: part.PartNumber,
PartSize: fi.Size(),
PartETag: part.ETag,
})
}
logger.LogIf(ctx, err)
return oi, err
}
// TODO: Make necessary changes in future as explained in the below comment.
@ -545,7 +561,8 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
// CompleteMultipartUpload we already have the full file available which can be
// renamed to the main name-space.
if partSize != fi.Size() {
return oi, errors.Trace(PartsSizeUnequal{})
logger.LogIf(ctx, PartsSizeUnequal{})
return oi, PartsSizeUnequal{}
}
}
@ -557,7 +574,7 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
// 1. The last PutObjectPart triggers the go-routine fs.backgroundAppend, but this go-routine has not started yet.
// 2. Now CompleteMultipartUpload gets called which sees that lastPart is not appended and starts appending
// from the beginning
fs.backgroundAppend(bucket, object, uploadID)
fs.backgroundAppend(ctx, bucket, object, uploadID)
fs.appendFileMapMu.Lock()
file := fs.appendFileMap[uploadID]
@ -585,12 +602,13 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
}
if appendFallback {
fsRemoveFile(file.filePath)
fsRemoveFile(ctx, file.filePath)
for _, part := range parts {
partPath := pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag))
err = mioutil.AppendFile(appendFilePath, partPath)
if err != nil {
return oi, toObjectErr(errors.Trace(err))
logger.LogIf(ctx, err)
return oi, toObjectErr(err)
}
}
}
@ -604,18 +622,21 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
metaFile, err := fs.rwPool.Create(fsMetaPath)
if err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return oi, toObjectErr(err, bucket, object)
}
defer metaFile.Close()
// Read saved fs metadata for ongoing multipart.
fsMetaBuf, err := ioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return oi, toObjectErr(err, bucket, object)
}
err = json.Unmarshal(fsMetaBuf, &fsMeta)
if err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return oi, toObjectErr(err, bucket, object)
}
// Save additional metadata.
if len(fsMeta.Meta) == 0 {
@ -623,24 +644,26 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
}
fsMeta.Meta["etag"] = s3MD5
if _, err = fsMeta.WriteTo(metaFile); err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return oi, toObjectErr(err, bucket, object)
}
// Deny if WORM is enabled
if globalWORMEnabled {
if _, err = fsStatFile(pathJoin(fs.fsPath, bucket, object)); err == nil {
return ObjectInfo{}, errors.Trace(ObjectAlreadyExists{Bucket: bucket, Object: object})
if _, err = fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object)); err == nil {
return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
}
}
err = fsRenameFile(appendFilePath, pathJoin(fs.fsPath, bucket, object))
err = fsRenameFile(ctx, appendFilePath, pathJoin(fs.fsPath, bucket, object))
if err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return oi, toObjectErr(err, bucket, object)
}
fsRemoveAll(uploadIDDir)
fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object))
fsRemoveAll(ctx, uploadIDDir)
fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
if err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object)
return oi, toObjectErr(err, bucket, object)
}
return fsMeta.ToObjectInfo(bucket, object, fi), nil
@ -659,12 +682,12 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
// no effect and further requests to the same uploadID would not be
// honored.
func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
if err := checkAbortMultipartArgs(bucket, object, fs); err != nil {
if err := checkAbortMultipartArgs(ctx, bucket, object, fs); err != nil {
return err
}
if _, err := fs.statBucketDir(bucket); err != nil {
return toObjectErr(errors.Trace(err), bucket)
if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return toObjectErr(err, bucket)
}
fs.appendFileMapMu.Lock()
@ -673,16 +696,16 @@ func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, u
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
// Just check if the uploadID exists to avoid copy if it doesn't.
_, err := fsStatFile(pathJoin(uploadIDDir, fs.metaJSONFile))
_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return errors.Trace(InvalidUploadID{UploadID: uploadID})
return InvalidUploadID{UploadID: uploadID}
}
return toObjectErr(errors.Trace(err), bucket, object)
return toObjectErr(err, bucket, object)
}
// Ignore the error returned as Windows fails to remove directory if a file in it
// is Open()ed by the backgroundAppend()
fsRemoveAll(uploadIDDir)
fsRemoveAll(ctx, uploadIDDir)
return nil
}
@ -690,7 +713,7 @@ func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, u
// Removes multipart uploads, if any, older than `expiry` duration
// on all buckets for every `cleanupInterval`; this function is
// blocking and should be run in a go-routine.
func (fs *FSObjects) cleanupStaleMultipartUploads(cleanupInterval, expiry time.Duration, doneCh chan struct{}) {
func (fs *FSObjects) cleanupStaleMultipartUploads(ctx context.Context, cleanupInterval, expiry time.Duration, doneCh chan struct{}) {
ticker := time.NewTicker(cleanupInterval)
for {
select {
@ -710,12 +733,12 @@ func (fs *FSObjects) cleanupStaleMultipartUploads(cleanupInterval, expiry time.D
continue
}
for _, uploadID := range uploadIDs {
fi, err := fsStatDir(pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID))
fi, err := fsStatDir(ctx, pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID))
if err != nil {
continue
}
if now.Sub(fi.ModTime()) > expiry {
fsRemoveAll(pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID))
fsRemoveAll(ctx, pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID))
}
}
}
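
A compact sketch of the ticker/doneCh loop shape used by cleanupStaleMultipartUploads: one sweep per cleanupInterval, clean exit when the done channel closes. sweep is a placeholder for the per-bucket scan and expiry check, not the real implementation.

package main

import (
	"fmt"
	"time"
)

// cleanupLoop runs one sweep per interval and exits when doneCh closes.
func cleanupLoop(interval time.Duration, doneCh <-chan struct{}, sweep func(now time.Time)) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-doneCh:
			return
		case now := <-ticker.C:
			sweep(now)
		}
	}
}

func main() {
	done := make(chan struct{})
	go cleanupLoop(20*time.Millisecond, done, func(now time.Time) {
		fmt.Println("sweep at", now.Format(time.StampMilli))
	})
	time.Sleep(70 * time.Millisecond)
	close(done)
}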

View file

@ -49,7 +49,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
t.Fatal("Unexpected err: ", err)
}
go fs.cleanupStaleMultipartUploads(20*time.Millisecond, 0, globalServiceDoneCh)
go fs.cleanupStaleMultipartUploads(context.Background(), 20*time.Millisecond, 0, globalServiceDoneCh)
// Wait for 40ms such that - we have given enough time for
// cleanup routine to kick in.

View file

@ -17,10 +17,12 @@
package cmd
import (
"context"
"os"
pathutil "path"
"sync"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/lock"
)
@ -48,7 +50,9 @@ func (fsi *fsIOPool) lookupToRead(path string) (*lock.RLockedFile, bool) {
// If the file is closed and not removed from map is a bug.
if rlkFile.IsClosed() {
// Log this as an error.
errorIf(errUnexpected, "Unexpected entry found on the map %s", path)
reqInfo := (&logger.ReqInfo{}).AppendTags("path", path)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, errUnexpected)
// Purge the cached lock path from map.
delete(fsi.readersMap, path)
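
The pattern above is the heart of this change: instead of errorIf's printf-style message, request-scoped tags ride along in the context and LogIf logs the bare error with whatever tags are attached. A simplified stand-in follows; reqInfo, setReqInfo, and logIf are illustrative, not the cmd/logger API.

package main

import (
	"context"
	"log"
	"os"
)

// reqInfo is a simplified stand-in for logger.ReqInfo: a bag of tags that
// travels with the request context instead of a formatted message.
type reqInfo struct{ tags map[string]string }

type ctxKey struct{}

func (r *reqInfo) AppendTags(k, v string) *reqInfo {
	if r.tags == nil {
		r.tags = map[string]string{}
	}
	r.tags[k] = v
	return r
}

func setReqInfo(ctx context.Context, r *reqInfo) context.Context {
	return context.WithValue(ctx, ctxKey{}, r)
}

// logIf logs a non-nil error together with whatever tags the context carries.
func logIf(ctx context.Context, err error) {
	if err == nil {
		return
	}
	if r, ok := ctx.Value(ctxKey{}).(*reqInfo); ok {
		log.Printf("err=%v tags=%v", err, r.tags)
		return
	}
	log.Printf("err=%v", err)
}

func main() {
	ctx := setReqInfo(context.Background(), (&reqInfo{}).AppendTags("path", "/tmp/x"))
	logIf(ctx, os.ErrNotExist)
}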

View file

@ -32,6 +32,7 @@ import (
"time"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock"
@ -97,6 +98,7 @@ func initMetaVolumeFS(fsPath, fsUUID string) error {
// NewFSObjectLayer - initialize new fs object layer.
func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
ctx := context.Background()
if fsPath == "" {
return nil, errInvalidArgument
}
@ -142,7 +144,7 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
}
// Initialize `format.json`, this function also returns.
rlk, err := initFormatFS(fsPath)
rlk, err := initFormatFS(ctx, fsPath)
if err != nil {
return nil, err
}
@ -177,7 +179,7 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
return nil, fmt.Errorf("Unable to initialize event notification. %s", err)
}
go fs.cleanupStaleMultipartUploads(globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
go fs.cleanupStaleMultipartUploads(ctx, globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
// Return successfully initialized object layer.
return fs, nil
@ -188,13 +190,14 @@ func (fs *FSObjects) Shutdown(ctx context.Context) error {
fs.fsFormatRlk.Close()
// Cleanup and delete tmp uuid.
return fsRemoveAll(pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID))
return fsRemoveAll(ctx, pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID))
}
// StorageInfo - returns underlying storage statistics.
func (fs *FSObjects) StorageInfo(ctx context.Context) StorageInfo {
info, err := getDiskInfo((fs.fsPath))
errorIf(err, "Unable to get disk info %#v", fs.fsPath)
logger.GetReqInfo(ctx).AppendTags("path", fs.fsPath)
logger.LogIf(ctx, err)
storageInfo := StorageInfo{
Total: info.Total,
Free: info.Free,
@ -220,22 +223,24 @@ func (fs *FSObjects) ClearLocks(ctx context.Context, info []VolumeLockInfo) erro
// getBucketDir - will convert incoming bucket names to
// corresponding valid bucket names on the backend in a platform
// compatible way for all operating systems.
func (fs *FSObjects) getBucketDir(bucket string) (string, error) {
func (fs *FSObjects) getBucketDir(ctx context.Context, bucket string) (string, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return "", errors.Trace(BucketNameInvalid{Bucket: bucket})
err := BucketNameInvalid{Bucket: bucket}
logger.LogIf(ctx, err)
return "", err
}
bucketDir := pathJoin(fs.fsPath, bucket)
return bucketDir, nil
}
func (fs *FSObjects) statBucketDir(bucket string) (os.FileInfo, error) {
bucketDir, err := fs.getBucketDir(bucket)
func (fs *FSObjects) statBucketDir(ctx context.Context, bucket string) (os.FileInfo, error) {
bucketDir, err := fs.getBucketDir(ctx, bucket)
if err != nil {
return nil, err
}
st, err := fsStatVolume(bucketDir)
st, err := fsStatVolume(ctx, bucketDir)
if err != nil {
return nil, err
}
@ -250,12 +255,12 @@ func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket, locatio
return err
}
defer bucketLock.Unlock()
bucketDir, err := fs.getBucketDir(bucket)
bucketDir, err := fs.getBucketDir(ctx, bucket)
if err != nil {
return toObjectErr(err, bucket)
}
if err = fsMkdir(bucketDir); err != nil {
if err = fsMkdir(ctx, bucketDir); err != nil {
return toObjectErr(err, bucket)
}
@ -269,7 +274,7 @@ func (fs *FSObjects) GetBucketInfo(ctx context.Context, bucket string) (bi Bucke
return bi, e
}
defer bucketLock.RUnlock()
st, err := fs.statBucketDir(bucket)
st, err := fs.statBucketDir(ctx, bucket)
if err != nil {
return bi, toObjectErr(err, bucket)
}
@ -285,12 +290,14 @@ func (fs *FSObjects) GetBucketInfo(ctx context.Context, bucket string) (bi Bucke
// ListBuckets - list all s3 compatible buckets (directories) at fsPath.
func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
if err := checkPathLength(fs.fsPath); err != nil {
return nil, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, err
}
var bucketInfos []BucketInfo
entries, err := readDir((fs.fsPath))
if err != nil {
return nil, toObjectErr(errors.Trace(errDiskNotFound))
logger.LogIf(ctx, errDiskNotFound)
return nil, toObjectErr(errDiskNotFound)
}
for _, entry := range entries {
@ -299,7 +306,7 @@ func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
continue
}
var fi os.FileInfo
fi, err = fsStatVolume(pathJoin(fs.fsPath, entry))
fi, err = fsStatVolume(ctx, pathJoin(fs.fsPath, entry))
// There seems to be no practical reason to check for errors
// at this point, if there are indeed errors we can simply
// just ignore such buckets and list only those which
@ -327,27 +334,28 @@ func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string) error {
bucketLock := fs.nsMutex.NewNSLock(bucket, "")
if err := bucketLock.GetLock(globalObjectTimeout); err != nil {
logger.LogIf(ctx, err)
return err
}
defer bucketLock.Unlock()
bucketDir, err := fs.getBucketDir(bucket)
bucketDir, err := fs.getBucketDir(ctx, bucket)
if err != nil {
return toObjectErr(err, bucket)
}
// Attempt to delete regular bucket.
if err = fsRemoveDir(bucketDir); err != nil {
if err = fsRemoveDir(ctx, bucketDir); err != nil {
return toObjectErr(err, bucket)
}
// Cleanup all the bucket metadata.
minioMetadataBucketDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket)
if err = fsRemoveAll(minioMetadataBucketDir); err != nil {
if err = fsRemoveAll(ctx, minioMetadataBucketDir); err != nil {
return toObjectErr(err, bucket)
}
// Delete all bucket metadata.
deleteBucketMetadata(bucket, fs)
deleteBucketMetadata(ctx, bucket, fs)
return nil
}
@ -380,7 +388,7 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
}
defer objectSRLock.RUnlock()
}
if _, err := fs.statBucketDir(srcBucket); err != nil {
if _, err := fs.statBucketDir(ctx, srcBucket); err != nil {
return oi, toObjectErr(err, srcBucket)
}
@ -391,14 +399,15 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, srcBucket, srcObject, fs.metaJSONFile)
wlk, err := fs.rwPool.Write(fsMetaPath)
if err != nil {
return oi, toObjectErr(errors.Trace(err), srcBucket, srcObject)
logger.LogIf(ctx, err)
return oi, toObjectErr(err, srcBucket, srcObject)
}
// This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close()
// Save objects' metadata in `fs.json`.
fsMeta := newFSMetaV1()
if _, err = fsMeta.ReadFrom(wlk); err != nil {
if _, err = fsMeta.ReadFrom(ctx, wlk); err != nil {
return oi, toObjectErr(err, srcBucket, srcObject)
}
@ -409,7 +418,7 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
}
// Stat the file to get file size.
fi, err := fsStatFile(pathJoin(fs.fsPath, srcBucket, srcObject))
fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, srcBucket, srcObject))
if err != nil {
return oi, toObjectErr(err, srcBucket, srcObject)
}
@ -419,20 +428,20 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
}
go func() {
if gerr := fs.getObject(srcBucket, srcObject, 0, srcInfo.Size, srcInfo.Writer, srcInfo.ETag, !cpSrcDstSame); gerr != nil {
if gerr := fs.getObject(ctx, srcBucket, srcObject, 0, srcInfo.Size, srcInfo.Writer, srcInfo.ETag, !cpSrcDstSame); gerr != nil {
if gerr = srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read the object %s/%s.", srcBucket, srcObject)
logger.LogIf(ctx, gerr)
}
return
}
// Close writer explicitly signalling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read the object %s/%s.", srcBucket, srcObject)
logger.LogIf(ctx, gerr)
return
}
}()
objInfo, err := fs.putObject(dstBucket, dstObject, srcInfo.Reader, srcInfo.UserDefined)
objInfo, err := fs.putObject(ctx, dstBucket, dstObject, srcInfo.Reader, srcInfo.UserDefined)
if err != nil {
return oi, toObjectErr(err, dstBucket, dstObject)
}
@ -447,39 +456,43 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string) (err error) {
if err = checkGetObjArgs(bucket, object); err != nil {
if err = checkGetObjArgs(ctx, bucket, object); err != nil {
return err
}
// Lock the object before reading.
objectLock := fs.nsMutex.NewNSLock(bucket, object)
if err := objectLock.GetRLock(globalObjectTimeout); err != nil {
logger.LogIf(ctx, err)
return err
}
defer objectLock.RUnlock()
return fs.getObject(bucket, object, offset, length, writer, etag, true)
return fs.getObject(ctx, bucket, object, offset, length, writer, etag, true)
}
// getObject - wrapper for GetObject
func (fs *FSObjects) getObject(bucket, object string, offset int64, length int64, writer io.Writer, etag string, lock bool) (err error) {
if _, err = fs.statBucketDir(bucket); err != nil {
func (fs *FSObjects) getObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string, lock bool) (err error) {
if _, err = fs.statBucketDir(ctx, bucket); err != nil {
return toObjectErr(err, bucket)
}
// Offset cannot be negative.
if offset < 0 {
return toObjectErr(errors.Trace(errUnexpected), bucket, object)
logger.LogIf(ctx, errUnexpected)
return toObjectErr(errUnexpected, bucket, object)
}
// Writer cannot be nil.
if writer == nil {
return toObjectErr(errors.Trace(errUnexpected), bucket, object)
logger.LogIf(ctx, errUnexpected)
return toObjectErr(errUnexpected, bucket, object)
}
// If its a directory request, we return an empty body.
if hasSuffix(object, slashSeparator) {
_, err = writer.Write([]byte(""))
return toObjectErr(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return toObjectErr(err, bucket, object)
}
if bucket != minioMetaBucket {
@ -487,25 +500,27 @@ func (fs *FSObjects) getObject(bucket, object string, offset int64, length int64
if lock {
_, err = fs.rwPool.Open(fsMetaPath)
if err != nil && err != errFileNotFound {
return toObjectErr(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return toObjectErr(err, bucket, object)
}
defer fs.rwPool.Close(fsMetaPath)
}
}
if etag != "" {
objEtag, perr := fs.getObjectETag(bucket, object, lock)
objEtag, perr := fs.getObjectETag(ctx, bucket, object, lock)
if perr != nil {
return toObjectErr(errors.Trace(perr), bucket, object)
return toObjectErr(perr, bucket, object)
}
if objEtag != etag {
return toObjectErr(errors.Trace(InvalidETag{}), bucket, object)
logger.LogIf(ctx, InvalidETag{})
return toObjectErr(InvalidETag{}, bucket, object)
}
}
// Read the object, doesn't exist returns an s3 compatible error.
fsObjPath := pathJoin(fs.fsPath, bucket, object)
reader, size, err := fsOpenFile(fsObjPath, offset)
reader, size, err := fsOpenFile(ctx, fsObjPath, offset)
if err != nil {
return toObjectErr(err, bucket, object)
}
@ -523,21 +538,23 @@ func (fs *FSObjects) getObject(bucket, object string, offset int64, length int64
// Reply back invalid range if the input offset and length fall out of range.
if offset > size || offset+length > size {
return errors.Trace(InvalidRange{offset, length, size})
err = InvalidRange{offset, length, size}
logger.LogIf(ctx, err)
return err
}
// Allocate a staging buffer.
buf := make([]byte, int(bufSize))
_, err = io.CopyBuffer(writer, io.LimitReader(reader, length), buf)
return toObjectErr(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return toObjectErr(err, bucket, object)
}
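
A runnable sketch of getObject's tail above: validate the requested [offset, offset+length) window against the object size, then stream it through a staging buffer with io.CopyBuffer and io.LimitReader. copyRange is a hypothetical helper.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// copyRange checks the window against size, seeks to offset, and copies
// exactly length bytes through a reusable staging buffer.
func copyRange(w io.Writer, r io.ReadSeeker, size, offset, length int64) error {
	if offset < 0 || offset > size || offset+length > size {
		return fmt.Errorf("invalid range: offset=%d length=%d size=%d", offset, length, size)
	}
	if _, err := r.Seek(offset, io.SeekStart); err != nil {
		return err
	}
	buf := make([]byte, 32*1024)
	_, err := io.CopyBuffer(w, io.LimitReader(r, length), buf)
	return err
}

func main() {
	src := strings.NewReader("Hello, world")
	var dst bytes.Buffer
	if err := copyRange(&dst, src, int64(src.Len()), 7, 5); err == nil {
		fmt.Println(dst.String()) // world
	}
}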
// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
func (fs *FSObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error) {
func (fs *FSObjects) getObjectInfo(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) {
fsMeta := fsMetaV1{}
fi, err := fsStatDir(pathJoin(fs.fsPath, bucket, object))
fi, err := fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object))
if err != nil && errors.Cause(err) != errFileAccessDenied {
return oi, toObjectErr(err, bucket, object)
}
@ -547,6 +564,7 @@ func (fs *FSObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e erro
if hasSuffix(object, slashSeparator) {
return fsMeta.ToObjectInfo(bucket, object, fi), nil
}
logger.LogIf(ctx, errFileNotFound)
return oi, toObjectErr(errFileNotFound, bucket, object)
}
@ -558,7 +576,7 @@ func (fs *FSObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e erro
if err == nil {
// Read from fs metadata only if it exists.
defer fs.rwPool.Close(fsMetaPath)
if _, rerr := fsMeta.ReadFrom(rlk.LockedFile); rerr != nil {
if _, rerr := fsMeta.ReadFrom(ctx, rlk.LockedFile); rerr != nil {
// `fs.json` can be empty due to previously failed
// PutObject() transaction, if we arrive at such
// a situation we just ignore and continue.
@ -570,11 +588,12 @@ func (fs *FSObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e erro
// Ignore if `fs.json` is not available, this is true for pre-existing data.
if err != nil && err != errFileNotFound {
return oi, toObjectErr(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return oi, toObjectErr(err, bucket, object)
}
// Stat the file to get file size.
fi, err = fsStatFile(pathJoin(fs.fsPath, bucket, object))
fi, err = fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
if err != nil {
return oi, toObjectErr(err, bucket, object)
}
@ -591,27 +610,27 @@ func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string) (
}
defer objectLock.RUnlock()
if err := checkGetObjArgs(bucket, object); err != nil {
if err := checkGetObjArgs(ctx, bucket, object); err != nil {
return oi, err
}
if _, err := fs.statBucketDir(bucket); err != nil {
if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return oi, toObjectErr(err, bucket)
}
return fs.getObjectInfo(bucket, object)
return fs.getObjectInfo(ctx, bucket, object)
}
// This function does the following check, suppose
// object is "a/b/c/d", stat makes sure that objects "a/b/c",
// "a/b" and "a" do not exist.
func (fs *FSObjects) parentDirIsObject(bucket, parent string) bool {
func (fs *FSObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
var isParentDirObject func(string) bool
isParentDirObject = func(p string) bool {
if p == "." || p == "/" {
return false
}
if _, err := fsStatFile(pathJoin(fs.fsPath, bucket, p)); err == nil {
if _, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, p)); err == nil {
// If there is already a file at prefix "p", return true.
return true
}
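
A sketch of the parent-directory walk above, iterative instead of recursive but with the same effect: for object "a/b/c/d", refuse the write if "a/b/c", "a/b", or "a" already exists as a regular file. parentDirIsFile is illustrative, not the MinIO helper.

package main

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
)

// parentDirIsFile reports whether any parent prefix of parent already
// exists as a regular file under root, which would shadow the object path.
func parentDirIsFile(root, parent string) bool {
	for p := parent; p != "." && p != "/"; p = path.Dir(p) {
		if fi, err := os.Stat(filepath.Join(root, p)); err == nil && fi.Mode().IsRegular() {
			return true
		}
	}
	return false
}

func main() {
	dir, _ := os.MkdirTemp("", "bucket")
	defer os.RemoveAll(dir)
	os.WriteFile(filepath.Join(dir, "a"), []byte("x"), 0644)
	fmt.Println(parentDirIsFile(dir, "a/b/c")) // true: "a" is a file
}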
@ -627,20 +646,21 @@ func (fs *FSObjects) parentDirIsObject(bucket, parent string) bool {
// Additionally writes `fs.json` which carries the necessary metadata
// for future object operations.
func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
if err := checkPutObjectArgs(bucket, object, fs, data.Size()); err != nil {
if err := checkPutObjectArgs(ctx, bucket, object, fs, data.Size()); err != nil {
return ObjectInfo{}, err
}
// Lock the object.
objectLock := fs.nsMutex.NewNSLock(bucket, object)
if err := objectLock.GetLock(globalObjectTimeout); err != nil {
logger.LogIf(ctx, err)
return objInfo, err
}
defer objectLock.Unlock()
return fs.putObject(bucket, object, data, metadata)
return fs.putObject(ctx, bucket, object, data, metadata)
}
// putObject - wrapper for PutObject
func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
// No metadata is set, allocate a new one.
meta := make(map[string]string)
for k, v := range metadata {
@ -649,7 +669,7 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
var err error
// Validate if bucket name is valid and exists.
if _, err = fs.statBucketDir(bucket); err != nil {
if _, err = fs.statBucketDir(ctx, bucket); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket)
}
@ -661,31 +681,35 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
// and return success.
if isObjectDir(object, data.Size()) {
// Check if an object is present as one of the parent dirs.
if fs.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
logger.LogIf(ctx, errFileAccessDenied)
return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
}
if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil {
logger.LogIf(ctx, err)
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
var fi os.FileInfo
if fi, err = fsStatDir(pathJoin(fs.fsPath, bucket, object)); err != nil {
if fi, err = fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object)); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
return fsMeta.ToObjectInfo(bucket, object, fi), nil
}
if err = checkPutObjectArgs(bucket, object, fs, data.Size()); err != nil {
if err = checkPutObjectArgs(ctx, bucket, object, fs, data.Size()); err != nil {
return ObjectInfo{}, err
}
// Check if an object is present as one of the parent dirs.
if fs.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
logger.LogIf(ctx, errFileAccessDenied)
return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
}
// Validate input data size and it can never be less than zero.
if data.Size() < 0 {
return ObjectInfo{}, errors.Trace(errInvalidArgument)
logger.LogIf(ctx, errInvalidArgument)
return ObjectInfo{}, errInvalidArgument
}
var wlk *lock.LockedFile
@ -695,7 +719,8 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fs.metaJSONFile)
wlk, err = fs.rwPool.Create(fsMetaPath)
if err != nil {
return ObjectInfo{}, toObjectErr(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close()
@ -703,7 +728,7 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
// Remove meta file when PutObject encounters any error
if retErr != nil {
tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)
fsRemoveMeta(bucketMetaDir, fsMetaPath, tmpDir)
fsRemoveMeta(ctx, bucketMetaDir, fsMetaPath, tmpDir)
}
}()
}
@ -721,9 +746,9 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
buf := make([]byte, int(bufSize))
fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj)
bytesWritten, err := fsCreateFile(fsTmpObjPath, data, buf, data.Size())
bytesWritten, err := fsCreateFile(ctx, fsTmpObjPath, data, buf, data.Size())
if err != nil {
fsRemoveFile(fsTmpObjPath)
fsRemoveFile(ctx, fsTmpObjPath)
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
@ -732,24 +757,24 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
// Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header.
if bytesWritten < data.Size() {
fsRemoveFile(fsTmpObjPath)
return ObjectInfo{}, errors.Trace(IncompleteBody{})
fsRemoveFile(ctx, fsTmpObjPath)
return ObjectInfo{}, IncompleteBody{}
}
// Delete the temporary object in the case of a
// failure. If PutObject succeeds, then there would be
// nothing to delete.
defer fsRemoveFile(fsTmpObjPath)
defer fsRemoveFile(ctx, fsTmpObjPath)
// Entire object was written to the temp location, now it's safe to rename it to the actual location.
fsNSObjPath := pathJoin(fs.fsPath, bucket, object)
// Deny if WORM is enabled
if globalWORMEnabled {
if _, err = fsStatFile(fsNSObjPath); err == nil {
return ObjectInfo{}, errors.Trace(ObjectAlreadyExists{Bucket: bucket, Object: object})
if _, err = fsStatFile(ctx, fsNSObjPath); err == nil {
return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
}
}
if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil {
if err = fsRenameFile(ctx, fsTmpObjPath, fsNSObjPath); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
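Taken together, these hunks are the classic write-to-temp-then-rename pattern: stream the body into the tmp bucket, verify the byte count, then rename into the namespace so readers never observe a partial object. A minimal sketch under that reading, with illustrative paths (imports: io, os):
func putAtomicallySketch(tmpPath, finalPath string, body io.Reader) error {
	f, err := os.Create(tmpPath)
	if err != nil {
		return err
	}
	if _, err = io.Copy(f, body); err != nil {
		f.Close()
		os.Remove(tmpPath) // discard the partial write
		return err
	}
	f.Close()
	// Rename is atomic on POSIX filesystems within a volume.
	return os.Rename(tmpPath, finalPath)
}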
@ -761,7 +786,7 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
}
// Stat the file to fetch timestamp, size.
fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object))
fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
@ -780,11 +805,11 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) er
}
defer objectLock.Unlock()
if err := checkDelObjArgs(bucket, object); err != nil {
if err := checkDelObjArgs(ctx, bucket, object); err != nil {
return err
}
if _, err := fs.statBucketDir(bucket); err != nil {
if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return toObjectErr(err, bucket)
}
@ -797,18 +822,19 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) er
defer rwlk.Close()
}
if lerr != nil && lerr != errFileNotFound {
return toObjectErr(errors.Trace(lerr), bucket, object)
logger.LogIf(ctx, lerr)
return toObjectErr(lerr, bucket, object)
}
}
// Delete the object.
if err := fsDeleteFile(pathJoin(fs.fsPath, bucket), pathJoin(fs.fsPath, bucket, object)); err != nil {
if err := fsDeleteFile(ctx, pathJoin(fs.fsPath, bucket), pathJoin(fs.fsPath, bucket, object)); err != nil {
return toObjectErr(err, bucket, object)
}
if bucket != minioMetaBucket {
// Delete the metadata object.
err := fsDeleteFile(minioMetaBucketDir, fsMetaPath)
err := fsDeleteFile(ctx, minioMetaBucketDir, fsMetaPath)
if err != nil && errors.Cause(err) != errFileNotFound {
return toObjectErr(err, bucket, object)
}
@ -836,7 +862,7 @@ func (fs *FSObjects) listDirFactory(isLeaf isLeafFunc) listDirFunc {
// getObjectETag is a helper function, which returns only the md5sum
// of the file on the disk.
func (fs *FSObjects) getObjectETag(bucket, entry string, lock bool) (string, error) {
func (fs *FSObjects) getObjectETag(ctx context.Context, bucket, entry string, lock bool) (string, error) {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, entry, fs.metaJSONFile)
var reader io.Reader
@ -848,7 +874,8 @@ func (fs *FSObjects) getObjectETag(bucket, entry string, lock bool) (string, err
rlk, err := fs.rwPool.Open(fsMetaPath)
// Ignore if `fs.json` is not available, this is true for pre-existing data.
if err != nil && err != errFileNotFound {
return "", toObjectErr(errors.Trace(err), bucket, entry)
logger.LogIf(ctx, err)
return "", toObjectErr(err, bucket, entry)
}
// If file is not found, we don't need to proceed forward.
@ -862,16 +889,17 @@ func (fs *FSObjects) getObjectETag(bucket, entry string, lock bool) (string, err
// Fetch the size of the underlying file.
fi, err = rlk.LockedFile.Stat()
if err != nil {
return "", toObjectErr(errors.Trace(err), bucket, entry)
logger.LogIf(ctx, err)
return "", toObjectErr(err, bucket, entry)
}
size = fi.Size()
reader = io.NewSectionReader(rlk.LockedFile, 0, fi.Size())
} else {
var err error
reader, size, err = fsOpenFile(fsMetaPath, 0)
reader, size, err = fsOpenFile(ctx, fsMetaPath, 0)
if err != nil {
return "", toObjectErr(errors.Trace(err), bucket, entry)
return "", toObjectErr(err, bucket, entry)
}
}
@ -884,12 +912,14 @@ func (fs *FSObjects) getObjectETag(bucket, entry string, lock bool) (string, err
fsMetaBuf, err := ioutil.ReadAll(reader)
if err != nil {
return "", toObjectErr(errors.Trace(err), bucket, entry)
logger.LogIf(ctx, err)
return "", toObjectErr(err, bucket, entry)
}
// Check if FS metadata is valid, if not return error.
if !isFSMetaValid(parseFSVersion(fsMetaBuf)) {
return "", toObjectErr(errors.Trace(errCorruptedFormat), bucket, entry)
logger.LogIf(ctx, errCorruptedFormat)
return "", toObjectErr(errCorruptedFormat, bucket, entry)
}
return extractETag(parseFSMetaMap(fsMetaBuf)), nil
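getObjectETag ends by pulling the ETag out of the parsed fs.json map. A hedged sketch of what that extraction plausibly does; the "etag" and legacy "md5Sum" keys are assumptions based on the pre-existing-data fallback above, not confirmed by this diff:
func extractETagSketch(meta map[string]string) string {
	if etag, ok := meta["etag"]; ok {
		return etag
	}
	// Pre-existing data may carry the checksum under an older key.
	return meta["md5Sum"]
}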
@ -898,7 +928,7 @@ func (fs *FSObjects) getObjectETag(bucket, entry string, lock bool) (string, err
// ListObjects - lists all objects at prefix up to maxKeys, optionally delimited by '/'. Maintains the list pool
// state for future re-entrant list requests.
func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
if err := checkListObjsArgs(bucket, prefix, marker, delimiter, fs); err != nil {
if err := checkListObjsArgs(ctx, bucket, prefix, marker, delimiter, fs); err != nil {
return loi, err
}
// Marker is set validate pre-condition.
@ -908,7 +938,7 @@ func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, de
return ListObjectsInfo{}, e
}
}
if _, err := fs.statBucketDir(bucket); err != nil {
if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return loi, err
}
@ -942,10 +972,11 @@ func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, de
// Protect the entry from concurrent deletes, or renames.
objectLock := fs.nsMutex.NewNSLock(bucket, entry)
if err = objectLock.GetRLock(globalListingTimeout); err != nil {
logger.LogIf(ctx, err)
return ObjectInfo{}, err
}
defer objectLock.RUnlock()
return fs.getObjectInfo(bucket, entry)
return fs.getObjectInfo(ctx, bucket, entry)
}
heal := false // true only for xl.ListObjectsHeal()
@ -959,7 +990,7 @@ func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, de
return !hasSuffix(object, slashSeparator)
}
listDir := fs.listDirFactory(isLeaf)
walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
}
var objInfos []ObjectInfo
@ -984,7 +1015,6 @@ func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, de
}
objInfo, err := entryToObjectInfo(walkResult.entry)
if err != nil {
errorIf(err, "Unable to fetch object info for %s", walkResult.entry)
return loi, nil
}
nextMarker = objInfo.Name
@ -1018,34 +1048,39 @@ func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, de
// HealFormat - no-op for fs, Valid only for XL.
func (fs *FSObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
return madmin.HealResultItem{}, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return madmin.HealResultItem{}, NotImplemented{}
}
// HealObject - no-op for fs. Valid only for XL.
func (fs *FSObjects) HealObject(ctx context.Context, bucket, object string, dryRun bool) (
res madmin.HealResultItem, err error) {
return res, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return res, NotImplemented{}
}
// HealBucket - no-op for fs, Valid only for XL.
func (fs *FSObjects) HealBucket(ctx context.Context, bucket string, dryRun bool) ([]madmin.HealResultItem,
error) {
return nil, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return nil, NotImplemented{}
}
// ListObjectsHeal - list all objects to be healed. Valid only for XL
func (fs *FSObjects) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return loi, NotImplemented{}
}
// ListBucketsHeal - list all buckets to be healed. Valid only for XL
func (fs *FSObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
return []BucketInfo{}, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return []BucketInfo{}, NotImplemented{}
}
// SetBucketPolicy sets policy on bucket
func (fs *FSObjects) SetBucketPolicy(ctx context.Context, bucket string, policy policy.BucketAccessPolicy) error {
return persistAndNotifyBucketPolicyChange(bucket, false, policy, fs)
return persistAndNotifyBucketPolicyChange(ctx, bucket, false, policy, fs)
}
// GetBucketPolicy will get policy on bucket
@ -1059,7 +1094,7 @@ func (fs *FSObjects) GetBucketPolicy(ctx context.Context, bucket string) (policy
// DeleteBucketPolicy deletes all policies on bucket
func (fs *FSObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
return persistAndNotifyBucketPolicyChange(bucket, true, emptyBucketPolicy, fs)
return persistAndNotifyBucketPolicyChange(ctx, bucket, true, emptyBucketPolicy, fs)
}
// ListObjectsV2 lists all blobs in bucket filtered by prefix

View file

@ -91,7 +91,7 @@ func TestFSParentDirIsObject(t *testing.T) {
},
}
for i, testCase := range testCases {
gotValue := fs.parentDirIsObject(bucketName, testCase.objectName)
gotValue := fs.parentDirIsObject(context.Background(), bucketName, testCase.objectName)
if testCase.parentIsObject != gotValue {
t.Errorf("Test %d: Unexpected value returned got %t, expected %t", i+1, gotValue, testCase.parentIsObject)
}

View file

@ -19,7 +19,6 @@ package cmd
import (
"net/http"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio-go"
@ -31,12 +30,6 @@ var (
// MustGetUUID function alias.
MustGetUUID = mustGetUUID
// ErrorIf provides errorIf function alias.
ErrorIf = errorIf
// FatalIf provides fatalIf function alias.
FatalIf = fatalIf
)
// AnonErrToObjectErr - converts standard http codes into meaningful object layer errors.
@ -262,16 +255,6 @@ func ErrorRespToObjectError(err error, params ...string) error {
return nil
}
e, ok := err.(*errors.Error)
if !ok {
// Code should be fixed if this function is called without doing traceError()
// Else handling different situations in this function makes this function complicated.
errorIf(err, "Expected type *Error")
return err
}
err = e.Cause
bucket := ""
object := ""
if len(params) >= 1 {
@ -282,15 +265,14 @@ func ErrorRespToObjectError(err error, params ...string) error {
}
if isNetworkOrHostDown(err) {
e.Cause = BackendDown{}
return e
return BackendDown{}
}
minioErr, ok := err.(minio.ErrorResponse)
if !ok {
// We don't interpret non-Minio errors, as Minio errors will
// have a StatusCode to help convert them to object errors.
return e
return err
}
switch minioErr.Code {
@ -325,6 +307,5 @@ func ErrorRespToObjectError(err error, params ...string) error {
err = PartTooSmall{}
}
e.Cause = err
return e
return err
}
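The simplification above changes the calling convention: raw minio-go errors go straight in and mapped object-layer errors come straight out, with no *errors.Error unwrapping in between. A usage sketch, where client is a hypothetical minio-go client:
if err := client.RemoveBucket("mybucket"); err != nil {
	// Map the minio-go error directly; logging now happens at the
	// caller's site via logger.LogIf rather than inside the helper.
	return ErrorRespToObjectError(err, "mybucket")
}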

View file

@ -17,6 +17,8 @@
package cmd
import (
"context"
"errors"
"fmt"
"net/url"
"os"
@ -28,7 +30,7 @@ import (
"github.com/gorilla/mux"
"github.com/minio/cli"
miniohttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
var (
@ -100,10 +102,14 @@ func ValidateGatewayArguments(serverAddr, endpointAddr string) error {
return nil
}
func init() {
logger.Init(GOPATH)
}
// StartGateway - handler for 'minio gateway <name>'.
func StartGateway(ctx *cli.Context, gw Gateway) {
if gw == nil {
fatalIf(errUnexpected, "Gateway implementation not initialized, exiting.")
logger.FatalIf(errUnexpected, "Gateway implementation not initialized, exiting.")
}
// Validate if we have access, secret set through environment.
@ -116,13 +122,13 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Enable json and quiet modes if the json flag is turned on.
jsonFlag := ctx.IsSet("json") || ctx.GlobalIsSet("json")
if jsonFlag {
log.EnableJSON()
logger.EnableJSON()
}
// Get quiet flag from command line argument.
quietFlag := ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
if quietFlag {
log.EnableQuiet()
logger.EnableQuiet()
}
// Fetch address option
@ -139,35 +145,34 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Validate if we have access, secret set through environment.
if !globalIsEnvCreds {
errorIf(fmt.Errorf("Access and secret keys not set"), "Access and Secret keys should be set through ENVs for backend [%s]", gatewayName)
reqInfo := (&logger.ReqInfo{}).AppendTags("gatewayName", gatewayName)
contxt := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(contxt, errors.New("Access and Secret keys should be set through ENVs for backend"))
cli.ShowCommandHelpAndExit(ctx, gatewayName, 1)
}
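This hunk introduces the tagging idiom the rest of the commit follows: build a logger.ReqInfo, attach it to a context, and hand that context to logger.LogIf so the tags travel with the error. In sketch form, with a hypothetical tag (signatures follow their usage in this diff; call sites elsewhere pass possibly-nil errors, so LogIf is evidently nil-safe):
reqInfo := (&logger.ReqInfo{}).AppendTags("bucket", "my-bucket") // hypothetical tag
logCtx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(logCtx, err) // emits the error together with the attached tags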
// Create certs path.
fatalIf(createConfigDir(), "Unable to create configuration directories.")
logger.FatalIf(createConfigDir(), "Unable to create configuration directories.")
// Initialize gateway config.
initConfig()
// Init the error tracing module.
errors.Init(GOPATH, "github.com/minio/minio")
// Check and load SSL certificates.
var err error
globalPublicCerts, globalRootCAs, globalTLSCertificate, globalIsSSL, err = getSSLConfig()
fatalIf(err, "Invalid SSL certificate file")
logger.FatalIf(err, "Invalid SSL certificate file")
// Set system resources to maximum.
errorIf(setMaxResources(), "Unable to change resource limit")
logger.LogIf(context.Background(), setMaxResources())
initNSLock(false) // Enable local namespace lock.
// Initialize notification system.
globalNotificationSys, err = NewNotificationSys(globalServerConfig, EndpointList{})
fatalIf(err, "Unable to initialize notification system.")
logger.FatalIf(err, "Unable to initialize notification system.")
newObject, err := gw.NewGatewayLayer(globalServerConfig.GetCredential())
fatalIf(err, "Unable to initialize gateway layer")
logger.FatalIf(err, "Unable to initialize gateway layer")
router := mux.NewRouter().SkipClean(true)
@ -176,7 +181,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Register web router when its enabled.
if globalIsBrowserEnabled {
fatalIf(registerWebRouter(router), "Unable to configure web browser")
logger.FatalIf(registerWebRouter(router), "Unable to configure web browser")
}
// Add API router.
@ -204,7 +209,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Print a warning message if gateway is not ready for production before the startup banner.
if !gw.Production() {
log.Println(colorYellow("\n *** Warning: Not Ready for Production ***"))
logger.Println(colorYellow("\n *** Warning: Not Ready for Production ***"))
}
// Print gateway startup message.

View file

@ -20,6 +20,8 @@ import (
"context"
"fmt"
"strings"
"github.com/minio/minio/cmd/logger"
)
// Prints the formatted startup message.
@ -54,12 +56,12 @@ func printGatewayCommonMsg(apiEndpoints []string) {
apiEndpointStr := strings.Join(apiEndpoints, " ")
// Colorize the message and print.
log.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
log.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey)))
log.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey)))
logger.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
logger.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey)))
logger.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey)))
if globalIsBrowserEnabled {
log.Println(colorBlue("\nBrowser Access:"))
log.Println(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr))
logger.Println(colorBlue("\nBrowser Access:"))
logger.Println(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr))
}
}

View file

@ -21,7 +21,7 @@ import (
"time"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/madmin"
)
@ -31,105 +31,125 @@ type GatewayUnsupported struct{}
// ListMultipartUploads lists all multipart uploads.
func (a GatewayUnsupported) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) {
return lmi, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return lmi, NotImplemented{}
}
// NewMultipartUpload upload object in multiple parts
func (a GatewayUnsupported) NewMultipartUpload(ctx context.Context, bucket string, object string, metadata map[string]string) (uploadID string, err error) {
return "", errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return "", NotImplemented{}
}
// CopyObjectPart copy part of object to uploadID for another object
func (a GatewayUnsupported) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, partID int, startOffset, length int64, srcInfo ObjectInfo) (pi PartInfo, err error) {
return pi, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return pi, NotImplemented{}
}
// PutObjectPart puts a part of object in bucket
func (a GatewayUnsupported) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, err error) {
return pi, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return pi, NotImplemented{}
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (a GatewayUnsupported) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) {
return lpi, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return lpi, NotImplemented{}
}
// AbortMultipartUpload aborts a ongoing multipart upload
func (a GatewayUnsupported) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error {
return errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (a GatewayUnsupported) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, err error) {
return oi, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return oi, NotImplemented{}
}
// SetBucketPolicy sets policy on bucket
func (a GatewayUnsupported) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error {
return errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
}
// GetBucketPolicy will get policy on bucket
func (a GatewayUnsupported) GetBucketPolicy(ctx context.Context, bucket string) (bal policy.BucketAccessPolicy, err error) {
return bal, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return bal, NotImplemented{}
}
// DeleteBucketPolicy deletes all policies on bucket
func (a GatewayUnsupported) DeleteBucketPolicy(ctx context.Context, bucket string) error {
return errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
}
// HealFormat - Not implemented stub
func (a GatewayUnsupported) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
return madmin.HealResultItem{}, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return madmin.HealResultItem{}, NotImplemented{}
}
// HealBucket - Not implemented stub
func (a GatewayUnsupported) HealBucket(ctx context.Context, bucket string, dryRun bool) ([]madmin.HealResultItem, error) {
return nil, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return nil, NotImplemented{}
}
// ListBucketsHeal - Not implemented stub
func (a GatewayUnsupported) ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error) {
return nil, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return nil, NotImplemented{}
}
// HealObject - Not implemented stub
func (a GatewayUnsupported) HealObject(ctx context.Context, bucket, object string, dryRun bool) (h madmin.HealResultItem, e error) {
return h, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return h, NotImplemented{}
}
// ListObjectsV2 - Not implemented stub
func (a GatewayUnsupported) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
return result, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return result, NotImplemented{}
}
// ListObjectsHeal - Not implemented stub
func (a GatewayUnsupported) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return loi, NotImplemented{}
}
// CopyObject copies a blob from source container to destination container.
func (a GatewayUnsupported) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
srcInfo ObjectInfo) (objInfo ObjectInfo, err error) {
return objInfo, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return objInfo, NotImplemented{}
}
// Locking operations
// ListLocks lists namespace locks held in object layer
func (a GatewayUnsupported) ListLocks(ctx context.Context, bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
return []VolumeLockInfo{}, errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return []VolumeLockInfo{}, NotImplemented{}
}
// ClearLocks clears namespace locks held in object layer
func (a GatewayUnsupported) ClearLocks(ctx context.Context, info []VolumeLockInfo) error {
return errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
}
// RefreshBucketPolicy refreshes cache policy with what's on disk.
func (a GatewayUnsupported) RefreshBucketPolicy(ctx context.Context, bucket string) error {
return errors.Trace(NotImplemented{})
logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
}
// IsNotificationSupported returns whether bucket notification is applicable for this layer.
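The point of these stubs is composition: a concrete gateway embeds GatewayUnsupported so that every ObjectLayer method it does not override falls back to a logged NotImplemented{}. A hedged sketch of that embedding, with hypothetical names:
type exampleGateway struct {
	minio.GatewayUnsupported // supplies the NotImplemented stubs above
	// backend client, config, etc.
}
// Only the operations the backend supports are overridden; anything
// else, e.g. HealObject, is inherited and returns NotImplemented{}.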

View file

@ -35,6 +35,7 @@ import (
humanize "github.com/dustin/go-humanize"
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
@ -118,7 +119,7 @@ func azureGatewayMain(ctx *cli.Context) {
// Validate gateway arguments.
host := ctx.Args().First()
// Validate gateway arguments.
minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
logger.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
minio.StartGateway(ctx, &Azure{host})
}
@ -181,11 +182,12 @@ func (g *Azure) Production() bool {
// copied into BlobProperties.
//
// Header names are canonicalized as in http.Header.
func s3MetaToAzureProperties(s3Metadata map[string]string) (storage.BlobMetadata,
func s3MetaToAzureProperties(ctx context.Context, s3Metadata map[string]string) (storage.BlobMetadata,
storage.BlobProperties, error) {
for k := range s3Metadata {
if strings.Contains(k, "--") {
return storage.BlobMetadata{}, storage.BlobProperties{}, errors.Trace(minio.UnsupportedMetadata{})
logger.LogIf(ctx, minio.UnsupportedMetadata{})
return storage.BlobMetadata{}, storage.BlobProperties{}, minio.UnsupportedMetadata{}
}
}
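For context on why "--" is rejected above: Azure blob metadata keys must be valid C# identifiers, so a plausible encoding maps '-' to '_' and '_' to "__", under which a literal "--" in an S3 key has no unambiguous round-trip, hence UnsupportedMetadata. A sketch of that assumed encoding, not the gateway's actual encoder (imports: strings):
func encodeS3KeySketch(k string) (string, bool) {
	if strings.Contains(k, "--") {
		return "", false // would collide with an encoded '_', cannot round-trip
	}
	k = strings.Replace(k, "_", "__", -1)
	k = strings.Replace(k, "-", "_", -1)
	return k, true
}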
@ -300,15 +302,6 @@ func azureToObjectError(err error, params ...string) error {
return nil
}
e, ok := err.(*errors.Error)
if !ok {
// Code should be fixed if this function is called without doing errors.Trace()
// Else handling different situations in this function makes this function complicated.
minio.ErrorIf(err, "Expected type *Error")
return err
}
err = e.Cause
bucket := ""
object := ""
if len(params) >= 1 {
@ -322,7 +315,7 @@ func azureToObjectError(err error, params ...string) error {
if !ok {
// We don't interpret non-Azure errors, as Azure errors will
// have a StatusCode to help convert them to object errors.
return e
return err
}
switch azureErr.Code {
@ -349,8 +342,7 @@ func azureToObjectError(err error, params ...string) error {
err = minio.BucketNameInvalid{Bucket: bucket}
}
}
e.Cause = err
return e
return err
}
// mustGetAzureUploadID - returns a new upload ID, which is a hex-encoded 8-byte random value.
@ -371,17 +363,23 @@ func mustGetAzureUploadID() string {
}
// checkAzureUploadID - returns an error if the given string is not a valid upload ID.
func checkAzureUploadID(uploadID string) (err error) {
func checkAzureUploadID(ctx context.Context, uploadID string) (err error) {
if len(uploadID) != 16 {
return errors.Trace(minio.MalformedUploadID{
logger.LogIf(ctx, minio.MalformedUploadID{
UploadID: uploadID,
})
return minio.MalformedUploadID{
UploadID: uploadID,
}
}
if _, err = hex.DecodeString(uploadID); err != nil {
return errors.Trace(minio.MalformedUploadID{
logger.LogIf(ctx, minio.MalformedUploadID{
UploadID: uploadID,
})
return minio.MalformedUploadID{
UploadID: uploadID,
}
}
return nil
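The two checks above mirror how these IDs are minted: mustGetAzureUploadID is documented earlier in this file as hex-encoding 8 random bytes, which yields exactly 16 hex characters. A sketch of that generation under the same assumption (imports: crypto/rand, encoding/hex; error handling elided):
func uploadIDSketch() string {
	b := make([]byte, 8)
	rand.Read(b) // 8 random bytes; error ignored in this sketch
	return hex.EncodeToString(b) // 16 hex characters, passes both checks
}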
@ -438,7 +436,8 @@ func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket, locat
err := container.Create(&storage.CreateContainerOptions{
Access: storage.ContainerAccessTypePrivate,
})
return azureToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return azureToObjectError(err, bucket)
}
// GetBucketInfo - Get bucket metadata..
@ -448,7 +447,8 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min
// in azure documentation, so we will simply use the same function here.
// Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata
if !minio.IsValidBucketName(bucket) {
return bi, errors.Trace(minio.BucketNameInvalid{Bucket: bucket})
logger.LogIf(ctx, minio.BucketNameInvalid{Bucket: bucket})
return bi, minio.BucketNameInvalid{Bucket: bucket}
}
// Azure does not have an equivalent call, hence use
@ -457,7 +457,8 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min
Prefix: bucket,
})
if err != nil {
return bi, azureToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return bi, azureToObjectError(err, bucket)
}
for _, container := range resp.Containers {
if container.Name == bucket {
@ -470,19 +471,22 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min
} // else continue
}
}
return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket})
logger.LogIf(ctx, minio.BucketNotFound{Bucket: bucket})
return bi, minio.BucketNotFound{Bucket: bucket}
}
// ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers.
func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
resp, err := a.client.ListContainers(storage.ListContainersParameters{})
if err != nil {
return nil, azureToObjectError(errors.Trace(err))
logger.LogIf(ctx, err)
return nil, azureToObjectError(err)
}
for _, container := range resp.Containers {
t, e := time.Parse(time.RFC1123, container.Properties.LastModified)
if e != nil {
return nil, errors.Trace(e)
logger.LogIf(ctx, e)
return nil, e
}
buckets = append(buckets, minio.BucketInfo{
Name: container.Name,
@ -495,7 +499,9 @@ func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketI
// DeleteBucket - delete a container on azure, uses Azure equivalent DeleteContainer.
func (a *azureObjects) DeleteBucket(ctx context.Context, bucket string) error {
container := a.client.GetContainerReference(bucket)
return azureToObjectError(errors.Trace(container.Delete(nil)), bucket)
err := container.Delete(nil)
logger.LogIf(ctx, err)
return azureToObjectError(err, bucket)
}
// ListObjects - lists all blobs on azure within a container filtered by prefix
@ -512,7 +518,8 @@ func (a *azureObjects) ListObjects(ctx context.Context, bucket, prefix, marker,
MaxResults: uint(maxKeys),
})
if err != nil {
return result, azureToObjectError(errors.Trace(err), bucket, prefix)
logger.LogIf(ctx, err)
return result, azureToObjectError(err, bucket, prefix)
}
for _, object := range resp.Blobs {
@ -580,7 +587,8 @@ func (a *azureObjects) ListObjectsV2(ctx context.Context, bucket, prefix, contin
func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
// startOffset cannot be negative.
if startOffset < 0 {
return azureToObjectError(errors.Trace(minio.InvalidRange{}), bucket, object)
logger.LogIf(ctx, minio.InvalidRange{})
return azureToObjectError(minio.InvalidRange{}, bucket, object)
}
blobRange := &storage.BlobRange{Start: uint64(startOffset)}
@ -599,11 +607,13 @@ func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, sta
})
}
if err != nil {
return azureToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return azureToObjectError(err, bucket, object)
}
_, err = io.Copy(writer, rc)
rc.Close()
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
// GetObjectInfo - reads blob metadata properties and replies back minio.ObjectInfo,
@ -612,7 +622,8 @@ func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string)
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err = blob.GetProperties(nil)
if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object)
}
return minio.ObjectInfo{
@ -631,13 +642,14 @@ func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string)
// uses Azure equivalent CreateBlockBlobFromReader.
func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
blob.Metadata, blob.Properties, err = s3MetaToAzureProperties(metadata)
blob.Metadata, blob.Properties, err = s3MetaToAzureProperties(ctx, metadata)
if err != nil {
return objInfo, azureToObjectError(err, bucket, object)
}
err = blob.CreateBlockBlobFromReader(data, nil)
if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object)
}
return a.GetObjectInfo(ctx, bucket, object)
}
@ -647,19 +659,21 @@ func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, dat
func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo) (objInfo minio.ObjectInfo, err error) {
srcBlobURL := a.client.GetContainerReference(srcBucket).GetBlobReference(srcObject).GetURL()
destBlob := a.client.GetContainerReference(destBucket).GetBlobReference(destObject)
azureMeta, props, err := s3MetaToAzureProperties(srcInfo.UserDefined)
azureMeta, props, err := s3MetaToAzureProperties(ctx, srcInfo.UserDefined)
if err != nil {
return objInfo, azureToObjectError(err, srcBucket, srcObject)
}
destBlob.Metadata = azureMeta
err = destBlob.Copy(srcBlobURL, nil)
if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), srcBucket, srcObject)
logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, srcBucket, srcObject)
}
destBlob.Properties = props
err = destBlob.SetProperties(nil)
if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), srcBucket, srcObject)
logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, srcBucket, srcObject)
}
return a.GetObjectInfo(ctx, destBucket, destObject)
}
@ -670,7 +684,7 @@ func (a *azureObjects) DeleteObject(ctx context.Context, bucket, object string)
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err := blob.Delete(nil)
if err != nil {
return azureToObjectError(errors.Trace(err), bucket, object)
return azureToObjectError(err, bucket, object)
}
return nil
}
@ -690,19 +704,21 @@ func getAzureMetadataObjectName(objectName, uploadID string) string {
return fmt.Sprintf(metadataObjectNameTemplate, uploadID, sha256.Sum256([]byte(objectName)))
}
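The naming scheme above keys the multipart metadata object on the upload ID plus a SHA-256 of the object name, so the derived name is fixed-length and safe for arbitrary object keys. An illustration with a hypothetical template string; the real metadataObjectNameTemplate is defined elsewhere in this file:
// e.g. "multipart/v1/<uploadID>.<sha256-of-object-name>/azure.json"
name := fmt.Sprintf("multipart/v1/%s.%x/azure.json",
	uploadID, sha256.Sum256([]byte(objectName)))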
func (a *azureObjects) checkUploadIDExists(bucketName, objectName, uploadID string) (err error) {
func (a *azureObjects) checkUploadIDExists(ctx context.Context, bucketName, objectName, uploadID string) (err error) {
blob := a.client.GetContainerReference(bucketName).GetBlobReference(
getAzureMetadataObjectName(objectName, uploadID))
err = blob.GetMetadata(nil)
err = azureToObjectError(errors.Trace(err), bucketName, objectName)
logger.LogIf(ctx, err)
err = azureToObjectError(err, bucketName, objectName)
oerr := minio.ObjectNotFound{
Bucket: bucketName,
Object: objectName,
}
if errors.Cause(err) == oerr {
err = errors.Trace(minio.InvalidUploadID{
logger.LogIf(ctx, minio.InvalidUploadID{UploadID: uploadID})
err = minio.InvalidUploadID{
UploadID: uploadID,
})
}
}
return err
}
@ -714,13 +730,15 @@ func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object st
var jsonData []byte
if jsonData, err = json.Marshal(azureMultipartMetadata{Name: object, Metadata: metadata}); err != nil {
return "", errors.Trace(err)
logger.LogIf(ctx, err)
return "", err
}
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
err = blob.CreateBlockBlobFromReader(bytes.NewBuffer(jsonData), nil)
if err != nil {
return "", azureToObjectError(errors.Trace(err), bucket, metadataObject)
logger.LogIf(ctx, err)
return "", azureToObjectError(err, bucket, metadataObject)
}
return uploadID, nil
@ -728,11 +746,11 @@ func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object st
// PutObjectPart - Use Azure equivalent PutBlockWithLength.
func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info minio.PartInfo, err error) {
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return info, err
}
if err = checkAzureUploadID(uploadID); err != nil {
if err = checkAzureUploadID(ctx, uploadID); err != nil {
return info, err
}
@ -756,7 +774,8 @@ func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, upload
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err = blob.PutBlockWithLength(id, uint64(subPartSize), io.LimitReader(data, subPartSize), nil)
if err != nil {
return info, azureToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return info, azureToObjectError(err, bucket, object)
}
subPartNumber++
}
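For orientation, the loop this hunk sits in splits a single S3 part into several Azure blocks, since Azure caps the size of one block below the maximum S3 part size. A hedged sketch of that chunking; the cap is illustrative, not Azure's documented limit:
func splitIntoBlocksSketch(partSize int64) []int64 {
	const maxBlock = 100 * 1024 * 1024 // illustrative block cap
	var sizes []int64
	for remaining := partSize; remaining > 0; {
		n := remaining
		if n > maxBlock {
			n = maxBlock
		}
		sizes = append(sizes, n) // one Azure block per entry
		remaining -= n
	}
	return sizes
}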
@ -770,7 +789,7 @@ func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, upload
// ListObjectParts - Use Azure equivalent GetBlockList.
func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result minio.ListPartsInfo, err error) {
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return result, err
}
@ -787,7 +806,8 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
return result, nil
}
if err != nil {
return result, azureToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return result, azureToObjectError(err, bucket, object)
}
// Build a sorted list of parts and return the requested entries.
partsMap := make(map[int]minio.PartInfo)
@ -796,7 +816,8 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
var parsedUploadID string
var md5Hex string
if partNumber, _, parsedUploadID, md5Hex, err = azureParseBlockID(block.Name); err != nil {
return result, azureToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object)
logger.LogIf(ctx, fmt.Errorf("Unexpected error"))
return result, azureToObjectError(fmt.Errorf("Unexpected error"), bucket, object)
}
if parsedUploadID != uploadID {
continue
@ -813,7 +834,8 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
if part.ETag != md5Hex {
// If two parts of same partNumber were uploaded with different contents
// return error as we won't be able to decide which the latest part is.
return result, azureToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object)
logger.LogIf(ctx, fmt.Errorf("Unexpected error"))
return result, azureToObjectError(fmt.Errorf("Unexpected error"), bucket, object)
}
part.Size += block.Size
partsMap[partNumber] = part
@ -856,7 +878,7 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
// There is no corresponding API in azure to abort an incomplete upload. The uncommitted blocks
// get deleted after one week.
func (a *azureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) (err error) {
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return err
}
@ -868,23 +890,25 @@ func (a *azureObjects) AbortMultipartUpload(ctx context.Context, bucket, object,
// CompleteMultipartUpload - Use Azure equivalent PutBlockList.
func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart) (objInfo minio.ObjectInfo, err error) {
metadataObject := getAzureMetadataObjectName(object, uploadID)
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return objInfo, err
}
if err = checkAzureUploadID(uploadID); err != nil {
if err = checkAzureUploadID(ctx, uploadID); err != nil {
return objInfo, err
}
var metadataReader io.Reader
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
if metadataReader, err = blob.Get(nil); err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, metadataObject)
logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, metadataObject)
}
var metadata azureMultipartMetadata
if err = json.NewDecoder(metadataReader).Decode(&metadata); err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, metadataObject)
logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, metadataObject)
}
defer func() {
@ -894,13 +918,15 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
derr := blob.Delete(nil)
minio.ErrorIf(derr, "unable to remove meta data object for upload ID %s", uploadID)
logger.GetReqInfo(ctx).AppendTags("uploadID", uploadID)
logger.LogIf(ctx, derr)
}()
objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object)
resp, err := objBlob.GetBlockList(storage.BlockListTypeUncommitted, nil)
if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object)
}
getBlocks := func(partNumber int, etag string) (blocks []storage.Block, size int64, err error) {
@ -936,7 +962,8 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
var size int64
blocks, size, err = getBlocks(part.PartNumber, part.ETag)
if err != nil {
return objInfo, errors.Trace(err)
logger.LogIf(ctx, err)
return objInfo, err
}
allBlocks = append(allBlocks, blocks...)
@ -946,30 +973,39 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
// Error out if any part except the last is smaller than 5MiB.
for i, size := range partSizes[:len(partSizes)-1] {
if size < azureS3MinPartSize {
return objInfo, errors.Trace(minio.PartTooSmall{
logger.LogIf(ctx, minio.PartTooSmall{
PartNumber: uploadedParts[i].PartNumber,
PartSize: size,
PartETag: uploadedParts[i].ETag,
})
return objInfo, minio.PartTooSmall{
PartNumber: uploadedParts[i].PartNumber,
PartSize: size,
PartETag: uploadedParts[i].ETag,
}
}
}
err = objBlob.PutBlockList(allBlocks, nil)
if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object)
}
if len(metadata.Metadata) > 0 {
objBlob.Metadata, objBlob.Properties, err = s3MetaToAzureProperties(metadata.Metadata)
objBlob.Metadata, objBlob.Properties, err = s3MetaToAzureProperties(ctx, metadata.Metadata)
if err != nil {
logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object)
}
err = objBlob.SetProperties(nil)
if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object)
}
err = objBlob.SetMetadata(nil)
if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object)
}
}
return a.GetObjectInfo(ctx, bucket, object)
@ -992,13 +1028,16 @@ func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, polic
}
prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 {
return errors.Trace(minio.NotImplemented{})
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
if policies[0].Prefix != prefix {
return errors.Trace(minio.NotImplemented{})
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
if policies[0].Policy != policy.BucketPolicyReadOnly {
return errors.Trace(minio.NotImplemented{})
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
perm := storage.ContainerPermissions{
AccessType: storage.ContainerAccessTypeContainer,
@ -1006,7 +1045,8 @@ func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, polic
}
container := a.client.GetContainerReference(bucket)
err := container.SetPermissions(perm, nil)
return azureToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return azureToObjectError(err, bucket)
}
// GetBucketPolicy - Get the container ACL and convert it to canonical []bucketAccessPolicy
@ -1015,15 +1055,18 @@ func (a *azureObjects) GetBucketPolicy(ctx context.Context, bucket string) (poli
container := a.client.GetContainerReference(bucket)
perm, err := container.GetPermissions(nil)
if err != nil {
return policy.BucketAccessPolicy{}, azureToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return policy.BucketAccessPolicy{}, azureToObjectError(err, bucket)
}
switch perm.AccessType {
case storage.ContainerAccessTypePrivate:
return policy.BucketAccessPolicy{}, errors.Trace(minio.PolicyNotFound{Bucket: bucket})
logger.LogIf(ctx, minio.PolicyNotFound{Bucket: bucket})
return policy.BucketAccessPolicy{}, minio.PolicyNotFound{Bucket: bucket}
case storage.ContainerAccessTypeContainer:
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "")
default:
return policy.BucketAccessPolicy{}, azureToObjectError(errors.Trace(minio.NotImplemented{}))
logger.LogIf(ctx, minio.NotImplemented{})
return policy.BucketAccessPolicy{}, azureToObjectError(minio.NotImplemented{})
}
return policyInfo, nil
}
@ -1036,5 +1079,6 @@ func (a *azureObjects) DeleteBucketPolicy(ctx context.Context, bucket string) er
}
container := a.client.GetContainerReference(bucket)
err := container.SetPermissions(perm, nil)
return azureToObjectError(errors.Trace(err))
logger.LogIf(ctx, err)
return azureToObjectError(err)
}

View file

@ -17,6 +17,7 @@
package azure
import (
"context"
"fmt"
"net/http"
"reflect"
@ -55,7 +56,7 @@ func TestS3MetaToAzureProperties(t *testing.T) {
"X_Amz_Matdesc": "{}",
"X_Amz_Iv": "eWmyryl8kq+EVnnsE7jpOg==",
}
meta, _, err := s3MetaToAzureProperties(headers)
meta, _, err := s3MetaToAzureProperties(context.Background(), headers)
if err != nil {
t.Fatalf("Test failed, with %s", err)
}
@ -65,7 +66,7 @@ func TestS3MetaToAzureProperties(t *testing.T) {
headers = map[string]string{
"invalid--meta": "value",
}
_, _, err = s3MetaToAzureProperties(headers)
_, _, err = s3MetaToAzureProperties(context.Background(), headers)
if err = errors.Cause(err); err != nil {
if _, ok := err.(minio.UnsupportedMetadata); !ok {
t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err)
@ -75,7 +76,7 @@ func TestS3MetaToAzureProperties(t *testing.T) {
headers = map[string]string{
"content-md5": "Dce7bmCX61zvxzP5QmfelQ==",
}
_, props, err := s3MetaToAzureProperties(headers)
_, props, err := s3MetaToAzureProperties(context.Background(), headers)
if err != nil {
t.Fatalf("Test failed, with %s", err)
}
@ -137,53 +138,46 @@ func TestAzureToObjectError(t *testing.T) {
nil, nil, "", "",
},
{
errors.Trace(fmt.Errorf("Non azure error")),
fmt.Errorf("Non azure error"),
fmt.Errorf("Non azure error"), "", "",
},
{
storage.AzureStorageServiceError{
Code: "ContainerAlreadyExists",
}, storage.AzureStorageServiceError{
Code: "ContainerAlreadyExists",
}, "bucket", "",
}, minio.BucketExists{Bucket: "bucket"}, "bucket", "",
},
{
errors.Trace(storage.AzureStorageServiceError{
Code: "ContainerAlreadyExists",
}), minio.BucketExists{Bucket: "bucket"}, "bucket", "",
},
{
errors.Trace(storage.AzureStorageServiceError{
storage.AzureStorageServiceError{
Code: "InvalidResourceName",
}), minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
}, minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
},
{
errors.Trace(storage.AzureStorageServiceError{
storage.AzureStorageServiceError{
Code: "RequestBodyTooLarge",
}), minio.PartTooBig{}, "", "",
}, minio.PartTooBig{}, "", "",
},
{
errors.Trace(storage.AzureStorageServiceError{
storage.AzureStorageServiceError{
Code: "InvalidMetadata",
}), minio.UnsupportedMetadata{}, "", "",
}, minio.UnsupportedMetadata{}, "", "",
},
{
errors.Trace(storage.AzureStorageServiceError{
storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound,
}), minio.ObjectNotFound{
}, minio.ObjectNotFound{
Bucket: "bucket",
Object: "object",
}, "bucket", "object",
},
{
errors.Trace(storage.AzureStorageServiceError{
storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound,
}), minio.BucketNotFound{Bucket: "bucket"}, "bucket", "",
}, minio.BucketNotFound{Bucket: "bucket"}, "bucket", "",
},
{
errors.Trace(storage.AzureStorageServiceError{
storage.AzureStorageServiceError{
StatusCode: http.StatusBadRequest,
}), minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
}, minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
},
}
for i, testCase := range testCases {
@ -307,7 +301,7 @@ func TestCheckAzureUploadID(t *testing.T) {
}
for _, uploadID := range invalidUploadIDs {
if err := checkAzureUploadID(uploadID); err == nil {
if err := checkAzureUploadID(context.Background(), uploadID); err == nil {
t.Fatalf("%s: expected: <error>, got: <nil>", uploadID)
}
}
@ -318,7 +312,7 @@ func TestCheckAzureUploadID(t *testing.T) {
}
for _, uploadID := range validUploadIDs {
if err := checkAzureUploadID(uploadID); err != nil {
if err := checkAzureUploadID(context.Background(), uploadID); err != nil {
t.Fatalf("%s: expected: <nil>, got: %s", uploadID, err)
}
}

View file

@ -31,8 +31,8 @@ import (
b2 "github.com/minio/blazer/base"
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
h2 "github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd"
@ -146,16 +146,6 @@ func b2ToObjectError(err error, params ...string) error {
if err == nil {
return nil
}
e, ok := err.(*errors.Error)
if !ok {
// Code should be fixed if this function is called without doing errors.Trace()
// Else handling different situations in this function makes this function complicated.
minio.ErrorIf(err, "Expected type *Error")
return err
}
err = e.Cause
bucket := ""
object := ""
uploadID := ""
@ -177,7 +167,7 @@ func b2ToObjectError(err error, params ...string) error {
if statusCode == 0 {
// We don't interpret non-B2 errors. B2 errors have statusCode
// to help us convert them to S3 object errors.
return e
return err
}
switch code {
@ -208,8 +198,7 @@ func b2ToObjectError(err error, params ...string) error {
err = minio.InvalidUploadID{UploadID: uploadID}
}
e.Cause = err
return e
return err
}
// Shutdown saves any gateway metadata to disk
@ -230,7 +219,8 @@ func (l *b2Objects) MakeBucketWithLocation(ctx context.Context, bucket, location
// All buckets are set to private by default.
_, err := l.b2Client.CreateBucket(l.ctx, bucket, bucketTypePrivate, nil, nil)
return b2ToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket)
}
func (l *b2Objects) reAuthorizeAccount(ctx context.Context) error {
@ -271,14 +261,16 @@ func (l *b2Objects) listBuckets(ctx context.Context, err error) ([]*b2.Bucket, e
func (l *b2Objects) Bucket(ctx context.Context, bucket string) (*b2.Bucket, error) {
bktList, err := l.listBuckets(ctx, nil)
if err != nil {
return nil, b2ToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return nil, b2ToObjectError(err, bucket)
}
for _, bkt := range bktList {
if bkt.Name == bucket {
return bkt, nil
}
}
return nil, errors.Trace(minio.BucketNotFound{Bucket: bucket})
logger.LogIf(ctx, minio.BucketNotFound{Bucket: bucket})
return nil, minio.BucketNotFound{Bucket: bucket}
}
// GetBucketInfo gets bucket metadata..
@ -315,7 +307,8 @@ func (l *b2Objects) DeleteBucket(ctx context.Context, bucket string) error {
return err
}
err = bkt.DeleteBucket(l.ctx)
return b2ToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket)
}
// ListObjects lists all objects in a B2 bucket filtered by prefix, returning up to 1000 entries at a time.
@ -326,7 +319,8 @@ func (l *b2Objects) ListObjects(ctx context.Context, bucket string, prefix strin
}
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
if lerr != nil {
return loi, b2ToObjectError(errors.Trace(lerr), bucket)
logger.LogIf(ctx, lerr)
return loi, b2ToObjectError(lerr, bucket)
}
loi.IsTruncated = next != ""
loi.NextMarker = next
@ -359,7 +353,8 @@ func (l *b2Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuat
}
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, continuationToken, prefix, delimiter)
if lerr != nil {
return loi, b2ToObjectError(errors.Trace(lerr), bucket)
logger.LogIf(ctx, lerr)
return loi, b2ToObjectError(lerr, bucket)
}
loi.IsTruncated = next != ""
loi.ContinuationToken = continuationToken
@ -396,11 +391,13 @@ func (l *b2Objects) GetObject(ctx context.Context, bucket string, object string,
}
reader, err := bkt.DownloadFileByName(l.ctx, object, startOffset, length)
if err != nil {
return b2ToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object)
}
defer reader.Close()
_, err = io.Copy(writer, reader)
return b2ToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object)
}
// GetObjectInfo reads object info and replies back ObjectInfo
@ -411,12 +408,14 @@ func (l *b2Objects) GetObjectInfo(ctx context.Context, bucket string, object str
}
f, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
if err != nil {
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
}
f.Close()
fi, err := bkt.File(f.ID, object).GetFileInfo(l.ctx)
if err != nil {
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
}
return minio.ObjectInfo{
Bucket: bucket,
@ -504,20 +503,23 @@ func (l *b2Objects) PutObject(ctx context.Context, bucket string, object string,
var u *b2.URL
u, err = bkt.GetUploadURL(l.ctx)
if err != nil {
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
}
hr := newB2Reader(data, data.Size())
var f *b2.File
f, err = u.UploadFile(l.ctx, hr, int(hr.Size()), object, contentType, sha1AtEOF, metadata)
if err != nil {
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
}
var fi *b2.FileInfo
fi, err = f.GetFileInfo(l.ctx)
if err != nil {
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
}
return minio.ObjectInfo{
@ -539,12 +541,14 @@ func (l *b2Objects) DeleteObject(ctx context.Context, bucket string, object stri
}
reader, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
if err != nil {
return b2ToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object)
}
io.Copy(ioutil.Discard, reader)
reader.Close()
err = bkt.File(reader.ID, object).DeleteFileVersion(l.ctx)
return b2ToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object)
}
// ListMultipartUploads lists all multipart uploads.
@ -563,7 +567,8 @@ func (l *b2Objects) ListMultipartUploads(ctx context.Context, bucket string, pre
}
largeFiles, nextMarker, err := bkt.ListUnfinishedLargeFiles(l.ctx, uploadIDMarker, maxUploads)
if err != nil {
return lmi, b2ToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return lmi, b2ToObjectError(err, bucket)
}
lmi = minio.ListMultipartsInfo{
MaxUploads: maxUploads,
@ -598,7 +603,8 @@ func (l *b2Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
delete(metadata, "content-type")
lf, err := bkt.StartLargeFile(l.ctx, object, contentType, metadata)
if err != nil {
return uploadID, b2ToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return uploadID, b2ToObjectError(err, bucket, object)
}
return lf.ID, nil
@ -613,13 +619,15 @@ func (l *b2Objects) PutObjectPart(ctx context.Context, bucket string, object str
fc, err := bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx)
if err != nil {
return pi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
logger.LogIf(ctx, err)
return pi, b2ToObjectError(err, bucket, object, uploadID)
}
hr := newB2Reader(data, data.Size())
sha1, err := fc.UploadPart(l.ctx, hr, sha1AtEOF, int(hr.Size()), partID)
if err != nil {
return pi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
logger.LogIf(ctx, err)
return pi, b2ToObjectError(err, bucket, object, uploadID)
}
return minio.PartInfo{
@ -647,7 +655,8 @@ func (l *b2Objects) ListObjectParts(ctx context.Context, bucket string, object s
partNumberMarker++
partsList, next, err := bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts)
if err != nil {
return lpi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
logger.LogIf(ctx, err)
return lpi, b2ToObjectError(err, bucket, object, uploadID)
}
if next != 0 {
lpi.IsTruncated = true
@ -670,7 +679,8 @@ func (l *b2Objects) AbortMultipartUpload(ctx context.Context, bucket string, obj
return err
}
err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx)
return b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object, uploadID)
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API.
@ -684,7 +694,8 @@ func (l *b2Objects) CompleteMultipartUpload(ctx context.Context, bucket string,
// B2 requires contiguous part numbers starting with 1; it does not support
// hand-picking part numbers, so we return an S3 compatible error instead.
if i+1 != uploadedPart.PartNumber {
return oi, b2ToObjectError(errors.Trace(minio.InvalidPart{}), bucket, object, uploadID)
logger.LogIf(ctx, minio.InvalidPart{})
return oi, b2ToObjectError(minio.InvalidPart{}, bucket, object, uploadID)
}
// Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag.
@ -692,7 +703,8 @@ func (l *b2Objects) CompleteMultipartUpload(ctx context.Context, bucket string,
}
if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil {
return oi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
logger.LogIf(ctx, err)
return oi, b2ToObjectError(err, bucket, object, uploadID)
}
return l.GetObjectInfo(ctx, bucket, object)
@ -713,13 +725,16 @@ func (l *b2Objects) SetBucketPolicy(ctx context.Context, bucket string, policyIn
}
prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 {
return errors.Trace(minio.NotImplemented{})
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
if policies[0].Prefix != prefix {
return errors.Trace(minio.NotImplemented{})
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
if policies[0].Policy != policy.BucketPolicyReadOnly {
return errors.Trace(minio.NotImplemented{})
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
bkt, err := l.Bucket(ctx, bucket)
if err != nil {
@ -727,7 +742,8 @@ func (l *b2Objects) SetBucketPolicy(ctx context.Context, bucket string, policyIn
}
bkt.Type = bucketTypeReadOnly
_, err = bkt.Update(l.ctx)
return b2ToObjectError(errors.Trace(err))
logger.LogIf(ctx, err)
return b2ToObjectError(err)
}
// GetBucketPolicy returns the current bucketType from the B2 backend and converts
@ -745,7 +761,8 @@ func (l *b2Objects) GetBucketPolicy(ctx context.Context, bucket string) (policy.
// bkt.Type can also be snapshot, but it is only allowed through the B2 browser console,
// so just return PolicyNotFound for all such cases.
// CreateBucket always sets the value to allPrivate by default.
return policy.BucketAccessPolicy{}, errors.Trace(minio.PolicyNotFound{Bucket: bucket})
logger.LogIf(ctx, minio.PolicyNotFound{Bucket: bucket})
return policy.BucketAccessPolicy{}, minio.PolicyNotFound{Bucket: bucket}
}
// DeleteBucketPolicy - resets the bucketType of bucket on B2 to 'allPrivate'.
@ -756,5 +773,6 @@ func (l *b2Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error
}
bkt.Type = bucketTypePrivate
_, err = bkt.Update(l.ctx)
return b2ToObjectError(errors.Trace(err))
logger.LogIf(ctx, err)
return b2ToObjectError(err)
}
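Note: the edits above are the template for the whole commit: drop the errors.Trace wrapper, log the error once against the request-scoped context, and return it bare for conversion. A minimal sketch of the new convention, where toObjectError is a hypothetical stand-in for the per-gateway converters (b2ToObjectError, gcsToObjectError, ossToObjectError):

package sketch

import (
	"context"

	"github.com/minio/minio/cmd/logger"
)

// toObjectError is a hypothetical stand-in for the per-gateway converters.
func toObjectError(err error, params ...string) error { return err }

func updateBucketType(ctx context.Context, update func() error, bucket string) error {
	if err := update(); err != nil {
		// Log with the request-scoped context instead of wrapping with errors.Trace.
		logger.LogIf(ctx, err)
		return toObjectError(err, bucket)
	}
	return nil
}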


@ -21,7 +21,6 @@ import (
"testing"
b2 "github.com/minio/blazer/base"
"github.com/minio/minio/pkg/errors"
minio "github.com/minio/minio/cmd"
)
@ -40,70 +39,70 @@ func TestB2ObjectError(t *testing.T) {
[]string{}, fmt.Errorf("Not *Error"), fmt.Errorf("Not *Error"),
},
{
[]string{}, errors.Trace(fmt.Errorf("Non B2 Error")), fmt.Errorf("Non B2 Error"),
[]string{}, fmt.Errorf("Non B2 Error"), fmt.Errorf("Non B2 Error"),
},
{
[]string{"bucket"}, errors.Trace(b2.Error{
[]string{"bucket"}, b2.Error{
StatusCode: 1,
Code: "duplicate_bucket_name",
}), minio.BucketAlreadyOwnedByYou{
}, minio.BucketAlreadyOwnedByYou{
Bucket: "bucket",
},
},
{
[]string{"bucket"}, errors.Trace(b2.Error{
[]string{"bucket"}, b2.Error{
StatusCode: 1,
Code: "bad_request",
}), minio.BucketNotFound{
}, minio.BucketNotFound{
Bucket: "bucket",
},
},
{
[]string{"bucket", "object"}, errors.Trace(b2.Error{
[]string{"bucket", "object"}, b2.Error{
StatusCode: 1,
Code: "bad_request",
}), minio.ObjectNameInvalid{
}, minio.ObjectNameInvalid{
Bucket: "bucket",
Object: "object",
},
},
{
[]string{"bucket"}, errors.Trace(b2.Error{
[]string{"bucket"}, b2.Error{
StatusCode: 1,
Code: "bad_bucket_id",
}), minio.BucketNotFound{Bucket: "bucket"},
}, minio.BucketNotFound{Bucket: "bucket"},
},
{
[]string{"bucket", "object"}, errors.Trace(b2.Error{
[]string{"bucket", "object"}, b2.Error{
StatusCode: 1,
Code: "file_not_present",
}), minio.ObjectNotFound{
}, minio.ObjectNotFound{
Bucket: "bucket",
Object: "object",
},
},
{
[]string{"bucket", "object"}, errors.Trace(b2.Error{
[]string{"bucket", "object"}, b2.Error{
StatusCode: 1,
Code: "not_found",
}), minio.ObjectNotFound{
}, minio.ObjectNotFound{
Bucket: "bucket",
Object: "object",
},
},
{
[]string{"bucket"}, errors.Trace(b2.Error{
[]string{"bucket"}, b2.Error{
StatusCode: 1,
Code: "cannot_delete_non_empty_bucket",
}), minio.BucketNotEmpty{
}, minio.BucketNotEmpty{
Bucket: "bucket",
},
},
{
[]string{"bucket", "object", "uploadID"}, errors.Trace(b2.Error{
[]string{"bucket", "object", "uploadID"}, b2.Error{
StatusCode: 1,
Message: "No active upload for",
}), minio.InvalidUploadID{
}, minio.InvalidUploadID{
UploadID: "uploadID",
},
},


@ -34,8 +34,8 @@ import (
humanize "github.com/dustin/go-humanize"
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
"google.golang.org/api/googleapi"
@ -154,11 +154,13 @@ EXAMPLES:
func gcsGatewayMain(ctx *cli.Context) {
projectID := ctx.Args().First()
if projectID == "" && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" {
minio.ErrorIf(errGCSProjectIDNotFound, "project-id should be provided as argument or GOOGLE_APPLICATION_CREDENTIALS should be set with path to credentials.json")
logger.LogIf(context.Background(), errGCSProjectIDNotFound)
cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
}
if projectID != "" && !isValidGCSProjectIDFormat(projectID) {
minio.ErrorIf(errGCSInvalidProjectID, "Unable to start GCS gateway with %s", ctx.Args().First())
reqInfo := (&logger.ReqInfo{}).AppendTags("projectID", ctx.Args().First())
contxt := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(contxt, errGCSInvalidProjectID)
cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
}
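The two call sites above show how request details reach the log: names and tags are attached to a ReqInfo, which rides on the context that LogIf reads. A short sketch of the pattern, using only the ReqInfo fields and calls visible in this diff (the tag value is illustrative):

package sketch

import (
	"context"
	"errors"

	"github.com/minio/minio/cmd/logger"
)

func logWithRequestInfo(bucket, object string) {
	// Fields and tags travel with the context; LogIf emits them with the error.
	reqInfo := &logger.ReqInfo{BucketName: bucket, ObjectName: object}
	reqInfo = reqInfo.AppendTags("projectID", "example-project") // illustrative tag
	ctx := logger.SetReqInfo(context.Background(), reqInfo)
	logger.LogIf(ctx, errors.New("example failure"))
}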
@ -237,16 +239,6 @@ func gcsToObjectError(err error, params ...string) error {
return nil
}
e, ok := err.(*errors.Error)
if !ok {
// Code should be fixed if this function is called without doing errors.Trace()
// Else handling different situations in this function makes this function complicated.
minio.ErrorIf(err, "Expected type *Error")
return err
}
err = e.Cause
bucket := ""
object := ""
uploadID := ""
@ -266,8 +258,7 @@ func gcsToObjectError(err error, params ...string) error {
err = minio.BucketNotFound{
Bucket: bucket,
}
e.Cause = err
return e
return err
case "storage: object doesn't exist":
if uploadID != "" {
err = minio.InvalidUploadID{
@ -279,21 +270,18 @@ func gcsToObjectError(err error, params ...string) error {
Object: object,
}
}
e.Cause = err
return e
return err
}
googleAPIErr, ok := err.(*googleapi.Error)
if !ok {
// We don't interpret non-Minio errors, as Minio errors will
// have a StatusCode to help convert them to object errors.
e.Cause = err
return e
return err
}
if len(googleAPIErr.Errors) == 0 {
e.Cause = err
return e
return err
}
reason := googleAPIErr.Errors[0].Reason
@ -337,8 +325,7 @@ func gcsToObjectError(err error, params ...string) error {
err = fmt.Errorf("Unsupported error reason: %s", reason)
}
e.Cause = err
return e
return err
}
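With the *errors.Error unwrapping removed, gcsToObjectError now inspects the raw error directly and unknown errors pass through unchanged. A condensed sketch of that shape, showing two representative cases only:

package sketch

import (
	minio "github.com/minio/minio/cmd"
	"google.golang.org/api/googleapi"
)

func gcsErrorSketch(err error, bucket string) error {
	if err == nil {
		return nil
	}
	switch err.Error() {
	case "storage: bucket doesn't exist":
		return minio.BucketNotFound{Bucket: bucket}
	}
	if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 {
		// Reasons such as "notFound" or "conflict" map to object errors as above.
		if gerr.Errors[0].Reason == "notFound" {
			return minio.BucketNotFound{Bucket: bucket}
		}
	}
	return err // non-GCS errors are not interpreted
}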
// gcsProjectIDRegex defines a valid gcs project id format
@ -381,7 +368,9 @@ func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(bucket string) {
attrs, err := it.Next()
if err != nil {
if err != iterator.Done {
minio.ErrorIf(err, "Object listing error on bucket %s during purging of old files in minio.sys.tmp", bucket)
reqInfo := &logger.ReqInfo{BucketName: bucket}
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
}
return
}
@ -389,7 +378,9 @@ func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(bucket string) {
// Delete files older than 2 weeks.
err := l.client.Bucket(bucket).Object(attrs.Name).Delete(l.ctx)
if err != nil {
minio.ErrorIf(err, "Unable to delete %s/%s during purging of old files in minio.sys.tmp", bucket, attrs.Name)
reqInfo := &logger.ReqInfo{BucketName: bucket, ObjectName: attrs.Name}
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return
}
}
@ -404,7 +395,8 @@ func (l *gcsGateway) CleanupGCSMinioSysTmp() {
attrs, err := it.Next()
if err != nil {
if err != iterator.Done {
minio.ErrorIf(err, "Bucket listing error during purging of old files in minio.sys.tmp")
ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{})
logger.LogIf(ctx, err)
}
break
}
@ -438,15 +430,16 @@ func (l *gcsGateway) MakeBucketWithLocation(ctx context.Context, bucket, locatio
err := bkt.Create(l.ctx, l.projectID, &storage.BucketAttrs{
Location: location,
})
return gcsToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket)
}
// GetBucketInfo - Get bucket metadata.
func (l *gcsGateway) GetBucketInfo(ctx context.Context, bucket string) (minio.BucketInfo, error) {
attrs, err := l.client.Bucket(bucket).Attrs(l.ctx)
if err != nil {
return minio.BucketInfo{}, gcsToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return minio.BucketInfo{}, gcsToObjectError(err, bucket)
}
return minio.BucketInfo{
@ -467,7 +460,8 @@ func (l *gcsGateway) ListBuckets(ctx context.Context) (buckets []minio.BucketInf
}
if ierr != nil {
return buckets, gcsToObjectError(errors.Trace(ierr))
logger.LogIf(ctx, ierr)
return buckets, gcsToObjectError(ierr)
}
buckets = append(buckets, minio.BucketInfo{
@ -495,7 +489,8 @@ func (l *gcsGateway) DeleteBucket(ctx context.Context, bucket string) error {
break
}
if err != nil {
return gcsToObjectError(errors.Trace(err))
logger.LogIf(ctx, err)
return gcsToObjectError(err)
}
if objAttrs.Prefix == minio.GatewayMinioSysTmp {
gcsMinioPathFound = true
@ -505,7 +500,8 @@ func (l *gcsGateway) DeleteBucket(ctx context.Context, bucket string) error {
break
}
if nonGCSMinioPathFound {
return gcsToObjectError(errors.Trace(minio.BucketNotEmpty{}))
logger.LogIf(ctx, minio.BucketNotEmpty{})
return gcsToObjectError(minio.BucketNotEmpty{})
}
if gcsMinioPathFound {
// Remove minio.sys.tmp before deleting the bucket.
@ -516,16 +512,19 @@ func (l *gcsGateway) DeleteBucket(ctx context.Context, bucket string) error {
break
}
if err != nil {
return gcsToObjectError(errors.Trace(err))
logger.LogIf(ctx, err)
return gcsToObjectError(err)
}
err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(l.ctx)
if err != nil {
return gcsToObjectError(errors.Trace(err))
logger.LogIf(ctx, err)
return gcsToObjectError(err)
}
}
}
err := l.client.Bucket(bucket).Delete(l.ctx)
return gcsToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket)
}
func toGCSPageToken(name string) string {
@ -607,7 +606,8 @@ func (l *gcsGateway) ListObjects(ctx context.Context, bucket string, prefix stri
break
}
if err != nil {
return minio.ListObjectsInfo{}, gcsToObjectError(errors.Trace(err), bucket, prefix)
logger.LogIf(ctx, err)
return minio.ListObjectsInfo{}, gcsToObjectError(err, bucket, prefix)
}
nextMarker = toGCSPageToken(attrs.Name)
@ -689,7 +689,8 @@ func (l *gcsGateway) ListObjectsV2(ctx context.Context, bucket, prefix, continua
}
if err != nil {
return minio.ListObjectsV2Info{}, gcsToObjectError(errors.Trace(err), bucket, prefix)
logger.LogIf(ctx, err)
return minio.ListObjectsV2Info{}, gcsToObjectError(err, bucket, prefix)
}
if attrs.Prefix == minio.GatewayMinioSysTmp {
@ -733,18 +734,21 @@ func (l *gcsGateway) GetObject(ctx context.Context, bucket string, key string, s
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return gcsToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket)
}
object := l.client.Bucket(bucket).Object(key)
r, err := object.NewRangeReader(l.ctx, startOffset, length)
if err != nil {
return gcsToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket, key)
}
defer r.Close()
if _, err := io.Copy(writer, r); err != nil {
return gcsToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket, key)
}
return nil
@ -771,12 +775,14 @@ func (l *gcsGateway) GetObjectInfo(ctx context.Context, bucket string, object st
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket)
}
attrs, err := l.client.Bucket(bucket).Object(object).Attrs(l.ctx)
if err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, object)
}
return fromGCSAttrsToObjectInfo(attrs), nil
@ -787,7 +793,8 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, d
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket)
}
object := l.client.Bucket(bucket).Object(key)
@ -801,7 +808,8 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, d
if _, err := io.Copy(w, data); err != nil {
// Close the object writer upon error.
w.CloseWithError(err)
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
// Close the object writer upon success.
@ -809,7 +817,8 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, d
attrs, err := object.Attrs(l.ctx)
if err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
return fromGCSAttrsToObjectInfo(attrs), nil
@ -827,7 +836,8 @@ func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject
attrs, err := copier.Run(l.ctx)
if err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), destBucket, destObject)
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, destBucket, destObject)
}
return fromGCSAttrsToObjectInfo(attrs), nil
@ -837,7 +847,8 @@ func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject
func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string) error {
err := l.client.Bucket(bucket).Object(object).Delete(l.ctx)
if err != nil {
return gcsToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket, object)
}
return nil
@ -863,7 +874,8 @@ func (l *gcsGateway) NewMultipartUpload(ctx context.Context, bucket string, key
bucket,
key,
}); err != nil {
return "", gcsToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return "", gcsToObjectError(err, bucket, key)
}
return uploadID, nil
}
@ -883,7 +895,8 @@ func (l *gcsGateway) ListMultipartUploads(ctx context.Context, bucket string, pr
// an object layer compatible error upon any error.
func (l *gcsGateway) checkUploadIDExists(ctx context.Context, bucket string, key string, uploadID string) error {
_, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(l.ctx)
return gcsToObjectError(errors.Trace(err), bucket, key, uploadID)
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket, key, uploadID)
}
// PutObjectPart puts a part of object in bucket
@ -904,7 +917,8 @@ func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key strin
if _, err := io.Copy(w, data); err != nil {
// Make sure to close object writer upon error.
w.Close()
return minio.PartInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return minio.PartInfo{}, gcsToObjectError(err, bucket, key)
}
// Make sure to close the object writer upon success.
w.Close()
@ -923,7 +937,7 @@ func (l *gcsGateway) ListObjectParts(ctx context.Context, bucket string, key str
}
// Called by AbortMultipartUpload and CompleteMultipartUpload for cleaning up.
func (l *gcsGateway) cleanupMultipartUpload(bucket, key, uploadID string) error {
func (l *gcsGateway) cleanupMultipartUpload(ctx context.Context, bucket, key, uploadID string) error {
prefix := fmt.Sprintf("%s/%s/", gcsMinioMultipartPathV1, uploadID)
// iterate through all parts and delete them
@ -935,7 +949,8 @@ func (l *gcsGateway) cleanupMultipartUpload(bucket, key, uploadID string) error
break
}
if err != nil {
return gcsToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket, key)
}
object := l.client.Bucket(bucket).Object(attrs.Name)
@ -951,7 +966,7 @@ func (l *gcsGateway) AbortMultipartUpload(ctx context.Context, bucket string, ke
if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil {
return err
}
return l.cleanupMultipartUpload(bucket, key, uploadID)
return l.cleanupMultipartUpload(ctx, bucket, key, uploadID)
}
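cleanupMultipartUpload now receives the request context explicitly, so errors logged while deleting parts carry the caller's request info; AbortMultipartUpload (and CompleteMultipartUpload below) simply forward ctx. A tiny sketch of the threading, with deletePart standing in for the per-part delete:

package sketch

import (
	"context"

	"github.com/minio/minio/cmd/logger"
)

func cleanupSketch(ctx context.Context, deletePart func() error) error {
	if err := deletePart(); err != nil {
		logger.LogIf(ctx, err) // logged with the caller's ReqInfo, not a fresh context
		return err
	}
	return nil
}

func abortSketch(ctx context.Context, deletePart func() error) error {
	return cleanupSketch(ctx, deletePart) // ctx is forwarded unchanged
}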
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
@ -968,23 +983,27 @@ func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string,
partZeroAttrs, err := object.Attrs(l.ctx)
if err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key, uploadID)
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key, uploadID)
}
r, err := object.NewReader(l.ctx)
if err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
defer r.Close()
// Check version compatibility of the meta file before compose()
multipartMeta := gcsMultipartMetaV1{}
if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(errGCSFormat), bucket, key)
logger.LogIf(ctx, errGCSFormat)
return minio.ObjectInfo{}, gcsToObjectError(errGCSFormat, bucket, key)
}
// Validate if the gcs.json stores valid entries for the bucket and key.
@ -1001,7 +1020,8 @@ func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string,
uploadedPart.PartNumber, uploadedPart.ETag)))
partAttr, pErr := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, uploadedPart.PartNumber, uploadedPart.ETag)).Attrs(l.ctx)
if pErr != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(pErr), bucket, key, uploadID)
logger.LogIf(ctx, pErr)
return minio.ObjectInfo{}, gcsToObjectError(pErr, bucket, key, uploadID)
}
partSizes[i] = partAttr.Size
}
@ -1009,11 +1029,16 @@ func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string,
// Error out if any part except the last is smaller than 5MiB.
for i, size := range partSizes[:len(partSizes)-1] {
if size < 5*humanize.MiByte {
return minio.ObjectInfo{}, errors.Trace(minio.PartTooSmall{
logger.LogIf(ctx, minio.PartTooSmall{
PartNumber: uploadedParts[i].PartNumber,
PartSize: size,
PartETag: uploadedParts[i].ETag,
})
return minio.ObjectInfo{}, minio.PartTooSmall{
PartNumber: uploadedParts[i].PartNumber,
PartSize: size,
PartETag: uploadedParts[i].ETag,
}
}
}
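Here (and in the OSS gateway further down) the PartTooSmall value is constructed twice, once for the log and once for the return. An equivalent form, not part of this change, would build it once:

package sketch

import (
	"context"

	humanize "github.com/dustin/go-humanize"
	minio "github.com/minio/minio/cmd"
	"github.com/minio/minio/cmd/logger"
)

func checkPartSize(ctx context.Context, partNumber int, size int64, etag string) error {
	if size < 5*humanize.MiByte {
		err := minio.PartTooSmall{PartNumber: partNumber, PartSize: size, PartETag: etag}
		logger.LogIf(ctx, err) // log and return the same value
		return err
	}
	return nil
}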
@ -1040,7 +1065,8 @@ func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string,
composer.Metadata = partZeroAttrs.Metadata
if _, err = composer.Run(l.ctx); err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
}
@ -1053,10 +1079,11 @@ func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string,
composer.Metadata = partZeroAttrs.Metadata
attrs, err := composer.Run(l.ctx)
if err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
if err = l.cleanupMultipartUpload(bucket, key, uploadID); err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
if err = l.cleanupMultipartUpload(ctx, bucket, key, uploadID); err != nil {
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
return fromGCSAttrsToObjectInfo(attrs), nil
}
@ -1075,16 +1102,19 @@ func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, policyI
prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 {
return errors.Trace(minio.NotImplemented{})
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
if policies[0].Prefix != prefix {
return errors.Trace(minio.NotImplemented{})
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
acl := l.client.Bucket(bucket).ACL()
if policies[0].Policy == policy.BucketPolicyNone {
if err := acl.Delete(l.ctx, storage.AllUsers); err != nil {
return gcsToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket)
}
return nil
}
@ -1096,11 +1126,13 @@ func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, policyI
case policy.BucketPolicyWriteOnly:
role = storage.RoleWriter
default:
return errors.Trace(minio.NotImplemented{})
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
if err := acl.Set(l.ctx, storage.AllUsers, role); err != nil {
return gcsToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket)
}
return nil
@ -1110,7 +1142,8 @@ func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, policyI
func (l *gcsGateway) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) {
rules, err := l.client.Bucket(bucket).ACL().List(l.ctx)
if err != nil {
return policy.BucketAccessPolicy{}, gcsToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return policy.BucketAccessPolicy{}, gcsToObjectError(err, bucket)
}
policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
for _, r := range rules {
@ -1126,7 +1159,8 @@ func (l *gcsGateway) GetBucketPolicy(ctx context.Context, bucket string) (policy
}
// Return NoSuchBucketPolicy error, when policy is not set
if len(policyInfo.Statements) == 0 {
return policy.BucketAccessPolicy{}, gcsToObjectError(errors.Trace(minio.PolicyNotFound{}), bucket)
logger.LogIf(ctx, minio.PolicyNotFound{})
return policy.BucketAccessPolicy{}, gcsToObjectError(minio.PolicyNotFound{}, bucket)
}
return policyInfo, nil
}
@ -1135,7 +1169,8 @@ func (l *gcsGateway) GetBucketPolicy(ctx context.Context, bucket string) (policy
func (l *gcsGateway) DeleteBucketPolicy(ctx context.Context, bucket string) error {
// This only removes the storage.AllUsers policies
if err := l.client.Bucket(bucket).ACL().Delete(l.ctx, storage.AllUsers); err != nil {
return gcsToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket)
}
return nil


@ -24,7 +24,6 @@ import (
"reflect"
"testing"
"github.com/minio/minio/pkg/errors"
"google.golang.org/api/googleapi"
miniogo "github.com/minio/minio-go"
@ -237,14 +236,14 @@ func TestGCSToObjectError(t *testing.T) {
},
{
[]string{"bucket"},
errors.Trace(fmt.Errorf("storage: bucket doesn't exist")),
fmt.Errorf("storage: bucket doesn't exist"),
minio.BucketNotFound{
Bucket: "bucket",
},
},
{
[]string{"bucket", "object"},
errors.Trace(fmt.Errorf("storage: object doesn't exist")),
fmt.Errorf("storage: object doesn't exist"),
minio.ObjectNotFound{
Bucket: "bucket",
Object: "object",
@ -252,76 +251,76 @@ func TestGCSToObjectError(t *testing.T) {
},
{
[]string{"bucket", "object", "uploadID"},
errors.Trace(fmt.Errorf("storage: object doesn't exist")),
fmt.Errorf("storage: object doesn't exist"),
minio.InvalidUploadID{
UploadID: "uploadID",
},
},
{
[]string{},
errors.Trace(fmt.Errorf("Unknown error")),
fmt.Errorf("Unknown error"),
fmt.Errorf("Unknown error"),
},
{
[]string{"bucket", "object"},
errors.Trace(&googleapi.Error{
&googleapi.Error{
Message: "No list of errors",
}),
},
&googleapi.Error{
Message: "No list of errors",
},
},
{
[]string{"bucket", "object"},
errors.Trace(&googleapi.Error{
&googleapi.Error{
Errors: []googleapi.ErrorItem{{
Reason: "conflict",
Message: "You already own this bucket. Please select another name.",
}},
}),
},
minio.BucketAlreadyOwnedByYou{
Bucket: "bucket",
},
},
{
[]string{"bucket", "object"},
errors.Trace(&googleapi.Error{
&googleapi.Error{
Errors: []googleapi.ErrorItem{{
Reason: "conflict",
Message: "Sorry, that name is not available. Please try a different one.",
}},
}),
},
minio.BucketAlreadyExists{
Bucket: "bucket",
},
},
{
[]string{"bucket", "object"},
errors.Trace(&googleapi.Error{
&googleapi.Error{
Errors: []googleapi.ErrorItem{{
Reason: "conflict",
}},
}),
},
minio.BucketNotEmpty{Bucket: "bucket"},
},
{
[]string{"bucket"},
errors.Trace(&googleapi.Error{
&googleapi.Error{
Errors: []googleapi.ErrorItem{{
Reason: "notFound",
}},
}),
},
minio.BucketNotFound{
Bucket: "bucket",
},
},
{
[]string{"bucket", "object"},
errors.Trace(&googleapi.Error{
&googleapi.Error{
Errors: []googleapi.ErrorItem{{
Reason: "notFound",
}},
}),
},
minio.ObjectNotFound{
Bucket: "bucket",
Object: "object",
@ -329,22 +328,22 @@ func TestGCSToObjectError(t *testing.T) {
},
{
[]string{"bucket"},
errors.Trace(&googleapi.Error{
&googleapi.Error{
Errors: []googleapi.ErrorItem{{
Reason: "invalid",
}},
}),
},
minio.BucketNameInvalid{
Bucket: "bucket",
},
},
{
[]string{"bucket", "object"},
errors.Trace(&googleapi.Error{
&googleapi.Error{
Errors: []googleapi.ErrorItem{{
Reason: "forbidden",
}},
}),
},
minio.PrefixAccessDenied{
Bucket: "bucket",
Object: "object",
@ -352,11 +351,11 @@ func TestGCSToObjectError(t *testing.T) {
},
{
[]string{"bucket", "object"},
errors.Trace(&googleapi.Error{
&googleapi.Error{
Errors: []googleapi.ErrorItem{{
Reason: "keyInvalid",
}},
}),
},
minio.PrefixAccessDenied{
Bucket: "bucket",
Object: "object",
@ -364,11 +363,11 @@ func TestGCSToObjectError(t *testing.T) {
},
{
[]string{"bucket", "object"},
errors.Trace(&googleapi.Error{
&googleapi.Error{
Errors: []googleapi.ErrorItem{{
Reason: "required",
}},
}),
},
minio.PrefixAccessDenied{
Bucket: "bucket",
Object: "object",
@ -376,11 +375,11 @@ func TestGCSToObjectError(t *testing.T) {
},
{
[]string{"bucket", "object"},
errors.Trace(&googleapi.Error{
&googleapi.Error{
Errors: []googleapi.ErrorItem{{
Reason: "unknown",
}},
}),
},
fmt.Errorf("Unsupported error reason: unknown"),
},
}


@ -33,8 +33,8 @@ import (
"github.com/joyent/triton-go/storage"
"github.com/minio/cli"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -118,7 +118,7 @@ func mantaGatewayMain(ctx *cli.Context) {
// Validate gateway arguments.
host := ctx.Args().First()
// Validate gateway arguments.
minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
logger.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
minio.StartGateway(ctx, &Manta{host})
}
@ -139,6 +139,7 @@ func (g *Manta) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, erro
var err error
var signer authentication.Signer
var endpoint = defaultMantaURL
ctx := context.Background()
if g.host != "" {
endpoint, _, err = minio.ParseGatewayEndpoint(g.host)
@ -163,7 +164,8 @@ func (g *Manta) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, erro
}
signer, err = authentication.NewSSHAgentSigner(input)
if err != nil {
return nil, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, err
}
} else {
var keyBytes []byte
@ -200,7 +202,8 @@ func (g *Manta) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, erro
signer, err = authentication.NewPrivateKeySigner(input)
if err != nil {
return nil, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, err
}
}
@ -352,7 +355,8 @@ func (t *tritonObjects) ListObjects(ctx context.Context, bucket, prefix, marker,
if terrors.IsResourceNotFoundError(err) {
return result, nil
}
return result, errors.Trace(err)
logger.LogIf(ctx, err)
return result, err
}
for _, obj := range objs.Entries {
@ -362,7 +366,8 @@ func (t *tritonObjects) ListObjects(ctx context.Context, bucket, prefix, marker,
input.DirectoryName = path.Join(mantaRoot, bucket, prefix)
objs, err = t.client.Dir().List(ctx, input)
if err != nil {
return result, errors.Trace(err)
logger.LogIf(ctx, err)
return result, err
}
break
}
@ -428,7 +433,8 @@ func (t *tritonObjects) ListObjectsV2(ctx context.Context, bucket, prefix, conti
if terrors.IsResourceNotFoundError(err) {
return result, nil
}
return result, errors.Trace(err)
logger.LogIf(ctx, err)
return result, err
}
for _, obj := range objs.Entries {
@ -436,7 +442,8 @@ func (t *tritonObjects) ListObjectsV2(ctx context.Context, bucket, prefix, conti
input.DirectoryName = path.Join(mantaRoot, bucket, prefix)
objs, err = t.client.Dir().List(ctx, input)
if err != nil {
return result, errors.Trace(err)
logger.LogIf(ctx, err)
return result, err
}
break
}
@ -479,7 +486,8 @@ func (t *tritonObjects) ListObjectsV2(ctx context.Context, bucket, prefix, conti
func (t *tritonObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
// Start offset cannot be negative.
if startOffset < 0 {
return errors.Trace(fmt.Errorf("Unexpected error"))
logger.LogIf(ctx, fmt.Errorf("Unexpected error"))
return fmt.Errorf("Unexpected error")
}
output, err := t.client.Objects().Get(ctx, &storage.GetObjectInput{
@ -555,11 +563,13 @@ func (t *tritonObjects) PutObject(ctx context.Context, bucket, object string, da
ObjectReader: dummySeeker{data},
ForceInsert: true,
}); err != nil {
return objInfo, errors.Trace(err)
logger.LogIf(ctx, err)
return objInfo, err
}
if err = data.Verify(); err != nil {
t.DeleteObject(ctx, bucket, object)
return objInfo, errors.Trace(err)
logger.LogIf(ctx, err)
return objInfo, err
}
return t.GetObjectInfo(ctx, bucket, object)
@ -574,7 +584,8 @@ func (t *tritonObjects) CopyObject(ctx context.Context, srcBucket, srcObject, de
SourcePath: path.Join(mantaRoot, srcBucket, srcObject),
LinkPath: path.Join(mantaRoot, destBucket, destObject),
}); err != nil {
return objInfo, errors.Trace(err)
logger.LogIf(ctx, err)
return objInfo, err
}
return t.GetObjectInfo(ctx, destBucket, destObject)
@ -587,7 +598,8 @@ func (t *tritonObjects) DeleteObject(ctx context.Context, bucket, object string)
if err := t.client.Objects().Delete(ctx, &storage.DeleteObjectInput{
ObjectPath: path.Join(mantaRoot, bucket, object),
}); err != nil {
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
return nil


@ -32,8 +32,8 @@ import (
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/policy"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -113,7 +113,7 @@ func ossGatewayMain(ctx *cli.Context) {
// Validate gateway arguments.
host := ctx.Args().First()
minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
logger.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
minio.StartGateway(ctx, &OSS{host})
}
@ -161,7 +161,7 @@ func (g *OSS) Production() bool {
// `X-Amz-Meta-` prefix and converted into `X-Oss-Meta-`.
//
// Header names are canonicalized as in http.Header.
func appendS3MetaToOSSOptions(opts []oss.Option, s3Metadata map[string]string) ([]oss.Option, error) {
func appendS3MetaToOSSOptions(ctx context.Context, opts []oss.Option, s3Metadata map[string]string) ([]oss.Option, error) {
if opts == nil {
opts = make([]oss.Option, 0, len(s3Metadata))
}
@ -173,7 +173,8 @@ func appendS3MetaToOSSOptions(opts []oss.Option, s3Metadata map[string]string) (
metaKey := k[len("X-Amz-Meta-"):]
// NOTE(timonwong): OSS won't allow headers with underscore(_).
if strings.Contains(metaKey, "_") {
return nil, errors.Trace(minio.UnsupportedMetadata{})
logger.LogIf(ctx, minio.UnsupportedMetadata{})
return nil, minio.UnsupportedMetadata{}
}
opts = append(opts, oss.Meta(metaKey, v))
case k == "X-Amz-Acl":
@ -271,15 +272,6 @@ func ossToObjectError(err error, params ...string) error {
return nil
}
e, ok := err.(*errors.Error)
if !ok {
// Code should be fixed if this function is called without doing errors.Trace()
// Else handling different situations in this function makes this function complicated.
minio.ErrorIf(err, "Expected type *Error")
return err
}
err = e.Cause
bucket := ""
object := ""
uploadID := ""
@ -298,7 +290,7 @@ func ossToObjectError(err error, params ...string) error {
if !ok {
// We don't interpret non-OSS errors, as OSS errors will
// have a StatusCode to help convert them to object errors.
return e
return err
}
switch ossErr.Code {
@ -330,8 +322,7 @@ func ossToObjectError(err error, params ...string) error {
err = minio.InvalidPart{}
}
e.Cause = err
return e
return err
}
// ossObjects implements gateway for Aliyun Object Storage Service.
@ -366,22 +357,26 @@ func ossIsValidBucketName(bucket string) bool {
// MakeBucketWithLocation creates a new container on OSS backend.
func (l *ossObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
if !ossIsValidBucketName(bucket) {
return errors.Trace(minio.BucketNameInvalid{Bucket: bucket})
logger.LogIf(ctx, minio.BucketNameInvalid{Bucket: bucket})
return minio.BucketNameInvalid{Bucket: bucket}
}
err := l.Client.CreateBucket(bucket)
return ossToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return ossToObjectError(err, bucket)
}
// ossGeBucketInfo gets bucket metadata.
func ossGeBucketInfo(client *oss.Client, bucket string) (bi minio.BucketInfo, err error) {
func ossGeBucketInfo(ctx context.Context, client *oss.Client, bucket string) (bi minio.BucketInfo, err error) {
if !ossIsValidBucketName(bucket) {
return bi, errors.Trace(minio.BucketNameInvalid{Bucket: bucket})
logger.LogIf(ctx, minio.BucketNameInvalid{Bucket: bucket})
return bi, minio.BucketNameInvalid{Bucket: bucket}
}
bgir, err := client.GetBucketInfo(bucket)
if err != nil {
return bi, ossToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return bi, ossToObjectError(err, bucket)
}
return minio.BucketInfo{
@ -392,7 +387,7 @@ func ossGeBucketInfo(client *oss.Client, bucket string) (bi minio.BucketInfo, er
// GetBucketInfo gets bucket metadata.
func (l *ossObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) {
return ossGeBucketInfo(l.Client, bucket)
return ossGeBucketInfo(ctx, l.Client, bucket)
}
// ListBuckets lists all OSS buckets.
@ -401,7 +396,8 @@ func (l *ossObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInf
for {
lbr, err := l.Client.ListBuckets(marker)
if err != nil {
return nil, ossToObjectError(errors.Trace(err))
logger.LogIf(ctx, err)
return nil, ossToObjectError(err)
}
for _, bi := range lbr.Buckets {
@ -424,7 +420,8 @@ func (l *ossObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInf
func (l *ossObjects) DeleteBucket(ctx context.Context, bucket string) error {
err := l.Client.DeleteBucket(bucket)
if err != nil {
return ossToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return ossToObjectError(err, bucket)
}
return nil
}
@ -462,10 +459,11 @@ func fromOSSClientListObjectsResult(bucket string, lor oss.ListObjectsResult) mi
}
// ossListObjects lists all blobs in OSS bucket filtered by prefix.
func ossListObjects(client *oss.Client, bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
func ossListObjects(ctx context.Context, client *oss.Client, bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
buck, err := client.Bucket(bucket)
if err != nil {
return loi, ossToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return loi, ossToObjectError(err, bucket)
}
// maxKeys should default to 1000 or less.
@ -475,19 +473,20 @@ func ossListObjects(client *oss.Client, bucket, prefix, marker, delimiter string
lor, err := buck.ListObjects(oss.Prefix(prefix), oss.Marker(marker), oss.Delimiter(delimiter), oss.MaxKeys(maxKeys))
if err != nil {
return loi, ossToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return loi, ossToObjectError(err, bucket)
}
return fromOSSClientListObjectsResult(bucket, lor), nil
}
// ossListObjectsV2 lists all blobs in OSS bucket filtered by prefix.
func ossListObjectsV2(client *oss.Client, bucket, prefix, continuationToken, delimiter string, maxKeys int,
func ossListObjectsV2(ctx context.Context, client *oss.Client, bucket, prefix, continuationToken, delimiter string, maxKeys int,
fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
// fetchOwner and startAfter are not supported and unused.
marker := continuationToken
resultV1, err := ossListObjects(client, bucket, prefix, marker, delimiter, maxKeys)
resultV1, err := ossListObjects(ctx, client, bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return loi, err
}
@ -503,13 +502,13 @@ func ossListObjectsV2(client *oss.Client, bucket, prefix, continuationToken, del
// ListObjects lists all blobs in OSS bucket filtered by prefix.
func (l *ossObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
return ossListObjects(l.Client, bucket, prefix, marker, delimiter, maxKeys)
return ossListObjects(ctx, l.Client, bucket, prefix, marker, delimiter, maxKeys)
}
// ListObjectsV2 lists all blobs in OSS bucket filtered by prefix
func (l *ossObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int,
fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
return ossListObjectsV2(l.Client, bucket, prefix, continuationToken, delimiter, maxKeys, fetchOwner, startAfter)
return ossListObjectsV2(ctx, l.Client, bucket, prefix, continuationToken, delimiter, maxKeys, fetchOwner, startAfter)
}
// ossGetObject reads an object on OSS. Supports additional
@ -518,14 +517,16 @@ func (l *ossObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continua
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func ossGetObject(client *oss.Client, bucket, key string, startOffset, length int64, writer io.Writer, etag string) error {
func ossGetObject(ctx context.Context, client *oss.Client, bucket, key string, startOffset, length int64, writer io.Writer, etag string) error {
if length < 0 && length != -1 {
return ossToObjectError(errors.Trace(fmt.Errorf("Invalid argument")), bucket, key)
logger.LogIf(ctx, fmt.Errorf("Invalid argument"))
return ossToObjectError(fmt.Errorf("Invalid argument"), bucket, key)
}
bkt, err := client.Bucket(bucket)
if err != nil {
return ossToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return ossToObjectError(err, bucket, key)
}
var opts []oss.Option
@ -535,12 +536,14 @@ func ossGetObject(client *oss.Client, bucket, key string, startOffset, length in
object, err := bkt.GetObject(key, opts...)
if err != nil {
return ossToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return ossToObjectError(err, bucket, key)
}
defer object.Close()
if _, err := io.Copy(writer, object); err != nil {
return ossToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return ossToObjectError(err, bucket, key)
}
return nil
}
@ -552,7 +555,7 @@ func ossGetObject(client *oss.Client, bucket, key string, startOffset, length in
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (l *ossObjects) GetObject(ctx context.Context, bucket, key string, startOffset, length int64, writer io.Writer, etag string) error {
return ossGetObject(l.Client, bucket, key, startOffset, length, writer, etag)
return ossGetObject(ctx, l.Client, bucket, key, startOffset, length, writer, etag)
}
func translatePlainError(err error) error {
@ -569,15 +572,17 @@ func translatePlainError(err error) error {
}
// ossGetObjectInfo reads object info and replies back ObjectInfo.
func ossGetObjectInfo(client *oss.Client, bucket, object string) (objInfo minio.ObjectInfo, err error) {
func ossGetObjectInfo(ctx context.Context, client *oss.Client, bucket, object string) (objInfo minio.ObjectInfo, err error) {
bkt, err := client.Bucket(bucket)
if err != nil {
return objInfo, ossToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, ossToObjectError(err, bucket, object)
}
header, err := bkt.GetObjectDetailedMeta(object)
if err != nil {
return objInfo, ossToObjectError(errors.Trace(translatePlainError(err)), bucket, object)
logger.LogIf(ctx, translatePlainError(err))
return objInfo, ossToObjectError(translatePlainError(err), bucket, object)
}
// Build S3 metadata from OSS metadata
@ -600,40 +605,43 @@ func ossGetObjectInfo(client *oss.Client, bucket, object string) (objInfo minio.
// GetObjectInfo reads object info and replies back ObjectInfo.
func (l *ossObjects) GetObjectInfo(ctx context.Context, bucket, object string) (objInfo minio.ObjectInfo, err error) {
return ossGetObjectInfo(l.Client, bucket, object)
return ossGetObjectInfo(ctx, l.Client, bucket, object)
}
// ossPutObject creates a new object with the incoming data.
func ossPutObject(client *oss.Client, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
func ossPutObject(ctx context.Context, client *oss.Client, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
bkt, err := client.Bucket(bucket)
if err != nil {
return objInfo, ossToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, ossToObjectError(err, bucket, object)
}
// Build OSS metadata
opts, err := appendS3MetaToOSSOptions(nil, metadata)
opts, err := appendS3MetaToOSSOptions(ctx, nil, metadata)
if err != nil {
return objInfo, ossToObjectError(err, bucket, object)
}
err = bkt.PutObject(object, data, opts...)
if err != nil {
return objInfo, ossToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, ossToObjectError(err, bucket, object)
}
return ossGetObjectInfo(client, bucket, object)
return ossGetObjectInfo(ctx, client, bucket, object)
}
// PutObject creates a new object with the incoming data.
func (l *ossObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
return ossPutObject(l.Client, bucket, object, data, metadata)
return ossPutObject(ctx, l.Client, bucket, object, data, metadata)
}
// CopyObject copies an object from source bucket to a destination bucket.
func (l *ossObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo minio.ObjectInfo) (objInfo minio.ObjectInfo, err error) {
bkt, err := l.Client.Bucket(srcBucket)
if err != nil {
return objInfo, ossToObjectError(errors.Trace(err), srcBucket, srcObject)
logger.LogIf(ctx, err)
return objInfo, ossToObjectError(err, srcBucket, srcObject)
}
opts := make([]oss.Option, 0, len(srcInfo.UserDefined)+1)
@ -644,13 +652,14 @@ func (l *ossObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
opts = append(opts, oss.MetadataDirective(oss.MetaReplace))
// Build OSS metadata
opts, err = appendS3MetaToOSSOptions(opts, srcInfo.UserDefined)
opts, err = appendS3MetaToOSSOptions(ctx, opts, srcInfo.UserDefined)
if err != nil {
return objInfo, ossToObjectError(err, srcBucket, srcObject)
}
if _, err = bkt.CopyObjectTo(dstBucket, dstObject, srcObject, opts...); err != nil {
return objInfo, ossToObjectError(errors.Trace(err), srcBucket, srcObject)
logger.LogIf(ctx, err)
return objInfo, ossToObjectError(err, srcBucket, srcObject)
}
return l.GetObjectInfo(ctx, dstBucket, dstObject)
}
@ -659,12 +668,14 @@ func (l *ossObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
func (l *ossObjects) DeleteObject(ctx context.Context, bucket, object string) error {
bkt, err := l.Client.Bucket(bucket)
if err != nil {
return ossToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return ossToObjectError(err, bucket, object)
}
err = bkt.DeleteObject(object)
if err != nil {
return ossToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return ossToObjectError(err, bucket, object)
}
return nil
}
@ -701,13 +712,15 @@ func fromOSSClientListMultipartsInfo(lmur oss.ListMultipartUploadResult) minio.L
func (l *ossObjects) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, err error) {
bkt, err := l.Client.Bucket(bucket)
if err != nil {
return lmi, ossToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return lmi, ossToObjectError(err, bucket)
}
lmur, err := bkt.ListMultipartUploads(oss.Prefix(prefix), oss.KeyMarker(keyMarker), oss.UploadIDMarker(uploadIDMarker),
oss.Delimiter(delimiter), oss.MaxUploads(maxUploads))
if err != nil {
return lmi, ossToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return lmi, ossToObjectError(err, bucket)
}
return fromOSSClientListMultipartsInfo(lmur), nil
@ -717,18 +730,20 @@ func (l *ossObjects) ListMultipartUploads(ctx context.Context, bucket, prefix, k
func (l *ossObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
bkt, err := l.Client.Bucket(bucket)
if err != nil {
return uploadID, ossToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return uploadID, ossToObjectError(err, bucket, object)
}
// Build OSS metadata
opts, err := appendS3MetaToOSSOptions(nil, metadata)
opts, err := appendS3MetaToOSSOptions(ctx, nil, metadata)
if err != nil {
return uploadID, ossToObjectError(err, bucket, object)
}
lmur, err := bkt.InitiateMultipartUpload(object, opts...)
if err != nil {
return uploadID, ossToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return uploadID, ossToObjectError(err, bucket, object)
}
return lmur.UploadID, nil
@ -738,7 +753,8 @@ func (l *ossObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
func (l *ossObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (pi minio.PartInfo, err error) {
bkt, err := l.Client.Bucket(bucket)
if err != nil {
return pi, ossToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return pi, ossToObjectError(err, bucket, object)
}
imur := oss.InitiateMultipartUploadResult{
@ -749,7 +765,8 @@ func (l *ossObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
size := data.Size()
up, err := bkt.UploadPart(imur, data, size, partID)
if err != nil {
return pi, ossToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return pi, ossToObjectError(err, bucket, object)
}
return minio.PartInfo{
@ -820,11 +837,12 @@ func (l *ossObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
bkt, err := l.Client.Bucket(destBucket)
if err != nil {
return p, ossToObjectError(errors.Trace(err), destBucket)
logger.LogIf(ctx, err)
return p, ossToObjectError(err, destBucket)
}
// Build OSS metadata
opts, err := appendS3MetaToOSSOptions(nil, srcInfo.UserDefined)
opts, err := appendS3MetaToOSSOptions(ctx, nil, srcInfo.UserDefined)
if err != nil {
return p, ossToObjectError(err, srcBucket, srcObject)
}
@ -835,7 +853,8 @@ func (l *ossObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
}, srcBucket, srcObject, startOffset, length, partID, opts...)
if err != nil {
return p, ossToObjectError(errors.Trace(err), srcBucket, srcObject)
logger.LogIf(ctx, err)
return p, ossToObjectError(err, srcBucket, srcObject)
}
p.PartNumber = completePart.PartNumber
@ -847,7 +866,8 @@ func (l *ossObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
func (l *ossObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi minio.ListPartsInfo, err error) {
lupr, err := ossListObjectParts(l.Client, bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil {
return lpi, ossToObjectError(errors.Trace(err), bucket, object, uploadID)
logger.LogIf(ctx, err)
return lpi, ossToObjectError(err, bucket, object, uploadID)
}
return fromOSSClientListPartsInfo(lupr, partNumberMarker), nil
@ -857,7 +877,8 @@ func (l *ossObjects) ListObjectParts(ctx context.Context, bucket, object, upload
func (l *ossObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
bkt, err := l.Client.Bucket(bucket)
if err != nil {
return ossToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return ossToObjectError(err, bucket, object)
}
err = bkt.AbortMultipartUpload(oss.InitiateMultipartUploadResult{
@ -866,7 +887,8 @@ func (l *ossObjects) AbortMultipartUpload(ctx context.Context, bucket, object, u
UploadID: uploadID,
})
if err != nil {
return ossToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return ossToObjectError(err, bucket, object)
}
return nil
}
@ -876,7 +898,8 @@ func (l *ossObjects) CompleteMultipartUpload(ctx context.Context, bucket, object
client := l.Client
bkt, err := client.Bucket(bucket)
if err != nil {
return oi, ossToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return oi, ossToObjectError(err, bucket, object)
}
// Error out if any part of uploadedParts, except the last, is smaller than 5MiB.
@ -886,7 +909,8 @@ func (l *ossObjects) CompleteMultipartUpload(ctx context.Context, bucket, object
for lupr.IsTruncated {
lupr, err = ossListObjectParts(client, bucket, object, uploadID, partNumberMarker, ossMaxParts)
if err != nil {
return oi, ossToObjectError(errors.Trace(err), bucket, object, uploadID)
logger.LogIf(ctx, err)
return oi, ossToObjectError(err, bucket, object, uploadID)
}
uploadedParts := lupr.UploadedParts
@ -900,11 +924,16 @@ func (l *ossObjects) CompleteMultipartUpload(ctx context.Context, bucket, object
for _, part := range uploadedParts {
if part.Size < ossS3MinPartSize {
return oi, errors.Trace(minio.PartTooSmall{
logger.LogIf(ctx, minio.PartTooSmall{
PartNumber: part.PartNumber,
PartSize: int64(part.Size),
PartETag: minio.ToS3ETag(part.ETag),
})
return oi, minio.PartTooSmall{
PartNumber: part.PartNumber,
PartSize: int64(part.Size),
PartETag: minio.ToS3ETag(part.ETag),
}
}
}
@ -926,7 +955,8 @@ func (l *ossObjects) CompleteMultipartUpload(ctx context.Context, bucket, object
_, err = bkt.CompleteMultipartUpload(imur, parts)
if err != nil {
return oi, ossToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return oi, ossToObjectError(err, bucket, object)
}
return l.GetObjectInfo(ctx, bucket, object)
@ -940,13 +970,15 @@ func (l *ossObjects) CompleteMultipartUpload(ctx context.Context, bucket, object
func (l *ossObjects) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error {
bucketPolicies := policy.GetPolicies(policyInfo.Statements, bucket, "")
if len(bucketPolicies) != 1 {
return errors.Trace(minio.NotImplemented{})
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
prefix := bucket + "/*" // For all objects inside the bucket.
for policyPrefix, bucketPolicy := range bucketPolicies {
if policyPrefix != prefix {
return errors.Trace(minio.NotImplemented{})
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
var acl oss.ACLType
@ -958,12 +990,14 @@ func (l *ossObjects) SetBucketPolicy(ctx context.Context, bucket string, policyI
case policy.BucketPolicyReadWrite:
acl = oss.ACLPublicReadWrite
default:
return errors.Trace(minio.NotImplemented{})
logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
}
err := l.Client.SetBucketACL(bucket, acl)
if err != nil {
return ossToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return ossToObjectError(err, bucket)
}
}
@ -974,20 +1008,23 @@ func (l *ossObjects) SetBucketPolicy(ctx context.Context, bucket string, policyI
func (l *ossObjects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) {
result, err := l.Client.GetBucketACL(bucket)
if err != nil {
return policy.BucketAccessPolicy{}, ossToObjectError(errors.Trace(err))
logger.LogIf(ctx, err)
return policy.BucketAccessPolicy{}, ossToObjectError(err)
}
policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
switch result.ACL {
case string(oss.ACLPrivate):
// By default, all buckets start with a "private" policy.
return policy.BucketAccessPolicy{}, ossToObjectError(errors.Trace(minio.PolicyNotFound{}), bucket)
logger.LogIf(ctx, minio.PolicyNotFound{})
return policy.BucketAccessPolicy{}, ossToObjectError(minio.PolicyNotFound{}, bucket)
case string(oss.ACLPublicRead):
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "")
case string(oss.ACLPublicReadWrite):
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadWrite, bucket, "")
default:
return policy.BucketAccessPolicy{}, errors.Trace(minio.NotImplemented{})
logger.LogIf(ctx, minio.NotImplemented{})
return policy.BucketAccessPolicy{}, minio.NotImplemented{}
}
return policyInfo, nil
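GetBucketPolicy above derives the S3 policy from the bucket ACL; private buckets surface as PolicyNotFound rather than an empty policy. A sketch of the mapping, assuming the standard OSS ACL strings ("private", "public-read", "public-read-write"):

package sketch

// aclToPolicy mirrors the switch above: only the two public ACLs map to a
// bucket policy; private means no policy, anything else is unsupported.
func aclToPolicy(acl string) (policy string, ok bool) {
	switch acl {
	case "public-read":
		return "readonly", true
	case "public-read-write":
		return "readwrite", true
	default: // "private" and unknown ACLs
		return "", false
	}
}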
@ -997,7 +1034,8 @@ func (l *ossObjects) GetBucketPolicy(ctx context.Context, bucket string) (policy
func (l *ossObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
err := l.Client.SetBucketACL(bucket, oss.ACLPrivate)
if err != nil {
return ossToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return ossToObjectError(err, bucket)
}
return nil
}


@ -17,6 +17,7 @@
package oss
import (
"context"
"fmt"
"net/http"
"reflect"
@ -29,9 +30,9 @@ import (
)
func ossErrResponse(code string) error {
return errors.Trace(oss.ServiceError{
return oss.ServiceError{
Code: code,
})
}
}
func TestOSSToObjectError(t *testing.T) {
@ -116,7 +117,7 @@ func TestS3MetaToOSSOptions(t *testing.T) {
headers = map[string]string{
"x-amz-meta-invalid_meta": "value",
}
_, err = appendS3MetaToOSSOptions(nil, headers)
_, err = appendS3MetaToOSSOptions(context.Background(), nil, headers)
if err = errors.Cause(err); err != nil {
if _, ok := err.(minio.UnsupportedMetadata); !ok {
t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err)
@ -133,7 +134,7 @@ func TestS3MetaToOSSOptions(t *testing.T) {
"X-Amz-Meta-X-Amz-Matdesc": "{}",
"X-Amz-Meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==",
}
opts, err := appendS3MetaToOSSOptions(nil, headers)
opts, err := appendS3MetaToOSSOptions(context.Background(), nil, headers)
if err != nil {
t.Fatalf("Test failed, with %s", err)
}


@ -24,8 +24,8 @@ import (
miniogo "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd"
@ -101,7 +101,7 @@ func s3GatewayMain(ctx *cli.Context) {
// Validate gateway arguments.
host := ctx.Args().First()
// Validate gateway arguments.
minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
logger.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
minio.StartGateway(ctx, &S3{host})
}
@ -173,7 +173,8 @@ func (l *s3Objects) StorageInfo(ctx context.Context) (si minio.StorageInfo) {
func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
err := l.Client.MakeBucket(bucket, location)
if err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket)
}
return err
}
@ -188,12 +189,14 @@ func (l *s3Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.
// access to these buckets.
// Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
if s3utils.CheckValidBucketName(bucket) != nil {
return bi, errors.Trace(minio.BucketNameInvalid{Bucket: bucket})
logger.LogIf(ctx, minio.BucketNameInvalid{Bucket: bucket})
return bi, minio.BucketNameInvalid{Bucket: bucket}
}
buckets, err := l.Client.ListBuckets()
if err != nil {
return bi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return bi, minio.ErrorRespToObjectError(err, bucket)
}
for _, bi := range buckets {
@ -207,14 +210,16 @@ func (l *s3Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.
}, nil
}
return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket})
logger.LogIf(ctx, minio.BucketNotFound{Bucket: bucket})
return bi, minio.BucketNotFound{Bucket: bucket}
}
// ListBuckets lists all S3 buckets
func (l *s3Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error) {
buckets, err := l.Client.ListBuckets()
if err != nil {
return nil, minio.ErrorRespToObjectError(errors.Trace(err))
logger.LogIf(ctx, err)
return nil, minio.ErrorRespToObjectError(err)
}
b := make([]minio.BucketInfo, len(buckets))
@ -232,7 +237,8 @@ func (l *s3Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error)
func (l *s3Objects) DeleteBucket(ctx context.Context, bucket string) error {
err := l.Client.RemoveBucket(bucket)
if err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket)
}
return nil
}
@ -241,7 +247,8 @@ func (l *s3Objects) DeleteBucket(ctx context.Context, bucket string) error {
func (l *s3Objects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) {
result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return loi, minio.ErrorRespToObjectError(err, bucket)
}
return minio.FromMinioClientListBucketResult(bucket, result), nil
@ -251,7 +258,8 @@ func (l *s3Objects) ListObjects(ctx context.Context, bucket string, prefix strin
func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
if err != nil {
return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
logger.LogIf(ctx, err)
return loi, minio.ErrorRespToObjectError(err, bucket)
}
return minio.FromMinioClientListBucketV2Result(bucket, result), nil
@ -265,23 +273,27 @@ func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuat
// length indicates the total length of the object.
func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string) error {
if length < 0 && length != -1 {
return minio.ErrorRespToObjectError(errors.Trace(minio.InvalidRange{}), bucket, key)
logger.LogIf(ctx, minio.InvalidRange{})
return minio.ErrorRespToObjectError(minio.InvalidRange{}, bucket, key)
}
opts := miniogo.GetObjectOptions{}
if startOffset >= 0 && length >= 0 {
if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, key)
}
}
object, _, err := l.Client.GetObject(bucket, key, opts)
if err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, key)
}
defer object.Close()
if _, err := io.Copy(writer, object); err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, key)
}
return nil
}
@ -290,7 +302,8 @@ func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, st
func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object string) (objInfo minio.ObjectInfo, err error) {
oi, err := l.Client.StatObject(bucket, object, miniogo.StatObjectOptions{})
if err != nil {
return minio.ObjectInfo{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object)
}
return minio.FromMinioClientObjectInfo(bucket, oi), nil
@ -300,7 +313,8 @@ func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object str
func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata))
if err != nil {
return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return objInfo, minio.ErrorRespToObjectError(err, bucket, object)
}
return minio.FromMinioClientObjectInfo(bucket, oi), nil
@ -315,7 +329,8 @@ func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject
srcInfo.UserDefined["x-amz-metadata-directive"] = "REPLACE"
srcInfo.UserDefined["x-amz-copy-source-if-match"] = srcInfo.ETag
if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, srcInfo.UserDefined); err != nil {
return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), srcBucket, srcObject)
logger.LogIf(ctx, err)
return objInfo, minio.ErrorRespToObjectError(err, srcBucket, srcObject)
}
return l.GetObjectInfo(ctx, dstBucket, dstObject)
}
@ -324,7 +339,8 @@ func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject
func (l *s3Objects) DeleteObject(ctx context.Context, bucket string, object string) error {
err := l.Client.RemoveObject(bucket, object)
if err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, object)
}
return nil
@ -346,7 +362,8 @@ func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
opts := miniogo.PutObjectOptions{UserMetadata: metadata}
uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts)
if err != nil {
return uploadID, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return uploadID, minio.ErrorRespToObjectError(err, bucket, object)
}
return uploadID, nil
}
@ -355,7 +372,8 @@ func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi minio.PartInfo, e error) {
info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5Base64String(), data.SHA256HexString())
if err != nil {
return pi, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return pi, minio.ErrorRespToObjectError(err, bucket, object)
}
return minio.FromMinioClientObjectPart(info), nil
@ -372,7 +390,8 @@ func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, de
completePart, err := l.Client.CopyObjectPart(srcBucket, srcObject, destBucket, destObject,
uploadID, partID, startOffset, length, srcInfo.UserDefined)
if err != nil {
return p, minio.ErrorRespToObjectError(errors.Trace(err), srcBucket, srcObject)
logger.LogIf(ctx, err)
return p, minio.ErrorRespToObjectError(err, srcBucket, srcObject)
}
p.PartNumber = completePart.PartNumber
p.ETag = completePart.ETag
@ -392,14 +411,16 @@ func (l *s3Objects) ListObjectParts(ctx context.Context, bucket string, object s
// AbortMultipartUpload aborts an ongoing multipart upload
func (l *s3Objects) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error {
err := l.Client.AbortMultipartUpload(bucket, object, uploadID)
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, object)
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (l *s3Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, e error) {
err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, minio.ToMinioClientCompleteParts(uploadedParts))
if err != nil {
return oi, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
logger.LogIf(ctx, err)
return oi, minio.ErrorRespToObjectError(err, bucket, object)
}
return l.GetObjectInfo(ctx, bucket, object)
@ -408,7 +429,8 @@ func (l *s3Objects) CompleteMultipartUpload(ctx context.Context, bucket string,
// SetBucketPolicy sets policy on bucket
func (l *s3Objects) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error {
if err := l.Client.PutBucketPolicy(bucket, policyInfo); err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, "")
logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, "")
}
return nil
@ -418,7 +440,8 @@ func (l *s3Objects) SetBucketPolicy(ctx context.Context, bucket string, policyIn
func (l *s3Objects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) {
policyInfo, err := l.Client.GetBucketPolicy(bucket)
if err != nil {
return policy.BucketAccessPolicy{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket, "")
logger.LogIf(ctx, err)
return policy.BucketAccessPolicy{}, minio.ErrorRespToObjectError(err, bucket, "")
}
return policyInfo, nil
}
@ -426,7 +449,8 @@ func (l *s3Objects) GetBucketPolicy(ctx context.Context, bucket string) (policy.
// DeleteBucketPolicy deletes all policies on bucket
func (l *s3Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
if err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{}); err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, "")
logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, "")
}
return nil
}
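
Every method in this file now follows the same two-step pattern: log the error against the request context where it surfaces, then return it unwrapped for the caller to translate. A condensed, self-contained sketch of that pattern (logIf and toObjectErr are simplified stand-ins for logger.LogIf and minio.ErrorRespToObjectError, not the real implementations):

package main

import (
	"context"
	"errors"
	"fmt"
)

// logIf stands in for logger.LogIf: record err only when it is non-nil.
func logIf(ctx context.Context, err error) {
	if err != nil {
		fmt.Println("log:", err)
	}
}

// toObjectErr stands in for minio.ErrorRespToObjectError: translate a backend
// error into an object-layer error annotated with the bucket name.
func toObjectErr(err error, bucket string) error {
	if err == nil {
		return nil
	}
	return fmt.Errorf("bucket %q: %w", bucket, err)
}

func deleteBucket(ctx context.Context, bucket string) error {
	err := errors.New("backend unreachable") // pretend the backend call failed
	if err != nil {
		logIf(ctx, err)                 // log once, where the failure surfaces
		return toObjectErr(err, bucket) // return untraced; caller maps it to an API error
	}
	return nil
}

func main() {
	fmt.Println(deleteBucket(context.Background(), "photos"))
}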

View file

@ -119,9 +119,9 @@ func TestS3ToObjectError(t *testing.T) {
}
for i, tc := range testCases {
actualErr := minio.ErrorRespToObjectError(errors.Trace(tc.inputErr), tc.bucket, tc.object)
actualErr := minio.ErrorRespToObjectError(tc.inputErr, tc.bucket, tc.object)
if e, ok := actualErr.(*errors.Error); ok && e.Cause.Error() != tc.expectedErr.Error() {
t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, e)
t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, actualErr)
}
}
}

View file

@ -24,7 +24,6 @@ import (
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
@ -38,8 +37,8 @@ import (
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/set"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
)
@ -112,7 +111,7 @@ func siaGatewayMain(ctx *cli.Context) {
// Validate gateway arguments.
host := ctx.Args().First()
// Validate gateway arguments.
minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
logger.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
minio.StartGateway(ctx, &Sia{host})
}
@ -164,9 +163,9 @@ func (g *Sia) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error)
colorBlue := color.New(color.FgBlue).SprintfFunc()
colorBold := color.New(color.Bold).SprintFunc()
log.Println(colorBlue("\nSia Gateway Configuration:"))
log.Println(colorBlue(" Sia Daemon API Address:") + colorBold(fmt.Sprintf(" %s\n", sia.Address)))
log.Println(colorBlue(" Sia Temp Directory:") + colorBold(fmt.Sprintf(" %s\n", sia.TempDir)))
logger.Println(colorBlue("\nSia Gateway Configuration:"))
logger.Println(colorBlue(" Sia Daemon API Address:") + colorBold(fmt.Sprintf(" %s\n", sia.Address)))
logger.Println(colorBlue(" Sia Temp Directory:") + colorBold(fmt.Sprintf(" %s\n", sia.TempDir)))
return sia, nil
}
@ -217,10 +216,11 @@ func (s MethodNotSupported) Error() string {
// apiGet wraps a GET request with a status code check, such that if the GET does
// not return 2xx, the error will be read and returned. The response body is
// not closed.
func apiGet(addr, call, apiPassword string) (*http.Response, error) {
func apiGet(ctx context.Context, addr, call, apiPassword string) (*http.Response, error) {
req, err := http.NewRequest("GET", "http://"+addr+call, nil)
if err != nil {
return nil, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, err
}
req.Header.Set("User-Agent", "Sia-Agent")
if apiPassword != "" {
@ -228,15 +228,18 @@ func apiGet(addr, call, apiPassword string) (*http.Response, error) {
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, err
}
if resp.StatusCode == http.StatusNotFound {
resp.Body.Close()
logger.LogIf(ctx, MethodNotSupported{call})
return nil, MethodNotSupported{call}
}
if non2xx(resp.StatusCode) {
err := decodeError(resp)
resp.Body.Close()
logger.LogIf(ctx, err)
return nil, err
}
return resp, nil
@ -245,7 +248,7 @@ func apiGet(addr, call, apiPassword string) (*http.Response, error) {
// apiPost wraps a POST request with a status code check, such that if the POST
// does not return 2xx, the error will be read and returned. The response body
// is not closed.
func apiPost(addr, call, vals, apiPassword string) (*http.Response, error) {
func apiPost(ctx context.Context, addr, call, vals, apiPassword string) (*http.Response, error) {
req, err := http.NewRequest("POST", "http://"+addr+call, strings.NewReader(vals))
if err != nil {
return nil, err
@ -257,7 +260,8 @@ func apiPost(addr, call, vals, apiPassword string) (*http.Response, error) {
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, err
}
if resp.StatusCode == http.StatusNotFound {
@ -275,8 +279,8 @@ func apiPost(addr, call, vals, apiPassword string) (*http.Response, error) {
// post makes an API call and discards the response. An error is returned if
// the response status is not 2xx.
func post(addr, call, vals, apiPassword string) error {
resp, err := apiPost(addr, call, vals, apiPassword)
func post(ctx context.Context, addr, call, vals, apiPassword string) error {
resp, err := apiPost(ctx, addr, call, vals, apiPassword)
if err != nil {
return err
}
@ -285,24 +289,26 @@ func post(addr, call, vals, apiPassword string) error {
}
// list lists all the uploaded files and decodes the JSON response.
func list(addr string, apiPassword string, obj *renterFiles) error {
resp, err := apiGet(addr, "/renter/files", apiPassword)
func list(ctx context.Context, addr string, apiPassword string, obj *renterFiles) error {
resp, err := apiGet(ctx, addr, "/renter/files", apiPassword)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNoContent {
logger.LogIf(ctx, fmt.Errorf("Expecting a response, but API returned %s", resp.Status))
return fmt.Errorf("Expecting a response, but API returned %s", resp.Status)
}
return json.NewDecoder(resp.Body).Decode(obj)
err = json.NewDecoder(resp.Body).Decode(obj)
logger.LogIf(ctx, err)
return err
}
// get makes an API call and discards the response. An error is returned if the
// response status is not 2xx.
func get(addr, call, apiPassword string) error {
resp, err := apiGet(addr, call, apiPassword)
func get(ctx context.Context, addr, call, apiPassword string) error {
resp, err := apiGet(ctx, addr, call, apiPassword)
if err != nil {
return err
}
@ -336,7 +342,7 @@ func (s *siaObjects) MakeBucketWithLocation(ctx context.Context, bucket, locatio
sha256sum := sha256.Sum256([]byte(bucket))
var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
return post(s.Address, "/renter/upload/"+siaObj, "source="+srcFile, s.password)
return post(ctx, s.Address, "/renter/upload/"+siaObj, "source="+srcFile, s.password)
}
// GetBucketInfo gets bucket metadata.
@ -347,7 +353,7 @@ func (s *siaObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio
dstFile := path.Join(s.TempDir, minio.MustGetUUID())
defer os.Remove(dstFile)
if err := get(s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil {
if err := get(ctx, s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil {
return bi, err
}
return minio.BucketInfo{Name: bucket}, nil
@ -355,7 +361,7 @@ func (s *siaObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio
// ListBuckets will detect and return existing buckets on Sia.
func (s *siaObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
sObjs, serr := s.listRenterFiles("")
sObjs, serr := s.listRenterFiles(ctx, "")
if serr != nil {
return buckets, serr
}
@ -388,11 +394,11 @@ func (s *siaObjects) DeleteBucket(ctx context.Context, bucket string) error {
sha256sum := sha256.Sum256([]byte(bucket))
var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
return post(s.Address, "/renter/delete/"+siaObj, "", s.password)
return post(ctx, s.Address, "/renter/delete/"+siaObj, "", s.password)
}
func (s *siaObjects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
siaObjs, siaErr := s.listRenterFiles(bucket)
siaObjs, siaErr := s.listRenterFiles(ctx, bucket)
if siaErr != nil {
return loi, siaErr
}
@ -429,7 +435,7 @@ func (s *siaObjects) GetObject(ctx context.Context, bucket string, object string
defer os.Remove(dstFile)
var siaObj = path.Join(s.RootDir, bucket, object)
if err := get(s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil {
if err := get(ctx, s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil {
return err
}
@ -459,11 +465,16 @@ func (s *siaObjects) GetObject(ctx context.Context, bucket string, object string
// Return an invalid-range error if the input offset and length fall out of range.
if startOffset > size || startOffset+length > size {
return errors.Trace(minio.InvalidRange{
logger.LogIf(ctx, minio.InvalidRange{
OffsetBegin: startOffset,
OffsetEnd: length,
ResourceSize: size,
})
return minio.InvalidRange{
OffsetBegin: startOffset,
OffsetEnd: length,
ResourceSize: size,
}
}
// Allocate a staging buffer.
@ -476,10 +487,10 @@ func (s *siaObjects) GetObject(ctx context.Context, bucket string, object string
// findSiaObject retrieves the siaObjectInfo for the Sia object with the given
// Sia path name.
func (s *siaObjects) findSiaObject(bucket, object string) (siaObjectInfo, error) {
func (s *siaObjects) findSiaObject(ctx context.Context, bucket, object string) (siaObjectInfo, error) {
siaPath := path.Join(s.RootDir, bucket, object)
sObjs, err := s.listRenterFiles("")
sObjs, err := s.listRenterFiles(ctx, "")
if err != nil {
return siaObjectInfo{}, err
}
@ -489,16 +500,19 @@ func (s *siaObjects) findSiaObject(bucket, object string) (siaObjectInfo, error)
return sObj, nil
}
}
return siaObjectInfo{}, errors.Trace(minio.ObjectNotFound{
logger.LogIf(ctx, minio.ObjectNotFound{
Bucket: bucket,
Object: object,
})
return siaObjectInfo{}, minio.ObjectNotFound{
Bucket: bucket,
Object: object,
}
}
// GetObjectInfo reads object info and replies back ObjectInfo
func (s *siaObjects) GetObjectInfo(ctx context.Context, bucket string, object string) (minio.ObjectInfo, error) {
so, err := s.findSiaObject(bucket, object)
so, err := s.findSiaObject(ctx, bucket, object)
if err != nil {
return minio.ObjectInfo{}, err
}
@ -527,11 +541,11 @@ func (s *siaObjects) PutObject(ctx context.Context, bucket string, object string
return objInfo, err
}
if err = post(s.Address, "/renter/upload/"+path.Join(s.RootDir, bucket, object), "source="+srcFile, s.password); err != nil {
if err = post(ctx, s.Address, "/renter/upload/"+path.Join(s.RootDir, bucket, object), "source="+srcFile, s.password); err != nil {
os.Remove(srcFile)
return objInfo, err
}
defer s.deleteTempFileWhenUploadCompletes(srcFile, bucket, object)
defer s.deleteTempFileWhenUploadCompletes(ctx, srcFile, bucket, object)
return minio.ObjectInfo{
Name: object,
@ -546,7 +560,7 @@ func (s *siaObjects) PutObject(ctx context.Context, bucket string, object string
func (s *siaObjects) DeleteObject(ctx context.Context, bucket string, object string) error {
// Tell Sia daemon to delete the object
var siaObj = path.Join(s.RootDir, bucket, object)
return post(s.Address, "/renter/delete/"+siaObj, "", s.password)
return post(ctx, s.Address, "/renter/delete/"+siaObj, "", s.password)
}
// siaObjectInfo represents object info stored on Sia
@ -565,10 +579,10 @@ type renterFiles struct {
}
// listRenterFiles will return a list of existing objects in the bucket provided
func (s *siaObjects) listRenterFiles(bucket string) (siaObjs []siaObjectInfo, err error) {
func (s *siaObjects) listRenterFiles(ctx context.Context, bucket string) (siaObjs []siaObjectInfo, err error) {
// Get list of all renter files
var rf renterFiles
if err = list(s.Address, s.password, &rf); err != nil {
if err = list(ctx, s.Address, s.password, &rf); err != nil {
return siaObjs, err
}
@ -592,16 +606,15 @@ func (s *siaObjects) listRenterFiles(bucket string) (siaObjs []siaObjectInfo, er
// deleteTempFileWhenUploadCompletes checks the status of a Sia file upload
// until it reaches 100% upload progress, then deletes the local temp copy from
// the filesystem.
func (s *siaObjects) deleteTempFileWhenUploadCompletes(tempFile string, bucket, object string) {
func (s *siaObjects) deleteTempFileWhenUploadCompletes(ctx context.Context, tempFile string, bucket, object string) {
var soi siaObjectInfo
// Wait until 100% upload instead of 1x redundancy because if we delete
// after 1x redundancy, the user has to pay the cost of other hosts
// redistributing the file.
for soi.UploadProgress < 100.0 {
var err error
soi, err = s.findSiaObject(bucket, object)
soi, err = s.findSiaObject(ctx, bucket, object)
if err != nil {
minio.ErrorIf(err, "Unable to find file uploaded to Sia path %s/%s", bucket, object)
break
}
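
The Sia helpers are representative of the whole change: every function that can fail gains a ctx parameter so the failure can be logged with request information. A self-contained sketch of an apiGet-style helper (the Sia-Agent header appears in the diff above; the basic-auth detail and the address are assumptions for illustration only):

package main

import (
	"context"
	"fmt"
	"net/http"
)

// apiGet sketches the helper above: issue a GET with the Sia-Agent header and
// surface non-2xx statuses as errors the caller can log against ctx.
func apiGet(ctx context.Context, addr, call, apiPassword string) (*http.Response, error) {
	req, err := http.NewRequest("GET", "http://"+addr+call, nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx) // an option; the commit threads ctx for logging only
	req.Header.Set("User-Agent", "Sia-Agent")
	if apiPassword != "" {
		req.SetBasicAuth("", apiPassword) // assumed auth scheme, for illustration
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		resp.Body.Close()
		return nil, fmt.Errorf("API call %s failed: %s", call, resp.Status)
	}
	return resp, nil
}

func main() {
	_, err := apiGet(context.Background(), "localhost:9980", "/renter/files", "")
	fmt.Println(err) // connection refused unless a Sia daemon is listening
}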

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"io"
"mime/multipart"
"net"
@ -24,7 +25,7 @@ import (
"net/url"
"strings"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
httptracer "github.com/minio/minio/pkg/handlers"
)
@ -36,7 +37,7 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error APIError
locationConstraint := createBucketLocationConfiguration{}
err := xmlDecoder(r.Body, &locationConstraint, r.ContentLength)
if err != nil && err != io.EOF {
errorIf(err, "Unable to xml decode location constraint")
logger.LogIf(context.Background(), err)
// Treat all other failures as XML parsing errors.
return "", ErrMalformedXML
} // else for both err as nil or io.EOF
@ -113,9 +114,10 @@ var userMetadataKeyPrefixes = []string{
}
// extractMetadataFromHeader extracts metadata from HTTP header.
func extractMetadataFromHeader(header http.Header) (map[string]string, error) {
func extractMetadataFromHeader(ctx context.Context, header http.Header) (map[string]string, error) {
if header == nil {
return nil, errors.Trace(errInvalidArgument)
logger.LogIf(ctx, errInvalidArgument)
return nil, errInvalidArgument
}
metadata := make(map[string]string)
@ -134,7 +136,8 @@ func extractMetadataFromHeader(header http.Header) (map[string]string, error) {
// Go through all other headers for any additional headers that need to be saved.
for key := range header {
if key != http.CanonicalHeaderKey(key) {
return nil, errors.Trace(errInvalidArgument)
logger.LogIf(ctx, errInvalidArgument)
return nil, errInvalidArgument
}
for _, prefix := range userMetadataKeyPrefixes {
if strings.HasPrefix(key, prefix) {
@ -187,12 +190,13 @@ func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string)
}
// Validate form field size for s3 specification requirement.
func validateFormFieldSize(formValues http.Header) error {
func validateFormFieldSize(ctx context.Context, formValues http.Header) error {
// Iterate over form values
for k := range formValues {
// Check if value's field exceeds S3 limit
if int64(len(formValues.Get(k))) > maxFormFieldSize {
return errors.Trace(errSizeUnexpected)
logger.LogIf(ctx, errSizeUnexpected)
return errSizeUnexpected
}
}
@ -201,7 +205,7 @@ func validateFormFieldSize(formValues http.Header) error {
}
// Extract form fields and file data from an HTTP POST Policy
func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) {
func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) {
/// HTML Form values
fileName = ""
@ -212,7 +216,7 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser,
}
// Validate form values.
if err = validateFormFieldSize(formValues); err != nil {
if err = validateFormFieldSize(ctx, formValues); err != nil {
return nil, "", 0, nil, err
}
@ -221,7 +225,8 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser,
canonicalFormName := http.CanonicalHeaderKey(k)
if canonicalFormName == "File" {
if len(v) == 0 {
return nil, "", 0, nil, errors.Trace(errInvalidArgument)
logger.LogIf(ctx, errInvalidArgument)
return nil, "", 0, nil, errInvalidArgument
}
// Fetch fileHeader which has the uploaded file information
fileHeader := v[0]
@ -230,17 +235,20 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser,
// Open the uploaded part
filePart, err = fileHeader.Open()
if err != nil {
return nil, "", 0, nil, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, "", 0, nil, err
}
// Compute file size
fileSize, err = filePart.(io.Seeker).Seek(0, 2)
if err != nil {
return nil, "", 0, nil, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, "", 0, nil, err
}
// Reset Seek to the beginning
_, err = filePart.(io.Seeker).Seek(0, 0)
if err != nil {
return nil, "", 0, nil, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, "", 0, nil, err
}
// File found and ready for reading
break
@ -276,7 +284,10 @@ func getResource(path string, host string, domain string) (string, error) {
// In bucket.mydomain.com:9000, strip out :9000
var err error
if host, _, err = net.SplitHostPort(host); err != nil {
errorIf(err, "Unable to split %s", host)
reqInfo := (&logger.ReqInfo{}).AppendTags("host", host)
reqInfo.AppendTags("path", path)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return "", err
}
}
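
extractMetadataFromHeader leans on Go's canonical header form: any key that is not already canonical is rejected with errInvalidArgument. A small sketch of that check (the prefix list here is illustrative, not the full userMetadataKeyPrefixes):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	prefixes := []string{"X-Amz-Meta-"} // illustrative user-metadata prefix

	header := http.Header{}
	header.Set("x-amz-meta-color", "red") // Set canonicalizes to X-Amz-Meta-Color

	metadata := make(map[string]string)
	for key := range header {
		// Reject keys that are not in canonical form, as the code above does.
		if key != http.CanonicalHeaderKey(key) {
			fmt.Println("invalid argument:", key)
			continue
		}
		for _, prefix := range prefixes {
			if strings.HasPrefix(key, prefix) {
				metadata[key] = header.Get(key)
			}
		}
	}
	fmt.Println(metadata) // map[X-Amz-Meta-Color:red]
}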

View file

@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"encoding/xml"
"io/ioutil"
"net/http"
@ -114,7 +115,7 @@ func TestValidateFormFieldSize(t *testing.T) {
// Run validate form field size check under all test cases.
for i, testCase := range testCases {
err := validateFormFieldSize(testCase.header)
err := validateFormFieldSize(context.Background(), testCase.header)
if err != nil {
if errors.Cause(err).Error() != testCase.err.Error() {
t.Errorf("Test %d: Expected error %s, got %s", i+1, testCase.err, err)
@ -180,7 +181,7 @@ func TestExtractMetadataHeaders(t *testing.T) {
// Validate if the extracting headers.
for i, testCase := range testCases {
metadata, err := extractMetadataFromHeader(testCase.header)
metadata, err := extractMetadataFromHeader(context.Background(), testCase.header)
if err != nil && !testCase.shouldFail {
t.Fatalf("Test %d failed to extract metadata: %v", i+1, err)
}

View file

@ -17,8 +17,8 @@
package http
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
@ -28,6 +28,8 @@ import (
"sync"
"syscall"
"time"
"github.com/minio/minio/cmd/logger"
)
var sslRequiredErrMsg = []byte("HTTP/1.0 403 Forbidden\r\n\r\nSSL required")
@ -85,9 +87,8 @@ type httpListener struct {
tcpKeepAliveTimeout time.Duration
readTimeout time.Duration
writeTimeout time.Duration
updateBytesReadFunc func(int) // function to be called to update bytes read in BufConn.
updateBytesWrittenFunc func(int) // function to be called to update bytes written in BufConn.
errorLogFunc func(error, string, ...interface{}) // function to be called on errors.
updateBytesReadFunc func(int) // function to be called to update bytes read in BufConn.
updateBytesWrittenFunc func(int) // function to be called to update bytes written in BufConn.
}
// isRoutineNetErr returns true if error is due to a network timeout,
@ -139,17 +140,16 @@ func (listener *httpListener) start() {
// Peek bytes of maximum length of all HTTP methods.
data, err := bufconn.Peek(methodMaxLen)
if err != nil {
if listener.errorLogFunc != nil {
// Peek could fail legitimately when clients abruptly close
// connection. E.g. Chrome browser opens connections speculatively to
// speed up loading of a web page. Peek may also fail due to network
// saturation on a transport with read timeout set. All other kinds of
// errors should be logged for further investigation. Thanks @brendanashworth.
if !isRoutineNetErr(err) {
listener.errorLogFunc(err,
"Error in reading from new connection %s at server %s",
bufconn.RemoteAddr(), bufconn.LocalAddr())
}
// Peek could fail legitimately when clients abruptly close
// connection. E.g. Chrome browser opens connections speculatively to
// speed up loading of a web page. Peek may also fail due to network
// saturation on a transport with read timeout set. All other kinds of
// errors should be logged for further investigation. Thanks @brendanashworth.
if !isRoutineNetErr(err) {
reqInfo := (&logger.ReqInfo{}).AppendTags("remoteAddr", bufconn.RemoteAddr().String())
reqInfo.AppendTags("localAddr", bufconn.LocalAddr().String())
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
}
bufconn.Close()
return
@ -172,12 +172,11 @@ func (listener *httpListener) start() {
if listener.tlsConfig != nil {
// As the listener is configured with TLS, try to do TLS handshake, drop the connection if it fails.
tlsConn := tls.Server(bufconn, listener.tlsConfig)
if err := tlsConn.Handshake(); err != nil {
if listener.errorLogFunc != nil {
listener.errorLogFunc(err,
"TLS handshake failed with new connection %s at server %s",
bufconn.RemoteAddr(), bufconn.LocalAddr())
}
if err = tlsConn.Handshake(); err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("remoteAddr", bufconn.RemoteAddr().String())
reqInfo.AppendTags("localAddr", bufconn.LocalAddr().String())
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
bufconn.Close()
return
}
@ -187,12 +186,13 @@ func (listener *httpListener) start() {
listener.updateBytesReadFunc, listener.updateBytesWrittenFunc)
// Peek bytes of maximum length of all HTTP methods.
data, err := bufconn.Peek(methodMaxLen)
data, err = bufconn.Peek(methodMaxLen)
if err != nil {
if !isRoutineNetErr(err) && listener.errorLogFunc != nil {
listener.errorLogFunc(err,
"Error in reading from new TLS connection %s at server %s",
bufconn.RemoteAddr(), bufconn.LocalAddr())
if !isRoutineNetErr(err) {
reqInfo := (&logger.ReqInfo{}).AppendTags("remoteAddr", bufconn.RemoteAddr().String())
reqInfo.AppendTags("localAddr", bufconn.LocalAddr().String())
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
}
bufconn.Close()
return
@ -205,12 +205,10 @@ func (listener *httpListener) start() {
return
}
}
if listener.errorLogFunc != nil {
listener.errorLogFunc(errors.New("junk message"),
"Received non-HTTP message from new connection %s at server %s",
bufconn.RemoteAddr(), bufconn.LocalAddr())
}
reqInfo := (&logger.ReqInfo{}).AppendTags("remoteAddr", bufconn.RemoteAddr().String())
reqInfo.AppendTags("localAddr", bufconn.LocalAddr().String())
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
bufconn.Close()
return
@ -299,8 +297,7 @@ func newHTTPListener(serverAddrs []string,
readTimeout time.Duration,
writeTimeout time.Duration,
updateBytesReadFunc func(int),
updateBytesWrittenFunc func(int),
errorLogFunc func(error, string, ...interface{})) (listener *httpListener, err error) {
updateBytesWrittenFunc func(int)) (listener *httpListener, err error) {
var tcpListeners []*net.TCPListener
// Close all opened listeners on error
@ -337,7 +334,6 @@ func newHTTPListener(serverAddrs []string,
writeTimeout: writeTimeout,
updateBytesReadFunc: updateBytesReadFunc,
updateBytesWrittenFunc: updateBytesWrittenFunc,
errorLogFunc: errorLogFunc,
}
listener.start()
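
For context on what start is doing: the listener peeks the first few bytes of every new connection, and only attempts a TLS handshake when those bytes do not begin with an HTTP method. A toy sketch of that detection (methodMaxLen and the method set are paraphrased, not copied from the package):

package main

import (
	"fmt"
	"strings"
)

// looksLikeHTTP reports whether data begins with a known HTTP method,
// mirroring the Peek(methodMaxLen) check in httpListener.start.
func looksLikeHTTP(data []byte) bool {
	for _, m := range []string{"GET", "PUT", "POST", "HEAD", "DELETE", "OPTIONS", "PATCH"} {
		if strings.HasPrefix(string(data), m+" ") {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(looksLikeHTTP([]byte("GET / HTTP/1.1\r\n")))   // true: plain HTTP
	fmt.Println(looksLikeHTTP([]byte{0x16, 0x03, 0x01, 0x00})) // false: TLS ClientHello
}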

View file

@ -19,6 +19,7 @@ package http
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
@ -205,7 +206,7 @@ func TestNewHTTPListener(t *testing.T) {
writeTimeout time.Duration
updateBytesReadFunc func(int)
updateBytesWrittenFunc func(int)
errorLogFunc func(error, string, ...interface{})
errorLogFunc func(context.Context, error)
expectedErr error
}{
{[]string{"93.184.216.34:65432"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, nil, errors.New(remoteAddrErrMsg)},
@ -227,7 +228,6 @@ func TestNewHTTPListener(t *testing.T) {
testCase.writeTimeout,
testCase.updateBytesReadFunc,
testCase.updateBytesWrittenFunc,
testCase.errorLogFunc,
)
if testCase.expectedErr == nil {
@ -279,7 +279,6 @@ func TestHTTPListenerStartClose(t *testing.T) {
time.Duration(0),
nil,
nil,
nil,
)
if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -327,7 +326,6 @@ func TestHTTPListenerAddr(t *testing.T) {
time.Duration(0),
nil,
nil,
nil,
)
if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -372,7 +370,6 @@ func TestHTTPListenerAddrs(t *testing.T) {
time.Duration(0),
nil,
nil,
nil,
)
if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -419,7 +416,6 @@ func TestHTTPListenerAccept(t *testing.T) {
time.Duration(0),
nil,
nil,
nil,
)
if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -480,11 +476,6 @@ func TestHTTPListenerAccept(t *testing.T) {
func TestHTTPListenerAcceptPeekError(t *testing.T) {
tlsConfig := getTLSConfig(t)
nonLoopBackIP := getNonLoopBackIP(t)
errorFunc := func(err error, template string, args ...interface{}) {
msg := fmt.Sprintf("error: %v. ", err)
msg += fmt.Sprintf(template, args...)
fmt.Println(msg)
}
testCases := []struct {
serverAddrs []string
@ -504,7 +495,6 @@ func TestHTTPListenerAcceptPeekError(t *testing.T) {
time.Duration(0),
nil,
nil,
errorFunc,
)
if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -540,11 +530,6 @@ func TestHTTPListenerAcceptPeekError(t *testing.T) {
func TestHTTPListenerAcceptTLSError(t *testing.T) {
tlsConfig := getTLSConfig(t)
nonLoopBackIP := getNonLoopBackIP(t)
errorFunc := func(err error, template string, args ...interface{}) {
msg := fmt.Sprintf("error: %v. ", err)
msg += fmt.Sprintf(template, args...)
fmt.Println(msg)
}
testCases := []struct {
serverAddrs []string
@ -563,7 +548,6 @@ func TestHTTPListenerAcceptTLSError(t *testing.T) {
time.Duration(0),
nil,
nil,
errorFunc,
)
if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -609,11 +593,6 @@ func TestHTTPListenerAcceptTLSError(t *testing.T) {
func TestHTTPListenerAcceptError(t *testing.T) {
tlsConfig := getTLSConfig(t)
nonLoopBackIP := getNonLoopBackIP(t)
errorFunc := func(err error, template string, args ...interface{}) {
msg := fmt.Sprintf("error: %v. ", err)
msg += fmt.Sprintf(template, args...)
fmt.Println(msg)
}
testCases := []struct {
serverAddrs []string
@ -635,7 +614,6 @@ func TestHTTPListenerAcceptError(t *testing.T) {
time.Duration(0),
nil,
nil,
errorFunc,
)
if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -761,7 +739,6 @@ func TestHTTPListenerAcceptParallel(t *testing.T) {
time.Duration(0),
nil,
nil,
nil,
)
if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)

View file

@ -50,16 +50,15 @@ const (
// Server - extended http.Server supports multiple addresses to serve and enhanced connection handling.
type Server struct {
http.Server
Addrs []string // addresses on which the server listens for new connection.
ShutdownTimeout time.Duration // timeout used for graceful server shutdown.
TCPKeepAliveTimeout time.Duration // timeout used for underneath TCP connection.
UpdateBytesReadFunc func(int) // function to be called to update bytes read in bufConn.
UpdateBytesWrittenFunc func(int) // function to be called to update bytes written in bufConn.
ErrorLogFunc func(error, string, ...interface{}) // function to be called on errors.
listenerMutex *sync.Mutex // to guard 'listener' field.
listener *httpListener // HTTP listener for all 'Addrs' field.
inShutdown uint32 // indicates whether the server is in shutdown or not
requestCount int32 // counter holds no. of request in process.
Addrs []string // addresses on which the server listens for new connection.
ShutdownTimeout time.Duration // timeout used for graceful server shutdown.
TCPKeepAliveTimeout time.Duration // timeout used for underneath TCP connection.
UpdateBytesReadFunc func(int) // function to be called to update bytes read in bufConn.
UpdateBytesWrittenFunc func(int) // function to be called to update bytes written in bufConn.
listenerMutex *sync.Mutex // to guard 'listener' field.
listener *httpListener // HTTP listener for all 'Addrs' field.
inShutdown uint32 // indicates whether the server is in shutdown or not
requestCount int32 // counter holds no. of request in process.
}
// Start - start HTTP server
@ -77,7 +76,6 @@ func (srv *Server) Start() (err error) {
tcpKeepAliveTimeout := srv.TCPKeepAliveTimeout
updateBytesReadFunc := srv.UpdateBytesReadFunc
updateBytesWrittenFunc := srv.UpdateBytesWrittenFunc
errorLogFunc := srv.ErrorLogFunc // if srv.ErrorLogFunc holds non-synced state -> possible data race
// Create new HTTP listener.
var listener *httpListener
@ -89,7 +87,6 @@ func (srv *Server) Start() (err error) {
writeTimeout,
updateBytesReadFunc,
updateBytesWrittenFunc,
errorLogFunc,
)
if err != nil {
return err

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"errors"
"fmt"
"net/http"
@ -24,6 +25,7 @@ import (
jwtgo "github.com/dgrijalva/jwt-go"
jwtreq "github.com/dgrijalva/jwt-go/request"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
)
@ -97,11 +99,11 @@ func isAuthTokenValid(tokenString string) bool {
var claims jwtgo.StandardClaims
jwtToken, err := jwtgo.ParseWithClaims(tokenString, &claims, keyFuncCallback)
if err != nil {
errorIf(err, "Unable to parse JWT token string")
logger.LogIf(context.Background(), err)
return false
}
if err = claims.Valid(); err != nil {
errorIf(err, "Invalid claims in JWT token string")
logger.LogIf(context.Background(), err)
return false
}
return jwtToken.Valid && claims.Subject == globalServerConfig.GetCredential().AccessKey
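
For reference, the validation above needs all three checks: a successful parse, valid claims (e.g. not expired), and a subject that matches the configured access key. A self-contained sketch with github.com/dgrijalva/jwt-go (the secret and access key are placeholders, not MinIO's configuration):

package main

import (
	"fmt"
	"time"

	jwtgo "github.com/dgrijalva/jwt-go"
)

var secret = []byte("secret-key") // placeholder for the server credential

// isTokenValid mirrors the three checks above: parse, claim validity, subject.
func isTokenValid(tokenString, accessKey string) bool {
	var claims jwtgo.StandardClaims
	token, err := jwtgo.ParseWithClaims(tokenString, &claims, func(*jwtgo.Token) (interface{}, error) {
		return secret, nil
	})
	if err != nil {
		return false // parse or signature failure
	}
	if err = claims.Valid(); err != nil {
		return false // e.g. expired token
	}
	return token.Valid && claims.Subject == accessKey
}

func main() {
	// Mint a token, then validate it against the expected subject.
	claims := jwtgo.StandardClaims{
		Subject:   "AKIAEXAMPLE",
		ExpiresAt: time.Now().Add(time.Hour).Unix(),
	}
	tok, _ := jwtgo.NewWithClaims(jwtgo.SigningMethodHS256, claims).SignedString(secret)
	fmt.Println(isTokenValid(tok, "AKIAEXAMPLE")) // true
}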

View file

@ -17,10 +17,11 @@
package cmd
import (
"context"
"fmt"
"time"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
type statusType string
@ -112,29 +113,34 @@ func (n *nsLockMap) initLockInfoForVolumePath(param nsParam) {
// Change the state of the lock from Blocked to Running.
func (n *nsLockMap) statusBlockedToRunning(param nsParam, lockSource, opsID string, readLock bool) error {
// This function is called without the lock map mutex held, so it acquires the mutex explicitly.
ctx := context.Background()
n.lockMapMutex.Lock()
defer n.lockMapMutex.Unlock()
// Check whether the lock info entry for <volume, path> pair already exists.
_, ok := n.debugLockMap[param]
if !ok {
return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
logger.LogIf(ctx, LockInfoVolPathMissing{param.volume, param.path})
return LockInfoVolPathMissing{param.volume, param.path}
}
// Check whether lock info entry for the given `opsID` exists.
lockInfo, ok := n.debugLockMap[param].lockInfo[opsID]
if !ok {
return errors.Trace(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
logger.LogIf(ctx, LockInfoOpsIDNotFound{param.volume, param.path, opsID})
return LockInfoOpsIDNotFound{param.volume, param.path, opsID}
}
// Check whether lockSource is same.
if lockInfo.lockSource != lockSource {
return errors.Trace(LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
logger.LogIf(ctx, LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
return LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource}
}
// Status of the lock should be set to "Blocked".
if lockInfo.status != blockedStatus {
return errors.Trace(LockInfoStateNotBlocked{param.volume, param.path, opsID})
logger.LogIf(ctx, LockInfoStateNotBlocked{param.volume, param.path, opsID})
return LockInfoStateNotBlocked{param.volume, param.path, opsID}
}
// Change lock status to running and update the time.
n.debugLockMap[param].lockInfo[opsID] = newDebugLockInfo(lockSource, runningStatus, readLock)
@ -182,24 +188,29 @@ func (n *nsLockMap) statusNoneToBlocked(param nsParam, lockSource, opsID string,
// Change the state of the lock from Blocked to none.
func (n *nsLockMap) statusBlockedToNone(param nsParam, lockSource, opsID string, readLock bool) error {
_, ok := n.debugLockMap[param]
ctx := context.Background()
if !ok {
return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
logger.LogIf(ctx, LockInfoVolPathMissing{param.volume, param.path})
return LockInfoVolPathMissing{param.volume, param.path}
}
// Check whether lock info entry for the given `opsID` exists.
lockInfo, ok := n.debugLockMap[param].lockInfo[opsID]
if !ok {
return errors.Trace(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
logger.LogIf(ctx, LockInfoOpsIDNotFound{param.volume, param.path, opsID})
return LockInfoOpsIDNotFound{param.volume, param.path, opsID}
}
// Check whether lockSource is same.
if lockInfo.lockSource != lockSource {
return errors.Trace(LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
logger.LogIf(ctx, LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
return LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource}
}
// Status of the lock should be set to "Blocked".
if lockInfo.status != blockedStatus {
return errors.Trace(LockInfoStateNotBlocked{param.volume, param.path, opsID})
logger.LogIf(ctx, LockInfoStateNotBlocked{param.volume, param.path, opsID})
return LockInfoStateNotBlocked{param.volume, param.path, opsID}
}
// Update global lock stats.
@ -214,7 +225,8 @@ func (n *nsLockMap) statusBlockedToNone(param nsParam, lockSource, opsID string,
func (n *nsLockMap) deleteLockInfoEntryForVolumePath(param nsParam) error {
// delete the lock info for the given operation.
if _, found := n.debugLockMap[param]; !found {
return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
logger.LogIf(context.Background(), LockInfoVolPathMissing{param.volume, param.path})
return LockInfoVolPathMissing{param.volume, param.path}
}
// The following stats update is relevant only in case of a
@ -235,17 +247,20 @@ func (n *nsLockMap) deleteLockInfoEntryForVolumePath(param nsParam) error {
// Called when the nsLk ref count for the given (volume, path) is
// not 0.
func (n *nsLockMap) deleteLockInfoEntryForOps(param nsParam, opsID string) error {
ctx := context.Background()
// delete the lock info for the given operation.
infoMap, found := n.debugLockMap[param]
if !found {
return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
logger.LogIf(ctx, LockInfoVolPathMissing{param.volume, param.path})
return LockInfoVolPathMissing{param.volume, param.path}
}
// The operation finished holding the lock on the resource, remove
// the entry for the given operation with the operation ID.
opsIDLock, foundInfo := infoMap.lockInfo[opsID]
if !foundInfo {
// Unlock request with invalid operation ID not accepted.
return errors.Trace(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
logger.LogIf(ctx, LockInfoOpsIDNotFound{param.volume, param.path, opsID})
return LockInfoOpsIDNotFound{param.volume, param.path, opsID}
}
// Update global and (volume, path) lock status.
granted := opsIDLock.status == runningStatus
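
Both transition functions enforce the same four preconditions before mutating the map: the <volume, path> entry exists, the opsID entry exists, the lock source matches, and the current status is Blocked. A toy version of that guard chain (types reduced to strings; not the package's actual types):

package main

import (
	"errors"
	"fmt"
)

type lockInfo struct {
	source string
	status string // "Blocked" or "Running"
}

// lockMap maps "volume/path" -> opsID -> lock state.
type lockMap map[string]map[string]*lockInfo

// blockedToRunning mirrors the guard chain in statusBlockedToRunning.
func (m lockMap) blockedToRunning(param, source, opsID string) error {
	ops, ok := m[param]
	if !ok {
		return errors.New("no lock state for volume/path")
	}
	info, ok := ops[opsID]
	if !ok {
		return errors.New("no lock state for opsID")
	}
	if info.source != source {
		return errors.New("lock source mismatch")
	}
	if info.status != "Blocked" {
		return errors.New("lock not in Blocked state")
	}
	info.status = "Running"
	return nil
}

func main() {
	m := lockMap{"vol/path": {"op1": {source: "src", status: "Blocked"}}}
	fmt.Println(m.blockedToRunning("vol/path", "src", "op1")) // <nil>
	fmt.Println(m.blockedToRunning("vol/path", "src", "op1")) // lock not in Blocked state
}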

View file

@ -17,8 +17,11 @@
package cmd
import (
"context"
"errors"
"time"
"github.com/minio/minio/cmd/logger"
)
// Similar to removeEntry, but removes the entry only if it exists in the map.
@ -29,7 +32,10 @@ func (l *localLocker) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
// Remove failed, in case it is a:
if nlrip.lri.writer {
// Writer: this should never happen as the whole (mapped) entry should have been deleted
errorIf(errors.New(""), "Lock maintenance failed to remove entry for write lock (should never happen) %#v %#v %#v", nlrip.name, nlrip.lri.uid, lri)
reqInfo := (&logger.ReqInfo{}).AppendTags("name", nlrip.name)
reqInfo.AppendTags("uid", nlrip.lri.uid)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, errors.New("Lock maintenance failed to remove entry for write lock (should never happen)"))
} // Reader: this can happen if multiple read locks were active and
// the one we are looking for has been released concurrently (so it is fine).
} // Removal went okay, all is fine.

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"fmt"
"math/rand"
"sync"
@ -24,7 +25,7 @@ import (
router "github.com/gorilla/mux"
"github.com/minio/dsync"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
const (
@ -98,7 +99,8 @@ func registerDistNSLockRouter(mux *router.Router, endpoints EndpointList) error
func registerStorageLockers(mux *router.Router, lkSrv *lockServer) error {
lockRPCServer := newRPCServer()
if err := lockRPCServer.RegisterName(lockServiceName, lkSrv); err != nil {
return errors.Trace(err)
logger.LogIf(context.Background(), err)
return err
}
lockRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()
lockRouter.Path(lockServicePath).Handler(lockRPCServer)
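
RegisterName fails only for receivers that do not satisfy net/rpc's method rules, which is why the error is logged rather than traced. A reduced sketch with the standard library's net/rpc (the Doubler service is illustrative, not the lock server):

package main

import (
	"fmt"
	"net/rpc"
)

// Doubler stands in for the lock server; RPC methods must take an argument
// and a pointer reply, and return error.
type Doubler struct{}

func (d *Doubler) Double(in int, out *int) error { *out = in * 2; return nil }

func main() {
	srv := rpc.NewServer()
	if err := srv.RegisterName("doublerService", &Doubler{}); err != nil {
		fmt.Println("register failed:", err) // the commit logs this via logger.LogIf
		return
	}
	fmt.Println("registered") // srv would then be mounted on an HTTP route
}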

View file

@ -16,9 +16,13 @@
package logger
import "context"
import (
"context"
"fmt"
"sync"
)
// Key used for Get/SetContext
// Key used for Get/SetReqInfo
type contextKeyType string
const contextLogKey = contextKeyType("miniolog")
@ -37,27 +41,59 @@ type ReqInfo struct {
API string // API name - GetObject PutObject NewMultipartUpload etc.
BucketName string // Bucket name
ObjectName string // Object name
Tags []KeyVal // Any additional info not accommodated by above fields
tags []KeyVal // Any additional info not accommodated by above fields
sync.RWMutex
}
// AppendTags - appends key/val to ReqInfo.Tags
func (r *ReqInfo) AppendTags(key string, val string) {
// NewReqInfo - returns a ReqInfo populated with the given request details.
func NewReqInfo(remoteHost, userAgent, requestID, api, bucket, object string) *ReqInfo {
req := ReqInfo{}
req.RemoteHost = remoteHost
req.UserAgent = userAgent
req.API = api
req.RequestID = requestID
req.BucketName = bucket
req.ObjectName = object
return &req
}
// AppendTags - appends key/val to ReqInfo.tags
func (r *ReqInfo) AppendTags(key string, val string) *ReqInfo {
if r == nil {
return
return nil
}
r.Tags = append(r.Tags, KeyVal{key, val})
r.Lock()
defer r.Unlock()
r.tags = append(r.tags, KeyVal{key, val})
return r
}
// SetContext sets ReqInfo in the context.
func SetContext(ctx context.Context, req *ReqInfo) context.Context {
// GetTags - returns the user defined tags
func (r *ReqInfo) GetTags() []KeyVal {
if r == nil {
return nil
}
r.RLock()
defer r.RUnlock()
return append([]KeyVal(nil), r.tags...)
}
// SetReqInfo sets ReqInfo in the context.
func SetReqInfo(ctx context.Context, req *ReqInfo) context.Context {
if ctx == nil {
LogIf(context.Background(), fmt.Errorf("context is nil"))
return nil
}
return context.WithValue(ctx, contextLogKey, req)
}
// GetContext returns ReqInfo if set.
func GetContext(ctx context.Context) *ReqInfo {
r, ok := ctx.Value(contextLogKey).(*ReqInfo)
if ok {
return r
// GetReqInfo returns ReqInfo if set.
func GetReqInfo(ctx context.Context) *ReqInfo {
if ctx != nil {
r, ok := ctx.Value(contextLogKey).(*ReqInfo)
if ok {
return r
}
}
return nil
}
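
Putting the context plumbing together: a caller constructs a ReqInfo at the edge of the request, attaches it with SetReqInfo, and every LogIf on that context inherits the request fields and tags. A usage sketch against the package as defined above (all values are placeholders):

package main

import (
	"context"
	"errors"

	"github.com/minio/minio/cmd/logger"
)

func main() {
	// Build request info once, where the request enters the system.
	reqInfo := logger.NewReqInfo("203.0.113.7", "aws-sdk-go/1.8", "3L137", "PutObject", "testbucket", "photo.jpg")
	reqInfo.AppendTags("objectLocation", "/tmp/disk1/testbucket/photo.jpg")

	// Attach it to the context; LogIf below picks up API, bucket, object and tags.
	ctx := logger.SetReqInfo(context.Background(), reqInfo)
	logger.LogIf(ctx, errors.New("disk full"))
}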

View file

@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
* Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -14,9 +14,10 @@
* limitations under the License.
*/
package cmd
package logger
import (
"context"
"encoding/json"
"fmt"
"go/build"
@ -26,11 +27,17 @@ import (
"strings"
"time"
"github.com/fatih/color"
"github.com/minio/mc/pkg/console"
"github.com/minio/minio/pkg/errors"
)
var log = NewLogger()
// global colors.
var (
colorBold = color.New(color.Bold).SprintFunc()
colorYellow = color.New(color.FgYellow).SprintfFunc()
colorRed = color.New(color.FgRed).SprintfFunc()
)
var trimStrings []string
// Level type
@ -42,6 +49,15 @@ const (
Fatal
)
const loggerTimeFormat string = "15:04:05 MST 01/02/2006"
var matchingFuncNames = [...]string{
"http.HandlerFunc.ServeHTTP",
"cmd.serverMain",
"cmd.StartGateway",
// add more here ..
}
func (level Level) String() string {
var lvlStr string
switch level {
@ -53,61 +69,78 @@ func (level Level) String() string {
return lvlStr
}
type traceEntry struct {
Message string `json:"message"`
Source []string `json:"source"`
Variables map[string]string `json:"variables,omitempty"`
}
type args struct {
Bucket string `json:"bucket,omitempty"`
Object string `json:"object,omitempty"`
}
type api struct {
Name string `json:"name,omitempty"`
Args args `json:"args,omitempty"`
}
type logEntry struct {
Level string `json:"level"`
Message string `json:"message"`
Time string `json:"time"`
Cause string `json:"cause"`
Trace []string `json:"trace"`
Level string `json:"level"`
Time string `json:"time"`
API api `json:"api,omitempty"`
RemoteHost string `json:"remotehost,omitempty"`
RequestID string `json:"requestID,omitempty"`
UserAgent string `json:"userAgent,omitempty"`
Cause string `json:"cause,omitempty"`
Trace traceEntry `json:"error"`
}
// Logger - for console messages
type Logger struct {
quiet bool
json bool
}
// NewLogger - to create a new Logger object
func NewLogger() *Logger {
return &Logger{}
}
// quiet: Hide startup messages if enabled
// jsonFlag: Display in JSON format, if enabled
var (
quiet, jsonFlag bool
)
// EnableQuiet - turns quiet option on.
func (log *Logger) EnableQuiet() {
log.quiet = true
func EnableQuiet() {
quiet = true
}
// EnableJSON - outputs logs in json format.
func (log *Logger) EnableJSON() {
log.json = true
log.quiet = true
func EnableJSON() {
jsonFlag = true
quiet = true
}
// Println - wrapper to console.Println() with quiet flag.
func (log *Logger) Println(args ...interface{}) {
if !log.quiet {
func Println(args ...interface{}) {
if !quiet {
console.Println(args...)
}
}
// Printf - wrapper to console.Printf() with quiet flag.
func (log *Logger) Printf(format string, args ...interface{}) {
if !log.quiet {
func Printf(format string, args ...interface{}) {
if !quiet {
console.Printf(format, args...)
}
}
func init() {
// Init sets trimStrings to the possible GOPATH and GOROOT directories,
// and also appends github.com/minio/minio. These prefixes are trimmed
// from file names when a stack trace is displayed on error.
func Init(goPath string) {
var goPathList []string
var defaultgoPathList []string
// Add all possible GOPATH paths into trimStrings
// Split GOPATH depending on the OS type
if runtime.GOOS == "windows" {
goPathList = strings.Split(GOPATH, ";")
goPathList = strings.Split(goPath, ";")
defaultgoPathList = strings.Split(build.Default.GOPATH, ";")
} else {
// All other types of OSs
goPathList = strings.Split(GOPATH, ":")
goPathList = strings.Split(goPath, ":")
defaultgoPathList = strings.Split(build.Default.GOPATH, ":")
}
@ -155,6 +188,13 @@ func getTrace(traceLevel int) []string {
// Form and append a line of stack trace into a
// collection, 'trace', to build full stack trace
trace = append(trace, fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName))
// Stop collecting the trace once one of the matching function names is reached
for _, name := range matchingFuncNames {
if funcName == name {
return trace
}
}
}
traceLevel++
// Read stack trace information from PC
@ -165,21 +205,7 @@ func getTrace(traceLevel int) []string {
func logIf(level Level, err error, msg string,
data ...interface{}) {
isErrIgnored := func(err error) (ok bool) {
err = errors.Cause(err)
switch err.(type) {
case BucketNotFound, BucketNotEmpty, BucketExists:
ok = true
case ObjectNotFound, ObjectExistsAsDirectory:
ok = true
case BucketPolicyNotFound, InvalidUploadID:
ok = true
}
return ok
}
if err == nil || isErrIgnored(err) {
if err == nil {
return
}
// Get the cause for the Error
@ -187,17 +213,16 @@ func logIf(level Level, err error, msg string,
// Get full stack trace
trace := getTrace(3)
// Get time
timeOfError := UTCNow().Format(time.RFC3339Nano)
timeOfError := time.Now().UTC().Format(time.RFC3339Nano)
// Output the formatted log message at console
var output string
message := fmt.Sprintf(msg, data...)
if log.json {
if jsonFlag {
logJSON, err := json.Marshal(&logEntry{
Level: level.String(),
Message: message,
Time: timeOfError,
Cause: cause,
Trace: trace,
Level: level.String(),
Time: timeOfError,
Cause: cause,
Trace: traceEntry{Source: trace, Message: message},
})
if err != nil {
panic("json marshal of logEntry failed: " + err.Error())
@ -224,10 +249,102 @@ func logIf(level Level, err error, msg string,
}
}
func errorIf(err error, msg string, data ...interface{}) {
logIf(Error, err, msg, data...)
}
func fatalIf(err error, msg string, data ...interface{}) {
// FatalIf - logs the given error and message at Fatal level.
func FatalIf(err error, msg string, data ...interface{}) {
logIf(Fatal, err, msg, data...)
}
// LogIf - logs err, if non-nil, along with any request info stored in ctx.
func LogIf(ctx context.Context, err error) {
if err == nil {
return
}
req := GetReqInfo(ctx)
if req == nil {
req = &ReqInfo{API: "SYSTEM"}
}
API := "SYSTEM"
if req.API != "" {
API = req.API
}
tags := make(map[string]string)
for _, entry := range req.GetTags() {
tags[entry.Key] = entry.Val
}
// Get the cause for the Error
message := err.Error()
// Get full stack trace
trace := getTrace(2)
// Output the formatted log message at console
var output string
if jsonFlag {
logJSON, err := json.Marshal(&logEntry{
Level: Error.String(),
RemoteHost: req.RemoteHost,
RequestID: req.RequestID,
UserAgent: req.UserAgent,
Time: time.Now().UTC().Format(time.RFC3339Nano),
API: api{Name: API, Args: args{Bucket: req.BucketName, Object: req.ObjectName}},
Trace: traceEntry{Message: message, Source: trace, Variables: tags},
})
if err != nil {
panic(err)
}
output = string(logJSON)
} else {
// Add a sequence number and formatting for each stack trace
// No formatting is required for the first entry
for i, element := range trace {
trace[i] = fmt.Sprintf("%8v: %s", i+1, element)
}
tagString := ""
for key, value := range tags {
if value != "" {
if tagString != "" {
tagString += ", "
}
tagString += key + "=" + value
}
}
apiString := "API: " + API + "("
if req.BucketName != "" {
apiString = apiString + "bucket=" + req.BucketName
}
if req.ObjectName != "" {
apiString = apiString + ", object=" + req.ObjectName
}
apiString += ")"
timeString := "Time: " + time.Now().Format(loggerTimeFormat)
var requestID string
if req.RequestID != "" {
requestID = "\nRequestID: " + req.RequestID
}
var remoteHost string
if req.RemoteHost != "" {
remoteHost = "\nRemoteHost: " + req.RemoteHost
}
var userAgent string
if req.UserAgent != "" {
userAgent = "\nUserAgent: " + req.UserAgent
}
if len(tags) > 0 {
tagString = "\n " + tagString
}
output = fmt.Sprintf("\n%s\n%s%s%s%s\nError: %s%s\n%s",
apiString, timeString, requestID, remoteHost, userAgent,
colorRed(colorBold(message)), tagString, strings.Join(trace, "\n"))
}
fmt.Println(output)
}
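
getTrace (shown in part in an earlier hunk) walks program counters with the runtime package until it reaches one of matchingFuncNames. A compact, self-contained sketch of the same walk, with the path trimming and name matching omitted:

package main

import (
	"fmt"
	"runtime"
)

// trace collects "file:line:function()" entries, innermost first, starting
// skip frames above this call, much like getTrace does.
func trace(skip int) []string {
	var out []string
	for {
		pc, file, line, ok := runtime.Caller(skip)
		if !ok {
			break // ran out of stack frames
		}
		name := "unknown"
		if fn := runtime.FuncForPC(pc); fn != nil {
			name = fn.Name()
		}
		out = append(out, fmt.Sprintf("%v:%v:%v()", file, line, name))
		skip++
	}
	return out
}

func main() {
	for _, entry := range trace(1) {
		fmt.Println(entry)
	}
}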

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"errors"
pathutil "path"
"runtime"
@ -29,6 +30,7 @@ import (
"github.com/minio/dsync"
"github.com/minio/lsync"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/cmd/logger"
)
// Global name space lock.
@ -170,9 +172,7 @@ func (n *nsLockMap) lock(volume, path string, lockSource, opsID string, readLock
// pair of <volume, path> and <OperationID> till the lock
// unblocks. The lock for accessing `globalNSMutex` is held inside
// the function itself.
if err := n.statusNoneToBlocked(param, lockSource, opsID, readLock); err != nil {
errorIf(err, fmt.Sprintf("Failed to set lock state to blocked (param = %v; opsID = %s)", param, opsID))
}
n.statusNoneToBlocked(param, lockSource, opsID, readLock)
// Unlock map before Locking NS which might block.
n.lockMapMutex.Unlock()
@ -188,26 +188,19 @@ func (n *nsLockMap) lock(volume, path string, lockSource, opsID string, readLock
n.lockMapMutex.Lock()
defer n.lockMapMutex.Unlock()
// Changing the status of the operation from blocked to none
if err := n.statusBlockedToNone(param, lockSource, opsID, readLock); err != nil {
errorIf(err, fmt.Sprintf("Failed to clear the lock state (param = %v; opsID = %s)", param, opsID))
}
n.statusBlockedToNone(param, lockSource, opsID, readLock)
nsLk.ref-- // Decrement ref count since we failed to get the lock
// delete the lock state entry for given operation ID.
err := n.deleteLockInfoEntryForOps(param, opsID)
if err != nil {
errorIf(err, fmt.Sprintf("Failed to delete lock info entry (param = %v; opsID = %s)", param, opsID))
}
n.deleteLockInfoEntryForOps(param, opsID)
if nsLk.ref == 0 {
// Remove from the map if there are no more references.
delete(n.lockMap, param)
// delete the lock state entry for given
// <volume, path> pair.
err := n.deleteLockInfoEntryForVolumePath(param)
if err != nil {
errorIf(err, fmt.Sprintf("Failed to delete lock info entry (param = %v)", param))
}
n.deleteLockInfoEntryForVolumePath(param)
}
return
}
@ -215,9 +208,7 @@ func (n *nsLockMap) lock(volume, path string, lockSource, opsID string, readLock
// Changing the status of the operation from blocked to
// running. change the state of the lock to be running (from
// blocked) for the given pair of <volume, path> and <OperationID>.
if err := n.statusBlockedToRunning(param, lockSource, opsID, readLock); err != nil {
errorIf(err, "Failed to set the lock state to running")
}
n.statusBlockedToRunning(param, lockSource, opsID, readLock)
return
}
@ -236,17 +227,13 @@ func (n *nsLockMap) unlock(volume, path, opsID string, readLock bool) {
nsLk.Unlock()
}
if nsLk.ref == 0 {
errorIf(errors.New("Namespace reference count cannot be 0"),
"Invalid reference count detected")
logger.LogIf(context.Background(), errors.New("Namespace reference count cannot be 0"))
}
if nsLk.ref != 0 {
nsLk.ref--
// delete the lock state entry for given operation ID.
err := n.deleteLockInfoEntryForOps(param, opsID)
if err != nil {
errorIf(err, "Failed to delete lock info entry")
}
n.deleteLockInfoEntryForOps(param, opsID)
}
if nsLk.ref == 0 {
// Remove from the map if there are no more references.
@ -254,10 +241,7 @@ func (n *nsLockMap) unlock(volume, path, opsID string, readLock bool) {
// delete the lock state entry for given
// <volume, path> pair.
err := n.deleteLockInfoEntryForVolumePath(param)
if err != nil {
errorIf(err, "Failed to delete lock info entry")
}
n.deleteLockInfoEntryForVolumePath(param)
}
}
}
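A minimal caller-side sketch of this map, assuming lock blocks until acquired and that mustGetUUID (defined elsewhere in this package) supplies the operation ID; the function name and lock source are hypothetical:

func readWithNSLock(n *nsLockMap, volume, object string) {
	opsID := mustGetUUID()            // unique ID for this operation
	source := "object-handlers.go:42" // lockSource: caller location, illustrative
	readLock := true

	n.lock(volume, object, source, opsID, readLock)
	defer n.unlock(volume, object, opsID, readLock)

	// ... read the object while holding the namespace read lock ...
}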

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"errors"
"fmt"
"net"
@ -30,6 +31,7 @@ import (
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/cmd/logger"
)
// IPv4 addresses of local host.
@ -38,7 +40,7 @@ var localIP4 = mustGetLocalIP4()
// mustSplitHostPort is a wrapper to net.SplitHostPort() where error is assumed to be a fatal.
func mustSplitHostPort(hostPort string) (host, port string) {
host, port, err := net.SplitHostPort(hostPort)
fatalIf(err, "Unable to split host port %s", hostPort)
logger.FatalIf(err, "Unable to split host port %s", hostPort)
return host, port
}
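Usage sketch (the address is illustrative); because logger.FatalIf exits the process on error, callers can use both results unconditionally:

host, port := mustSplitHostPort("localhost:9000") // host="localhost", port="9000"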
@ -46,7 +48,7 @@ func mustSplitHostPort(hostPort string) (host, port string) {
func mustGetLocalIP4() (ipList set.StringSet) {
ipList = set.NewStringSet()
addrs, err := net.InterfaceAddrs()
fatalIf(err, "Unable to get IP addresses of this host.")
logger.FatalIf(err, "Unable to get IP addresses of this host.")
for _, addr := range addrs {
var ip net.IP
@ -98,8 +100,10 @@ func getHostIP4(host string) (ipList set.StringSet, err error) {
if timeElapsed > time.Second {
// Log a message to the console about the host not being resolvable.
errorIf(err, "Unable to resolve host %s (%s)", host,
humanize.RelTime(startTime, startTime.Add(timeElapsed), "elapsed", ""))
reqInfo := (&logger.ReqInfo{}).AppendTags("host", host)
reqInfo.AppendTags("elapsedTime", humanize.RelTime(startTime, startTime.Add(timeElapsed), "elapsed", ""))
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
}
}
}
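The tagging idiom above generalizes. A hypothetical helper, assuming only the logger API introduced in this diff (AppendTags, SetReqInfo, and LogIf, which is expected to be a no-op for a nil error):

func logWithTags(err error, kv ...string) {
	reqInfo := &logger.ReqInfo{}
	for i := 0; i+1 < len(kv); i += 2 {
		reqInfo.AppendTags(kv[i], kv[i+1]) // attach key/value pairs to this request
	}
	ctx := logger.SetReqInfo(context.Background(), reqInfo)
	logger.LogIf(ctx, err) // no-op when err == nil
}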

View file

@ -26,6 +26,7 @@ import (
"path"
"sync"
"github.com/minio/minio/cmd/logger"
xerrors "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/hash"
@ -162,7 +163,7 @@ func (sys *NotificationSys) RemoteTargetExist(bucketName string, targetID event.
}
// initListeners - initializes PeerRPC clients available in listener.json.
func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string) error {
func (sys *NotificationSys) initListeners(ctx context.Context, objAPI ObjectLayer, bucketName string) error {
// listener.json is available/applicable only in DistXL mode.
if !globalIsDistXL {
return nil
@ -181,7 +182,7 @@ func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string)
}
defer objLock.Unlock()
reader, err := readConfig(objAPI, configFile)
reader, err := readConfig(ctx, objAPI, configFile)
if err != nil && !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
return err
}
@ -189,8 +190,8 @@ func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string)
listenerList := []ListenBucketNotificationArgs{}
if reader != nil {
if err = json.NewDecoder(reader).Decode(&listenerList); err != nil {
errorIf(err, "Unable to parse listener.json.")
return xerrors.Trace(err)
logger.LogIf(ctx, err)
return err
}
}
@ -203,7 +204,8 @@ func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string)
for _, args := range listenerList {
var found bool
if found, err = isLocalHost(args.Addr.Name); err != nil {
errorIf(err, "unable to check address %v is local host", args.Addr)
logger.GetReqInfo(ctx).AppendTags("host", args.Addr.Name)
logger.LogIf(ctx, err)
return err
}
if found {
@ -218,6 +220,8 @@ func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string)
var exist bool
if exist, err = rpcClient.RemoteTargetExist(bucketName, args.TargetID); err != nil {
logger.GetReqInfo(ctx).AppendTags("targetID", args.TargetID.Name)
logger.LogIf(ctx, err)
return err
}
if !exist {
@ -228,6 +232,8 @@ func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string)
target := NewPeerRPCClientTarget(bucketName, args.TargetID, rpcClient)
rulesMap := event.NewRulesMap(args.EventNames, args.Pattern, target.ID())
if err = sys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
logger.GetReqInfo(ctx).AppendTags("targetID", target.id.Name)
logger.LogIf(ctx, err)
return err
}
activeListenerList = append(activeListenerList, args)
@ -235,6 +241,7 @@ func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string)
data, err := json.Marshal(activeListenerList)
if err != nil {
logger.LogIf(ctx, err)
return err
}
@ -253,18 +260,17 @@ func (sys *NotificationSys) Init(objAPI ObjectLayer) error {
}
for _, bucket := range buckets {
config, err := readNotificationConfig(objAPI, bucket.Name)
ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{BucketName: bucket.Name})
config, err := readNotificationConfig(ctx, objAPI, bucket.Name)
if err != nil {
if !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
errorIf(err, "Unable to load notification configuration of bucket %v", bucket.Name)
return err
}
} else {
sys.AddRulesMap(bucket.Name, config.ToRulesMap())
}
if err = sys.initListeners(objAPI, bucket.Name); err != nil {
errorIf(err, "Unable to initialize HTTP listener for bucket %v", bucket.Name)
if err = sys.initListeners(ctx, objAPI, bucket.Name); err != nil {
return err
}
}
@ -325,7 +331,9 @@ func (sys *NotificationSys) RemoveAllRemoteTargets() {
// RemoveRemoteTarget - closes and removes target by target ID.
func (sys *NotificationSys) RemoveRemoteTarget(bucketName string, targetID event.TargetID) {
for id, err := range sys.targetList.Remove(targetID) {
errorIf(err, "unable to close target ID %v", id)
reqInfo := (&logger.ReqInfo{}).AppendTags("targetID", id.Name)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
}
sys.Lock()
@ -457,8 +465,11 @@ func sendEvent(args eventArgs) {
}
for targetID, err := range globalNotificationSys.Send(args) {
errorIf(err, "unable to send event %v of bucket: %v, object: %v to target %v",
args.EventName, args.BucketName, args.Object.Name, targetID)
reqInfo := &logger.ReqInfo{BucketName: args.BucketName, ObjectName: args.Object.Name}
reqInfo.AppendTags("EventName", args.EventName.String())
reqInfo.AppendTags("targetID", targetID.Name)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
}
}
@ -472,36 +483,39 @@ func saveConfig(objAPI ObjectLayer, configFile string, data []byte) error {
return err
}
func readConfig(objAPI ObjectLayer, configFile string) (*bytes.Buffer, error) {
func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) (*bytes.Buffer, error) {
var buffer bytes.Buffer
// Read entire content by setting size to -1
err := objAPI.GetObject(context.Background(), minioMetaBucket, configFile, 0, -1, &buffer, "")
err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "")
if err != nil {
// Ignore if err is ObjectNotFound or IncompleteBody when the bucket is not configured with notifications
if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
return nil, xerrors.Trace(errNoSuchNotifications)
return nil, errNoSuchNotifications
}
errorIf(err, "Unable to read file %v", configFile)
logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
logger.LogIf(ctx, err)
return nil, err
}
// Return NoSuchNotifications on empty content.
if buffer.Len() == 0 {
return nil, xerrors.Trace(errNoSuchNotifications)
return nil, errNoSuchNotifications
}
return &buffer, nil
}
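Caller-side sketch of readConfig, mirroring the listener.json readers elsewhere in this diff; errNoSuchNotifications doubles as the sentinel for an absent or empty config:

reader, err := readConfig(ctx, objAPI, configFile)
if err != nil && err != errNoSuchNotifications {
	return err
}
listenerList := []ListenBucketNotificationArgs{}
if reader != nil { // nil reader means no config was found
	if err = json.NewDecoder(reader).Decode(&listenerList); err != nil {
		logger.LogIf(ctx, err)
		return err
	}
}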
func readNotificationConfig(objAPI ObjectLayer, bucketName string) (*event.Config, error) {
func readNotificationConfig(ctx context.Context, objAPI ObjectLayer, bucketName string) (*event.Config, error) {
// Construct path to notification.xml for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucketName, bucketNotificationConfig)
reader, err := readConfig(objAPI, configFile)
reader, err := readConfig(ctx, objAPI, configFile)
if err != nil {
return nil, err
}
return event.ParseConfig(reader, globalServerConfig.GetRegion(), globalNotificationSys.targetList)
config, err := event.ParseConfig(reader, globalServerConfig.GetRegion(), globalNotificationSys.targetList)
logger.LogIf(ctx, err)
return config, err
}
func saveNotificationConfig(objAPI ObjectLayer, bucketName string, config *event.Config) error {
@ -521,6 +535,8 @@ func SaveListener(objAPI ObjectLayer, bucketName string, eventNames []event.Name
return nil
}
ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{BucketName: bucketName})
// Construct path to listener.json for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig)
transactionConfigFile := configFile + ".transaction"
@ -534,7 +550,7 @@ func SaveListener(objAPI ObjectLayer, bucketName string, eventNames []event.Name
}
defer objLock.Unlock()
reader, err := readConfig(objAPI, configFile)
reader, err := readConfig(ctx, objAPI, configFile)
if err != nil && !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
return err
}
@ -542,8 +558,8 @@ func SaveListener(objAPI ObjectLayer, bucketName string, eventNames []event.Name
listenerList := []ListenBucketNotificationArgs{}
if reader != nil {
if err = json.NewDecoder(reader).Decode(&listenerList); err != nil {
errorIf(err, "Unable to parse listener.json.")
return xerrors.Trace(err)
logger.LogIf(ctx, err)
return err
}
}
@ -556,6 +572,7 @@ func SaveListener(objAPI ObjectLayer, bucketName string, eventNames []event.Name
data, err := json.Marshal(listenerList)
if err != nil {
logger.LogIf(ctx, err)
return err
}
@ -569,6 +586,8 @@ func RemoveListener(objAPI ObjectLayer, bucketName string, targetID event.Target
return nil
}
ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{BucketName: bucketName})
// Construct path to listener.json for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig)
transactionConfigFile := configFile + ".transaction"
@ -582,7 +601,7 @@ func RemoveListener(objAPI ObjectLayer, bucketName string, targetID event.Target
}
defer objLock.Unlock()
reader, err := readConfig(objAPI, configFile)
reader, err := readConfig(ctx, objAPI, configFile)
if err != nil && !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
return err
}
@ -590,8 +609,8 @@ func RemoveListener(objAPI ObjectLayer, bucketName string, targetID event.Target
listenerList := []ListenBucketNotificationArgs{}
if reader != nil {
if err = json.NewDecoder(reader).Decode(&listenerList); err != nil {
errorIf(err, "Unable to parse listener.json.")
return xerrors.Trace(err)
logger.LogIf(ctx, err)
return err
}
}
@ -612,6 +631,7 @@ func RemoveListener(objAPI ObjectLayer, bucketName string, targetID event.Target
data, err := json.Marshal(activeListenerList)
if err != nil {
logger.LogIf(ctx, err)
return err
}

View file

@ -22,7 +22,7 @@ import (
"sync"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
const (
@ -83,15 +83,15 @@ func dirObjectInfo(bucket, object string, size int64, metadata map[string]string
}
}
func deleteBucketMetadata(bucket string, objAPI ObjectLayer) {
func deleteBucketMetadata(ctx context.Context, bucket string, objAPI ObjectLayer) {
// Delete bucket access policy, if present - ignore any errors.
_ = removeBucketPolicy(bucket, objAPI)
removeBucketPolicy(ctx, bucket, objAPI)
// Delete notification config, if present - ignore any errors.
_ = removeNotificationConfig(objAPI, bucket)
removeNotificationConfig(ctx, objAPI, bucket)
// Delete listener config, if present - ignore any errors.
_ = removeListenerConfig(objAPI, bucket)
removeListenerConfig(ctx, objAPI, bucket)
}
// Depending on the disk type network or local, initialize storage API.
@ -104,13 +104,15 @@ func newStorageAPI(endpoint Endpoint) (storage StorageAPI, err error) {
}
// Cleanup a directory recursively.
func cleanupDir(storage StorageAPI, volume, dirPath string) error {
func cleanupDir(ctx context.Context, storage StorageAPI, volume, dirPath string) error {
var delFunc func(string) error
// Function to delete entries recursively.
delFunc = func(entryPath string) error {
if !hasSuffix(entryPath, slashSeparator) {
// Delete the file entry.
return errors.Trace(storage.DeleteFile(volume, entryPath))
err := storage.DeleteFile(volume, entryPath)
logger.LogIf(ctx, err)
return err
}
// If it's a directory, list and call delFunc() for each entry.
@ -119,12 +121,15 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error {
if err == errFileNotFound {
return nil
} else if err != nil { // For any other errors fail.
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
} // else on success..
// Directory has no entries, just delete it.
if len(entries) == 0 {
return errors.Trace(storage.DeleteFile(volume, path.Clean(entryPath)))
err = storage.DeleteFile(volume, path.Clean(entryPath))
logger.LogIf(ctx, err)
return err
}
// Recurse and delete all other entries.
@ -140,21 +145,19 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error {
}
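cleanupDir walks depth-first: files are deleted directly, empty directories are removed with path.Clean, and non-empty directories recurse; failures now surface through LogIf rather than errors.Trace. A usage sketch (the prefix is illustrative; minioMetaTmpBucket is the package's temporary volume):

// Delete everything under a temporary upload prefix on one endpoint;
// per-entry failures are already logged inside cleanupDir via LogIf.
func removeTempUpload(ctx context.Context, storage StorageAPI, uploadID string) error {
	return cleanupDir(ctx, storage, minioMetaTmpBucket, uploadID+"/")
}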
// Removes notification.xml for a given bucket, only used during DeleteBucket.
func removeNotificationConfig(objAPI ObjectLayer, bucket string) error {
func removeNotificationConfig(ctx context.Context, objAPI ObjectLayer, bucket string) error {
// Verify bucket is valid.
if !IsValidBucketName(bucket) {
return BucketNameInvalid{Bucket: bucket}
}
ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
return objAPI.DeleteObject(context.Background(), minioMetaBucket, ncPath)
return objAPI.DeleteObject(ctx, minioMetaBucket, ncPath)
}
// Remove listener configuration from storage layer. Used when a bucket is deleted.
func removeListenerConfig(objAPI ObjectLayer, bucket string) error {
func removeListenerConfig(ctx context.Context, objAPI ObjectLayer, bucket string) error {
// make the path
lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig)
return objAPI.DeleteObject(context.Background(), minioMetaBucket, lcPath)
return objAPI.DeleteObject(ctx, minioMetaBucket, lcPath)
}

View file

@ -19,165 +19,200 @@ package cmd
import (
"context"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors"
"github.com/skyrings/skyring-common/tools/uuid"
)
// Checks on GetObject arguments, bucket and object.
func checkGetObjArgs(bucket, object string) error {
return checkBucketAndObjectNames(bucket, object)
func checkGetObjArgs(ctx context.Context, bucket, object string) error {
return checkBucketAndObjectNames(ctx, bucket, object)
}
// Checks on DeleteObject arguments, bucket and object.
func checkDelObjArgs(bucket, object string) error {
return checkBucketAndObjectNames(bucket, object)
func checkDelObjArgs(ctx context.Context, bucket, object string) error {
return checkBucketAndObjectNames(ctx, bucket, object)
}
// Checks bucket and object name validity, returns nil if both are valid.
func checkBucketAndObjectNames(bucket, object string) error {
func checkBucketAndObjectNames(ctx context.Context, bucket, object string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return errors.Trace(BucketNameInvalid{Bucket: bucket})
logger.LogIf(ctx, BucketNameInvalid{Bucket: bucket})
return BucketNameInvalid{Bucket: bucket}
}
// Verify if object is valid.
if len(object) == 0 {
return errors.Trace(ObjectNameInvalid{Bucket: bucket, Object: object})
logger.LogIf(ctx, ObjectNameInvalid{Bucket: bucket, Object: object})
return ObjectNameInvalid{Bucket: bucket, Object: object}
}
if !IsValidObjectPrefix(object) {
return errors.Trace(ObjectNameInvalid{Bucket: bucket, Object: object})
logger.LogIf(ctx, ObjectNameInvalid{Bucket: bucket, Object: object})
return ObjectNameInvalid{Bucket: bucket, Object: object}
}
return nil
}
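The LogIf-then-return pairs above repeat throughout these checks; a hypothetical helper could collapse them (sketch only, not part of this change):

func logAndReturn(ctx context.Context, err error) error {
	logger.LogIf(ctx, err)
	return err
}

// e.g. inside checkBucketAndObjectNames:
//	if !IsValidBucketName(bucket) {
//		return logAndReturn(ctx, BucketNameInvalid{Bucket: bucket})
//	}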
// Checks for all ListObjects arguments validity.
func checkListObjsArgs(bucket, prefix, marker, delimiter string, obj ObjectLayer) error {
func checkListObjsArgs(ctx context.Context, bucket, prefix, marker, delimiter string, obj ObjectLayer) error {
// Verify if bucket exists before validating object name.
// This is done on purpose since the order of errors is
// important here: a bucket-does-not-exist error should
// be returned before an invalid-object-name error.
// FIXME: should be moved to handler layer.
if err := checkBucketExist(bucket, obj); err != nil {
return errors.Trace(err)
if err := checkBucketExist(ctx, bucket, obj); err != nil {
return err
}
// Validates object prefix validity after bucket exists.
if !IsValidObjectPrefix(prefix) {
return errors.Trace(ObjectNameInvalid{
logger.LogIf(ctx, ObjectNameInvalid{
Bucket: bucket,
Object: prefix,
})
return ObjectNameInvalid{
Bucket: bucket,
Object: prefix,
}
}
// Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator {
return errors.Trace(UnsupportedDelimiter{
logger.LogIf(ctx, UnsupportedDelimiter{
Delimiter: delimiter,
})
return UnsupportedDelimiter{
Delimiter: delimiter,
}
}
// Verify if marker has prefix.
if marker != "" && !hasPrefix(marker, prefix) {
return errors.Trace(InvalidMarkerPrefixCombination{
logger.LogIf(ctx, InvalidMarkerPrefixCombination{
Marker: marker,
Prefix: prefix,
})
return InvalidMarkerPrefixCombination{
Marker: marker,
Prefix: prefix,
}
}
return nil
}
// Checks for all ListMultipartUploads arguments validity.
func checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, obj ObjectLayer) error {
if err := checkListObjsArgs(bucket, prefix, keyMarker, delimiter, obj); err != nil {
func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, obj ObjectLayer) error {
if err := checkListObjsArgs(ctx, bucket, prefix, keyMarker, delimiter, obj); err != nil {
return err
}
if uploadIDMarker != "" {
if hasSuffix(keyMarker, slashSeparator) {
return errors.Trace(InvalidUploadIDKeyCombination{
logger.LogIf(ctx, InvalidUploadIDKeyCombination{
UploadIDMarker: uploadIDMarker,
KeyMarker: keyMarker,
})
return InvalidUploadIDKeyCombination{
UploadIDMarker: uploadIDMarker,
KeyMarker: keyMarker,
}
}
id, err := uuid.Parse(uploadIDMarker)
if err != nil {
return errors.Trace(err)
logger.LogIf(ctx, err)
return err
}
if id.IsZero() {
return errors.Trace(MalformedUploadID{
logger.LogIf(ctx, MalformedUploadID{
UploadID: uploadIDMarker,
})
return MalformedUploadID{
UploadID: uploadIDMarker,
}
}
}
return nil
}
// Checks for NewMultipartUpload arguments validity, also validates if bucket exists.
func checkNewMultipartArgs(bucket, object string, obj ObjectLayer) error {
return checkObjectArgs(bucket, object, obj)
func checkNewMultipartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
return checkObjectArgs(ctx, bucket, object, obj)
}
// Checks for PutObjectPart arguments validity, also validates if bucket exists.
func checkPutObjectPartArgs(bucket, object string, obj ObjectLayer) error {
return checkObjectArgs(bucket, object, obj)
func checkPutObjectPartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
return checkObjectArgs(ctx, bucket, object, obj)
}
// Checks for ListParts arguments validity, also validates if bucket exists.
func checkListPartsArgs(bucket, object string, obj ObjectLayer) error {
return checkObjectArgs(bucket, object, obj)
func checkListPartsArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
return checkObjectArgs(ctx, bucket, object, obj)
}
// Checks for CompleteMultipartUpload arguments validity, also validates if bucket exists.
func checkCompleteMultipartArgs(bucket, object string, obj ObjectLayer) error {
return checkObjectArgs(bucket, object, obj)
func checkCompleteMultipartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
return checkObjectArgs(ctx, bucket, object, obj)
}
// Checks for AbortMultipartUpload arguments validity, also validates if bucket exists.
func checkAbortMultipartArgs(bucket, object string, obj ObjectLayer) error {
return checkObjectArgs(bucket, object, obj)
func checkAbortMultipartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
return checkObjectArgs(ctx, bucket, object, obj)
}
// Checks Object arguments validity, also validates if bucket exists.
func checkObjectArgs(bucket, object string, obj ObjectLayer) error {
func checkObjectArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
// Verify if bucket exists before validating object name.
// This is done on purpose since the order of errors is
// important here: a bucket-does-not-exist error should
// be returned before an invalid-object-name error.
// FIXME: should be moved to handler layer.
if err := checkBucketExist(bucket, obj); err != nil {
return errors.Trace(err)
if err := checkBucketExist(ctx, bucket, obj); err != nil {
return err
}
// Validates object name validity after bucket exists.
if !IsValidObjectName(object) {
return errors.Trace(ObjectNameInvalid{
logger.LogIf(ctx, ObjectNameInvalid{
Bucket: bucket,
Object: object,
})
return ObjectNameInvalid{
Bucket: bucket,
Object: object,
}
}
return nil
}
// Checks for PutObject arguments validity, also validates if bucket exists.
func checkPutObjectArgs(bucket, object string, obj ObjectLayer, size int64) error {
func checkPutObjectArgs(ctx context.Context, bucket, object string, obj ObjectLayer, size int64) error {
// Verify if bucket exists before validating object name.
// This is done on purpose since the order of errors is
// important here: a bucket-does-not-exist error should
// be returned before an invalid-object-name error.
// FIXME: should be moved to handler layer.
if err := checkBucketExist(bucket, obj); err != nil {
return errors.Trace(err)
if err := checkBucketExist(ctx, bucket, obj); err != nil {
return err
}
if len(object) == 0 ||
hasPrefix(object, slashSeparator) ||
(hasSuffix(object, slashSeparator) && size != 0) ||
!IsValidObjectPrefix(object) {
return errors.Trace(ObjectNameInvalid{
logger.LogIf(ctx, ObjectNameInvalid{
Bucket: bucket,
Object: object,
})
return ObjectNameInvalid{
Bucket: bucket,
Object: object,
}
}
return nil
}
// Checks whether bucket exists and returns appropriate error if not.
func checkBucketExist(bucket string, obj ObjectLayer) error {
_, err := obj.GetBucketInfo(context.Background(), bucket)
func checkBucketExist(ctx context.Context, bucket string, obj ObjectLayer) error {
_, err := obj.GetBucketInfo(ctx, bucket)
if err != nil {
return errors.Cause(err)
}

View file

@ -1847,7 +1847,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
},
},
}
s3MD5, err := getCompleteMultipartMD5(inputParts[3].parts)
s3MD5, err := getCompleteMultipartMD5(context.Background(), inputParts[3].parts)
if err != nil {
t.Fatalf("Obtaining S3MD5 failed")
}

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"encoding/hex"
"fmt"
"path"
@ -24,7 +25,7 @@ import (
"strings"
"unicode/utf8"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
"github.com/skyrings/skyring-common/tools/uuid"
)
@ -174,12 +175,13 @@ func mustGetUUID() string {
}
// Create an s3 compatible MD5sum for complete multipart transaction.
func getCompleteMultipartMD5(parts []CompletePart) (string, error) {
func getCompleteMultipartMD5(ctx context.Context, parts []CompletePart) (string, error) {
var finalMD5Bytes []byte
for _, part := range parts {
md5Bytes, err := hex.DecodeString(part.ETag)
if err != nil {
return "", errors.Trace(err)
logger.LogIf(ctx, err)
return "", err
}
finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
}
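The tail of the function is elided above; assuming it follows the standard S3 convention (MD5 over the concatenated binary part MD5s, suffixed with the part count), a self-contained sketch with a hypothetical name, using crypto/md5, encoding/hex, and fmt:

func completeMultipartETag(partETags []string) (string, error) {
	var all []byte
	for _, etag := range partETags {
		b, err := hex.DecodeString(etag) // each part ETag is a hex MD5
		if err != nil {
			return "", err
		}
		all = append(all, b...)
	}
	sum := md5.Sum(all)
	// e.g. "9b2cf535f27731c974343645a3985328-3" for a 3-part upload
	return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(partETags)), nil
}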

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"reflect"
"testing"
)
@ -148,7 +149,7 @@ func TestGetCompleteMultipartMD5(t *testing.T) {
}
for i, test := range testCases {
result, err := getCompleteMultipartMD5(test.parts)
result, err := getCompleteMultipartMD5(context.Background(), test.parts)
if result != test.expectedResult {
t.Fatalf("test %d failed: expected: result=%v, got=%v", i+1, test.expectedResult, result)
}

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"crypto/hmac"
"encoding/binary"
"encoding/hex"
@ -31,6 +32,7 @@ import (
"strconv"
mux "github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/handlers"
@ -63,12 +65,12 @@ func setHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) {
// this is in keeping with the permissions sections of the docs of both:
// HEAD Object: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
// GET Object: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
func errAllowableObjectNotFound(bucket string, r *http.Request) APIErrorCode {
func errAllowableObjectNotFound(ctx context.Context, bucket string, r *http.Request) APIErrorCode {
if getRequestAuthType(r) == authTypeAnonymous {
// We care about the bucket as a whole, not a particular resource.
resource := "/" + bucket
sourceIP := handlers.GetSourceIP(r)
if s3Error := enforceBucketPolicy(bucket, "s3:ListBucket", resource,
if s3Error := enforceBucketPolicy(ctx, bucket, "s3:ListBucket", resource,
r.Referer(), sourceIP, r.URL.Query()); s3Error != ErrNone {
return ErrAccessDenied
}
@ -95,7 +97,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
return
}
if s3Error := checkRequestAuthType(r, bucket, "s3:GetObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:GetObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -109,7 +111,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
if err != nil {
apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey {
apiErr = errAllowableObjectNotFound(bucket, r)
apiErr = errAllowableObjectNotFound(ctx, bucket, r)
}
writeErrorResponse(w, apiErr, r.URL)
return
@ -135,7 +137,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
}
// log the error.
errorIf(err, "Invalid request range")
logger.LogIf(ctx, err)
}
}
@ -182,7 +184,6 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
// Reads the object at startOffset and writes to mw.
if err = getObject(ctx, bucket, object, startOffset, length, httpWriter, objInfo.ETag); err != nil {
errorIf(err, "Unable to write to client.")
if !httpWriter.HasWritten() { // write error response only if no data has been written to client yet
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
}
@ -232,7 +233,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
return
}
if s3Error := checkRequestAuthType(r, bucket, "s3:GetObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:GetObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponseHeadersOnly(w, s3Error)
return
}
@ -246,7 +247,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
if err != nil {
apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey {
apiErr = errAllowableObjectNotFound(bucket, r)
apiErr = errAllowableObjectNotFound(ctx, bucket, r)
}
writeErrorResponseHeadersOnly(w, apiErr)
return
@ -300,7 +301,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
// Extract metadata relevant for a CopyObject operation based on conditional
// header values specified in X-Amz-Metadata-Directive.
func getCpObjMetadataFromHeader(header http.Header, userMeta map[string]string) (map[string]string, error) {
func getCpObjMetadataFromHeader(ctx context.Context, header http.Header, userMeta map[string]string) (map[string]string, error) {
// Make a copy of the supplied metadata to avoid
// to change the original one.
defaultMeta := make(map[string]string, len(userMeta))
@ -311,7 +312,7 @@ func getCpObjMetadataFromHeader(header http.Header, userMeta map[string]string)
// if x-amz-metadata-directive says REPLACE then
// we extract metadata from the input headers.
if isMetadataReplace(header) {
return extractMetadataFromHeader(header)
return extractMetadataFromHeader(ctx, header)
}
// if x-amz-metadata-directive says COPY then we
@ -340,7 +341,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
return
}
if s3Error := checkRequestAuthType(r, dstBucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, dstBucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -501,10 +502,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
}
srcInfo.Writer = writer
srcInfo.UserDefined, err = getCpObjMetadataFromHeader(r.Header, srcInfo.UserDefined)
srcInfo.UserDefined, err = getCpObjMetadataFromHeader(ctx, r.Header, srcInfo.UserDefined)
if err != nil {
pipeWriter.CloseWithError(err)
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
@ -628,9 +628,8 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
}
// Extract metadata to be saved from incoming HTTP header.
metadata, err := extractMetadataFromHeader(r.Header)
metadata, err := extractMetadataFromHeader(ctx, r.Header)
if err != nil {
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
@ -670,7 +669,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
sourceIP := handlers.GetSourceIP(r)
if s3Err = enforceBucketPolicy(bucket, "s3:PutObject", r.URL.Path, r.Referer(), sourceIP, r.URL.Query()); s3Err != ErrNone {
if s3Err = enforceBucketPolicy(ctx, bucket, "s3:PutObject", r.URL.Path, r.Referer(), sourceIP, r.URL.Query()); s3Err != ErrNone {
writeErrorResponse(w, s3Err, r.URL)
return
}
@ -782,7 +781,8 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
if s3Error := checkRequestAuthType(r, bucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -825,9 +825,8 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
}
// Extract metadata that needs to be saved.
metadata, err := extractMetadataFromHeader(r.Header)
metadata, err := extractMetadataFromHeader(ctx, r.Header)
if err != nil {
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
@ -869,7 +868,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
return
}
if s3Error := checkRequestAuthType(r, dstBucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, dstBucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -931,7 +930,8 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
if hrange, err = parseCopyPartRange(rangeHeader, srcInfo.Size); err != nil {
// Handle only errInvalidRange
// Ignore other parse errors and treat it as a regular Get request, like Amazon S3.
errorIf(err, "Unable to extract range %s", rangeHeader)
logger.GetReqInfo(ctx).AppendTags("rangeHeader", rangeHeader)
logger.LogIf(ctx, err)
writeCopyPartErr(w, err, r.URL)
return
}
@ -1135,7 +1135,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if s3Error := enforceBucketPolicy(bucket, "s3:PutObject", r.URL.Path,
if s3Error := enforceBucketPolicy(ctx, bucket, "s3:PutObject", r.URL.Path,
r.Referer(), handlers.GetSourceIP(r), r.URL.Query()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
@ -1263,7 +1263,7 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
if api.CacheAPI() != nil {
abortMultipartUpload = api.CacheAPI().AbortMultipartUpload
}
if s3Error := checkRequestAuthType(r, bucket, "s3:AbortMultipartUpload", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:AbortMultipartUpload", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -1278,7 +1278,6 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
uploadID, _, _, _ := getObjectResources(r.URL.Query())
if err := abortMultipartUpload(ctx, bucket, object, uploadID); err != nil {
errorIf(err, "AbortMultipartUpload failed")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
@ -1299,7 +1298,7 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht
return
}
if s3Error := checkRequestAuthType(r, bucket, "s3:ListMultipartUploadParts", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListMultipartUploadParts", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -1339,7 +1338,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
return
}
if s3Error := checkRequestAuthType(r, bucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -1449,7 +1448,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
return
}
if s3Error := checkRequestAuthType(r, bucket, "s3:DeleteObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:DeleteObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
@ -1466,8 +1465,6 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
// Ignore delete object errors while replying to client, since we are
// supposed to reply only 204. Additionally log the error for
// investigation.
if err := deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r); err != nil {
errorIf(err, "Unable to delete an object %s", pathJoin(bucket, object))
}
deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r)
writeSuccessNoContent(w)
}

View file

@ -2206,7 +2206,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
}
// on successful complete multipart operation the s3MD5 for the parts uploaded will be returned.
s3MD5, err := getCompleteMultipartMD5(inputParts[3].parts)
s3MD5, err := getCompleteMultipartMD5(context.Background(), inputParts[3].parts)
if err != nil {
t.Fatalf("Obtaining S3MD5 failed")
}

View file

@ -22,7 +22,7 @@ import (
"path"
"github.com/gorilla/mux"
xerrors "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/event"
xnet "github.com/minio/minio/pkg/net"
)
@ -103,7 +103,10 @@ func (receiver *PeerRPCReceiver) ListenBucketNotification(args *ListenBucketNoti
target := NewPeerRPCClientTarget(args.BucketName, args.TargetID, rpcClient)
rulesMap := event.NewRulesMap(args.EventNames, args.Pattern, target.ID())
if err := globalNotificationSys.AddRemoteTarget(args.BucketName, target, rulesMap); err != nil {
errorIf(err, "Unable to add PeerRPCClientTarget %v to globalNotificationSys.targetList.", target)
reqInfo := &logger.ReqInfo{BucketName: target.bucketName}
reqInfo.AppendTags("target", target.id.Name)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return err
}
return nil
@ -158,7 +161,10 @@ func (receiver *PeerRPCReceiver) SendEvent(args *SendEventArgs, reply *SendEvent
}
if err != nil {
errorIf(err, "unable to send event %v to target %v", args.Event, args.TargetID)
reqInfo := (&logger.ReqInfo{}).AppendTags("Event", args.Event.EventName.String())
reqInfo.AppendTags("target", args.TargetID.Name)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
}
reply.Error = err
@ -169,7 +175,8 @@ func (receiver *PeerRPCReceiver) SendEvent(args *SendEventArgs, reply *SendEvent
func registerS3PeerRPCRouter(router *mux.Router) error {
peerRPCServer := newRPCServer()
if err := peerRPCServer.RegisterName("Peer", &PeerRPCReceiver{}); err != nil {
return xerrors.Trace(err)
logger.LogIf(context.Background(), err)
return err
}
subrouter := router.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
@ -250,7 +257,11 @@ func (rpcClient *PeerRPCClient) SendEvent(bucketName string, targetID, remoteTar
}
if reply.Error != nil {
errorIf(reply.Error, "unable to send event %v to rpc target %v of bucket %v", args, targetID, bucketName)
reqInfo := &logger.ReqInfo{BucketName: bucketName}
reqInfo.AppendTags("targetID", targetID.Name)
reqInfo.AppendTags("event", eventData.EventName.String())
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, reply.Error)
globalNotificationSys.RemoveRemoteTarget(bucketName, targetID)
}

View file

@ -19,10 +19,13 @@
package cmd
import (
"context"
"io"
"os"
"path"
"strings"
"github.com/minio/minio/cmd/logger"
)
// Return all the entries at the directory dirPath.
@ -57,7 +60,9 @@ func readDir(dirPath string) (entries []string, err error) {
var st os.FileInfo
st, err = os.Stat((path.Join(dirPath, fi.Name())))
if err != nil {
errorIf(err, "Unable to stat path %s", path.Join(dirPath, fi.Name()))
reqInfo := (&logger.ReqInfo{}).AppendTags("path", path.Join(dirPath, fi.Name()))
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
continue
}
// Append to entries if symbolic link exists and is valid.
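os.Stat follows symlinks (unlike os.Lstat), so a dangling link fails the stat and the entry is skipped with a tagged log line. A standalone sketch of the filter (hypothetical helper):

func filterResolvable(dirPath string, names []string) (valid []string) {
	for _, name := range names {
		if _, err := os.Stat(path.Join(dirPath, name)); err != nil {
			continue // dangling symlink, or entry removed concurrently
		}
		valid = append(valid, name)
	}
	return valid
}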

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"encoding/hex"
"io"
"io/ioutil"
@ -30,6 +31,7 @@ import (
"syscall"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/disk"
)
@ -75,7 +77,7 @@ func isDirEmpty(dirname string) bool {
f, err := os.Open((dirname))
if err != nil {
if !os.IsNotExist(err) {
errorIf(err, "Unable to access directory")
logger.LogIf(context.Background(), err)
}
return false
@ -85,7 +87,7 @@ func isDirEmpty(dirname string) bool {
_, err = f.Readdirnames(1)
if err != io.EOF {
if !os.IsNotExist(err) {
errorIf(err, "Unable to list directory")
logger.LogIf(context.Background(), err)
}
return false
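Readdirnames(1) is the cheap emptiness test here: it returns io.EOF immediately for an empty directory instead of listing everything. The same idiom as a standalone, hypothetical helper:

func isEmptyDir(dirname string) (bool, error) {
	f, err := os.Open(dirname)
	if err != nil {
		return false, err
	}
	defer f.Close()
	_, err = f.Readdirnames(1) // io.EOF means no entries at all
	if err == io.EOF {
		return true, nil
	}
	return false, err
}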

View file

@ -17,11 +17,13 @@
package cmd
import (
"context"
"fmt"
"os"
"time"
"github.com/minio/mc/pkg/console"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors"
)
@ -29,19 +31,21 @@ var printEndpointError = func() func(Endpoint, error) {
printOnce := make(map[Endpoint]map[string]bool)
return func(endpoint Endpoint, err error) {
reqInfo := (&logger.ReqInfo{}).AppendTags("endpoint", endpoint.Host)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
m, ok := printOnce[endpoint]
if !ok {
m = make(map[string]bool)
m[err.Error()] = true
printOnce[endpoint] = m
errorIf(err, "%s: %s", endpoint, err)
logger.LogIf(ctx, err)
return
}
if m[err.Error()] {
return
}
m[err.Error()] = true
errorIf(err, "%s: %s", endpoint, err)
logger.LogIf(ctx, err)
}
}()
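printEndpointError memoizes on (endpoint, error string) so each distinct failure is logged only once per endpoint. The same dedupe idiom in a generic, hypothetical form:

func logOnce(log func(string)) func(key, msg string) {
	seen := make(map[string]map[string]bool)
	return func(key, msg string) {
		m, ok := seen[key]
		if !ok {
			m = make(map[string]bool)
			seen[key] = m
		}
		if m[msg] {
			return // already reported this (key, msg) pair
		}
		m[msg] = true
		log(key + ": " + msg)
	}
}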
@ -147,7 +151,7 @@ func connectLoadInitFormats(firstDisk bool, endpoints EndpointList, setCount, dr
if !firstDisk {
return nil, errNotFirstDisk
}
return initFormatXL(storageDisks, setCount, drivesPerSet)
return initFormatXL(context.Background(), storageDisks, setCount, drivesPerSet)
}
// The following function is added to fix a regression which was introduced
@ -178,7 +182,7 @@ func connectLoadInitFormats(firstDisk bool, endpoints EndpointList, setCount, dr
}
// Format disks before initialization of object layer.
func waitForFormatXL(firstDisk bool, endpoints EndpointList, setCount, disksPerSet int) (format *formatXLV3, err error) {
func waitForFormatXL(ctx context.Context, firstDisk bool, endpoints EndpointList, setCount, disksPerSet int) (format *formatXLV3, err error) {
if len(endpoints) == 0 || setCount == 0 || disksPerSet == 0 {
return nil, errInvalidArgument
}

View file

@ -17,11 +17,13 @@
package cmd
import (
"context"
"io"
"net/http"
"net/rpc"
miniohttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
)
// ServeHTTP implements an http.Handler that answers RPC requests,
@ -34,7 +36,9 @@ func (server *rpcServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
conn, _, err := w.(http.Hijacker).Hijack()
if err != nil {
errorIf(err, "rpc hijacking failed for: %s", req.RemoteAddr)
reqInfo := (&logger.ReqInfo{}).AppendTags("remoteaddr", req.RemoteAddr)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return
}

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"net/http"
"os"
"os/signal"
@ -26,7 +27,7 @@ import (
"github.com/minio/cli"
"github.com/minio/dsync"
miniohttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
var serverFlags = []cli.Flag{
@ -128,17 +129,17 @@ func serverHandleCmdArgs(ctx *cli.Context) {
// Server address.
serverAddr := ctx.String("address")
fatalIf(CheckLocalServerAddr(serverAddr), "Invalid address %s in command line argument.", serverAddr)
logger.FatalIf(CheckLocalServerAddr(serverAddr), "Invalid address %s in command line argument.", serverAddr)
var setupType SetupType
var err error
if len(ctx.Args()) > serverCommandLineArgsMax {
fatalIf(errInvalidArgument, "Invalid total number of arguments (%d) passed, supported upto 32 unique arguments", len(ctx.Args()))
logger.FatalIf(errInvalidArgument, "Invalid total number of arguments (%d) passed, supported upto 32 unique arguments", len(ctx.Args()))
}
globalMinioAddr, globalEndpoints, setupType, globalXLSetCount, globalXLSetDriveCount, err = createServerEndpoints(serverAddr, ctx.Args()...)
fatalIf(err, "Invalid command line arguments server=%s, args=%s", serverAddr, ctx.Args())
logger.FatalIf(err, "Invalid command line arguments server=%s, args=%s", serverAddr, ctx.Args())
globalMinioHost, globalMinioPort = mustSplitHostPort(globalMinioAddr)
if runtime.GOOS == "darwin" {
@ -146,7 +147,7 @@ func serverHandleCmdArgs(ctx *cli.Context) {
// to the IPv6 address, i.e. minio will start listening on the IPv6 address whereas another
// (non-)minio process is listening on the IPv4 address of the given port.
// To avoid this error situation we check for port availability only for macOS.
fatalIf(checkPortAvailability(globalMinioPort), "Port %d already in use", globalMinioPort)
logger.FatalIf(checkPortAvailability(globalMinioPort), "Port %d already in use", globalMinioPort)
}
globalIsXL = (setupType == XLSetupType)
@ -168,6 +169,10 @@ func serverHandleEnvVars() {
}
func init() {
logger.Init(GOPATH)
}
// serverMain handler called for 'minio server' command.
func serverMain(ctx *cli.Context) {
if (!ctx.IsSet("sets") && !ctx.Args().Present()) || ctx.Args().First() == "help" {
@ -178,13 +183,13 @@ func serverMain(ctx *cli.Context) {
// Enable JSON and quiet modes if the json flag is turned on.
jsonFlag := ctx.IsSet("json") || ctx.GlobalIsSet("json")
if jsonFlag {
log.EnableJSON()
logger.EnableJSON()
}
// Get quiet flag from command line argument.
quietFlag := ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
if quietFlag {
log.EnableQuiet()
logger.EnableQuiet()
}
// Handle all server command args.
@ -194,22 +199,19 @@ func serverMain(ctx *cli.Context) {
serverHandleEnvVars()
// Create certs path.
fatalIf(createConfigDir(), "Unable to create configuration directories.")
logger.FatalIf(createConfigDir(), "Unable to create configuration directories.")
// Initialize server config.
initConfig()
// Init the error tracing module.
errors.Init(GOPATH, "github.com/minio/minio")
// Check and load SSL certificates.
var err error
globalPublicCerts, globalRootCAs, globalTLSCertificate, globalIsSSL, err = getSSLConfig()
fatalIf(err, "Invalid SSL certificate file")
logger.FatalIf(err, "Invalid SSL certificate file")
// Is distributed setup, error out if no certificates are found for HTTPS endpoints.
if globalIsDistXL && globalEndpoints.IsHTTPS() && !globalIsSSL {
fatalIf(errInvalidArgument, "No certificates found for HTTPS endpoints (%s)", globalEndpoints)
logger.FatalIf(errInvalidArgument, "No certificates found for HTTPS endpoints (%s)", globalEndpoints)
}
if !quietFlag {
@ -224,12 +226,12 @@ func serverMain(ctx *cli.Context) {
}
// Set system resources to maximum.
errorIf(setMaxResources(), "Unable to change resource limit")
logger.LogIf(context.Background(), setMaxResources())
// Set nodes for dsync for distributed setup.
if globalIsDistXL {
globalDsync, err = dsync.New(newDsyncNodes(globalEndpoints))
fatalIf(err, "Unable to initialize distributed locking on %s", globalEndpoints)
logger.FatalIf(err, "Unable to initialize distributed locking on %s", globalEndpoints)
}
// Initialize name space lock.
@ -241,11 +243,11 @@ func serverMain(ctx *cli.Context) {
// Configure server.
var handler http.Handler
handler, err = configureServerHandler(globalEndpoints)
fatalIf(err, "Unable to configure one of server's RPC services.")
logger.FatalIf(err, "Unable to configure one of server's RPC services.")
// Initialize notification system.
globalNotificationSys, err = NewNotificationSys(globalServerConfig, globalEndpoints)
fatalIf(err, "Unable to initialize notification system.")
logger.FatalIf(err, "Unable to initialize notification system.")
// Initialize Admin Peers inter-node communication only in distributed setup.
initGlobalAdminPeers(globalEndpoints)
@ -255,7 +257,6 @@ func serverMain(ctx *cli.Context) {
globalHTTPServer.WriteTimeout = globalConnWriteTimeout
globalHTTPServer.UpdateBytesReadFunc = globalConnStats.incInputBytes
globalHTTPServer.UpdateBytesWrittenFunc = globalConnStats.incOutputBytes
globalHTTPServer.ErrorLogFunc = errorIf
go func() {
globalHTTPServerErrorCh <- globalHTTPServer.Start()
}()
@ -264,9 +265,9 @@ func serverMain(ctx *cli.Context) {
newObject, err := newObjectLayer(globalEndpoints)
if err != nil {
errorIf(err, "Initializing object layer failed")
logger.LogIf(context.Background(), err)
err = globalHTTPServer.Shutdown()
errorIf(err, "Unable to shutdown http server")
logger.LogIf(context.Background(), err)
os.Exit(1)
}
@ -294,7 +295,7 @@ func newObjectLayer(endpoints EndpointList) (newObject ObjectLayer, err error) {
return NewFSObjectLayer(endpoints[0].Path)
}
format, err := waitForFormatXL(endpoints[0].IsLocal, endpoints, globalXLSetCount, globalXLSetDriveCount)
format, err := waitForFormatXL(context.Background(), endpoints[0].IsLocal, endpoints, globalXLSetCount, globalXLSetDriveCount)
if err != nil {
return nil, err
}

View file

@ -25,6 +25,7 @@ import (
"strings"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/logger"
)
// Documentation links; these are part of the message printing code.
@ -114,17 +115,17 @@ func printServerCommonMsg(apiEndpoints []string) {
apiEndpointStr := strings.Join(apiEndpoints, " ")
// Colorize the message and print.
log.Println(colorBlue("Endpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
log.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey)))
log.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey)))
logger.Println(colorBlue("Endpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
logger.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey)))
logger.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey)))
if region != "" {
log.Println(colorBlue("Region: ") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))
logger.Println(colorBlue("Region: ") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))
}
printEventNotifiers()
if globalIsBrowserEnabled {
log.Println(colorBlue("\nBrowser Access:"))
log.Println(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr))
logger.Println(colorBlue("\nBrowser Access:"))
logger.Println(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr))
}
}
@ -140,7 +141,7 @@ func printEventNotifiers() {
arnMsg += colorBold(fmt.Sprintf(getFormatStr(len(arn), 1), arn))
}
log.Println(arnMsg)
logger.Println(arnMsg)
}
// Prints startup message for command line access. Prints link to our documentation
@ -150,24 +151,24 @@ func printCLIAccessMsg(endPoint string, alias string) {
cred := globalServerConfig.GetCredential()
// Configure 'mc', following block prints platform specific information for minio client.
log.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
logger.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
if runtime.GOOS == globalWindowsOSName {
mcMessage := fmt.Sprintf("$ mc.exe config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey)
log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
logger.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
} else {
mcMessage := fmt.Sprintf("$ mc config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey)
log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
logger.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
}
}
// Prints startup message for Object API access, and a link to our SDK documentation.
func printObjectAPIMsg() {
log.Println(colorBlue("\nObject API (Amazon S3 compatible):"))
log.Println(colorBlue(" Go: ") + fmt.Sprintf(getFormatStr(len(goQuickStartGuide), 8), goQuickStartGuide))
log.Println(colorBlue(" Java: ") + fmt.Sprintf(getFormatStr(len(javaQuickStartGuide), 6), javaQuickStartGuide))
log.Println(colorBlue(" Python: ") + fmt.Sprintf(getFormatStr(len(pyQuickStartGuide), 4), pyQuickStartGuide))
log.Println(colorBlue(" JavaScript: ") + jsQuickStartGuide)
log.Println(colorBlue(" .NET: ") + fmt.Sprintf(getFormatStr(len(dotnetQuickStartGuide), 6), dotnetQuickStartGuide))
logger.Println(colorBlue("\nObject API (Amazon S3 compatible):"))
logger.Println(colorBlue(" Go: ") + fmt.Sprintf(getFormatStr(len(goQuickStartGuide), 8), goQuickStartGuide))
logger.Println(colorBlue(" Java: ") + fmt.Sprintf(getFormatStr(len(javaQuickStartGuide), 6), javaQuickStartGuide))
logger.Println(colorBlue(" Python: ") + fmt.Sprintf(getFormatStr(len(pyQuickStartGuide), 4), pyQuickStartGuide))
logger.Println(colorBlue(" JavaScript: ") + jsQuickStartGuide)
logger.Println(colorBlue(" .NET: ") + fmt.Sprintf(getFormatStr(len(dotnetQuickStartGuide), 6), dotnetQuickStartGuide))
}
// Get formatted disk/storage info message.
@ -184,15 +185,15 @@ func getStorageInfoMsg(storageInfo StorageInfo) string {
// Prints startup message of storage capacity and erasure information.
func printStorageInfo(storageInfo StorageInfo) {
log.Println(getStorageInfoMsg(storageInfo))
log.Println()
logger.Println(getStorageInfoMsg(storageInfo))
logger.Println()
}
func printCacheStorageInfo(storageInfo StorageInfo) {
msg := fmt.Sprintf("%s %s Free, %s Total", colorBlue("Cache Capacity:"),
humanize.IBytes(uint64(storageInfo.Free)),
humanize.IBytes(uint64(storageInfo.Total)))
log.Println(msg)
logger.Println(msg)
}
// Prints certificate expiry date warning
@ -215,5 +216,5 @@ func getCertificateChainMsg(certs []*x509.Certificate) string {
// Prints the certificate expiry message.
func printCertificateMsg(certs []*x509.Certificate) {
log.Println(getCertificateChainMsg(certs))
logger.Println(getCertificateChainMsg(certs))
}

View file

@ -18,6 +18,7 @@ package cmd
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/xml"
@ -2732,7 +2733,7 @@ func (s *TestSuiteCommon) TestObjectMultipart(c *check) {
part.ETag = canonicalizeETag(part.ETag)
parts = append(parts, part)
}
etag, err := getCompleteMultipartMD5(parts)
etag, err := getCompleteMultipartMD5(context.Background(), parts)
c.Assert(err, nil)
c.Assert(canonicalizeETag(response.Header.Get("Etag")), etag)
}

View file

@ -19,6 +19,8 @@ package cmd
import (
"context"
"os"
"github.com/minio/minio/cmd/logger"
)
func handleSignals() {
@ -44,11 +46,11 @@ func handleSignals() {
}
err = globalHTTPServer.Shutdown()
errorIf(err, "Unable to shutdown http server")
logger.LogIf(context.Background(), err)
if objAPI := newObjectLayerFn(); objAPI != nil {
oerr = objAPI.Shutdown(context.Background())
errorIf(oerr, "Unable to shutdown object layer")
logger.LogIf(context.Background(), oerr)
}
return (err == nil && oerr == nil)
@ -57,33 +59,32 @@ func handleSignals() {
for {
select {
case err := <-globalHTTPServerErrorCh:
errorIf(err, "http server exited abnormally")
logger.LogIf(context.Background(), err)
var oerr error
if objAPI := newObjectLayerFn(); objAPI != nil {
oerr = objAPI.Shutdown(context.Background())
errorIf(oerr, "Unable to shutdown object layer")
}
exit(err == nil && oerr == nil)
case osSignal := <-globalOSSignalCh:
stopHTTPTrace()
log.Printf("Exiting on signal %v\n", osSignal)
logger.Printf("Exiting on signal %v\n", osSignal)
exit(stopProcess())
case signal := <-globalServiceSignalCh:
switch signal {
case serviceStatus:
// Ignore this at the moment.
case serviceRestart:
log.Println("Restarting on service signal")
logger.Println("Restarting on service signal")
err := globalHTTPServer.Shutdown()
errorIf(err, "Unable to shutdown http server")
logger.LogIf(context.Background(), err)
stopHTTPTrace()
rerr := restartProcess()
errorIf(rerr, "Unable to restart the server")
logger.LogIf(context.Background(), rerr)
exit(err == nil && rerr == nil)
case serviceStop:
log.Println("Stopping on service signal")
logger.Println("Stopping on service signal")
stopHTTPTrace()
exit(stopProcess())
}

View file

@ -193,7 +193,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err)
}
parts1, errs1 := readAllXLMetadata(xlDisks, bucket, object1)
parts1, errs1 := readAllXLMetadata(context.Background(), xlDisks, bucket, object1)
// Object for test case 2 - No StorageClass defined, MetaData in PutObject requesting RRS Class
object2 := "object2"
@ -204,7 +204,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err)
}
parts2, errs2 := readAllXLMetadata(xlDisks, bucket, object2)
parts2, errs2 := readAllXLMetadata(context.Background(), xlDisks, bucket, object2)
// Object for test case 3 - No StorageClass defined, MetaData in PutObject requesting Standard Storage Class
object3 := "object3"
@ -215,7 +215,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err)
}
parts3, errs3 := readAllXLMetadata(xlDisks, bucket, object3)
parts3, errs3 := readAllXLMetadata(context.Background(), xlDisks, bucket, object3)
// Object for test case 4 - Standard StorageClass defined as Parity 6, MetaData in PutObject requesting Standard Storage Class
object4 := "object4"
@ -231,7 +231,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err)
}
parts4, errs4 := readAllXLMetadata(xlDisks, bucket, object4)
parts4, errs4 := readAllXLMetadata(context.Background(), xlDisks, bucket, object4)
// Object for test case 5 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting RRS Class
// Reset global storage class flags
@ -249,7 +249,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err)
}
parts5, errs5 := readAllXLMetadata(xlDisks, bucket, object5)
parts5, errs5 := readAllXLMetadata(context.Background(), xlDisks, bucket, object5)
// Object for test case 6 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting Standard Storage Class
// Reset global storage class flags
@ -267,7 +267,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err)
}
parts6, errs6 := readAllXLMetadata(xlDisks, bucket, object6)
parts6, errs6 := readAllXLMetadata(context.Background(), xlDisks, bucket, object6)
// Object for test case 7 - Standard StorageClass defined as Parity 5, MetaData in PutObject requesting RRS Class
// Reset global storage class flags
@ -285,7 +285,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err)
}
parts7, errs7 := readAllXLMetadata(xlDisks, bucket, object7)
parts7, errs7 := readAllXLMetadata(context.Background(), xlDisks, bucket, object7)
tests := []struct {
parts []xlMetaV1

View file

@ -17,13 +17,14 @@
package cmd
import (
"context"
"io"
"path"
"time"
router "github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/errors"
)
// Storage server implements rpc primitives to facilitate exporting a
@ -224,7 +225,8 @@ func registerStorageRPCRouters(mux *router.Router, endpoints EndpointList) error
// Initialize storage rpc servers for every disk that is hosted on this node.
storageRPCs, err := newStorageRPCServer(endpoints)
if err != nil {
return errors.Trace(err)
logger.LogIf(context.Background(), err)
return err
}
// Create a unique route for each disk exported from this node.
@ -232,7 +234,8 @@ func registerStorageRPCRouters(mux *router.Router, endpoints EndpointList) error
storageRPCServer := newRPCServer()
err = storageRPCServer.RegisterName("Storage", stServer)
if err != nil {
return errors.Trace(err)
logger.LogIf(context.Background(), err)
return err
}
// Add minio storage routes.
storageRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()
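
registerStorageRPCRouters shows the second recurring pattern in this commit: errors that used to be wrapped with errors.Trace for a later stack dump are now logged at the failure site with LogIf and returned unwrapped. A hedged sketch of the before/after, with logIf again standing in for logger.LogIf and doWork a hypothetical failing call:

package main

import (
    "context"
    "errors"
    "fmt"
)

// logIf is a toy stand-in for logger.LogIf.
func logIf(ctx context.Context, err error) {
    if err != nil {
        fmt.Println("logged at failure site:", err)
    }
}

func register() error {
    if err := doWork(); err != nil {
        // Old: return errors.Trace(err) -- wrap now, log later.
        // New: log here, where the context is known, return bare.
        logIf(context.Background(), err)
        return err
    }
    return nil
}

func doWork() error { return errors.New("rpc registration failed") }

func main() { fmt.Println(register()) }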

View file

@ -55,6 +55,7 @@ import (
router "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/bpool"
"github.com/minio/minio/pkg/hash"
@ -74,8 +75,7 @@ func init() {
// Set system resources to maximum.
setMaxResources()
log = NewLogger()
log.EnableQuiet()
logger.EnableQuiet()
}
// concurrency level for certain parallel tests.
@ -187,7 +187,7 @@ func prepareXL32() (ObjectLayer, []string, error) {
endpoints := append(endpoints1, endpoints2...)
fsDirs := append(fsDirs1, fsDirs2...)
format, err := waitForFormatXL(true, endpoints, 2, 16)
format, err := waitForFormatXL(context.Background(), true, endpoints, 2, 16)
if err != nil {
removeRoots(fsDirs)
return nil, nil, err
@ -1685,7 +1685,7 @@ func newTestObjectLayer(endpoints EndpointList) (newObject ObjectLayer, err erro
return NewFSObjectLayer(endpoints[0].Path)
}
_, err = waitForFormatXL(endpoints[0].IsLocal, endpoints, 1, 16)
_, err = waitForFormatXL(context.Background(), endpoints[0].IsLocal, endpoints, 1, 16)
if err != nil {
return nil, err
}
@ -2423,12 +2423,12 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) {
func mustGetNewEndpointList(args ...string) (endpoints EndpointList) {
if len(args) == 1 {
endpoint, err := NewEndpoint(args[0])
fatalIf(err, "unable to create new endpoint")
logger.FatalIf(err, "unable to create new endpoint")
endpoints = append(endpoints, endpoint)
} else {
var err error
endpoints, err = NewEndpointList(args...)
fatalIf(err, "unable to create new endpoint list")
logger.FatalIf(err, "unable to create new endpoint list")
}
return endpoints
}
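
Note that logger.FatalIf, unlike LogIf, keeps the caller-supplied message: the process is about to abort, so the message is printed immediately rather than deferred to request metadata. A toy version of that contract (not the real cmd/logger code):

package main

import (
    "errors"
    "log"
)

// fatalIf mirrors the logger.FatalIf calls above: nil errors are a
// no-op, anything else is logged with the message and the process exits.
func fatalIf(err error, msg string) {
    if err == nil {
        return
    }
    log.Fatalf("%s: %v", msg, err)
}

func main() {
    fatalIf(nil, "unable to create new endpoint") // nil error: no-op
    fatalIf(errors.New("invalid host"), "unable to create new endpoint list")
}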

View file

@ -17,10 +17,11 @@
package cmd
import (
"context"
"sort"
"strings"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/cmd/logger"
)
// Tree walk result carries results of tree walking.
@ -124,7 +125,7 @@ func filterListEntries(bucket, prefixDir string, entries []string, prefixEntry s
}
// treeWalk walks directory tree recursively pushing treeWalkResult into the channel as and when it encounters files.
func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bool, listDir listDirFunc, isLeaf isLeafFunc, resultCh chan treeWalkResult, endWalkCh chan struct{}, isEnd bool) error {
func doTreeWalk(ctx context.Context, bucket, prefixDir, entryPrefixMatch, marker string, recursive bool, listDir listDirFunc, isLeaf isLeafFunc, resultCh chan treeWalkResult, endWalkCh chan struct{}, isEnd bool) error {
// Example:
// if prefixDir="one/two/three/" and marker="four/five.txt" treeWalk is recursively
// called with prefixDir="one/two/three/four/" and marker="five.txt"
@ -143,7 +144,8 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
if err != nil {
select {
case <-endWalkCh:
return errors.Trace(errWalkAbort)
logger.LogIf(ctx, errWalkAbort)
return errWalkAbort
case resultCh <- treeWalkResult{err: err}:
return err
}
@ -196,7 +198,7 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
// markIsEnd is passed to this entry's treeWalk() so that treeWalker.end can be marked
// true at the end of the treeWalk stream.
markIsEnd := i == len(entries)-1 && isEnd
if tErr := doTreeWalk(bucket, pathJoin(prefixDir, entry), prefixMatch, markerArg, recursive, listDir, isLeaf, resultCh, endWalkCh, markIsEnd); tErr != nil {
if tErr := doTreeWalk(ctx, bucket, pathJoin(prefixDir, entry), prefixMatch, markerArg, recursive, listDir, isLeaf, resultCh, endWalkCh, markIsEnd); tErr != nil {
return tErr
}
continue
@ -205,7 +207,8 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
isEOF := ((i == len(entries)-1) && isEnd)
select {
case <-endWalkCh:
return errors.Trace(errWalkAbort)
logger.LogIf(ctx, errWalkAbort)
return errWalkAbort
case resultCh <- treeWalkResult{entry: pathJoin(prefixDir, entry), end: isEOF}:
}
}
@ -215,7 +218,7 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
}
// Initiate a new treeWalk in a goroutine.
func startTreeWalk(bucket, prefix, marker string, recursive bool, listDir listDirFunc, isLeaf isLeafFunc, endWalkCh chan struct{}) chan treeWalkResult {
func startTreeWalk(ctx context.Context, bucket, prefix, marker string, recursive bool, listDir listDirFunc, isLeaf isLeafFunc, endWalkCh chan struct{}) chan treeWalkResult {
// Example 1
// If prefix is "one/two/three/" and marker is "one/two/three/four/five.txt"
// treeWalk is called with prefixDir="one/two/three/" and marker="four/five.txt"
@ -237,7 +240,7 @@ func startTreeWalk(bucket, prefix, marker string, recursive bool, listDir listDi
marker = strings.TrimPrefix(marker, prefixDir)
go func() {
isEnd := true // Indication to start walking the tree with end as true.
doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker, recursive, listDir, isLeaf, resultCh, endWalkCh, isEnd)
doTreeWalk(ctx, bucket, prefixDir, entryPrefixMatch, marker, recursive, listDir, isLeaf, resultCh, endWalkCh, isEnd)
close(resultCh)
}()
return resultCh
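
doTreeWalk and startTreeWalk now thread a context.Context through every recursive call, so an aborted walk can be logged with the originating request's metadata. A self-contained sketch of the same plumbing over an in-memory tree; errWalkAbort is modelled here by ctx.Err(), and all names are illustrative:

package main

import (
    "context"
    "fmt"
)

// walk pushes entries onto out, recursing into directories, and checks
// the context at every step so a cancelled walk stops promptly.
func walk(ctx context.Context, dir string, tree map[string][]string, out chan<- string) error {
    for _, entry := range tree[dir] {
        select {
        case <-ctx.Done():
            return ctx.Err() // analogue of errWalkAbort
        case out <- dir + entry:
        }
        if _, ok := tree[dir+entry]; ok {
            if err := walk(ctx, dir+entry, tree, out); err != nil {
                return err
            }
        }
    }
    return nil
}

func main() {
    tree := map[string][]string{
        "":   {"a/", "b.txt"},
        "a/": {"c.txt"},
    }
    out := make(chan string)
    go func() { defer close(out); walk(context.Background(), "", tree, out) }()
    for e := range out {
        fmt.Println(e) // a/, a/c.txt, b.txt -- lexically ordered
    }
}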

View file

@ -17,6 +17,7 @@
package cmd
import (
"context"
"fmt"
"io/ioutil"
"os"
@ -131,7 +132,7 @@ func testTreeWalkPrefix(t *testing.T, listDir listDirFunc, isLeaf isLeafFunc) {
// Start the tree walk go-routine.
prefix := "d/"
endWalkCh := make(chan struct{})
twResultCh := startTreeWalk(volume, prefix, "", true, listDir, isLeaf, endWalkCh)
twResultCh := startTreeWalk(context.Background(), volume, prefix, "", true, listDir, isLeaf, endWalkCh)
// Check if all entries received on the channel match the prefix.
for res := range twResultCh {
@ -146,7 +147,7 @@ func testTreeWalkMarker(t *testing.T, listDir listDirFunc, isLeaf isLeafFunc) {
// Start the tree walk go-routine.
prefix := ""
endWalkCh := make(chan struct{})
twResultCh := startTreeWalk(volume, prefix, "d/g", true, listDir, isLeaf, endWalkCh)
twResultCh := startTreeWalk(context.Background(), volume, prefix, "d/g", true, listDir, isLeaf, endWalkCh)
// Check if only 3 entries, namely d/g/h, i/j/k, lmn are received on the channel.
expectedCount := 3
@ -186,7 +187,7 @@ func TestTreeWalk(t *testing.T) {
isLeaf := func(volume, prefix string) bool {
return !hasSuffix(prefix, slashSeparator)
}
listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk)
listDir := listDirFactory(context.Background(), isLeaf, xlTreeWalkIgnoredErrs, disk)
// Simple test for prefix based walk.
testTreeWalkPrefix(t, listDir, isLeaf)
// Simple test when marker is set.
@ -221,7 +222,7 @@ func TestTreeWalkTimeout(t *testing.T) {
isLeaf := func(volume, prefix string) bool {
return !hasSuffix(prefix, slashSeparator)
}
listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk)
listDir := listDirFactory(context.Background(), isLeaf, xlTreeWalkIgnoredErrs, disk)
// TreeWalk pool with 2 seconds timeout for tree-walk go routines.
pool := newTreeWalkPool(2 * time.Second)
@ -230,7 +231,7 @@ func TestTreeWalkTimeout(t *testing.T) {
prefix := ""
marker := ""
recursive := true
resultCh := startTreeWalk(volume, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
resultCh := startTreeWalk(context.Background(), volume, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
params := listParams{
bucket: volume,
@ -294,7 +295,7 @@ func TestListDir(t *testing.T) {
}
// create listDir function.
listDir := listDirFactory(func(volume, prefix string) bool {
listDir := listDirFactory(context.Background(), func(volume, prefix string) bool {
return !hasSuffix(prefix, slashSeparator)
}, xlTreeWalkIgnoredErrs, disk1, disk2)
@ -373,7 +374,7 @@ func TestRecursiveTreeWalk(t *testing.T) {
}
// Create listDir function.
listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk1)
listDir := listDirFactory(context.Background(), isLeaf, xlTreeWalkIgnoredErrs, disk1)
// Create the namespace.
var files = []string{
@ -447,7 +448,7 @@ func TestRecursiveTreeWalk(t *testing.T) {
}},
}
for i, testCase := range testCases {
for entry := range startTreeWalk(volume,
for entry := range startTreeWalk(context.Background(), volume,
testCase.prefix, testCase.marker, testCase.recursive,
listDir, isLeaf, endWalkCh) {
if _, found := testCase.expected[entry.entry]; !found {
@ -479,7 +480,7 @@ func TestSortedness(t *testing.T) {
return !hasSuffix(prefix, slashSeparator)
}
// Create listDir function.
listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk1)
listDir := listDirFactory(context.Background(), isLeaf, xlTreeWalkIgnoredErrs, disk1)
// Create the namespace.
var files = []string{
@ -519,7 +520,7 @@ func TestSortedness(t *testing.T) {
}
for i, test := range testCases {
var actualEntries []string
for entry := range startTreeWalk(volume,
for entry := range startTreeWalk(context.Background(), volume,
test.prefix, test.marker, test.recursive,
listDir, isLeaf, endWalkCh) {
actualEntries = append(actualEntries, entry.entry)
@ -553,7 +554,7 @@ func TestTreeWalkIsEnd(t *testing.T) {
return !hasSuffix(prefix, slashSeparator)
}
// Create listDir function.
listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk1)
listDir := listDirFactory(context.Background(), isLeaf, xlTreeWalkIgnoredErrs, disk1)
// Create the namespace.
var files = []string{
@ -594,7 +595,7 @@ func TestTreeWalkIsEnd(t *testing.T) {
}
for i, test := range testCases {
var entry treeWalkResult
for entry = range startTreeWalk(volume, test.prefix, test.marker, test.recursive, listDir, isLeaf, endWalkCh) {
for entry = range startTreeWalk(context.Background(), volume, test.prefix, test.marker, test.recursive, listDir, isLeaf, endWalkCh) {
}
if entry.entry != test.expectedEntry {
t.Errorf("Test %d: Expected entry %s, but received %s with the EOF marker", i, test.expectedEntry, entry.entry)

View file

@ -18,6 +18,7 @@ package cmd
import (
"bufio"
"context"
"crypto"
"encoding/hex"
"fmt"
@ -32,6 +33,7 @@ import (
"github.com/fatih/color"
"github.com/inconshreveable/go-update"
"github.com/minio/cli"
"github.com/minio/minio/cmd/logger"
_ "github.com/minio/sha256-simd" // Needed for sha256 hash verifier.
"github.com/segmentio/go-prompt"
)
@ -154,7 +156,7 @@ func IsDocker() bool {
}
// Log error, as we will not propagate it to caller
errorIf(err, "Error in docker check.")
logger.LogIf(context.Background(), err)
return err == nil
}
@ -184,7 +186,7 @@ func IsBOSH() bool {
}
// Log error, as we will not propagate it to caller
errorIf(err, "Error in BOSH check.")
logger.LogIf(context.Background(), err)
return err == nil
}
@ -199,7 +201,9 @@ func getHelmVersion(helmInfoFilePath string) string {
// Log errors and return "" as Minio can be deployed
// without Helm charts as well.
if !os.IsNotExist(err) {
errorIf(err, "Unable to read %s", helmInfoFilePath)
reqInfo := (&logger.ReqInfo{}).AppendTags("helmInfoFilePath", helmInfoFilePath)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
}
return ""
}
@ -491,33 +495,33 @@ func mainUpdate(ctx *cli.Context) {
quiet := ctx.Bool("quiet") || ctx.GlobalBool("quiet")
if quiet {
log.EnableQuiet()
logger.EnableQuiet()
}
minioMode := ""
updateMsg, sha256Hex, _, latestReleaseTime, err := getUpdateInfo(10*time.Second, minioMode)
if err != nil {
log.Println(err)
logger.Println(err)
os.Exit(-1)
}
// Nothing to update running the latest release.
if updateMsg == "" {
log.Println(greenColorSprintf("You are already running the most recent version of minio."))
logger.Println(greenColorSprintf("You are already running the most recent version of minio."))
os.Exit(0)
}
log.Println(updateMsg)
logger.Println(updateMsg)
// if the in-place update is disabled then we shouldn't ask the
// user to update the binaries.
if strings.Contains(updateMsg, minioReleaseURL) && !globalInplaceUpdateDisabled {
var successMsg string
successMsg, err = doUpdate(sha256Hex, latestReleaseTime, shouldUpdate(quiet, sha256Hex, latestReleaseTime))
if err != nil {
log.Println(err)
logger.Println(err)
os.Exit(-1)
}
log.Println(successMsg)
logger.Println(successMsg)
os.Exit(1)
}
}
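
mainUpdate's switch from log.Println to logger.Println keeps the same quiet-mode behaviour in the new package. A sketch of the gating this implies; the mechanics below are a guess, since the logger internals are not part of this hunk:

package main

import "fmt"

// quiet mirrors the behaviour implied by logger.EnableQuiet and
// logger.Println: progress messages are suppressed once quiet mode is
// on, while error and fatal logging stay unaffected.
var quiet bool

func enableQuiet() { quiet = true }

func logPrintln(args ...interface{}) {
    if quiet {
        return
    }
    fmt.Println(args...)
}

func main() {
    logPrintln("You are already running the most recent version of minio.")
    enableQuiet()
    logPrintln("suppressed once quiet mode is enabled")
}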

View file

@ -44,7 +44,9 @@ import (
// Close Http tracing file.
func stopHTTPTrace() {
if globalHTTPTraceFile != nil {
errorIf(globalHTTPTraceFile.Close(), "Unable to close httpTraceFile %s", globalHTTPTraceFile.Name())
reqInfo := (&logger.ReqInfo{}).AppendTags("traceFile", globalHTTPTraceFile.Name())
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, globalHTTPTraceFile.Close())
globalHTTPTraceFile = nil
}
}
@ -331,8 +333,8 @@ func newContext(r *http.Request, api string) context.Context {
if prefix != "" {
object = prefix
}
return logger.SetContext(context.Background(), &logger.ReqInfo{r.RemoteAddr, r.Header.Get("user-agent"), "", api, bucket, object, nil})
reqInfo := &logger.ReqInfo{RemoteHost: r.RemoteAddr, UserAgent: r.Header.Get("user-agent"), API: api, BucketName: bucket, ObjectName: object}
return logger.SetReqInfo(context.Background(), reqInfo)
}
// isNetworkOrHostDown - if there was a network error or if the host is down.
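
stopHTTPTrace and newContext above illustrate how call sites that used to interpolate details into the errorIf message now attach them as tags on a logger.ReqInfo and store that in the context. The sketch below mirrors the shapes visible in this diff (ReqInfo fields, AppendTags, SetReqInfo); the tags field and context key are assumptions:

package main

import (
    "context"
    "fmt"
)

// KeyVal and ReqInfo approximate the new logger package's types:
// free-form tags are appended to a per-request struct, which rides
// along in the context instead of being formatted into a log message.
type KeyVal struct{ Key, Val string }

type ReqInfo struct {
    RemoteHost string
    API        string
    tags       []KeyVal
}

func (r *ReqInfo) AppendTags(key, val string) *ReqInfo {
    r.tags = append(r.tags, KeyVal{key, val})
    return r
}

type ctxKey int

const reqInfoKey ctxKey = 0

func SetReqInfo(ctx context.Context, r *ReqInfo) context.Context {
    return context.WithValue(ctx, reqInfoKey, r)
}

func main() {
    reqInfo := (&ReqInfo{RemoteHost: "10.0.0.1", API: "WebLogin"}).AppendTags("remoteAddr", "10.0.0.1:5432")
    ctx := SetReqInfo(context.Background(), reqInfo)
    fmt.Printf("%+v\n", ctx.Value(reqInfoKey))
}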

View file

@ -35,6 +35,7 @@ import (
"github.com/gorilla/rpc/v2/json2"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/browser"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/event"
@ -384,7 +385,9 @@ func (web *webAPIHandlers) Login(r *http.Request, args *LoginArgs, reply *LoginR
if err != nil {
// Make sure to log errors related to browser login,
// for security and auditing reasons.
errorIf(err, "Unable to login request from %s", r.RemoteAddr)
reqInfo := (&logger.ReqInfo{}).AppendTags("remoteAddr", r.RemoteAddr)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return toJSONError(err)
}
@ -463,7 +466,7 @@ func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *Se
reply.PeerErrMsgs = make(map[string]string)
for svr, errVal := range errsMap {
tErr := fmt.Errorf("Unable to change credentials on %s: %v", svr, errVal)
errorIf(tErr, "Credentials change could not be propagated successfully!")
logger.LogIf(context.Background(), tErr)
reply.PeerErrMsgs[svr] = errVal.Error()
}
@ -571,7 +574,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
}
// Extract incoming metadata if any.
metadata, err := extractMetadataFromHeader(r.Header)
metadata, err := extractMetadataFromHeader(context.Background(), r.Header)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
return
@ -1095,7 +1098,7 @@ func toWebAPIError(err error) APIError {
}
// Log unexpected and unhandled errors.
errorIf(err, errUnexpected.Error())
logger.LogIf(context.Background(), err)
return APIError{
Code: "InternalError",
HTTPStatusCode: http.StatusInternalServerError,

View file

@ -1286,7 +1286,7 @@ func testWebGetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE
},
},
}
if err := writeBucketPolicy(bucketName, obj, policyVal); err != nil {
if err := writeBucketPolicy(context.Background(), bucketName, obj, policyVal); err != nil {
t.Fatal("Unexpected error: ", err)
}
@ -1380,7 +1380,7 @@ func testWebListAllBucketPoliciesHandler(obj ObjectLayer, instanceType string, t
},
},
}
if err := writeBucketPolicy(bucketName, obj, policyVal); err != nil {
if err := writeBucketPolicy(context.Background(), bucketName, obj, policyVal); err != nil {
t.Fatal("Unexpected error: ", err)
}

View file

@ -28,6 +28,7 @@ import (
"time"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bpool"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
@ -214,7 +215,7 @@ func newXLSets(endpoints EndpointList, format *formatXLV3, setCount int, drivesP
nsMutex: mutex,
bp: bpool.NewBytePoolCap(setCount*drivesPerSet, blockSizeV1, blockSizeV1*2),
}
go s.sets[i].cleanupStaleMultipartUploads(globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
go s.sets[i].cleanupStaleMultipartUploads(context.Background(), globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
}
// Connect disks right away.
@ -431,7 +432,7 @@ func (s *xlSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuation
// SetBucketPolicy persist the new policy on the bucket.
func (s *xlSets) SetBucketPolicy(ctx context.Context, bucket string, policy policy.BucketAccessPolicy) error {
return persistAndNotifyBucketPolicyChange(bucket, false, policy, s)
return persistAndNotifyBucketPolicyChange(ctx, bucket, false, policy, s)
}
// GetBucketPolicy will return a policy on a bucket
@ -446,7 +447,7 @@ func (s *xlSets) GetBucketPolicy(ctx context.Context, bucket string) (policy.Buc
// DeleteBucketPolicy deletes all policies on bucket
func (s *xlSets) DeleteBucketPolicy(ctx context.Context, bucket string) error {
return persistAndNotifyBucketPolicyChange(bucket, true, emptyBucketPolicy, s)
return persistAndNotifyBucketPolicyChange(ctx, bucket, true, emptyBucketPolicy, s)
}
// RefreshBucketPolicy refreshes policy cache from disk
@ -498,7 +499,7 @@ func (s *xlSets) DeleteBucket(ctx context.Context, bucket string) error {
}
// Delete all bucket metadata.
deleteBucketMetadata(bucket, s)
deleteBucketMetadata(ctx, bucket, s)
// Success.
return nil
@ -585,26 +586,26 @@ func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucke
}
go func() {
if gerr := srcSet.getObject(srcBucket, srcObject, 0, srcInfo.Size, srcInfo.Writer, srcInfo.ETag); gerr != nil {
if gerr := srcSet.getObject(ctx, srcBucket, srcObject, 0, srcInfo.Size, srcInfo.Writer, srcInfo.ETag); gerr != nil {
if gerr = srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read the object %s/%s.", srcBucket, srcObject)
logger.LogIf(ctx, gerr)
}
return
}
// Close writer explicitly signalling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read the object %s/%s.", srcBucket, srcObject)
logger.LogIf(ctx, gerr)
return
}
}()
return destSet.putObject(destBucket, destObject, srcInfo.Reader, srcInfo.UserDefined)
return destSet.putObject(ctx, destBucket, destObject, srcInfo.Reader, srcInfo.UserDefined)
}
// Returns function "listDir" of the type listDirFunc.
// isLeaf - is used by listDir function to check if an entry is a leaf or non-leaf entry.
// disks - used for doing disk.ListDir(). Sets passes set of disks.
func listDirSetsFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, sets ...[]StorageAPI) listDirFunc {
func listDirSetsFactory(ctx context.Context, isLeaf isLeafFunc, treeWalkIgnoredErrs []error, sets ...[]StorageAPI) listDirFunc {
listDirInternal := func(bucket, prefixDir, prefixEntry string, disks []StorageAPI) (mergedEntries []string, err error) {
for _, disk := range disks {
if disk == nil {
@ -620,7 +621,8 @@ func listDirSetsFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, sets ...
if errors.IsErrIgnored(err, treeWalkIgnoredErrs...) {
continue
}
return nil, errors.Trace(err)
logger.LogIf(ctx, err)
return nil, err
}
// Find elements in entries which are not in mergedEntries
@ -679,7 +681,7 @@ func listDirSetsFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, sets ...
// value through the walk channel receives the data properly lexically sorted.
func (s *xlSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
// validate all the inputs for listObjects
if err = checkListObjsArgs(bucket, prefix, marker, delimiter, s); err != nil {
if err = checkListObjsArgs(ctx, bucket, prefix, marker, delimiter, s); err != nil {
return result, err
}
@ -707,8 +709,8 @@ func (s *xlSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimi
setDisks = append(setDisks, set.getLoadBalancedDisks())
}
listDir := listDirSetsFactory(isLeaf, xlTreeWalkIgnoredErrs, setDisks...)
walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
listDir := listDirSetsFactory(ctx, isLeaf, xlTreeWalkIgnoredErrs, setDisks...)
walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
}
for i := 0; i < maxKeys; {
@ -726,9 +728,9 @@ func (s *xlSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimi
var objInfo ObjectInfo
var err error
if hasSuffix(walkResult.entry, slashSeparator) {
objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfoDir(bucket, walkResult.entry)
objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfoDir(ctx, bucket, walkResult.entry)
} else {
objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfo(bucket, walkResult.entry)
objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfo(ctx, bucket, walkResult.entry)
}
if err != nil {
// Ignore errFileNotFound as the object might have got
@ -787,12 +789,12 @@ func (s *xlSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destB
go func() {
if gerr := srcSet.GetObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
if gerr = srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read %s of the object `%s/%s`.", srcBucket, srcObject)
logger.LogIf(ctx, gerr)
return
}
}
if gerr := srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read %s of the object `%s/%s`.", srcBucket, srcObject)
logger.LogIf(ctx, gerr)
return
}
}()
@ -1034,7 +1036,7 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResult
}
// Save formats `format.json` across all disks.
if err = saveFormatXLAll(storageDisks, tmpNewFormats); err != nil {
if err = saveFormatXLAll(ctx, storageDisks, tmpNewFormats); err != nil {
return madmin.HealResultItem{}, err
}
@ -1228,7 +1230,7 @@ func listDirSetsHealFactory(isLeaf isLeafFunc, sets ...[]StorageAPI) listDirFunc
}
// listObjectsHeal - wrapper function implemented over file tree walk.
func (s *xlSets) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
func (s *xlSets) listObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
// Default is recursive, if delimiter is set then list non recursive.
recursive := true
if delimiter == slashSeparator {
@ -1252,7 +1254,7 @@ func (s *xlSets) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKe
}
listDir := listDirSetsHealFactory(isLeaf, setDisks...)
walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, nil, endWalkCh)
walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, nil, endWalkCh)
}
var objInfos []ObjectInfo
@ -1272,9 +1274,9 @@ func (s *xlSets) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKe
var objInfo ObjectInfo
var err error
if hasSuffix(walkResult.entry, slashSeparator) {
objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfoDir(bucket, walkResult.entry)
objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfoDir(ctx, bucket, walkResult.entry)
} else {
objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfo(bucket, walkResult.entry)
objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfo(ctx, bucket, walkResult.entry)
}
if err != nil {
// Ignore errFileNotFound
@ -1320,7 +1322,7 @@ func (s *xlSets) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKe
// This is not implemented yet, will be implemented later to comply with Admin API refactor.
func (s *xlSets) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
if err = checkListObjsArgs(bucket, prefix, marker, delimiter, s); err != nil {
if err = checkListObjsArgs(ctx, bucket, prefix, marker, delimiter, s); err != nil {
return loi, err
}
@ -1343,7 +1345,7 @@ func (s *xlSets) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, de
}
// Initiate a list operation, if successful filter and return quickly.
listObjInfo, err := s.listObjectsHeal(bucket, prefix, marker, delimiter, maxKeys)
listObjInfo, err := s.listObjectsHeal(ctx, bucket, prefix, marker, delimiter, maxKeys)
if err == nil {
// We got the entries successfully return.
return listObjInfo, nil
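
listDirSetsFactory now receives the caller's context once, and the returned closure reuses it, so listing errors found deep inside the merged walk are logged with the originating request's metadata. A sketch of that closure-capture pattern with hypothetical helpers:

package main

import (
    "context"
    "errors"
    "fmt"
)

type listDirFunc func(bucket, prefixDir string) ([]string, error)

func logIf(ctx context.Context, err error) {
    if err != nil {
        fmt.Println("logged:", err)
    }
}

// listDirFactory mirrors the change to listDirSetsFactory: the factory
// takes the context up front and the closure it returns logs with that
// same context on every later call.
func listDirFactory(ctx context.Context, entries map[string][]string) listDirFunc {
    return func(bucket, prefixDir string) ([]string, error) {
        out, ok := entries[bucket+"/"+prefixDir]
        if !ok {
            err := errors.New("disk listing failed")
            logIf(ctx, err)
            return nil, err
        }
        return out, nil
    }
}

func main() {
    listDir := listDirFactory(context.Background(), map[string][]string{
        "bucket/": {"a.txt", "b/"},
    })
    fmt.Println(listDir("bucket", ""))
    fmt.Println(listDir("bucket", "missing/"))
}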

Some files were not shown because too many files have changed in this diff.