Compare commits
62 Commits
master...RELEASE.20
Author | SHA1 | Date |
---|---|---|
Harshavardhana | a28fee5bc3 | |
Harshavardhana | cf8ff4c1bc | |
Harshavardhana | 7b935613e8 | |
Anis Elleuch | 74c18717be | |
Harshavardhana | 6a1633e9f5 | |
Harshavardhana | fe8d1dbb3e | |
Harshavardhana | db80965259 | |
Klaus Post | eaaed2aeba | |
Harshavardhana | 45abe8fb10 | |
Poorna Krishnamoorthy | c6faef68e3 | |
Harshavardhana | e6cb8dd24d | |
Harshavardhana | 1f4d15d4c9 | |
Harshavardhana | f2d986cf14 | |
Anis Elleuch | 61606fe0e5 | |
Harshavardhana | affba4bdba | |
Anis Elleuch | ac62e4314f | |
Harshavardhana | 65162d0874 | |
Harshavardhana | ccc12a9412 | |
Poorna Krishnamoorthy | 03ba78a5b6 | |
Poorna Krishnamoorthy | 7617d6963c | |
Harshavardhana | 23a37d0855 | |
Harshavardhana | 1c7b1bb25c | |
Ritesh H Shukla | f9acbd5acb | |
Poorna Krishnamoorthy | 67a1b2916d | |
Harshavardhana | 4091252409 | |
Anis Elleuch | 9e8051c22f | |
Poorna Krishnamoorthy | 7b89f172df | |
Harshavardhana | 36595eef92 | |
Harshavardhana | 984066446a | |
Klaus Post | dd9f256d7c | |
Klaus Post | 04ab99f886 | |
Harshavardhana | 6eb769bcbd | |
Harshavardhana | a81b8d0fcd | |
Harshavardhana | 5d4bffe530 | |
Harshavardhana | 06989f0e8f | |
Harshavardhana | 4c22e0ca6c | |
Harshavardhana | ab6fe3e90c | |
Klaus Post | f65f6b7282 | |
Klaus Post | 2d02131c85 | |
Poorna Krishnamoorthy | f0924b1248 | |
Harshavardhana | 4d94d68916 | |
Harshavardhana | 7616734014 | |
Harshavardhana | 581f5ad7b8 | |
Anis Elleuch | fb1b4b33b2 | |
Harshavardhana | 9dc27902ca | |
Anis Elleuch | b27417a6f7 | |
Harshavardhana | c175f9b0e9 | |
Harshavardhana | f8ebd94297 | |
Poorna Krishnamoorthy | bfab990c33 | |
Harshavardhana | 94018588fe | |
Anis Elleuch | 8b76ba8d5d | |
Harshavardhana | 7eb7f65e48 | |
Harshavardhana | c608c0688a | |
Aditya Manthramurthy | 41a9d1d778 | |
Klaus Post | e21e80841e | |
Klaus Post | 98c792bbeb | |
Klaus Post | f687ba53bc | |
Harshavardhana | e3da59c923 | |
Harshavardhana | 781b9b051c | |
Harshavardhana | 438becfde8 | |
Harshavardhana | 16ef338649 | |
Harshavardhana | 3242847ec0 | |
Makefile

@@ -47,7 +47,7 @@ test-race: verifiers build

 # Verify minio binary
 verify:
 	@echo "Verifying build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-build.sh)

 # Verify healing of disks with minio binary
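The Makefile change above switches the `verify` target to build with the Go race detector enabled (`-race`). As a quick illustration of what that flag buys, here is a minimal, self-contained program (not from the MinIO tree) whose data race an instrumented binary reports at runtime:

```go
package main

import (
	"fmt"
	"sync"
)

// Build with `go build -race` (as the verify target now does) and the
// race detector flags the unsynchronized writes to counter at runtime.
func main() {
	counter := 0
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			counter++ // racy write: no mutex or atomic
		}()
	}
	wg.Wait()
	fmt.Println("counter:", counter)
}
```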
@@ -50,7 +50,7 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
 	}

 	vars := mux.Vars(r)
-	bucket := vars["bucket"]
+	bucket := pathClean(vars["bucket"])

 	if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
 		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
@@ -90,7 +90,8 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *
 	}

 	vars := mux.Vars(r)
-	bucket := vars["bucket"]
+	bucket := pathClean(vars["bucket"])
+
 	if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
 		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
 		return
@@ -118,7 +119,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 	vars := mux.Vars(r)
-	bucket := vars["bucket"]
+	bucket := pathClean(vars["bucket"])
 	update := r.URL.Query().Get("update") == "true"

 	if !globalIsErasure {
@@ -163,16 +164,51 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
 	}

 	target.SourceBucket = bucket
-	if !update {
+	var ops []madmin.TargetUpdateType
+	if update {
+		ops = madmin.GetTargetUpdateOps(r.URL.Query())
+	} else {
 		target.Arn = globalBucketTargetSys.getRemoteARN(bucket, &target)
 	}
 	if target.Arn == "" {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
 		return
 	}
+	if update {
+		// overlay the updates on existing target
+		tgt := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, target.Arn)
+		if tgt.Empty() {
+			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrRemoteTargetNotFoundError, err), r.URL)
+			return
+		}
+		for _, op := range ops {
+			switch op {
+			case madmin.CredentialsUpdateType:
+				tgt.Credentials = target.Credentials
+				tgt.TargetBucket = target.TargetBucket
+				tgt.Secure = target.Secure
+				tgt.Endpoint = target.Endpoint
+			case madmin.SyncUpdateType:
+				tgt.ReplicationSync = target.ReplicationSync
+			case madmin.ProxyUpdateType:
+				tgt.DisableProxy = target.DisableProxy
+			case madmin.PathUpdateType:
+				tgt.Path = target.Path
+			case madmin.BandwidthLimitUpdateType:
+				tgt.BandwidthLimit = target.BandwidthLimit
+			case madmin.HealthCheckDurationUpdateType:
+				tgt.HealthCheckDuration = target.HealthCheckDuration
+			}
+		}
+		target = tgt
+	}
 	if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
-		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
+		switch err.(type) {
+		case BucketRemoteConnectionErr:
+			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
+		default:
+			writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
+		}
 		return
 	}
 	targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
@@ -206,7 +242,7 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 	vars := mux.Vars(r)
-	bucket := vars["bucket"]
+	bucket := pathClean(vars["bucket"])
 	arnType := vars["type"]
 	if !globalIsErasure {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
@@ -245,7 +281,7 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 	vars := mux.Vars(r)
-	bucket := vars["bucket"]
+	bucket := pathClean(vars["bucket"])
 	arn := vars["arn"]

 	if !globalIsErasure {
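A recurring change in these handlers is wrapping route variables in `pathClean` before use. MinIO's `pathClean` is internal to the codebase; the sketch below is a plausible equivalent built on the standard library's `path.Clean`, shown only to illustrate why normalizing the `bucket` variable matters before it reaches `GetBucketInfo`:

```go
package main

import (
	"fmt"
	"path"
)

// pathCleanSketch approximates the internal pathClean helper referenced
// above (assumption: it normalizes the path and maps "." to "").
func pathCleanSketch(p string) string {
	cp := path.Clean(p)
	if cp == "." {
		return ""
	}
	return cp
}

func main() {
	fmt.Println(pathCleanSketch("bucket/"))  // "bucket" - trailing slash dropped
	fmt.Println(pathCleanSketch("a/b/../c")) // "a/c"    - dot-dot segments resolved
	fmt.Println(pathCleanSketch(""))         // ""       - empty stays empty
}
```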
@@ -23,7 +23,6 @@ import (
 	"io"
 	"io/ioutil"
 	"net/http"
-	"path"
 	"sort"

 	"github.com/gorilla/mux"
@@ -363,7 +362,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

 	vars := mux.Vars(r)
-	accessKey := path.Clean(vars["accessKey"])
+	accessKey := vars["accessKey"]

 	// Get current object layer instance.
 	objectAPI := newObjectLayerFn()
@@ -471,18 +470,12 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
 		return
 	}

-	cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
+	cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
 	if s3Err != ErrNone {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
 		return
 	}

-	// Disallow creating service accounts by root user.
-	if owner {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
-		return
-	}
-
 	password := cred.SecretKey
 	reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
 	if err != nil {
@@ -496,12 +489,56 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
 		return
 	}

-	parentUser := cred.AccessKey
-	if cred.ParentUser != "" {
-		parentUser = cred.ParentUser
+	// Disallow creating service accounts by root user.
+	if createReq.TargetUser == globalActiveCred.AccessKey {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
+		return
 	}

-	newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, cred.Groups, createReq.Policy)
+	var (
+		targetUser   string
+		targetGroups []string
+	)
+
+	targetUser = createReq.TargetUser
+
+	// Need permission if we are creating a service acccount
+	// for a user <> to the request sender
+	if targetUser != "" && targetUser != cred.AccessKey {
+		if !globalIAMSys.IsAllowed(iampolicy.Args{
+			AccountName:     cred.AccessKey,
+			Action:          iampolicy.CreateServiceAccountAdminAction,
+			ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
+			IsOwner:         owner,
+			Claims:          claims,
+		}) {
+			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
+			return
+		}
+	}
+
+	if globalLDAPConfig.Enabled && targetUser != "" {
+		// If LDAP enabled, service accounts need
+		// to be created only for LDAP users.
+		var err error
+		targetUser, targetGroups, err = globalLDAPConfig.LookupUserDN(targetUser)
+		if err != nil {
+			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+			return
+		}
+		// targerUser is set to bindDN at this point in time.
+	} else {
+		if targetUser == "" {
+			targetUser = cred.AccessKey
+		}
+		if cred.ParentUser != "" {
+			targetUser = cred.ParentUser
+		}
+		targetGroups = cred.Groups
+	}
+
+	opts := newServiceAccountOpts{sessionPolicy: createReq.Policy, accessKey: createReq.AccessKey, secretKey: createReq.SecretKey}
+	newCred, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
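The non-LDAP branch above resolves which user the new service account hangs off: an explicit `TargetUser` wins, otherwise the caller's access key, and a non-empty `ParentUser` overrides both (so temporary STS credentials resolve back to the real parent). A minimal distillation of that selection order, with plain strings standing in for MinIO's credential type:

```go
package main

import "fmt"

// resolveTargetUser mirrors the fallback order in the hunk above:
// explicit target, else the caller's access key; a parent user overrides.
func resolveTargetUser(reqTarget, accessKey, parentUser string) string {
	target := reqTarget
	if target == "" {
		target = accessKey
	}
	if parentUser != "" {
		target = parentUser
	}
	return target
}

func main() {
	fmt.Println(resolveTargetUser("", "AKIA123", ""))       // AKIA123: self-service account
	fmt.Println(resolveTargetUser("", "STS456", "AKIA123")) // AKIA123: temp creds resolve to parent
}
```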
@@ -537,6 +574,179 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
 	writeSuccessResponseJSON(w, encryptedData)
 }

+// UpdateServiceAccount - POST /minio/admin/v3/update-service-account
+func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "UpdateServiceAccount")
+
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
+
+	// Get current object layer instance.
+	objectAPI := newObjectLayerFn()
+	if objectAPI == nil || globalNotificationSys == nil {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
+		return
+	}
+
+	cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
+	if s3Err != ErrNone {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
+		return
+	}
+
+	accessKey := mux.Vars(r)["accessKey"]
+	if accessKey == "" {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
+		return
+	}
+
+	svcAccount, _, err := globalIAMSys.GetServiceAccount(ctx, accessKey)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	if !globalIAMSys.IsAllowed(iampolicy.Args{
+		AccountName:     cred.AccessKey,
+		Action:          iampolicy.UpdateServiceAccountAdminAction,
+		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
+		IsOwner:         owner,
+		Claims:          claims,
+	}) {
+		requestUser := cred.AccessKey
+		if cred.ParentUser != "" {
+			requestUser = cred.ParentUser
+		}
+
+		if requestUser != svcAccount.ParentUser {
+			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
+			return
+		}
+	}
+
+	password := cred.SecretKey
+	reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
+		return
+	}
+
+	var updateReq madmin.UpdateServiceAccountReq
+	if err = json.Unmarshal(reqBytes, &updateReq); err != nil {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
+		return
+	}
+
+	opts := updateServiceAccountOpts{sessionPolicy: updateReq.NewPolicy, secretKey: updateReq.NewSecretKey, status: updateReq.NewStatus}
+	err = globalIAMSys.UpdateServiceAccount(ctx, accessKey, opts)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	// Notify all other Minio peers to reload user the service account
+	for _, nerr := range globalNotificationSys.LoadServiceAccount(accessKey) {
+		if nerr.Err != nil {
+			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
+			logger.LogIf(ctx, nerr.Err)
+		}
+	}
+
+	writeSuccessNoContent(w)
+}
+
+// InfoServiceAccount - GET /minio/admin/v3/info-service-account
+func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "InfoServiceAccount")
+
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
+
+	// Get current object layer instance.
+	objectAPI := newObjectLayerFn()
+	if objectAPI == nil || globalNotificationSys == nil {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
+		return
+	}
+
+	cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
+	if s3Err != ErrNone {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
+		return
+	}
+
+	accessKey := mux.Vars(r)["accessKey"]
+	if accessKey == "" {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
+		return
+	}
+
+	svcAccount, policy, err := globalIAMSys.GetServiceAccount(ctx, accessKey)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	if !globalIAMSys.IsAllowed(iampolicy.Args{
+		AccountName:     cred.AccessKey,
+		Action:          iampolicy.ListServiceAccountsAdminAction,
+		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
+		IsOwner:         owner,
+		Claims:          claims,
+	}) {
+		requestUser := cred.AccessKey
+		if cred.ParentUser != "" {
+			requestUser = cred.ParentUser
+		}
+
+		if requestUser != svcAccount.ParentUser {
+			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
+			return
+		}
+	}
+
+	var svcAccountPolicy iampolicy.Policy
+
+	impliedPolicy := policy == nil
+
+	// If policy is empty, check for policy of the parent user
+	if !impliedPolicy {
+		svcAccountPolicy = svcAccountPolicy.Merge(*policy)
+	} else {
+		policiesNames, err := globalIAMSys.PolicyDBGet(svcAccount.ParentUser, false)
+		if err != nil {
+			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+			return
+		}
+		svcAccountPolicy = svcAccountPolicy.Merge(globalIAMSys.GetCombinedPolicy(policiesNames...))
+	}
+
+	policyJSON, err := json.Marshal(svcAccountPolicy)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	var infoResp = madmin.InfoServiceAccountResp{
+		ParentUser:    svcAccount.ParentUser,
+		AccountStatus: svcAccount.Status,
+		ImpliedPolicy: impliedPolicy,
+		Policy:        string(policyJSON),
+	}
+
+	data, err := json.Marshal(infoResp)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	writeSuccessResponseJSON(w, encryptedData)
+}
+
 // ListServiceAccounts - GET /minio/admin/v3/list-service-accounts
 func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "ListServiceAccounts")
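The new `InfoServiceAccount` handler distinguishes a session policy stored on the service account from an "implied" policy inherited from the parent user (`impliedPolicy := policy == nil`). A toy model of that fallback, with stand-in types rather than MinIO's `iampolicy` package:

```go
package main

import "fmt"

// Stand-in type; MinIO uses iampolicy.Policy and its Merge method instead.
type policyDoc struct{ statements []string }

func merge(a, b policyDoc) policyDoc {
	return policyDoc{statements: append(append([]string{}, a.statements...), b.statements...)}
}

// effectivePolicy mirrors the fallback: a stored session policy wins;
// otherwise the parent's combined policies apply and are flagged implied.
func effectivePolicy(session *policyDoc, parent []policyDoc) (policyDoc, bool) {
	if session != nil {
		return *session, false
	}
	var combined policyDoc
	for _, p := range parent {
		combined = merge(combined, p)
	}
	return combined, true
}

func main() {
	p, implied := effectivePolicy(nil, []policyDoc{{[]string{"s3:GetObject"}}, {[]string{"s3:PutObject"}}})
	fmt.Println(p.statements, "implied:", implied) // [s3:GetObject s3:PutObject] implied: true
}
```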
@@ -550,31 +760,48 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
 		return
 	}

-	cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
+	cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
 	if s3Err != ErrNone {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
 		return
 	}

-	// Disallow creating service accounts by root user.
-	if owner {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
-		return
+	var targetAccount string
+
+	user := r.URL.Query().Get("user")
+	if user != "" {
+		if !globalIAMSys.IsAllowed(iampolicy.Args{
+			AccountName:     cred.AccessKey,
+			Action:          iampolicy.ListServiceAccountsAdminAction,
+			ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
+			IsOwner:         owner,
+			Claims:          claims,
+		}) {
+			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
+			return
+		}
+		targetAccount = user
+	} else {
+		targetAccount = cred.AccessKey
+		if cred.ParentUser != "" {
+			targetAccount = cred.ParentUser
+		}
 	}

-	parentUser := cred.AccessKey
-	if cred.ParentUser != "" {
-		parentUser = cred.ParentUser
-	}
-
-	serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, parentUser)
+	serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, targetAccount)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}

+	var serviceAccountsNames []string
+
+	for _, svc := range serviceAccounts {
+		serviceAccountsNames = append(serviceAccountsNames, svc.AccessKey)
+	}
+
 	var listResp = madmin.ListServiceAccountsResp{
-		Accounts: serviceAccounts,
+		Accounts: serviceAccountsNames,
 	}

 	data, err := json.Marshal(listResp)
@@ -605,41 +832,44 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
 		return
 	}

-	cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
+	cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
 	if s3Err != ErrNone {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
 		return
 	}

-	// Disallow creating service accounts by root user.
-	if owner {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
-		return
-	}
-
 	serviceAccount := mux.Vars(r)["accessKey"]
 	if serviceAccount == "" {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
 		return
 	}

-	user, err := globalIAMSys.GetServiceAccountParent(ctx, serviceAccount)
+	svcAccount, _, err := globalIAMSys.GetServiceAccount(ctx, serviceAccount)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}

-	parentUser := cred.AccessKey
-	if cred.ParentUser != "" {
-		parentUser = cred.ParentUser
-	}
+	adminPrivilege := globalIAMSys.IsAllowed(iampolicy.Args{
+		AccountName:     cred.AccessKey,
+		Action:          iampolicy.RemoveServiceAccountAdminAction,
+		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
+		IsOwner:         owner,
+		Claims:          claims,
+	})

-	if parentUser != user || user == "" {
-		// The service account belongs to another user but return not
-		// found error to mitigate brute force attacks. or the
-		// serviceAccount doesn't exist.
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServiceAccountNotFound), r.URL)
-		return
+	if !adminPrivilege {
+		parentUser := cred.AccessKey
+		if cred.ParentUser != "" {
+			parentUser = cred.ParentUser
+		}
+		if parentUser != svcAccount.ParentUser {
+			// The service account belongs to another user but return not
+			// found error to mitigate brute force attacks. or the
+			// serviceAccount doesn't exist.
+			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminServiceAccountNotFound), r.URL)
+			return
+		}
 	}

 	err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount)
@@ -775,7 +1005,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
 		if !dataUsageInfo.LastUpdate.IsZero() {
 			size = dataUsageInfo.BucketsUsage[bucket.Name].Size
 		}
-		acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketUsageInfo{
+		acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketAccessInfo{
 			Name:    bucket.Name,
 			Created: bucket.Created,
 			Size:    size,
@@ -24,6 +24,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math/rand"
 	"net/http"
 	"net/url"
 	"os"
@@ -259,26 +260,15 @@ type ServerHTTPAPIStats struct {
 // ServerHTTPStats holds all type of http operations performed to/from the server
 // including their average execution time.
 type ServerHTTPStats struct {
-	S3RequestsInQueue int32              `json:"s3RequestsInQueue"`
-	CurrentS3Requests ServerHTTPAPIStats `json:"currentS3Requests"`
-	TotalS3Requests   ServerHTTPAPIStats `json:"totalS3Requests"`
-	TotalS3Errors     ServerHTTPAPIStats `json:"totalS3Errors"`
-	TotalS3Canceled   ServerHTTPAPIStats `json:"totalS3Canceled"`
-}
-
-// ServerInfoData holds storage, connections and other
-// information of a given server.
-type ServerInfoData struct {
-	ConnStats  ServerConnStats  `json:"network"`
-	HTTPStats  ServerHTTPStats  `json:"http"`
-	Properties ServerProperties `json:"server"`
-}
-
-// ServerInfo holds server information result of one node
-type ServerInfo struct {
-	Error string          `json:"error"`
-	Addr  string          `json:"addr"`
-	Data  *ServerInfoData `json:"data"`
+	S3RequestsInQueue      int32              `json:"s3RequestsInQueue"`
+	CurrentS3Requests      ServerHTTPAPIStats `json:"currentS3Requests"`
+	TotalS3Requests        ServerHTTPAPIStats `json:"totalS3Requests"`
+	TotalS3Errors          ServerHTTPAPIStats `json:"totalS3Errors"`
+	TotalS3Canceled        ServerHTTPAPIStats `json:"totalS3Canceled"`
+	TotalS3RejectedAuth    uint64             `json:"totalS3RejectedAuth"`
+	TotalS3RejectedTime    uint64             `json:"totalS3RejectedTime"`
+	TotalS3RejectedHeader  uint64             `json:"totalS3RejectedHeader"`
+	TotalS3RejectedInvalid uint64             `json:"totalS3RejectedInvalid"`
 }

 // StorageInfoHandler - GET /minio/admin/v3/storageinfo
@@ -1321,17 +1311,16 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 		}
 	}

-	deadlinedCtx, cancel := context.WithTimeout(ctx, deadline)
-	defer cancel()
+	deadlinedCtx, deadlineCancel := context.WithTimeout(ctx, deadline)
+	defer deadlineCancel()

-	var err error
 	nsLock := objectAPI.NewNSLock(minioMetaBucket, "health-check-in-progress")
-	ctx, err = nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline))
+	lkctx, err := nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline))
 	if err != nil { // returns a locked lock
 		errResp(err)
 		return
 	}
-	defer nsLock.Unlock()
+	defer nsLock.Unlock(lkctx.Cancel)

 	go func() {
 		defer close(healthInfoCh)
@@ -1470,30 +1459,33 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http
 		return
 	}

+	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+
 	setEventStreamHeaders(w)
-	reportCh := make(chan bandwidth.Report, 1)
+	reportCh := make(chan bandwidth.Report)
 	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
 	defer keepAliveTicker.Stop()
 	bucketsRequestedString := r.URL.Query().Get("buckets")
 	bucketsRequested := strings.Split(bucketsRequestedString, ",")
 	go func() {
+		defer close(reportCh)
 		for {
-			reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...)
 			select {
 			case <-ctx.Done():
 				return
-			default:
-				time.Sleep(2 * time.Second)
+			case reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...):
+				time.Sleep(time.Duration(rnd.Float64() * float64(2*time.Second)))
 			}
 		}
 	}()
 	for {
 		select {
-		case report := <-reportCh:
-			enc := json.NewEncoder(w)
-			err := enc.Encode(report)
-			if err != nil {
-				writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
+		case report, ok := <-reportCh:
+			if !ok {
+				return
+			}
+			if err := json.NewEncoder(w).Encode(report); err != nil {
+				writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
 				return
 			}
 			w.(http.Flusher).Flush()
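The reworked `BandwidthMonitorHandler` replaces a buffered channel plus a `default`-branch sleep with an unbuffered channel whose send sits inside the `select`, so the producer blocks until the consumer is ready and exits cleanly on `ctx.Done()`; closing the channel lets the consumer detect shutdown. The same shape in a self-contained sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	reports := make(chan int) // unbuffered: the send blocks until received
	go func() {
		defer close(reports) // consumer sees ok == false / range ends
		for i := 0; ; i++ {
			select {
			case <-ctx.Done():
				return
			case reports <- i: // blocks here instead of spinning in default:
				time.Sleep(100 * time.Millisecond)
			}
		}
	}()

	for r := range reports { // ends when the producer closes the channel
		fmt.Println("report", r)
	}
}
```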
@@ -121,6 +121,8 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)

 		// Service accounts ops
 		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/add-service-account").HandlerFunc(httpTraceHdrs(adminAPI.AddServiceAccount))
+		adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update-service-account").HandlerFunc(httpTraceHdrs(adminAPI.UpdateServiceAccount)).Queries("accessKey", "{accessKey:.*}")
+		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-service-account").HandlerFunc(httpTraceHdrs(adminAPI.InfoServiceAccount)).Queries("accessKey", "{accessKey:.*}")
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-service-accounts").HandlerFunc(httpTraceHdrs(adminAPI.ListServiceAccounts))
 		adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/delete-service-account").HandlerFunc(httpTraceHdrs(adminAPI.DeleteServiceAccount)).Queries("accessKey", "{accessKey:.*}")
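The new routes rely on gorilla/mux's `Queries` matcher: the route matches only when the named query parameter is present, and the `{accessKey:.*}` pattern exposes its value through `mux.Vars`, which is how the handlers above read `mux.Vars(r)["accessKey"]`. A minimal sketch:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// Matches only when ?accessKey=... is present; the value lands in mux.Vars.
	r.Methods(http.MethodGet).Path("/info-service-account").
		HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			fmt.Fprintf(w, "accessKey=%s", mux.Vars(req)["accessKey"])
		}).
		Queries("accessKey", "{accessKey:.*}")

	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/info-service-account?accessKey=svc1", nil))
	fmt.Println(rec.Body.String()) // accessKey=svc1
}
```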
@@ -19,6 +19,7 @@ package cmd
 import (
 	"context"
 	"net/http"
+	"runtime"
 	"time"

 	"github.com/minio/minio/cmd/logger"
@@ -67,6 +68,7 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
 		CommitID: CommitID,
 		Network:  network,
 	}
+	runtime.ReadMemStats(&props.MemStats)

 	objLayer := newObjectLayerFn()
 	if objLayer != nil && !globalIsGateway {
@@ -80,6 +80,7 @@ const (
 	ErrInvalidBucketName
 	ErrInvalidDigest
 	ErrInvalidRange
+	ErrInvalidRangePartNumber
 	ErrInvalidCopyPartRange
 	ErrInvalidCopyPartRangeSource
 	ErrInvalidMaxKeys
@@ -364,7 +365,7 @@ const (
 	ErrAddUserInvalidArgument
 	ErrAdminAccountNotEligible
 	ErrAccountNotEligible
-	ErrServiceAccountNotFound
+	ErrAdminServiceAccountNotFound
 	ErrPostPolicyConditionInvalidFormat
 )

@@ -503,6 +504,11 @@ var errorCodes = errorCodeMap{
 		Description:    "The requested range is not satisfiable",
 		HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
 	},
+	ErrInvalidRangePartNumber: {
+		Code:           "InvalidRequest",
+		Description:    "Cannot specify both Range header and partNumber query parameter",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	ErrMalformedXML: {
 		Code:           "MalformedXML",
 		Description:    "The XML you provided was not well-formed or did not validate against our published schema.",
@@ -750,7 +756,7 @@ var errorCodes = errorCodeMap{
 	},
 	ErrSlowDown: {
 		Code:           "SlowDown",
-		Description:    "Please reduce your request",
+		Description:    "Resource requested is unreadable, please reduce your request rate",
 		HTTPStatusCode: http.StatusServiceUnavailable,
 	},
 	ErrInvalidPrefixMarker: {
@@ -1739,7 +1745,7 @@ var errorCodes = errorCodeMap{
 		Description:    "The account key is not eligible for this operation",
 		HTTPStatusCode: http.StatusForbidden,
 	},
-	ErrServiceAccountNotFound: {
+	ErrAdminServiceAccountNotFound: {
 		Code:           "XMinioInvalidIAMCredentials",
 		Description:    "The specified service account is not found",
 		HTTPStatusCode: http.StatusNotFound,
@@ -1775,6 +1781,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrAdminInvalidArgument
 	case errNoSuchUser:
 		apiErr = ErrAdminNoSuchUser
+	case errNoSuchServiceAccount:
+		apiErr = ErrAdminServiceAccountNotFound
 	case errNoSuchGroup:
 		apiErr = ErrAdminNoSuchGroup
 	case errGroupNotEmpty:
@@ -1904,8 +1912,6 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrSlowDown
-	case InsufficientReadQuorum:
-		apiErr = ErrSlowDown
 	case UnsupportedDelimiter:
 		apiErr = ErrNotImplemented
 	case InvalidMarkerPrefixCombination:
 		apiErr = ErrNotImplemented
 	case InvalidUploadIDKeyCombination:
@@ -2038,6 +2044,22 @@ func toAPIError(ctx context.Context, err error) APIError {
 		apiErr = errorCodes.ToAPIErrWithErr(code, e)
 	}

+	if apiErr.Code == "NotImplemented" {
+		switch e := err.(type) {
+		case NotImplemented:
+			desc := e.Error()
+			if desc == "" {
+				desc = apiErr.Description
+			}
+			apiErr = APIError{
+				Code:           apiErr.Code,
+				Description:    desc,
+				HTTPStatusCode: apiErr.HTTPStatusCode,
+			}
+			return apiErr
+		}
+	}
+
 	if apiErr.Code == "InternalError" {
 		// If we see an internal error try to interpret
 		// any underlying errors if possible depending on
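`toAPIErrorCode` maps typed internal errors onto S3-style error codes with a switch; the rename from `ErrServiceAccountNotFound` to `ErrAdminServiceAccountNotFound` and the new `errNoSuchServiceAccount` case slot into that table. A stripped-down model of the pattern (types here are illustrative, not MinIO's):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

type apiError struct {
	Code   string
	Status int
}

var errNoSuchServiceAccount = errors.New("service account does not exist")

// toAPIError mirrors the switch-based mapping in toAPIErrorCode above.
func toAPIError(err error) apiError {
	switch {
	case errors.Is(err, errNoSuchServiceAccount):
		return apiError{"XMinioInvalidIAMCredentials", http.StatusNotFound}
	default:
		return apiError{"InternalError", http.StatusInternalServerError}
	}
}

func main() {
	fmt.Println(toAPIError(errNoSuchServiceAccount)) // {XMinioInvalidIAMCredentials 404}
}
```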
@@ -43,7 +43,6 @@ var toAPIErrorTests = []struct {
 	{err: InvalidPart{}, errCode: ErrInvalidPart},
-	{err: InsufficientReadQuorum{}, errCode: ErrSlowDown},
 	{err: InsufficientWriteQuorum{}, errCode: ErrSlowDown},
 	{err: UnsupportedDelimiter{}, errCode: ErrNotImplemented},
 	{err: InvalidMarkerPrefixCombination{}, errCode: ErrNotImplemented},
 	{err: InvalidUploadIDKeyCombination{}, errCode: ErrNotImplemented},
 	{err: MalformedUploadID{}, errCode: ErrNoSuchUpload},
@@ -158,16 +158,16 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
 		return err
 	}

+	if rs == nil && opts.PartNumber > 0 {
+		rs = partNumberToRangeSpec(objInfo, opts.PartNumber)
+	}
+
 	// For providing ranged content
 	start, rangeLen, err = rs.GetOffsetLength(totalObjectSize)
 	if err != nil {
 		return err
 	}

-	if rs == nil && opts.PartNumber > 0 {
-		rs = partNumberToRangeSpec(objInfo, opts.PartNumber)
-	}
-
 	// Set content length.
 	w.Header().Set(xhttp.ContentLength, strconv.FormatInt(rangeLen, 10))
 	if rs != nil {
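The hunk above is an ordering fix: the `partNumber` query must be converted into a range spec before `GetOffsetLength` computes the response length, otherwise a nil spec silently means "whole object" and the Content-Length covers the full object instead of the selected part. The nil-receiver idiom that makes this subtle is easy to reproduce (stand-in types, not MinIO's):

```go
package main

import "fmt"

type rangeSpec struct{ start, length int64 }

// GetOffsetLength treats a nil receiver as "the whole object",
// the same convention the handler above relies on.
func (rs *rangeSpec) GetOffsetLength(total int64) (int64, int64) {
	if rs == nil {
		return 0, total
	}
	return rs.start, rs.length
}

func main() {
	var rs *rangeSpec // no Range header sent
	partNumber := 2
	total := int64(1000)

	// Buggy order: length computed while rs is still nil -> whole object.
	_, length := rs.GetOffsetLength(total)
	fmt.Println("before fix:", length) // 1000

	// Fixed order: derive the spec from partNumber first.
	if rs == nil && partNumber > 0 {
		rs = &rangeSpec{start: 500, length: 500} // hypothetical part 2 boundaries
	}
	_, length = rs.GetOffsetLength(total)
	fmt.Println("after fix:", length) // 500
}
```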
@@ -78,6 +78,103 @@ func getHost(r *http.Request) string {
 	return r.Host
 }

+func notImplementedHandler(w http.ResponseWriter, r *http.Request) {
+	writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
+}
+
+type rejectedAPI struct {
+	api     string
+	methods []string
+	queries []string
+	path    string
+}
+
+var rejectedAPIs = []rejectedAPI{
+	{
+		api:     "inventory",
+		methods: []string{http.MethodGet, http.MethodPut, http.MethodDelete},
+		queries: []string{"inventory", ""},
+	},
+	{
+		api:     "cors",
+		methods: []string{http.MethodPut, http.MethodDelete},
+		queries: []string{"cors", ""},
+	},
+	{
+		api:     "metrics",
+		methods: []string{http.MethodGet, http.MethodPut, http.MethodDelete},
+		queries: []string{"metrics", ""},
+	},
+	{
+		api:     "website",
+		methods: []string{http.MethodPut},
+		queries: []string{"website", ""},
+	},
+	{
+		api:     "logging",
+		methods: []string{http.MethodPut, http.MethodDelete},
+		queries: []string{"logging", ""},
+	},
+	{
+		api:     "accelerate",
+		methods: []string{http.MethodPut, http.MethodDelete},
+		queries: []string{"accelerate", ""},
+	},
+	{
+		api:     "requestPayment",
+		methods: []string{http.MethodPut, http.MethodDelete},
+		queries: []string{"requestPayment", ""},
+	},
+	{
+		api:     "torrent",
+		methods: []string{http.MethodPut, http.MethodDelete, http.MethodGet},
+		queries: []string{"torrent", ""},
+		path:    "/{object:.+}",
+	},
+	{
+		api:     "acl",
+		methods: []string{http.MethodDelete},
+		queries: []string{"acl", ""},
+		path:    "/{object:.+}",
+	},
+	{
+		api:     "acl",
+		methods: []string{http.MethodDelete, http.MethodPut, http.MethodHead},
+		queries: []string{"acl", ""},
+	},
+	{
+		api:     "publicAccessBlock",
+		methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
+		queries: []string{"publicAccessBlock", ""},
+	},
+	{
+		api:     "ownershipControls",
+		methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
+		queries: []string{"ownershipControls", ""},
+	},
+	{
+		api:     "intelligent-tiering",
+		methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
+		queries: []string{"intelligent-tiering", ""},
+	},
+	{
+		api:     "analytics",
+		methods: []string{http.MethodDelete, http.MethodPut, http.MethodGet},
+		queries: []string{"analytics", ""},
+	},
+}
+
+func rejectUnsupportedAPIs(router *mux.Router) {
+	for _, r := range rejectedAPIs {
+		t := router.Methods(r.methods...).
+			HandlerFunc(collectAPIStats(r.api, httpTraceAll(notImplementedHandler))).
+			Queries(r.queries...)
+		if r.path != "" {
+			t.Path(r.path)
+		}
+	}
+}
+
 // registerAPIRouter - registers S3 compatible APIs.
 func registerAPIRouter(router *mux.Router) {
 	// Initialize API.
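`rejectUnsupportedAPIs` is table-driven route registration: each unsupported S3 call is declared as data and wired to one shared NotImplemented handler. A self-contained sketch of the same idea, with plain handlers in place of MinIO's stats and trace wrappers:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

type rejectedAPI struct {
	methods []string
	queries []string
}

var rejected = []rejectedAPI{
	{[]string{http.MethodGet, http.MethodPut, http.MethodDelete}, []string{"inventory", ""}},
	{[]string{http.MethodPut, http.MethodDelete}, []string{"cors", ""}},
}

func main() {
	r := mux.NewRouter()
	for _, rj := range rejected {
		// Every entry in the table shares one NotImplemented handler.
		r.Methods(rj.methods...).
			HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
				http.Error(w, "NotImplemented", http.StatusNotImplemented)
			}).
			Queries(rj.queries...)
	}

	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/mybucket?inventory=", nil))
	fmt.Println(rec.Code) // 501
}
```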
@@ -116,217 +213,228 @@ func registerAPIRouter(router *mux.Router) {
 	}
 	routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())

-	for _, bucket := range routers {
+	for _, router := range routers {
+		rejectUnsupportedAPIs(router)
 		// Object operations
 		// HeadObject
-		bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("headobject", maxClients(httpTraceAll(api.HeadObjectHandler))))
 		// CopyObjectPart
-		bucket.Methods(http.MethodPut).Path("/{object:.+}").
+		router.Methods(http.MethodPut).Path("/{object:.+}").
 			HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").
 			HandlerFunc(collectAPIStats("copyobjectpart", maxClients(httpTraceAll(api.CopyObjectPartHandler)))).
 			Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
 		// PutObjectPart
-		bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("putobjectpart", maxClients(httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
 		// ListObjectParts
-		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("listobjectparts", maxClients(httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
 		// CompleteMultipartUpload
-		bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("completemutipartupload", maxClients(httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
 		// NewMultipartUpload
-		bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("newmultipartupload", maxClients(httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
 		// AbortMultipartUpload
-		bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("abortmultipartupload", maxClients(httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
 		// GetObjectACL - this is a dummy call.
-		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("getobjectacl", maxClients(httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
 		// PutObjectACL - this is a dummy call.
-		bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("putobjectacl", maxClients(httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
 		// GetObjectTagging
-		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("getobjecttagging", maxClients(httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
 		// PutObjectTagging
-		bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("putobjecttagging", maxClients(httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
 		// DeleteObjectTagging
-		bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("deleteobjecttagging", maxClients(httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
 		// SelectObjectContent
-		bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("selectobjectcontent", maxClients(httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
 		// GetObjectRetention
-		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("getobjectretention", maxClients(httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
 		// GetObjectLegalHold
-		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("getobjectlegalhold", maxClients(httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
 		// GetObject
-		bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("getobject", maxClients(httpTraceHdrs(api.GetObjectHandler))))
 		// CopyObject
-		bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(
+		router.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(
 			collectAPIStats("copyobject", maxClients(httpTraceAll(api.CopyObjectHandler))))
 		// PutObjectRetention
-		bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("putobjectretention", maxClients(httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
 		// PutObjectLegalHold
-		bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("putobjectlegalhold", maxClients(httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")

+		// PutObject with auto-extract support for zip
+		router.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzSnowballExtract, "true").HandlerFunc(
+			collectAPIStats("putobject", maxClients(httpTraceHdrs(api.PutObjectExtractHandler))))
+
 		// PutObject
-		bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("putobject", maxClients(httpTraceHdrs(api.PutObjectHandler))))

 		// DeleteObject
-		bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("deleteobject", maxClients(httpTraceAll(api.DeleteObjectHandler))))

 		// PostRestoreObject
-		bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
+		router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("restoreobject", maxClients(httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")

 		/// Bucket operations
 		// GetBucketLocation
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketlocation", maxClients(httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
 		// GetBucketPolicy
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketpolicy", maxClients(httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
 		// GetBucketLifecycle
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketlifecycle", maxClients(httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
 		// GetBucketEncryption
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketencryption", maxClients(httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
 		// GetBucketObjectLockConfig
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketobjectlockconfiguration", maxClients(httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
 		// GetBucketReplicationConfig
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketreplicationconfiguration", maxClients(httpTraceAll(api.GetBucketReplicationConfigHandler)))).Queries("replication", "")

 		// GetBucketVersioning
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketversioning", maxClients(httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
 		// GetBucketNotification
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketnotification", maxClients(httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
 		// ListenNotification
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("listennotification", maxClients(httpTraceAll(api.ListenNotificationHandler)))).Queries("events", "{events:.*}")

 		// Dummy Bucket Calls
 		// GetBucketACL -- this is a dummy call.
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketacl", maxClients(httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
 		// PutBucketACL -- this is a dummy call.
-		bucket.Methods(http.MethodPut).HandlerFunc(
+		router.Methods(http.MethodPut).HandlerFunc(
 			collectAPIStats("putbucketacl", maxClients(httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
 		// GetBucketCors - this is a dummy call.
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketcors", maxClients(httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
 		// GetBucketWebsiteHandler - this is a dummy call.
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketwebsite", maxClients(httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
 		// GetBucketAccelerateHandler - this is a dummy call.
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketaccelerate", maxClients(httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
 		// GetBucketRequestPaymentHandler - this is a dummy call.
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketrequestpayment", maxClients(httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
 		// GetBucketLoggingHandler - this is a dummy call.
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketlogging", maxClients(httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
 		// GetBucketLifecycleHandler - this is a dummy call.
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketlifecycle", maxClients(httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
 		// GetBucketTaggingHandler
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbuckettagging", maxClients(httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
 		//DeleteBucketWebsiteHandler
-		bucket.Methods(http.MethodDelete).HandlerFunc(
+		router.Methods(http.MethodDelete).HandlerFunc(
 			collectAPIStats("deletebucketwebsite", maxClients(httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
 		// DeleteBucketTaggingHandler
-		bucket.Methods(http.MethodDelete).HandlerFunc(
+		router.Methods(http.MethodDelete).HandlerFunc(
 			collectAPIStats("deletebuckettagging", maxClients(httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")

 		// ListMultipartUploads
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("listmultipartuploads", maxClients(httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
 		// ListObjectsV2M
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("listobjectsv2M", maxClients(httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
 		// ListObjectsV2
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("listobjectsv2", maxClients(httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
 		// ListObjectVersions
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("listobjectversions", maxClients(httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
 		// GetBucketPolicyStatus
-		bucket.Methods(http.MethodGet).HandlerFunc(
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getpolicystatus", maxClients(httpTraceAll(api.GetBucketPolicyStatusHandler)))).Queries("policyStatus", "")
 		// PutBucketLifecycle
-		bucket.Methods(http.MethodPut).HandlerFunc(
+		router.Methods(http.MethodPut).HandlerFunc(
 			collectAPIStats("putbucketlifecycle", maxClients(httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
 		// PutBucketReplicationConfig
-		bucket.Methods(http.MethodPut).HandlerFunc(
+		router.Methods(http.MethodPut).HandlerFunc(
 			collectAPIStats("putbucketreplicationconfiguration", maxClients(httpTraceAll(api.PutBucketReplicationConfigHandler)))).Queries("replication", "")
 		// GetObjectRetention

 		// PutBucketEncryption
-		bucket.Methods(http.MethodPut).HandlerFunc(
+		router.Methods(http.MethodPut).HandlerFunc(
 			collectAPIStats("putbucketencryption", maxClients(httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")

 		// PutBucketPolicy
-		bucket.Methods(http.MethodPut).HandlerFunc(
+		router.Methods(http.MethodPut).HandlerFunc(
 			collectAPIStats("putbucketpolicy", maxClients(httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")

 		// PutBucketObjectLockConfig
-		bucket.Methods(http.MethodPut).HandlerFunc(
+		router.Methods(http.MethodPut).HandlerFunc(
 			collectAPIStats("putbucketobjectlockconfig", maxClients(httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
 		// PutBucketTaggingHandler
-		bucket.Methods(http.MethodPut).HandlerFunc(
+		router.Methods(http.MethodPut).HandlerFunc(
 			collectAPIStats("putbuckettagging", maxClients(httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
 		// PutBucketVersioning
-		bucket.Methods(http.MethodPut).HandlerFunc(
+		router.Methods(http.MethodPut).HandlerFunc(
 			collectAPIStats("putbucketversioning", maxClients(httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
 		// PutBucketNotification
-		bucket.Methods(http.MethodPut).HandlerFunc(
+		router.Methods(http.MethodPut).HandlerFunc(
 			collectAPIStats("putbucketnotification", maxClients(httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
 		// PutBucket
-		bucket.Methods(http.MethodPut).HandlerFunc(
+		router.Methods(http.MethodPut).HandlerFunc(
 			collectAPIStats("putbucket", maxClients(httpTraceAll(api.PutBucketHandler))))
 		// HeadBucket
-		bucket.Methods(http.MethodHead).HandlerFunc(
+		router.Methods(http.MethodHead).HandlerFunc(
 			collectAPIStats("headbucket", maxClients(httpTraceAll(api.HeadBucketHandler))))
 		// PostPolicy
-		bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
+		router.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
 			collectAPIStats("postpolicybucket", maxClients(httpTraceHdrs(api.PostPolicyBucketHandler))))
 		// DeleteMultipleObjects
-		bucket.Methods(http.MethodPost).HandlerFunc(
+		router.Methods(http.MethodPost).HandlerFunc(
 			collectAPIStats("deletemultipleobjects", maxClients(httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
 		// DeleteBucketPolicy
-		bucket.Methods(http.MethodDelete).HandlerFunc(
+		router.Methods(http.MethodDelete).HandlerFunc(
 			collectAPIStats("deletebucketpolicy", maxClients(httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
 		// DeleteBucketReplication
-		bucket.Methods(http.MethodDelete).HandlerFunc(
+		router.Methods(http.MethodDelete).HandlerFunc(
 			collectAPIStats("deletebucketreplicationconfiguration", maxClients(httpTraceAll(api.DeleteBucketReplicationConfigHandler)))).Queries("replication", "")
 		// DeleteBucketLifecycle
-		bucket.Methods(http.MethodDelete).HandlerFunc(
+		router.Methods(http.MethodDelete).HandlerFunc(
 			collectAPIStats("deletebucketlifecycle", maxClients(httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
 		// DeleteBucketEncryption
-		bucket.Methods(http.MethodDelete).HandlerFunc(
+		router.Methods(http.MethodDelete).HandlerFunc(
 			collectAPIStats("deletebucketencryption", maxClients(httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
 		// DeleteBucket
-		bucket.Methods(http.MethodDelete).HandlerFunc(
+		router.Methods(http.MethodDelete).HandlerFunc(
 			collectAPIStats("deletebucket", maxClients(httpTraceAll(api.DeleteBucketHandler))))
-		// ListObjectsV1 (Legacy)
-		bucket.Methods(http.MethodGet).HandlerFunc(
+
+		// MinIO extension API for replication.
+		//
+		// GetBucketReplicationMetrics
+		router.Methods(http.MethodGet).HandlerFunc(
+			collectAPIStats("getbucketreplicationmetrics", maxClients(httpTraceAll(api.GetBucketReplicationMetricsHandler)))).Queries("replication-metrics", "")
+
+		// S3 ListObjectsV1 (Legacy)
+		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("listobjectsv1", maxClients(httpTraceAll(api.ListObjectsV1Handler))))
 	}

 	/// Root operation
@@ -28,6 +28,7 @@ import (
 	"net/http"
 	"strconv"
 	"strings"
+	"sync/atomic"
 	"time"

 	xhttp "github.com/minio/minio/cmd/http"
@@ -193,24 +194,21 @@ func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {

 // Fetch claims in the security token returned by the client.
 func getClaimsFromToken(token string) (map[string]interface{}, error) {
-	claims := xjwt.NewMapClaims()
 	if token == "" {
+		claims := xjwt.NewMapClaims()
 		return claims.Map(), nil
 	}

-	stsTokenCallback := func(claims *xjwt.MapClaims) ([]byte, error) {
-		// JWT token for x-amz-security-token is signed with admin
-		// secret key, temporary credentials become invalid if
-		// server admin credentials change. This is done to ensure
-		// that clients cannot decode the token using the temp
-		// secret keys and generate an entirely new claim by essentially
-		// hijacking the policies. We need to make sure that this is
-		// based an admin credential such that token cannot be decoded
-		// on the client side and is treated like an opaque value.
-		return []byte(globalActiveCred.SecretKey), nil
-	}
-
-	if err := xjwt.ParseWithClaims(token, claims, stsTokenCallback); err != nil {
+	// JWT token for x-amz-security-token is signed with admin
+	// secret key, temporary credentials become invalid if
+	// server admin credentials change. This is done to ensure
+	// that clients cannot decode the token using the temp
+	// secret keys and generate an entirely new claim by essentially
+	// hijacking the policies. We need to make sure that this is
+	// based an admin credential such that token cannot be decoded
+	// on the client side and is treated like an opaque value.
+	claims, err := auth.ExtractClaims(token, globalActiveCred.SecretKey)
+	if err != nil {
 		return nil, errAuthentication
 	}
@@ -505,6 +503,7 @@ func setAuthHandler(h http.Handler) http.Handler {
 			return
 		}
 		writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
+		atomic.AddUint64(&globalHTTPStats.rejectedRequestsAuth, 1)
 	})
 }
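The one-line addition above counts rejected requests with `sync/atomic` rather than a mutex, the usual choice for hot-path counters touched by many request goroutines at once. A self-contained illustration:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type httpStats struct {
	rejectedRequestsAuth uint64
}

func main() {
	var stats httpStats
	var wg sync.WaitGroup
	for i := 0; i < 1000; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Safe concurrent increment, as in setAuthHandler above.
			atomic.AddUint64(&stats.rejectedRequestsAuth, 1)
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadUint64(&stats.rejectedRequestsAuth)) // 1000
}
```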
@@ -125,6 +125,11 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
        streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
        if len(b.data) == 0 {
            b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
            if err != nil {
                logger.LogIf(GlobalContext,
                    fmt.Errorf("Error(%w) reading erasure shards at (%s: %s/%s), will attempt to reconstruct if we have quorum",
                        err, b.disk, b.volume, b.filePath))
            }
        } else {
            b.rc = io.NewSectionReader(bytes.NewReader(b.data), streamOffset, b.tillOffset-streamOffset)
        }

@@ -132,7 +137,6 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
            return 0, err
        }
    }

    if offset != b.currOffset {
        // Can never happen unless there are programmer bugs
        return 0, errUnexpected

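The new inline-data branch avoids a disk round trip by serving the shard from memory through an io.SectionReader, which exposes a byte range of an io.ReaderAt without copying. A tiny standalone illustration of that range-read pattern:

package main

import (
    "bytes"
    "fmt"
    "io"
)

func main() {
    // Same pattern as the inline-data branch above: serve a byte
    // range of an in-memory buffer without copying it.
    data := []byte("0123456789")
    rc := io.NewSectionReader(bytes.NewReader(data), 2, 5)
    out, _ := io.ReadAll(rc)
    fmt.Printf("%s\n", out) // "23456"
}
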
@@ -20,6 +20,7 @@ import (
    "bytes"
    "crypto/subtle"
    "encoding/base64"
    "encoding/json"
    "encoding/xml"
    "fmt"
    "io"

@@ -298,6 +299,12 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
        return
    }

    // Anonymous users, should be rejected.
    if cred.AccessKey == "" {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL, guessIsBrowserReq(r))
        return
    }

    // If etcd, dns federation configured list buckets from etcd.
    var bucketsInfo []BucketInfo
    if globalDNSConfig != nil && globalBucketFederation {

@@ -496,7 +503,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
            object.PurgeTransitioned = goi.TransitionStatus
        }
        if replicateDeletes {
            delMarker, replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
            replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
                ObjectName: object.ObjectName,
                VersionID:  object.VersionID,
            }, goi, gerr)

@@ -511,9 +518,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
            }
            if object.VersionID != "" {
                object.VersionPurgeStatus = Pending
                if delMarker {
                    object.DeleteMarkerVersionID = object.VersionID
                }
            } else {
                object.DeleteMarkerReplicationStatus = string(replication.Pending)
            }

@@ -557,13 +561,18 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
    })
    deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
    for i := range errs {
        dindex := objectsToDelete[ObjectToDelete{
        // DeleteMarkerVersionID is not used specifically to avoid
        // lookup errors, since DeleteMarkerVersionID is only
        // created during DeleteMarker creation when client didn't
        // specify a versionID.
        objToDel := ObjectToDelete{
            ObjectName:                    dObjects[i].ObjectName,
            VersionID:                     dObjects[i].VersionID,
            VersionPurgeStatus:            dObjects[i].VersionPurgeStatus,
            DeleteMarkerReplicationStatus: dObjects[i].DeleteMarkerReplicationStatus,
            PurgeTransitioned:             dObjects[i].PurgeTransitioned,
        }]
        }
        dindex := objectsToDelete[objToDel]
        if errs[i] == nil || isErrObjectNotFound(errs[i]) || isErrVersionNotFound(errs[i]) {
            if replicateDeletes {
                dObjects[i].DeleteMarkerReplicationStatus = deleteList[i].DeleteMarkerReplicationStatus

@@ -619,12 +628,12 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

        eventName := event.ObjectRemovedDelete
        objInfo := ObjectInfo{
            Name:      dobj.ObjectName,
            VersionID: dobj.VersionID,
            Name:         dobj.ObjectName,
            VersionID:    dobj.VersionID,
            DeleteMarker: dobj.DeleteMarker,
        }

        if dobj.DeleteMarker {
            objInfo.DeleteMarker = dobj.DeleteMarker
        if objInfo.DeleteMarker {
            objInfo.VersionID = dobj.DeleteMarkerVersionID
            eventName = event.ObjectRemovedDeleteMarkerCreated
        }

@@ -757,7 +766,9 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
        globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)

        // Make sure to add Location information here only for bucket
        w.Header().Set(xhttp.Location, path.Clean(r.URL.Path)) // Clean any trailing slashes.
        if cp := pathClean(r.URL.Path); cp != "" {
            w.Header().Set(xhttp.Location, cp) // Clean any trailing slashes.
        }

        writeSuccessResponseHeadersOnly(w)

@@ -1601,3 +1612,59 @@ func (api objectAPIHandlers) DeleteBucketReplicationConfigHandler(w http.Respons
    // Write success response.
    writeSuccessResponseHeadersOnly(w)
}

// GetBucketReplicationMetricsHandler - GET Bucket replication metrics.
// ----------
// Gets the replication metrics for a bucket.
func (api objectAPIHandlers) GetBucketReplicationMetricsHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetBucketReplicationMetrics")

    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

    vars := mux.Vars(r)
    bucket := vars["bucket"]

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
        return
    }

    // check if user has permissions to perform this operation
    if s3Error := checkRequestAuthType(ctx, r, policy.GetReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
        return
    }

    // Check if bucket exists.
    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }

    bucketStats := globalNotificationSys.GetClusterBucketStats(r.Context(), bucket)
    bucketReplStats := BucketReplicationStats{}
    // sum up metrics from each node in the cluster
    for _, bucketStat := range bucketStats {
        bucketReplStats.FailedCount += bucketStat.ReplicationStats.FailedCount
        bucketReplStats.FailedSize += bucketStat.ReplicationStats.FailedSize
        bucketReplStats.PendingCount += bucketStat.ReplicationStats.PendingCount
        bucketReplStats.PendingSize += bucketStat.ReplicationStats.PendingSize
        bucketReplStats.ReplicaSize += bucketStat.ReplicationStats.ReplicaSize
        bucketReplStats.ReplicatedSize += bucketStat.ReplicationStats.ReplicatedSize
    }
    // add initial usage from the time of cluster up
    usageStat := globalReplicationStats.GetInitialUsage(bucket)
    bucketReplStats.FailedCount += usageStat.FailedCount
    bucketReplStats.FailedSize += usageStat.FailedSize
    bucketReplStats.PendingCount += usageStat.PendingCount
    bucketReplStats.PendingSize += usageStat.PendingSize
    bucketReplStats.ReplicaSize += usageStat.ReplicaSize
    bucketReplStats.ReplicatedSize += usageStat.ReplicatedSize

    if err := json.NewEncoder(w).Encode(&bucketReplStats); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }
    w.(http.Flusher).Flush()
}

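GetBucketReplicationMetricsHandler streams a single JSON-encoded BucketReplicationStats object. A sketch of decoding that payload on a client side; the field names mirror the json tags declared later in the new bucket-stats file, and the sample payload values are made up:

package main

import (
    "encoding/json"
    "fmt"
)

// Client-side mirror of BucketReplicationStats, using the same json tags.
type bucketReplicationStats struct {
    PendingSize    uint64 `json:"pendingReplicationSize"`
    ReplicatedSize uint64 `json:"completedReplicationSize"`
    ReplicaSize    uint64 `json:"replicaSize"`
    FailedSize     uint64 `json:"failedReplicationSize"`
    PendingCount   uint64 `json:"pendingReplicationCount"`
    FailedCount    uint64 `json:"failedReplicationCount"`
}

func main() {
    payload := []byte(`{"pendingReplicationSize":1048576,"failedReplicationCount":2}`)
    var s bucketReplicationStats
    if err := json.Unmarshal(payload, &s); err != nil {
        panic(err)
    }
    fmt.Println(s.PendingSize, s.FailedCount) // 1048576 2
}
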
@@ -342,6 +342,10 @@ func deleteTransitionedObject(ctx context.Context, objectAPI ObjectLayer, bucket
    if err != nil {
        return err
    }

    // Send audit for the lifecycle delete operation
    auditLogLifecycle(ctx, bucket, object)

    eventName := event.ObjectRemovedDelete
    if lcOpts.DeleteMarker {
        eventName = event.ObjectRemovedDeleteMarkerCreated

@@ -530,7 +534,7 @@ type SelectParameters struct {

// IsEmpty returns true if no select parameters set
func (sp *SelectParameters) IsEmpty() bool {
    return sp == nil || sp.S3Select == s3select.S3Select{}
    return sp == nil
}

var (

@@ -169,7 +169,10 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
        }
        meta.ReplicationConfigXML = configData
    case bucketTargetsFile:
        meta.BucketTargetsConfigJSON, meta.BucketTargetsConfigMetaJSON, err = encryptBucketMetadata(meta.Name, configData, crypto.Context{bucket: meta.Name, bucketTargetsFile: bucketTargetsFile})
        meta.BucketTargetsConfigJSON, meta.BucketTargetsConfigMetaJSON, err = encryptBucketMetadata(meta.Name, configData, crypto.Context{
            bucket:            meta.Name,
            bucketTargetsFile: bucketTargetsFile,
        })
        if err != nil {
            return fmt.Errorf("Error encrypting bucket target metadata %w", err)
        }

@@ -416,6 +416,7 @@ func encryptBucketMetadata(bucket string, input []byte, kmsContext crypto.Contex
    if err != nil {
        return
    }

    outbuf := bytes.NewBuffer(nil)
    objectKey := crypto.GenerateKey(key, rand.Reader)
    sealedKey = objectKey.Seal(key, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, "")

@@ -437,7 +438,6 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string,
        return nil, errKMSNotConfigured
    }
    keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(meta)

    if err != nil {
        return nil, err
    }

@@ -449,8 +449,8 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string,
    if err = objectKey.Unseal(extKey, sealedKey, crypto.S3.String(), bucket, ""); err != nil {
        return nil, err
    }

    outbuf := bytes.NewBuffer(nil)
    _, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})

    return outbuf.Bytes(), err
}

@@ -83,17 +83,38 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
        }
    }

    authType := getRequestAuthType(r)
    var signatureVersion string
    switch authType {
    case authTypeSignedV2, authTypePresignedV2:
        signatureVersion = signV2Algorithm
    case authTypeSigned, authTypePresigned, authTypeStreamingSigned, authTypePostPolicy:
        signatureVersion = signV4Algorithm
    }

    var authtype string
    switch authType {
    case authTypePresignedV2, authTypePresigned:
        authtype = "REST-QUERY-STRING"
    case authTypeSignedV2, authTypeSigned, authTypeStreamingSigned:
        authtype = "REST-HEADER"
    case authTypePostPolicy:
        authtype = "POST"
    }

    args := map[string][]string{
        "CurrentTime":     {currTime.Format(time.RFC3339)},
        "EpochTime":       {strconv.FormatInt(currTime.Unix(), 10)},
        "SecureTransport": {strconv.FormatBool(r.TLS != nil)},
        "SourceIp":        {handlers.GetSourceIP(r)},
        "UserAgent":       {r.UserAgent()},
        "Referer":         {r.Referer()},
        "principaltype":   {principalType},
        "userid":          {username},
        "username":        {username},
        "versionid":       {vid},
        "CurrentTime":      {currTime.Format(time.RFC3339)},
        "EpochTime":        {strconv.FormatInt(currTime.Unix(), 10)},
        "SecureTransport":  {strconv.FormatBool(r.TLS != nil)},
        "SourceIp":         {handlers.GetSourceIP(r)},
        "UserAgent":        {r.UserAgent()},
        "Referer":          {r.Referer()},
        "principaltype":    {principalType},
        "userid":           {username},
        "username":         {username},
        "versionid":        {vid},
        "signatureversion": {signatureVersion},
        "authType":         {authtype},
    }

    if lc != "" {

@@ -88,7 +88,7 @@ func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64)
        return err
    }

    dui := v.(DataUsageInfo)
    dui := v.(madmin.DataUsageInfo)

    bui, ok := dui.BucketsUsage[bucket]
    if !ok {

@@ -115,7 +115,7 @@ func enforceBucketQuota(ctx context.Context, bucket string, size int64) error {

// enforceFIFOQuota deletes objects in FIFO order until sufficient objects
// have been deleted so as to bring bucket usage within quota.
func enforceFIFOQuotaBucket(ctx context.Context, objectAPI ObjectLayer, bucket string, bui BucketUsageInfo) {
func enforceFIFOQuotaBucket(ctx context.Context, objectAPI ObjectLayer, bucket string, bui madmin.BucketUsageInfo) {
    // Check if the current bucket has quota restrictions, if not skip it
    cfg, err := globalBucketQuotaSys.Get(bucket)
    if err != nil {

@@ -0,0 +1,192 @@
/*
 * MinIO Cloud Storage, (C) 2021 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "context"
    "sync"
    "sync/atomic"

    "github.com/minio/minio/pkg/bucket/replication"
)

func (b *BucketReplicationStats) hasReplicationUsage() bool {
    return b.PendingSize > 0 ||
        b.FailedSize > 0 ||
        b.ReplicatedSize > 0 ||
        b.ReplicaSize > 0 ||
        b.PendingCount > 0 ||
        b.FailedCount > 0
}

// ReplicationStats holds the global in-memory replication stats
type ReplicationStats struct {
    Cache      map[string]*BucketReplicationStats
    UsageCache map[string]*BucketReplicationStats
    sync.RWMutex
}

// Delete deletes in-memory replication statistics for a bucket.
func (r *ReplicationStats) Delete(bucket string) {
    if r == nil {
        return
    }

    r.Lock()
    defer r.Unlock()
    delete(r.Cache, bucket)
    delete(r.UsageCache, bucket)

}

// Update updates in-memory replication statistics with new values.
func (r *ReplicationStats) Update(bucket string, n int64, status, prevStatus replication.StatusType, opType replication.Type) {
    if r == nil {
        return
    }

    r.RLock()
    b, ok := r.Cache[bucket]
    if !ok {
        b = &BucketReplicationStats{}
    }
    r.RUnlock()
    switch status {
    case replication.Pending:
        if opType == replication.ObjectReplicationType {
            atomic.AddUint64(&b.PendingSize, uint64(n))
        }
        atomic.AddUint64(&b.PendingCount, 1)
    case replication.Completed:
        switch prevStatus { // adjust counters based on previous state
        case replication.Pending:
            atomic.AddUint64(&b.PendingCount, ^uint64(0))
        case replication.Failed:
            atomic.AddUint64(&b.FailedCount, ^uint64(0))
        }
        if opType == replication.ObjectReplicationType {
            atomic.AddUint64(&b.ReplicatedSize, uint64(n))
            switch prevStatus {
            case replication.Pending:
                atomic.AddUint64(&b.PendingSize, ^uint64(n-1))
            case replication.Failed:
                atomic.AddUint64(&b.FailedSize, ^uint64(n-1))
            }
        }
    case replication.Failed:
        // count failures only once - not on every retry
        switch prevStatus { // adjust counters based on previous state
        case replication.Pending:
            atomic.AddUint64(&b.PendingCount, ^uint64(0))
        }
        if opType == replication.ObjectReplicationType {
            if prevStatus == replication.Pending {
                atomic.AddUint64(&b.FailedSize, uint64(n))
                atomic.AddUint64(&b.FailedCount, 1)
                atomic.AddUint64(&b.PendingSize, ^uint64(n-1))
            }
        }
    case replication.Replica:
        if opType == replication.ObjectReplicationType {
            atomic.AddUint64(&b.ReplicaSize, uint64(n))
        }
    }
    r.Lock()
    r.Cache[bucket] = b
    r.Unlock()
}

// GetInitialUsage get replication metrics available at the time of cluster initialization
func (r *ReplicationStats) GetInitialUsage(bucket string) BucketReplicationStats {
    if r == nil {
        return BucketReplicationStats{}
    }

    r.RLock()
    defer r.RUnlock()

    st, ok := r.UsageCache[bucket]
    if !ok {
        return BucketReplicationStats{}
    }
    return BucketReplicationStats{
        PendingSize:    atomic.LoadUint64(&st.PendingSize),
        FailedSize:     atomic.LoadUint64(&st.FailedSize),
        ReplicatedSize: atomic.LoadUint64(&st.ReplicatedSize),
        ReplicaSize:    atomic.LoadUint64(&st.ReplicaSize),
        PendingCount:   atomic.LoadUint64(&st.PendingCount),
        FailedCount:    atomic.LoadUint64(&st.FailedCount),
    }
}

// Get replication metrics for a bucket from this node since this node came up.
func (r *ReplicationStats) Get(bucket string) BucketReplicationStats {
    if r == nil {
        return BucketReplicationStats{}
    }

    r.RLock()
    defer r.RUnlock()

    st, ok := r.Cache[bucket]
    if !ok {
        return BucketReplicationStats{}
    }

    return BucketReplicationStats{
        PendingSize:    atomic.LoadUint64(&st.PendingSize),
        FailedSize:     atomic.LoadUint64(&st.FailedSize),
        ReplicatedSize: atomic.LoadUint64(&st.ReplicatedSize),
        ReplicaSize:    atomic.LoadUint64(&st.ReplicaSize),
        PendingCount:   atomic.LoadUint64(&st.PendingCount),
        FailedCount:    atomic.LoadUint64(&st.FailedCount),
    }
}

// NewReplicationStats initialize in-memory replication statistics
func NewReplicationStats(ctx context.Context, objectAPI ObjectLayer) *ReplicationStats {
    st := &ReplicationStats{
        Cache:      make(map[string]*BucketReplicationStats),
        UsageCache: make(map[string]*BucketReplicationStats),
    }

    dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
    if err != nil {
        return st
    }

    // data usage has not captured any data yet.
    if dataUsageInfo.LastUpdate.IsZero() {
        return st
    }

    for bucket, usage := range dataUsageInfo.BucketsUsage {
        b := &BucketReplicationStats{
            PendingSize:    usage.ReplicationPendingSize,
            FailedSize:     usage.ReplicationFailedSize,
            ReplicatedSize: usage.ReplicatedSize,
            ReplicaSize:    usage.ReplicaSize,
            PendingCount:   usage.ReplicationPendingCount,
            FailedCount:    usage.ReplicationFailedCount,
        }
        if b.hasReplicationUsage() {
            st.UsageCache[bucket] = b
        }
    }

    return st
}

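One detail worth noting in Update above: sync/atomic has no subtraction for uint64, so the code adds the two's complement, exactly as the sync/atomic documentation suggests. Adding ^uint64(0) subtracts 1, and adding ^uint64(n-1) subtracts n. A small standalone check with made-up values:

package main

import (
    "fmt"
    "sync/atomic"
)

func main() {
    var pendingSize uint64 = 1000
    var n int64 = 100

    // ^uint64(n-1) == 2^64 - n, i.e. -n modulo 2^64, so this subtracts n.
    atomic.AddUint64(&pendingSize, ^uint64(n-1))
    fmt.Println(pendingSize) // 900

    // ^uint64(0) == 2^64 - 1, i.e. -1 modulo 2^64, so this subtracts 1.
    var pendingCount uint64 = 5
    atomic.AddUint64(&pendingCount, ^uint64(0))
    fmt.Println(pendingCount) // 4
}
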
@@ -175,10 +175,10 @@ func isStandardHeader(matchHeaderKey string) bool {
}

// returns whether object version is a deletemarker and if object qualifies for replication
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error) (dm, replicate, sync bool) {
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error) (replicate, sync bool) {
    rcfg, err := getReplicationConfig(ctx, bucket)
    if err != nil || rcfg == nil {
        return false, false, sync
        return false, sync
    }
    opts := replication.ObjectOpts{
        Name: dobj.ObjectName,

@@ -198,19 +198,19 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
            validReplStatus = true
        }
        if oi.DeleteMarker && (validReplStatus || replicate) {
            return oi.DeleteMarker, true, sync
            return true, sync
        }
        // can be the case that other cluster is down and duplicate `mc rm --vid`
        // is issued - this still needs to be replicated back to the other target
        return oi.DeleteMarker, oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed, sync
        return oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed, sync
    }
    tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
    // the target online status should not be used here while deciding
    // whether to replicate deletes as the target could be temporarily down
    if tgt == nil {
        return oi.DeleteMarker, false, false
        return false, false
    }
    return oi.DeleteMarker, replicate, tgt.replicateSync
    return replicate, tgt.replicateSync
}

// replicate deletes to the designated replication target if replication configuration

@@ -291,6 +291,14 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectA
            versionPurgeStatus = Complete
        }
    }
    prevStatus := dobj.DeleteMarkerReplicationStatus
    currStatus := replicationStatus
    if dobj.VersionID != "" {
        prevStatus = string(dobj.VersionPurgeStatus)
        currStatus = string(versionPurgeStatus)
    }
    // to decrement pending count later.
    globalReplicationStats.Update(dobj.Bucket, 0, replication.StatusType(currStatus), replication.StatusType(prevStatus), replication.DeleteReplicationType)

    var eventName = event.ObjectReplicationComplete
    if replicationStatus == string(replication.Failed) || versionPurgeStatus == Failed {

@@ -563,12 +571,13 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo) replicationActio

// replicateObject replicates the specified version of the object to destination bucket
// The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLayer) {
func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI ObjectLayer) {
    z, ok := objectAPI.(*erasureServerPools)
    if !ok {
        return
    }

    objInfo := ri.ObjectInfo
    bucket := objInfo.Bucket
    object := objInfo.Name

@@ -594,7 +603,7 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
        })
        return
    }
    gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
    gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, writeLock, ObjectOptions{
        VersionID: objInfo.VersionID,
    })
    if err != nil {

@@ -604,10 +613,10 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
            Object:     objInfo,
            Host:       "Internal: [Replication]",
        })
        logger.LogIf(ctx, err)
        logger.LogIf(ctx, fmt.Errorf("Unable to update replicate for %s/%s(%s): %w", bucket, object, objInfo.VersionID, err))
        return
    }
    defer gr.Close() // hold read lock for entire transaction
    defer gr.Close() // hold write lock for entire transaction

    objInfo = gr.ObjInfo
    size, err := objInfo.GetActualSize()

@@ -644,7 +653,7 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
        rtype = getReplicationAction(objInfo, oi)
        if rtype == replicateNone {
            // object with same VersionID already exists, replication kicked off by
            // PutObject might have completed.
            // PutObject might have completed
            return
        }
    }

@@ -656,7 +665,8 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
    srcOpts := miniogo.CopySrcOptions{
        Bucket:    dest.Bucket,
        Object:    object,
        VersionID: objInfo.VersionID}
        VersionID: objInfo.VersionID,
    }
    dstOpts := miniogo.PutObjectOptions{
        Internal: miniogo.AdvancedPutOptions{
            SourceVersionID: objInfo.VersionID,

@@ -697,21 +707,28 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
        if totalNodesCount == 0 {
            totalNodesCount = 1 // For standalone erasure coding
        }
        b := target.BandwidthLimit / int64(totalNodesCount)

        var headerSize int
        for k, v := range putOpts.Header() {
            headerSize += len(k) + len(v)
        }

        // r takes over closing gr.
        r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, objInfo.Bucket, objInfo.Name, gr, headerSize, b, target.BandwidthLimit)
        opts := &bandwidth.MonitorReaderOptions{
            Bucket:               objInfo.Bucket,
            Object:               objInfo.Name,
            HeaderSize:           headerSize,
            BandwidthBytesPerSec: target.BandwidthLimit / int64(totalNodesCount),
            ClusterBandwidth:     target.BandwidthLimit,
        }

        r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, gr, opts)
        if _, err = c.PutObject(ctx, dest.Bucket, object, r, size, "", "", putOpts); err != nil {
            replicationStatus = replication.Failed
            logger.LogIf(ctx, fmt.Errorf("Unable to replicate for object %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
        }
        defer r.Close()
    }

    prevReplStatus := objInfo.ReplicationStatus
    objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
    if objInfo.UserTags != "" {
        objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags

@@ -725,23 +742,43 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
        eventName = event.ObjectReplicationFailed
    }

    // This lower level implementation is necessary to avoid write locks from CopyObject.
    poolIdx, err := z.getPoolIdx(ctx, bucket, object, objInfo.Size)
    if err != nil {
        logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
    } else {
        if err = z.serverPools[poolIdx].getHashedSet(object).updateObjectMeta(ctx, bucket, object, objInfo.UserDefined, ObjectOptions{
            VersionID: objInfo.VersionID,
        }); err != nil {
    // Leave metadata in `PENDING` state if inline replication fails to save iops
    if ri.OpType == replication.HealReplicationType || replicationStatus == replication.Completed {
        // This lower level implementation is necessary to avoid write locks from CopyObject.
        poolIdx, err := z.getPoolIdx(ctx, bucket, object, objInfo.Size)
        if err != nil {
            logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
        } else {
            metadata := make(map[string]string, len(objInfo.UserDefined))
            for k, v := range objInfo.UserDefined {
                metadata[k] = v
            }
            if err = z.serverPools[poolIdx].getHashedSet(object).updateObjectMeta(ctx, bucket, object, metadata, ObjectOptions{
                VersionID: objInfo.VersionID,
            }); err != nil {
                logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
            }
        }
        opType := replication.MetadataReplicationType
        if rtype == replicateAll {
            opType = replication.ObjectReplicationType
        }
        globalReplicationStats.Update(bucket, size, replicationStatus, prevReplStatus, opType)
        sendEvent(eventArgs{
            EventName:  eventName,
            BucketName: bucket,
            Object:     objInfo,
            Host:       "Internal: [Replication]",
        })
    }

    // re-queue failures once more - keep a retry count to avoid flooding the queue if
    // the target site is down. Leave it to scanner to catch up instead.
    if replicationStatus != replication.Completed && ri.RetryCount < 1 {
        ri.OpType = replication.HealReplicationType
        ri.RetryCount++
        globalReplicationPool.queueReplicaFailedTask(ri)
    }
    sendEvent(eventArgs{
        EventName:  eventName,
        BucketName: bucket,
        Object:     objInfo,
        Host:       "Internal: [Replication]",
    })
}

// filterReplicationStatusMetadata filters replication status metadata for COPY

@@ -774,41 +811,60 @@ type DeletedObjectVersionInfo struct {
}

var (
    globalReplicationPool *ReplicationPool
    globalReplicationPool  *ReplicationPool
    globalReplicationStats *ReplicationStats
)

// ReplicationPool describes replication pool
type ReplicationPool struct {
    mu              sync.Mutex
    size            int
    replicaCh       chan ObjectInfo
    replicaDeleteCh chan DeletedObjectVersionInfo
    killCh          chan struct{}
    wg              sync.WaitGroup
    ctx             context.Context
    objLayer        ObjectLayer
    ctx             context.Context
    mrfWorkerKillCh chan struct{}
    workerKillCh    chan struct{}
    replicaCh       chan ReplicateObjectInfo
    replicaDeleteCh chan DeletedObjectVersionInfo
    mrfReplicaCh    chan ReplicateObjectInfo
    workerSize      int
    mrfWorkerSize   int
    workerWg        sync.WaitGroup
    mrfWorkerWg     sync.WaitGroup
    once            sync.Once
    mu              sync.Mutex
}

// NewReplicationPool creates a pool of replication workers of specified size
func NewReplicationPool(ctx context.Context, o ObjectLayer, sz int) *ReplicationPool {
func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPoolOpts) *ReplicationPool {
    pool := &ReplicationPool{
        replicaCh:       make(chan ObjectInfo, 10000),
        replicaDeleteCh: make(chan DeletedObjectVersionInfo, 10000),
        replicaCh:       make(chan ReplicateObjectInfo, 100000),
        replicaDeleteCh: make(chan DeletedObjectVersionInfo, 100000),
        mrfReplicaCh:    make(chan ReplicateObjectInfo, 100000),
        ctx:             ctx,
        objLayer:        o,
    }
    go func() {
        <-ctx.Done()
        close(pool.replicaCh)
        close(pool.replicaDeleteCh)
    }()
    pool.Resize(sz)
    pool.ResizeWorkers(opts.Workers)
    pool.ResizeFailedWorkers(opts.FailedWorkers)
    return pool
}

// AddMRFWorker adds a pending/failed replication worker to handle requests that could not be queued
// to the other workers
func (p *ReplicationPool) AddMRFWorker() {
    for {
        select {
        case <-p.ctx.Done():
            return
        case oi, ok := <-p.mrfReplicaCh:
            if !ok {
                return
            }
            replicateObject(p.ctx, oi, p.objLayer)
        }
    }
}

// AddWorker adds a replication worker to the pool
func (p *ReplicationPool) AddWorker() {
    defer p.wg.Done()
    defer p.workerWg.Done()
    for {
        select {
        case <-p.ctx.Done():

@@ -823,35 +879,71 @@ func (p *ReplicationPool) AddWorker() {
                return
            }
            replicateDelete(p.ctx, doi, p.objLayer)
        case <-p.killCh:
        case <-p.workerKillCh:
            return
        }
    }

}

//Resize replication pool to new size
func (p *ReplicationPool) Resize(n int) {
// ResizeWorkers sets replication workers pool to new size
func (p *ReplicationPool) ResizeWorkers(n int) {
    p.mu.Lock()
    defer p.mu.Unlock()

    for p.size < n {
        p.size++
        p.wg.Add(1)
    for p.workerSize < n {
        p.workerSize++
        p.workerWg.Add(1)
        go p.AddWorker()
    }
    for p.size > n {
        p.size--
        go func() { p.killCh <- struct{}{} }()
    for p.workerSize > n {
        p.workerSize--
        go func() { p.workerKillCh <- struct{}{} }()
    }
}

func (p *ReplicationPool) queueReplicaTask(oi ObjectInfo) {
// ResizeFailedWorkers sets replication failed workers pool size
func (p *ReplicationPool) ResizeFailedWorkers(n int) {
    p.mu.Lock()
    defer p.mu.Unlock()

    for p.mrfWorkerSize < n {
        p.mrfWorkerSize++
        p.mrfWorkerWg.Add(1)
        go p.AddMRFWorker()
    }
    for p.mrfWorkerSize > n {
        p.mrfWorkerSize--
        go func() { p.mrfWorkerKillCh <- struct{}{} }()
    }
}

func (p *ReplicationPool) queueReplicaFailedTask(ri ReplicateObjectInfo) {
    if p == nil {
        return
    }
    select {
    case p.replicaCh <- oi:
    case <-GlobalContext.Done():
        p.once.Do(func() {
            close(p.replicaCh)
            close(p.mrfReplicaCh)
        })
    case p.mrfReplicaCh <- ri:
    default:
    }
}

func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) {
    if p == nil {
        return
    }
    select {
    case <-GlobalContext.Done():
        p.once.Do(func() {
            close(p.replicaCh)
            close(p.mrfReplicaCh)
        })
    case p.replicaCh <- ri:
    default:
    }
}

@@ -861,13 +953,26 @@ func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectVersionInfo) {
        return
    }
    select {
    case <-GlobalContext.Done():
        p.once.Do(func() {
            close(p.replicaDeleteCh)
        })
    case p.replicaDeleteCh <- doi:
    default:
    }
}

type replicationPoolOpts struct {
    Workers       int
    FailedWorkers int
}

func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
    globalReplicationPool = NewReplicationPool(ctx, objectAPI, globalAPIConfig.getReplicationWorkers())
    globalReplicationPool = NewReplicationPool(ctx, objectAPI, replicationPoolOpts{
        Workers:       globalAPIConfig.getReplicationWorkers(),
        FailedWorkers: globalAPIConfig.getReplicationFailedWorkers(),
    })
    globalReplicationStats = NewReplicationStats(ctx, objectAPI)
}

// get Reader from replication target if active-active replication is in place and

@@ -952,7 +1057,10 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, opts Objec
    if tgt == nil || tgt.isOffline() {
        return nil, oi, false, fmt.Errorf("target is offline or not configured")
    }

    // if proxying explicitly disabled on remote target
    if tgt.disableProxy {
        return nil, oi, false, nil
    }
    gopts := miniogo.GetObjectOptions{
        VersionID:            opts.VersionID,
        ServerSideEncryption: opts.ServerSideEncryption,

@@ -1003,18 +1111,18 @@ func proxyHeadToReplicationTarget(ctx context.Context, bucket, object string, op
    return oi, proxy, err
}

func scheduleReplication(ctx context.Context, objInfo ObjectInfo, o ObjectLayer, sync bool) {
func scheduleReplication(ctx context.Context, objInfo ObjectInfo, o ObjectLayer, sync bool, opType replication.Type) {
    if sync {
        replicateObject(ctx, objInfo, o)
        replicateObject(ctx, ReplicateObjectInfo{ObjectInfo: objInfo, OpType: opType}, o)
    } else {
        globalReplicationPool.queueReplicaTask(objInfo)
        globalReplicationPool.queueReplicaTask(ReplicateObjectInfo{ObjectInfo: objInfo, OpType: opType})
    }
    if sz, err := objInfo.GetActualSize(); err == nil {
        globalReplicationStats.Update(objInfo.Bucket, sz, objInfo.ReplicationStatus, replication.StatusType(""), opType)
    }
}

func scheduleReplicationDelete(ctx context.Context, dv DeletedObjectVersionInfo, o ObjectLayer, sync bool) {
    if sync {
        replicateDelete(ctx, dv, o)
    } else {
        globalReplicationPool.queueReplicaDeleteTask(dv)
    }
    globalReplicationPool.queueReplicaDeleteTask(dv)
    globalReplicationStats.Update(dv.Bucket, 0, replication.Pending, replication.StatusType(""), replication.DeleteReplicationType)
}

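ResizeWorkers and ResizeFailedWorkers above grow the pool by spawning goroutines and shrink it by sending one message per excess worker on a kill channel; the sends happen in their own goroutines so resizing never blocks. A stripped-down, runnable sketch of that grow/shrink pattern with illustrative names (not the MinIO types):

package main

import "sync"

type pool struct {
    mu     sync.Mutex
    size   int
    killCh chan struct{}
    work   chan func()
}

func (p *pool) worker() {
    for {
        select {
        case fn, ok := <-p.work:
            if !ok {
                return
            }
            fn()
        case <-p.killCh:
            return // exactly one worker exits per kill message
        }
    }
}

func (p *pool) resize(n int) {
    p.mu.Lock()
    defer p.mu.Unlock()
    for p.size < n {
        p.size++
        go p.worker()
    }
    for p.size > n {
        p.size--
        go func() { p.killCh <- struct{}{} }() // async so resize never blocks
    }
}

func main() {
    p := &pool{killCh: make(chan struct{}), work: make(chan func())}
    p.resize(4)
    done := make(chan struct{})
    p.work <- func() { close(done) }
    <-done
    p.resize(1)
}
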
@@ -0,0 +1,41 @@
/*
 * MinIO Cloud Storage, (C) 2021 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

//go:generate msgp -file $GOFILE

// BucketStats bucket statistics
type BucketStats struct {
    ReplicationStats BucketReplicationStats
}

// BucketReplicationStats represents inline replication statistics
// such as pending, failed and completed bytes in total for a bucket
type BucketReplicationStats struct {
    // Pending size in bytes
    PendingSize uint64 `json:"pendingReplicationSize"`
    // Completed size in bytes
    ReplicatedSize uint64 `json:"completedReplicationSize"`
    // Total Replica size in bytes
    ReplicaSize uint64 `json:"replicaSize"`
    // Failed size in bytes
    FailedSize uint64 `json:"failedReplicationSize"`
    // Total number of pending operations including metadata updates
    PendingCount uint64 `json:"pendingReplicationCount"`
    // Total number of failed operations including metadata updates
    FailedCount uint64 `json:"failedReplicationCount"`
}

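The //go:generate directive in this file is what produces the generated MessagePack code and tests that follow (running the msgp tool via go generate). A short round trip through the generated methods; a sketch in package cmd scope, using only the MarshalMsg/UnmarshalMsg signatures shown below:

// exampleBucketStatsRoundTrip encodes a BucketStats value to MessagePack
// and decodes it back using the msgp-generated methods.
func exampleBucketStatsRoundTrip() error {
    stats := BucketStats{
        ReplicationStats: BucketReplicationStats{PendingCount: 3, PendingSize: 1 << 20},
    }
    buf, err := stats.MarshalMsg(nil) // nil: allocate a fresh buffer
    if err != nil {
        return err
    }
    var decoded BucketStats
    _, err = decoded.UnmarshalMsg(buf) // first return is any leftover bytes
    return err
}
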
@@ -0,0 +1,342 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
    "github.com/tinylib/msgp/msgp"
)

// DecodeMsg implements msgp.Decodable
func (z *BucketReplicationStats) DecodeMsg(dc *msgp.Reader) (err error) {
    var field []byte
    _ = field
    var zb0001 uint32
    zb0001, err = dc.ReadMapHeader()
    if err != nil {
        err = msgp.WrapError(err)
        return
    }
    for zb0001 > 0 {
        zb0001--
        field, err = dc.ReadMapKeyPtr()
        if err != nil {
            err = msgp.WrapError(err)
            return
        }
        switch msgp.UnsafeString(field) {
        case "PendingSize":
            z.PendingSize, err = dc.ReadUint64()
            if err != nil {
                err = msgp.WrapError(err, "PendingSize")
                return
            }
        case "ReplicatedSize":
            z.ReplicatedSize, err = dc.ReadUint64()
            if err != nil {
                err = msgp.WrapError(err, "ReplicatedSize")
                return
            }
        case "ReplicaSize":
            z.ReplicaSize, err = dc.ReadUint64()
            if err != nil {
                err = msgp.WrapError(err, "ReplicaSize")
                return
            }
        case "FailedSize":
            z.FailedSize, err = dc.ReadUint64()
            if err != nil {
                err = msgp.WrapError(err, "FailedSize")
                return
            }
        case "PendingCount":
            z.PendingCount, err = dc.ReadUint64()
            if err != nil {
                err = msgp.WrapError(err, "PendingCount")
                return
            }
        case "FailedCount":
            z.FailedCount, err = dc.ReadUint64()
            if err != nil {
                err = msgp.WrapError(err, "FailedCount")
                return
            }
        default:
            err = dc.Skip()
            if err != nil {
                err = msgp.WrapError(err)
                return
            }
        }
    }
    return
}

// EncodeMsg implements msgp.Encodable
func (z *BucketReplicationStats) EncodeMsg(en *msgp.Writer) (err error) {
    // map header, size 6
    // write "PendingSize"
    err = en.Append(0x86, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
    if err != nil {
        return
    }
    err = en.WriteUint64(z.PendingSize)
    if err != nil {
        err = msgp.WrapError(err, "PendingSize")
        return
    }
    // write "ReplicatedSize"
    err = en.Append(0xae, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
    if err != nil {
        return
    }
    err = en.WriteUint64(z.ReplicatedSize)
    if err != nil {
        err = msgp.WrapError(err, "ReplicatedSize")
        return
    }
    // write "ReplicaSize"
    err = en.Append(0xab, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x69, 0x7a, 0x65)
    if err != nil {
        return
    }
    err = en.WriteUint64(z.ReplicaSize)
    if err != nil {
        err = msgp.WrapError(err, "ReplicaSize")
        return
    }
    // write "FailedSize"
    err = en.Append(0xaa, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
    if err != nil {
        return
    }
    err = en.WriteUint64(z.FailedSize)
    if err != nil {
        err = msgp.WrapError(err, "FailedSize")
        return
    }
    // write "PendingCount"
    err = en.Append(0xac, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74)
    if err != nil {
        return
    }
    err = en.WriteUint64(z.PendingCount)
    if err != nil {
        err = msgp.WrapError(err, "PendingCount")
        return
    }
    // write "FailedCount"
    err = en.Append(0xab, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
    if err != nil {
        return
    }
    err = en.WriteUint64(z.FailedCount)
    if err != nil {
        err = msgp.WrapError(err, "FailedCount")
        return
    }
    return
}

// MarshalMsg implements msgp.Marshaler
func (z *BucketReplicationStats) MarshalMsg(b []byte) (o []byte, err error) {
    o = msgp.Require(b, z.Msgsize())
    // map header, size 6
    // string "PendingSize"
    o = append(o, 0x86, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
    o = msgp.AppendUint64(o, z.PendingSize)
    // string "ReplicatedSize"
    o = append(o, 0xae, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
    o = msgp.AppendUint64(o, z.ReplicatedSize)
    // string "ReplicaSize"
    o = append(o, 0xab, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x69, 0x7a, 0x65)
    o = msgp.AppendUint64(o, z.ReplicaSize)
    // string "FailedSize"
    o = append(o, 0xaa, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
    o = msgp.AppendUint64(o, z.FailedSize)
    // string "PendingCount"
    o = append(o, 0xac, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74)
    o = msgp.AppendUint64(o, z.PendingCount)
    // string "FailedCount"
    o = append(o, 0xab, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
    o = msgp.AppendUint64(o, z.FailedCount)
    return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketReplicationStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
    var field []byte
    _ = field
    var zb0001 uint32
    zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
    if err != nil {
        err = msgp.WrapError(err)
        return
    }
    for zb0001 > 0 {
        zb0001--
        field, bts, err = msgp.ReadMapKeyZC(bts)
        if err != nil {
            err = msgp.WrapError(err)
            return
        }
        switch msgp.UnsafeString(field) {
        case "PendingSize":
            z.PendingSize, bts, err = msgp.ReadUint64Bytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "PendingSize")
                return
            }
        case "ReplicatedSize":
            z.ReplicatedSize, bts, err = msgp.ReadUint64Bytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "ReplicatedSize")
                return
            }
        case "ReplicaSize":
            z.ReplicaSize, bts, err = msgp.ReadUint64Bytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "ReplicaSize")
                return
            }
        case "FailedSize":
            z.FailedSize, bts, err = msgp.ReadUint64Bytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "FailedSize")
                return
            }
        case "PendingCount":
            z.PendingCount, bts, err = msgp.ReadUint64Bytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "PendingCount")
                return
            }
        case "FailedCount":
            z.FailedCount, bts, err = msgp.ReadUint64Bytes(bts)
            if err != nil {
                err = msgp.WrapError(err, "FailedCount")
                return
            }
        default:
            bts, err = msgp.Skip(bts)
            if err != nil {
                err = msgp.WrapError(err)
                return
            }
        }
    }
    o = bts
    return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketReplicationStats) Msgsize() (s int) {
    s = 1 + 12 + msgp.Uint64Size + 15 + msgp.Uint64Size + 12 + msgp.Uint64Size + 11 + msgp.Uint64Size + 13 + msgp.Uint64Size + 12 + msgp.Uint64Size
    return
}

// DecodeMsg implements msgp.Decodable
func (z *BucketStats) DecodeMsg(dc *msgp.Reader) (err error) {
    var field []byte
    _ = field
    var zb0001 uint32
    zb0001, err = dc.ReadMapHeader()
    if err != nil {
        err = msgp.WrapError(err)
        return
    }
    for zb0001 > 0 {
        zb0001--
        field, err = dc.ReadMapKeyPtr()
        if err != nil {
            err = msgp.WrapError(err)
            return
        }
        switch msgp.UnsafeString(field) {
        case "ReplicationStats":
            err = z.ReplicationStats.DecodeMsg(dc)
            if err != nil {
                err = msgp.WrapError(err, "ReplicationStats")
                return
            }
        default:
            err = dc.Skip()
            if err != nil {
                err = msgp.WrapError(err)
                return
            }
        }
    }
    return
}

// EncodeMsg implements msgp.Encodable
func (z *BucketStats) EncodeMsg(en *msgp.Writer) (err error) {
    // map header, size 1
    // write "ReplicationStats"
    err = en.Append(0x81, 0xb0, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73)
    if err != nil {
        return
    }
    err = z.ReplicationStats.EncodeMsg(en)
    if err != nil {
        err = msgp.WrapError(err, "ReplicationStats")
        return
    }
    return
}

// MarshalMsg implements msgp.Marshaler
func (z *BucketStats) MarshalMsg(b []byte) (o []byte, err error) {
    o = msgp.Require(b, z.Msgsize())
    // map header, size 1
    // string "ReplicationStats"
    o = append(o, 0x81, 0xb0, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73)
    o, err = z.ReplicationStats.MarshalMsg(o)
    if err != nil {
        err = msgp.WrapError(err, "ReplicationStats")
        return
    }
    return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
    var field []byte
    _ = field
    var zb0001 uint32
    zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
    if err != nil {
        err = msgp.WrapError(err)
        return
    }
    for zb0001 > 0 {
        zb0001--
        field, bts, err = msgp.ReadMapKeyZC(bts)
        if err != nil {
            err = msgp.WrapError(err)
            return
        }
        switch msgp.UnsafeString(field) {
        case "ReplicationStats":
            bts, err = z.ReplicationStats.UnmarshalMsg(bts)
            if err != nil {
                err = msgp.WrapError(err, "ReplicationStats")
                return
            }
        default:
            bts, err = msgp.Skip(bts)
            if err != nil {
                err = msgp.WrapError(err)
                return
            }
        }
    }
    o = bts
    return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketStats) Msgsize() (s int) {
    s = 1 + 17 + z.ReplicationStats.Msgsize()
    return
}

@@ -0,0 +1,236 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
    "bytes"
    "testing"

    "github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalBucketReplicationStats(t *testing.T) {
    v := BucketReplicationStats{}
    bts, err := v.MarshalMsg(nil)
    if err != nil {
        t.Fatal(err)
    }
    left, err := v.UnmarshalMsg(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
    }

    left, err = msgp.Skip(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
    }
}

func BenchmarkMarshalMsgBucketReplicationStats(b *testing.B) {
    v := BucketReplicationStats{}
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.MarshalMsg(nil)
    }
}

func BenchmarkAppendMsgBucketReplicationStats(b *testing.B) {
    v := BucketReplicationStats{}
    bts := make([]byte, 0, v.Msgsize())
    bts, _ = v.MarshalMsg(bts[0:0])
    b.SetBytes(int64(len(bts)))
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        bts, _ = v.MarshalMsg(bts[0:0])
    }
}

func BenchmarkUnmarshalBucketReplicationStats(b *testing.B) {
    v := BucketReplicationStats{}
    bts, _ := v.MarshalMsg(nil)
    b.ReportAllocs()
    b.SetBytes(int64(len(bts)))
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, err := v.UnmarshalMsg(bts)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func TestEncodeDecodeBucketReplicationStats(t *testing.T) {
    v := BucketReplicationStats{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)

    m := v.Msgsize()
    if buf.Len() > m {
        t.Log("WARNING: TestEncodeDecodeBucketReplicationStats Msgsize() is inaccurate")
    }

    vn := BucketReplicationStats{}
    err := msgp.Decode(&buf, &vn)
    if err != nil {
        t.Error(err)
    }

    buf.Reset()
    msgp.Encode(&buf, &v)
    err = msgp.NewReader(&buf).Skip()
    if err != nil {
        t.Error(err)
    }
}

func BenchmarkEncodeBucketReplicationStats(b *testing.B) {
    v := BucketReplicationStats{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    en := msgp.NewWriter(msgp.Nowhere)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.EncodeMsg(en)
    }
    en.Flush()
}

func BenchmarkDecodeBucketReplicationStats(b *testing.B) {
    v := BucketReplicationStats{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    rd := msgp.NewEndlessReader(buf.Bytes(), b)
    dc := msgp.NewReader(rd)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        err := v.DecodeMsg(dc)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func TestMarshalUnmarshalBucketStats(t *testing.T) {
    v := BucketStats{}
    bts, err := v.MarshalMsg(nil)
    if err != nil {
        t.Fatal(err)
    }
    left, err := v.UnmarshalMsg(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
    }

    left, err = msgp.Skip(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
    }
}

func BenchmarkMarshalMsgBucketStats(b *testing.B) {
    v := BucketStats{}
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.MarshalMsg(nil)
    }
}

func BenchmarkAppendMsgBucketStats(b *testing.B) {
    v := BucketStats{}
    bts := make([]byte, 0, v.Msgsize())
    bts, _ = v.MarshalMsg(bts[0:0])
    b.SetBytes(int64(len(bts)))
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        bts, _ = v.MarshalMsg(bts[0:0])
    }
}

func BenchmarkUnmarshalBucketStats(b *testing.B) {
    v := BucketStats{}
    bts, _ := v.MarshalMsg(nil)
    b.ReportAllocs()
    b.SetBytes(int64(len(bts)))
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, err := v.UnmarshalMsg(bts)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func TestEncodeDecodeBucketStats(t *testing.T) {
    v := BucketStats{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)

    m := v.Msgsize()
    if buf.Len() > m {
        t.Log("WARNING: TestEncodeDecodeBucketStats Msgsize() is inaccurate")
    }

    vn := BucketStats{}
    err := msgp.Decode(&buf, &vn)
    if err != nil {
        t.Error(err)
    }

    buf.Reset()
    msgp.Encode(&buf, &v)
    err = msgp.NewReader(&buf).Skip()
    if err != nil {
        t.Error(err)
    }
}

func BenchmarkEncodeBucketStats(b *testing.B) {
    v := BucketStats{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    en := msgp.NewWriter(msgp.Nowhere)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.EncodeMsg(en)
    }
    en.Flush()
}

func BenchmarkDecodeBucketStats(b *testing.B) {
    v := BucketStats{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    rd := msgp.NewEndlessReader(buf.Bytes(), b)
    dc := msgp.NewReader(rd)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        err := v.DecodeMsg(dc)
        if err != nil {
            b.Fatal(err)
        }
    }
}

@ -31,6 +31,7 @@ import (
    miniogo "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/credentials"
    "github.com/minio/minio/cmd/crypto"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/bucket/versioning"
    "github.com/minio/minio/pkg/madmin"
)

@ -100,22 +101,25 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
        if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
            return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
        }
        return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
        return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
    }
    if tgt.Type == madmin.ReplicationService {
        if !globalIsErasure {
            return NotImplemented{}
            return NotImplemented{Message: "Replication is not implemented in " + getMinioMode()}
        }
        if !globalBucketVersioningSys.Enabled(bucket) {
            return BucketReplicationSourceNotVersioned{Bucket: bucket}
        }
        vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
        if err != nil {
            return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
            return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
        }
        if vcfg.Status != string(versioning.Enabled) {
            return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
        }
        if tgt.ReplicationSync && tgt.BandwidthLimit > 0 {
            return NotImplemented{Message: "Synchronous replication does not support bandwidth limits"}
        }
    }
    if tgt.Type == madmin.ILMService {
        if globalBucketVersioningSys.Enabled(bucket) {

@ -124,7 +128,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
        if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
            return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
        }
        return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
        return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
    }
    if vcfg.Status != string(versioning.Enabled) {
        return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}

@ -179,7 +183,7 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
    }
    if arn.Type == madmin.ReplicationService {
        if !globalIsErasure {
            return NotImplemented{}
            return NotImplemented{Message: "Replication is not implemented in " + getMinioMode()}
        }
        // reject removal of remote target if replication configuration is present
        rcfg, err := getReplicationConfig(ctx, bucket)

@ -268,6 +272,21 @@ func (sys *BucketTargetSys) GetRemoteLabelWithArn(ctx context.Context, bucket, a
    return ""
}

// GetRemoteBucketTargetByArn returns BucketTarget for an ARN
func (sys *BucketTargetSys) GetRemoteBucketTargetByArn(ctx context.Context, bucket, arn string) madmin.BucketTarget {
    sys.RLock()
    defer sys.RUnlock()
    var tgt madmin.BucketTarget
    for _, t := range sys.targetsMap[bucket] {
        if t.Arn == arn {
            tgt = t.Clone()
            tgt.Credentials = t.Credentials
            return tgt
        }
    }
    return tgt
}

// NewBucketTargetSys - creates new replication system.
func NewBucketTargetSys() *BucketTargetSys {
    return &BucketTargetSys{

@ -328,6 +347,7 @@ func (sys *BucketTargetSys) load(ctx context.Context, buckets []BucketInfo, objA
    for _, bucket := range buckets {
        cfg, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket.Name)
        if err != nil {
            logger.LogIf(ctx, err)
            continue
        }
        if cfg == nil || cfg.Empty() {

@ -339,6 +359,7 @@ func (sys *BucketTargetSys) load(ctx context.Context, buckets []BucketInfo, objA
        for _, tgt := range cfg.Targets {
            tgtClient, err := sys.getRemoteTargetClient(&tgt)
            if err != nil {
                logger.LogIf(ctx, err)
                continue
            }
            sys.arnRemotesMap[tgt.Arn] = tgtClient

@ -377,6 +398,7 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T
        healthCheckDuration: hcDuration,
        bucket:              tcfg.TargetBucket,
        replicateSync:       tcfg.ReplicationSync,
        disableProxy:        tcfg.DisableProxy,
    }
    go tc.healthCheck()
    return tc, nil

@ -432,7 +454,10 @@ func parseBucketTargetConfig(bucket string, cdata, cmetadata []byte) (*madmin.Bu
        return nil, err
    }
    if crypto.S3.IsEncrypted(meta) {
        if data, err = decryptBucketMetadata(cdata, bucket, meta, crypto.Context{bucket: bucket, bucketTargetsFile: bucketTargetsFile}); err != nil {
        if data, err = decryptBucketMetadata(cdata, bucket, meta, crypto.Context{
            bucket:            bucket,
            bucketTargetsFile: bucketTargetsFile,
        }); err != nil {
            return nil, err
        }
    }

@ -451,6 +476,7 @@ type TargetClient struct {
    healthCheckDuration time.Duration
    bucket              string // remote bucket target
    replicateSync       bool
    disableProxy        bool
}

func (tc *TargetClient) isOffline() bool {
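The recurring change in this file, returning BucketRemoteConnectionErr{Bucket: ..., Err: err} with the underlying error instead of discarding it, follows the usual Go idiom of a typed error that carries context and wraps its cause. A minimal, self-contained sketch of that idiom; the type and names below are illustrative stand-ins, not MinIO's actual definitions:

package main

import (
    "errors"
    "fmt"
)

// RemoteConnectionErr carries the remote bucket name and wraps the
// underlying cause, so callers can use errors.Is/errors.As while
// operators still see the root cause in logs.
type RemoteConnectionErr struct {
    Bucket string
    Err    error
}

func (e RemoteConnectionErr) Error() string {
    return fmt.Sprintf("remote target connection error for bucket %q: %v", e.Bucket, e.Err)
}

// Unwrap lets errors.Is/errors.As reach the wrapped cause.
func (e RemoteConnectionErr) Unwrap() error { return e.Err }

func main() {
    cause := errors.New("connection refused")
    err := RemoteConnectionErr{Bucket: "backup", Err: cause}
    fmt.Println(err)                   // message now includes the cause
    fmt.Println(errors.Is(err, cause)) // true, thanks to Unwrap
}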
@ -59,7 +59,15 @@ func init() {
    config.Logger.Info = logger.Info
    config.Logger.LogIf = logger.LogIf

    globalDNSCache = xhttp.NewDNSCache(10*time.Second, 10*time.Second, logger.LogOnceIf)
    if IsKubernetes() || IsDocker() || IsBOSH() || IsDCOS() || IsKubernetesReplicaSet() || IsPCFTile() {
        // 30 seconds matches the orchestrator DNS TTLs; use
        // a 5 second timeout for lookups from DNS servers.
        globalDNSCache = xhttp.NewDNSCache(30*time.Second, 5*time.Second, logger.LogOnceIf)
    } else {
        // On bare metal, DNS does not change often, so it is
        // safe to assume a longer TTL of up to 10 minutes.
        globalDNSCache = xhttp.NewDNSCache(10*time.Minute, 5*time.Second, logger.LogOnceIf)
    }

    initGlobalContext()

@ -428,3 +436,13 @@ func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secu
    secureConn = true
    return x509Certs, manager, secureConn, nil
}

// contextCanceled returns whether a context is canceled.
func contextCanceled(ctx context.Context) bool {
    select {
    case <-ctx.Done():
        return true
    default:
        return false
    }
}
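The contextCanceled helper added above is the standard non-blocking probe of ctx.Done() via a select with a default case. A small, self-contained usage sketch; the loop body is invented for illustration:

package main

import (
    "context"
    "fmt"
    "time"
)

func contextCanceled(ctx context.Context) bool {
    select {
    case <-ctx.Done():
        return true
    default:
        return false
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    go func() {
        time.Sleep(10 * time.Millisecond)
        cancel()
    }()
    for i := 0; ; i++ {
        if contextCanceled(ctx) { // cheap check, never blocks
            fmt.Println("stopped after", i, "iterations")
            return
        }
        time.Sleep(time.Millisecond) // stand-in for a unit of work
    }
}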
@ -624,6 +624,8 @@ func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config
    globalHealConfig = healCfg
    globalHealConfigMu.Unlock()

    // update dynamic scanner values.
    scannerCycle.Update(scannerCfg.Cycle)
    logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))

    // Update all dynamic config values in memory.
@ -29,23 +29,26 @@ import (

// API sub-system constants
const (
    apiRequestsMax             = "requests_max"
    apiRequestsDeadline        = "requests_deadline"
    apiClusterDeadline         = "cluster_deadline"
    apiCorsAllowOrigin         = "cors_allow_origin"
    apiRemoteTransportDeadline = "remote_transport_deadline"
    apiListQuorum              = "list_quorum"
    apiExtendListCacheLife     = "extend_list_cache_life"
    apiReplicationWorkers      = "replication_workers"
    EnvAPIRequestsMax             = "MINIO_API_REQUESTS_MAX"
    EnvAPIRequestsDeadline        = "MINIO_API_REQUESTS_DEADLINE"
    EnvAPIClusterDeadline         = "MINIO_API_CLUSTER_DEADLINE"
    EnvAPICorsAllowOrigin         = "MINIO_API_CORS_ALLOW_ORIGIN"
    EnvAPIRemoteTransportDeadline = "MINIO_API_REMOTE_TRANSPORT_DEADLINE"
    EnvAPIListQuorum              = "MINIO_API_LIST_QUORUM"
    EnvAPIExtendListCacheLife     = "MINIO_API_EXTEND_LIST_CACHE_LIFE"
    EnvAPISecureCiphers           = "MINIO_API_SECURE_CIPHERS"
    EnvAPIReplicationWorkers      = "MINIO_API_REPLICATION_WORKERS"
    apiRequestsMax              = "requests_max"
    apiRequestsDeadline         = "requests_deadline"
    apiClusterDeadline          = "cluster_deadline"
    apiCorsAllowOrigin          = "cors_allow_origin"
    apiRemoteTransportDeadline  = "remote_transport_deadline"
    apiListQuorum               = "list_quorum"
    apiExtendListCacheLife      = "extend_list_cache_life"
    apiReplicationWorkers       = "replication_workers"
    apiReplicationFailedWorkers = "replication_failed_workers"

    EnvAPIRequestsMax              = "MINIO_API_REQUESTS_MAX"
    EnvAPIRequestsDeadline         = "MINIO_API_REQUESTS_DEADLINE"
    EnvAPIClusterDeadline          = "MINIO_API_CLUSTER_DEADLINE"
    EnvAPICorsAllowOrigin          = "MINIO_API_CORS_ALLOW_ORIGIN"
    EnvAPIRemoteTransportDeadline  = "MINIO_API_REMOTE_TRANSPORT_DEADLINE"
    EnvAPIListQuorum               = "MINIO_API_LIST_QUORUM"
    EnvAPIExtendListCacheLife      = "MINIO_API_EXTEND_LIST_CACHE_LIFE"
    EnvAPISecureCiphers            = "MINIO_API_SECURE_CIPHERS"
    EnvAPIReplicationWorkers       = "MINIO_API_REPLICATION_WORKERS"
    EnvAPIReplicationFailedWorkers = "MINIO_API_REPLICATION_FAILED_WORKERS"
)

// Deprecated key and ENVs

@ -87,21 +90,26 @@ var (
        },
        config.KV{
            Key:   apiReplicationWorkers,
            Value: "100",
            Value: "500",
        },
        config.KV{
            Key:   apiReplicationFailedWorkers,
            Value: "4",
        },
    }
)

// Config storage class configuration
type Config struct {
    RequestsMax             int           `json:"requests_max"`
    RequestsDeadline        time.Duration `json:"requests_deadline"`
    ClusterDeadline         time.Duration `json:"cluster_deadline"`
    CorsAllowOrigin         []string      `json:"cors_allow_origin"`
    RemoteTransportDeadline time.Duration `json:"remote_transport_deadline"`
    ListQuorum              string        `json:"list_strict_quorum"`
    ExtendListLife          time.Duration `json:"extend_list_cache_life"`
    ReplicationWorkers      int           `json:"replication_workers"`
    RequestsMax              int           `json:"requests_max"`
    RequestsDeadline         time.Duration `json:"requests_deadline"`
    ClusterDeadline          time.Duration `json:"cluster_deadline"`
    CorsAllowOrigin          []string      `json:"cors_allow_origin"`
    RemoteTransportDeadline  time.Duration `json:"remote_transport_deadline"`
    ListQuorum               string        `json:"list_strict_quorum"`
    ExtendListLife           time.Duration `json:"extend_list_cache_life"`
    ReplicationWorkers       int           `json:"replication_workers"`
    ReplicationFailedWorkers int           `json:"replication_failed_workers"`
}

// UnmarshalJSON - Validate SS and RRS parity when unmarshalling JSON.

@ -188,14 +196,24 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
        return cfg, config.ErrInvalidReplicationWorkersValue(nil).Msg("Minimum number of replication workers should be 1")
    }

    replicationFailedWorkers, err := strconv.Atoi(env.Get(EnvAPIReplicationFailedWorkers, kvs.Get(apiReplicationFailedWorkers)))
    if err != nil {
        return cfg, err
    }

    if replicationFailedWorkers <= 0 {
        return cfg, config.ErrInvalidReplicationWorkersValue(nil).Msg("Minimum number of replication failed workers should be 1")
    }

    return Config{
        RequestsMax:             requestsMax,
        RequestsDeadline:        requestsDeadline,
        ClusterDeadline:         clusterDeadline,
        CorsAllowOrigin:         corsAllowOrigin,
        RemoteTransportDeadline: remoteTransportDeadline,
        ListQuorum:              listQuorum,
        ExtendListLife:          listLife,
        ReplicationWorkers:      replicationWorkers,
        RequestsMax:              requestsMax,
        RequestsDeadline:         requestsDeadline,
        ClusterDeadline:          clusterDeadline,
        CorsAllowOrigin:          corsAllowOrigin,
        RemoteTransportDeadline:  remoteTransportDeadline,
        ListQuorum:               listQuorum,
        ExtendListLife:           listLife,
        ReplicationWorkers:       replicationWorkers,
        ReplicationFailedWorkers: replicationFailedWorkers,
    }, nil
}
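The lookup added above follows the config sub-system's usual precedence: an environment variable overrides the stored KV value, and the parsed number is validated against a minimum. A minimal sketch of that precedence; lookupInt and its exact behavior are our stand-ins, not MinIO's env/config API:

package main

import (
    "fmt"
    "os"
    "strconv"
)

// lookupInt returns the env value if set, else the stored value,
// parsed as an int and checked against a lower bound.
func lookupInt(envKey, stored string, min int) (int, error) {
    raw := stored
    if v, ok := os.LookupEnv(envKey); ok {
        raw = v
    }
    n, err := strconv.Atoi(raw)
    if err != nil {
        return 0, err
    }
    if n < min {
        return 0, fmt.Errorf("%s: minimum value is %d", envKey, min)
    }
    return n, nil
}

func main() {
    os.Setenv("MINIO_API_REPLICATION_FAILED_WORKERS", "8")
    n, err := lookupInt("MINIO_API_REPLICATION_FAILED_WORKERS", "4", 1)
    fmt.Println(n, err) // 8 <nil>; the env value wins over the stored "4"
}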
@ -51,5 +51,11 @@ var (
            Optional:    true,
            Type:        "number",
        },
        config.HelpKV{
            Key:         apiReplicationFailedWorkers,
            Description: `set the number of replication workers for recently failed replicas, defaults to 4`,
            Optional:    true,
            Type:        "number",
        },
    }
)
@ -260,6 +260,67 @@ func (l *Config) lookupUserDN(conn *ldap.Conn, username string) (string, error)
    return searchResult.Entries[0].DN, nil
}

func (l *Config) searchForUserGroups(conn *ldap.Conn, username, bindDN string) ([]string, error) {
    // User groups lookup.
    var groups []string
    if l.GroupSearchFilter != "" {
        for _, groupSearchBase := range l.GroupSearchBaseDistNames {
            filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
            filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
            searchRequest := ldap.NewSearchRequest(
                groupSearchBase,
                ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
                filter,
                nil,
                nil,
            )

            var newGroups []string
            newGroups, err := getGroups(conn, searchRequest)
            if err != nil {
                errRet := fmt.Errorf("Error finding groups of %s: %v", bindDN, err)
                return nil, errRet
            }

            groups = append(groups, newGroups...)
        }
    }

    return groups, nil
}

// LookupUserDN searches for the full DN and groups of a given username
func (l *Config) LookupUserDN(username string) (string, []string, error) {
    if !l.isUsingLookupBind {
        return "", nil, errors.New("current lookup mode does not support searching for User DN")
    }

    conn, err := l.Connect()
    if err != nil {
        return "", nil, err
    }
    defer conn.Close()

    // Bind to the lookup user account
    if err = l.lookupBind(conn); err != nil {
        return "", nil, err
    }

    // Lookup user DN
    bindDN, err := l.lookupUserDN(conn, username)
    if err != nil {
        errRet := fmt.Errorf("Unable to find user DN: %w", err)
        return "", nil, errRet
    }

    groups, err := l.searchForUserGroups(conn, username, bindDN)
    if err != nil {
        return "", nil, err
    }

    return bindDN, groups, nil
}

// Bind - binds to ldap, searches LDAP and returns the distinguished name of the
// user and the list of groups.
func (l *Config) Bind(username, password string) (string, []string, error) {

@ -310,28 +371,9 @@ func (l *Config) Bind(username, password string) (string, []string, error) {
    }

    // User groups lookup.
    var groups []string
    if l.GroupSearchFilter != "" {
        for _, groupSearchBase := range l.GroupSearchBaseDistNames {
            filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
            filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
            searchRequest := ldap.NewSearchRequest(
                groupSearchBase,
                ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
                filter,
                nil,
                nil,
            )

            var newGroups []string
            newGroups, err = getGroups(conn, searchRequest)
            if err != nil {
                errRet := fmt.Errorf("Error finding groups of %s: %v", bindDN, err)
                return "", nil, errRet
            }

            groups = append(groups, newGroups...)
        }
    groups, err := l.searchForUserGroups(conn, username, bindDN)
    if err != nil {
        return "", nil, err
    }

    return bindDN, groups, nil
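The group search above builds its LDAP filter by substituting %s with the escaped username and %d with the escaped bind DN. The sketch below shows that templating; escapeFilter is a simplified stand-in for the LDAP library's EscapeFilter and covers only the characters RFC 4515 reserves:

package main

import (
    "fmt"
    "strings"
)

// escapeFilter escapes the characters RFC 4515 reserves in search filters.
func escapeFilter(s string) string {
    r := strings.NewReplacer(
        `\`, `\5c`, `*`, `\2a`, `(`, `\28`, `)`, `\29`, "\x00", `\00`,
    )
    return r.Replace(s)
}

func main() {
    const tmpl = "(&(objectclass=group)(member=%d)(cn=%s))"
    username := "alice*" // '*' must not act as a wildcard
    bindDN := "cn=alice,ou=people,dc=example,dc=org"

    filter := strings.Replace(tmpl, "%s", escapeFilter(username), -1)
    filter = strings.Replace(filter, "%d", escapeFilter(bindDN), -1)
    fmt.Println(filter)
    // (&(objectclass=group)(member=cn=alice,ou=people,dc=example,dc=org)(cn=alice\2a))
}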
@ -1,5 +1,5 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 * MinIO Cloud Storage, (C) 2020-2021 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@ -28,8 +28,10 @@ import (
const (
    Delay   = "delay"
    MaxWait = "max_wait"
    Cycle   = "cycle"

    EnvDelay         = "MINIO_SCANNER_DELAY"
    EnvCycle         = "MINIO_SCANNER_CYCLE"
    EnvDelayLegacy   = "MINIO_CRAWLER_DELAY"
    EnvMaxWait       = "MINIO_SCANNER_MAX_WAIT"
    EnvMaxWaitLegacy = "MINIO_CRAWLER_MAX_WAIT"

@ -41,6 +43,8 @@ type Config struct {
    Delay float64 `json:"delay"`
    // MaxWait is maximum wait time between operations
    MaxWait time.Duration
    // Cycle is the time.Duration between each scanner cycle
    Cycle time.Duration
}

var (

@ -54,6 +58,10 @@ var (
            Key:   MaxWait,
            Value: "15s",
        },
        config.KV{
            Key:   Cycle,
            Value: "1m",
        },
    }

// Help provides help for config values

@ -70,6 +78,12 @@ var (
            Optional:    true,
            Type:        "duration",
        },
        config.HelpKV{
            Key:         Cycle,
            Description: `time duration between scanner cycles, defaults to '1m'`,
            Optional:    true,
            Type:        "duration",
        },
    }
)

@ -94,5 +108,10 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
    if err != nil {
        return cfg, err
    }

    cfg.Cycle, err = time.ParseDuration(env.Get(EnvCycle, kvs.Get(Cycle)))
    if err != nil {
        return cfg, err
    }
    return cfg, nil
}
@ -32,6 +32,7 @@ import (

    "github.com/minio/minio/cmd/config/heal"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/cmd/logger/message/audit"
    "github.com/minio/minio/pkg/bucket/lifecycle"
    "github.com/minio/minio/pkg/bucket/replication"
    "github.com/minio/minio/pkg/color"

@ -43,9 +44,9 @@ import (
)

const (
    dataScannerSleepPerFolder = time.Millisecond // Time to wait between folders.
    dataScannerStartDelay     = 1 * time.Minute  // Time to wait on startup and between cycles.
    dataUsageUpdateDirCycles  = 16               // Visit all folders every n cycles.
    dataScannerSleepPerFolder = 20 * time.Millisecond // Time to wait between folders.
    dataScannerStartDelay     = 1 * time.Minute       // Time to wait on startup and between cycles.
    dataUsageUpdateDirCycles  = 16                    // Visit all folders every n cycles.

    healDeleteDangling    = true
    healFolderIncludeProb = 32 // Include a clean folder one in n cycles.

@ -59,6 +60,9 @@ var (
    dataScannerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
    // Sleeper values are updated when config is loaded.
    scannerSleeper = newDynamicSleeper(10, 10*time.Second)
    scannerCycle   = &safeDuration{
        t: dataScannerStartDelay,
    }
)

// initDataScanner will start the scanner in the background.

@ -66,20 +70,39 @@ func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
    go runDataScanner(ctx, objAPI)
}

type safeDuration struct {
    sync.Mutex
    t time.Duration
}

func (s *safeDuration) Update(t time.Duration) {
    s.Lock()
    defer s.Unlock()
    s.t = t
}

func (s *safeDuration) Get() time.Duration {
    s.Lock()
    defer s.Unlock()
    return s.t
}

// runDataScanner will start a data scanner.
// The function will block until the context is canceled.
// There should only ever be one scanner running per cluster.
func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
    var err error
func runDataScanner(pctx context.Context, objAPI ObjectLayer) {
    // Make sure only 1 scanner is running on the cluster.
    locker := objAPI.NewNSLock(minioMetaBucket, "runDataScanner.lock")
    var ctx context.Context
    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    for {
        ctx, err = locker.GetLock(ctx, dataScannerLeaderLockTimeout)
        lkctx, err := locker.GetLock(pctx, dataScannerLeaderLockTimeout)
        if err != nil {
            time.Sleep(time.Duration(r.Float64() * float64(dataScannerStartDelay)))
            time.Sleep(time.Duration(r.Float64() * float64(scannerCycle.Get())))
            continue
        }
        ctx = lkctx.Context()
        defer lkctx.Cancel()
        break
        // No unlock for "leader" lock.
    }

@ -101,7 +124,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
        br.Close()
    }

    scannerTimer := time.NewTimer(dataScannerStartDelay)
    scannerTimer := time.NewTimer(scannerCycle.Get())
    defer scannerTimer.Stop()

    for {

@ -110,14 +133,14 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
            return
        case <-scannerTimer.C:
            // Reset the timer for next cycle.
            scannerTimer.Reset(dataScannerStartDelay)
            scannerTimer.Reset(scannerCycle.Get())

            if intDataUpdateTracker.debug {
                console.Debugln("starting scanner cycle")
            }

            // Wait before starting next cycle and wait on startup.
            results := make(chan DataUsageInfo, 1)
            results := make(chan madmin.DataUsageInfo, 1)
            go storeDataUsageInBackend(ctx, objAPI, results)
            bf, err := globalNotificationSys.updateBloomFilter(ctx, nextBloomCycle)
            logger.LogIf(ctx, err)

@ -233,7 +256,7 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
    }

    done := ctx.Done()
    var flattenLevels = 2
    const flattenLevels = 1

    if s.dataUsageScannerDebug {
        console.Debugf(logPrefix+"Cycle: %v, Entries: %v %s\n", cache.Info.NextCycle, len(cache.Cache), logSuffix)

@ -402,7 +425,13 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo

    err := readDirFn(path.Join(f.root, folder.name), func(entName string, typ os.FileMode) error {
        // Parse
        entName = path.Clean(path.Join(folder.name, entName))
        entName = pathClean(path.Join(folder.name, entName))
        if entName == "" {
            if f.dataUsageScannerDebug {
                console.Debugf(scannerLogPrefix+" no bucket (%s,%s)\n", f.root, entName)
            }
            return errDoneForNow
        }
        bucket, prefix := path2BucketObjectWithBasePath(f.root, entName)
        if bucket == "" {
            if f.dataUsageScannerDebug {

@ -425,6 +454,8 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
        }

        if typ&os.ModeDir != 0 {
            scannerSleeper.Sleep(ctx, dataScannerSleepPerFolder)

            h := hashPath(entName)
            _, exists := f.oldCache.Cache[h.Key()]
            cache.addChildString(entName)

@ -537,8 +568,6 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
            console.Debugf(scannerLogPrefix+" checking disappeared folder: %v/%v\n", bucket, prefix)
        }

        // Dynamic time delay.
        wait := scannerSleeper.Timer(ctx)
        resolver.bucket = bucket

        foundObjs := false

@ -567,9 +596,6 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
            // agreed value less than expected quorum
            dangling = nAgreed < resolver.objQuorum || nAgreed < resolver.dirQuorum

            // Sleep and reset.
            wait()
            wait = scannerSleeper.Timer(ctx)
            entry, ok := entries.resolve(&resolver)
            if !ok {
                for _, err := range errs {

@ -589,9 +615,14 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
            if entry.isDir() {
                return
            }

            // wait on timer per object.
            wait := scannerSleeper.Timer(ctx)

            // We got an entry which we should be able to heal.
            fiv, err := entry.fileInfoVersions(bucket)
            if err != nil {
                wait()
                err := bgSeq.queueHealTask(healSource{
                    bucket: bucket,
                    object: entry.name,

@ -603,10 +634,12 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
                foundObjs = foundObjs || err == nil
                return
            }

            for _, ver := range fiv.Versions {
                // Sleep and reset.
                wait()
                wait = scannerSleeper.Timer(ctx)

                err := bgSeq.queueHealTask(healSource{
                    bucket: bucket,
                    object: fiv.Name,

@ -637,6 +670,9 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
            console.Debugf(healObjectsPrefix+" deleting dangling directory %s\n", prefix)
        }

        // wait on timer per object.
        wait := scannerSleeper.Timer(ctx)

        objAPI.HealObjects(ctx, bucket, prefix, madmin.HealOpts{
            Recursive: true,
            Remove:    healDeleteDangling,

@ -644,7 +680,6 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
            func(bucket, object, versionID string) error {
                // Wait for each heal as per scanner frequency.
                wait()
                wait = scannerSleeper.Timer(ctx)
                return bgSeq.queueHealTask(healSource{
                    bucket: bucket,
                    object: object,

@ -653,8 +688,6 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
        })
    }

    wait()

    // Add unless healing returned an error.
    if foundObjs {
        this := cachedFolder{name: k, parent: &thisHash, objectHealProbDiv: folder.objectHealProbDiv}

@ -773,6 +806,8 @@ type sizeSummary struct {
    pendingSize  int64
    failedSize   int64
    replicaSize  int64
    pendingCount uint64
    failedCount  uint64
}

type getSizeFn func(item scannerItem) (sizeSummary, error)

@ -797,42 +832,39 @@ type actionMeta struct {

var applyActionsLogPrefix = color.Green("applyActions:")

// applyActions will apply lifecycle checks on to a scanned item.
// The resulting size on disk will always be returned.
// The metadata will be compared to consensus on the object layer before any changes are applied.
// If no metadata is supplied, -1 is returned if no action is taken.
func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta actionMeta) (size int64) {
func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, meta actionMeta) (size int64) {
    if i.debug {
        if meta.oi.VersionID != "" {
            console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), meta.oi.VersionID)
        } else {
            console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
        }
    }
    healOpts := madmin.HealOpts{Remove: healDeleteDangling}
    if meta.bitRotScan {
        healOpts.ScanMode = madmin.HealDeepScan
    }
    res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, healOpts)
    if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
        return 0
    }
    if err != nil && !errors.Is(err, NotImplemented{}) {
        logger.LogIf(ctx, err)
        return 0
    }
    return res.ObjectSize
}

func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, meta actionMeta) (applied bool, size int64) {
    size, err := meta.oi.GetActualSize()
    if i.debug {
        logger.LogIf(ctx, err)
    }
    if i.heal {
        if i.debug {
            if meta.oi.VersionID != "" {
                console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), meta.oi.VersionID)
            } else {
                console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
            }
        }
        healOpts := madmin.HealOpts{Remove: healDeleteDangling}
        if meta.bitRotScan {
            healOpts.ScanMode = madmin.HealDeepScan
        }
        res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, healOpts)
        if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
            return 0
        }
        if err != nil && !errors.Is(err, NotImplemented{}) {
            logger.LogIf(ctx, err)
            return 0
        }
        size = res.ObjectSize
    }
    if i.lifeCycle == nil {
        if i.debug {
            console.Debugf(applyActionsLogPrefix+" no lifecycle rules to apply: %q\n", i.objectPath())
        }
        return size
        return false, size
    }

    versionID := meta.oi.VersionID

@ -866,7 +898,7 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta acti
    if i.debug {
        console.Debugf(applyActionsLogPrefix+" object not expirable: %q\n", i.objectPath())
    }
    return size
    return false, size
}

obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{

@ -878,19 +910,18 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta acti
        if !obj.DeleteMarker { // if this is not a delete marker log and return
            // Do nothing - heal in the future.
            logger.LogIf(ctx, err)
            return size
            return false, size
        }
    case ObjectNotFound, VersionNotFound:
        // object not found or version not found return 0
        return 0
        return false, 0
    default:
        // All other errors proceed.
        logger.LogIf(ctx, err)
        return size
        return false, size
    }
}

var applied bool
action = evalActionFromLifecycle(ctx, *i.lifeCycle, obj, i.debug)
if action != lifecycle.NoneAction {
    applied = applyLifecycleAction(ctx, action, o, obj)

@ -899,9 +930,30 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta acti
    if applied {
        switch action {
        case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
        default: // for all lifecycle actions that remove data
            return 0
            return true, size
        }
        // For all other lifecycle actions that remove data
        return true, 0
    }

    return false, size
}

// applyActions will apply lifecycle checks on to a scanned item.
// The resulting size on disk will always be returned.
// The metadata will be compared to consensus on the object layer before any changes are applied.
// If no metadata is supplied, -1 is returned if no action is taken.
func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta actionMeta, sizeS *sizeSummary) int64 {
    applied, size := i.applyLifecycle(ctx, o, meta)
    // An applied lifecycle means we removed/transitioned an object
    // from the current deployment, which means we don't have to call the
    // healing routine even if we are asked to do so via the heal flag.
    if !applied {
        if i.heal {
            size = i.applyHealing(ctx, o, meta)
        }
        // replicate only if lifecycle rules are not applied.
        i.healReplication(ctx, o, meta.oi.Clone(), sizeS)
    }
    return size
}

@ -1020,6 +1072,9 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
        return false
    }

    // Send audit for the lifecycle delete operation
    auditLogLifecycle(ctx, obj.Bucket, obj.Name)

    eventName := event.ObjectRemovedDelete
    if obj.DeleteMarker {
        eventName = event.ObjectRemovedDeleteMarkerCreated

@ -1075,11 +1130,13 @@ func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi Obj
    }
    switch oi.ReplicationStatus {
    case replication.Pending:
        sizeS.pendingCount++
        sizeS.pendingSize += oi.Size
        globalReplicationPool.queueReplicaTask(oi)
        globalReplicationPool.queueReplicaTask(ReplicateObjectInfo{ObjectInfo: oi, OpType: replication.HealReplicationType})
    case replication.Failed:
        sizeS.failedSize += oi.Size
        globalReplicationPool.queueReplicaTask(oi)
        sizeS.failedCount++
        globalReplicationPool.queueReplicaTask(ReplicateObjectInfo{ObjectInfo: oi, OpType: replication.HealReplicationType})
    case replication.Completed, "COMPLETE":
        sizeS.replicatedSize += oi.Size
    case replication.Replica:

@ -1235,3 +1292,13 @@ func (d *dynamicSleeper) Update(factor float64, maxWait time.Duration) error {
    d.cycle = make(chan struct{})
    return nil
}

func auditLogLifecycle(ctx context.Context, bucket, object string) {
    entry := audit.NewEntry(globalDeploymentID)
    entry.Trigger = "internal-scanner"
    entry.API.Name = "DeleteObject"
    entry.API.Bucket = bucket
    entry.API.Object = object
    ctx = logger.SetAuditEntry(ctx, &entry)
    logger.AuditLog(ctx, nil, nil, nil)
}
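scannerCycle above is a safeDuration: a mutex-guarded time.Duration that applyDynamicConfig can update while the scanner loop reads it between cycles. A self-contained copy of the idea (a sync.RWMutex or an atomic value would also work; the diff uses a plain Mutex):

package main

import (
    "fmt"
    "sync"
    "time"
)

type safeDuration struct {
    mu sync.Mutex
    t  time.Duration
}

func (s *safeDuration) Update(t time.Duration) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.t = t
}

func (s *safeDuration) Get() time.Duration {
    s.mu.Lock()
    defer s.mu.Unlock()
    return s.t
}

func main() {
    cycle := &safeDuration{t: time.Minute}
    timer := time.NewTimer(cycle.Get())
    defer timer.Stop()

    cycle.Update(30 * time.Second) // e.g. a config reload happened
    timer.Reset(cycle.Get())       // the next cycle picks up the new value
    fmt.Println("next cycle in", cycle.Get())
}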
@ -31,11 +31,9 @@ import (
    "sync"
    "time"

    "github.com/minio/minio/cmd/config"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/color"
    "github.com/minio/minio/pkg/console"
    "github.com/minio/minio/pkg/env"
    "github.com/willf/bloom"
)

@ -80,7 +78,7 @@ func newDataUpdateTracker() *dataUpdateTracker {
        Current: dataUpdateFilter{
            idx: 1,
        },
        debug:      env.Get(envDataUsageScannerDebug, config.EnableOff) == config.EnableOn || serverDebugLog,
        debug:      serverDebugLog,
        input:      make(chan string, dataUpdateTrackerQueueSize),
        save:       make(chan struct{}, 1),
        saveExited: make(chan struct{}),

@ -640,7 +638,7 @@ func (d *dataUpdateTracker) cycleFilter(ctx context.Context, req bloomFilterRequ

// splitPathDeterministic will split the provided relative path
// deterministically and return up to the first 3 elements of the path.
// Slash and dot prefixes are removed.
// slash and dot prefixes are removed.
// Trailing slashes are removed.
// Returns 0 length if no parts are found after trimming.
func splitPathDeterministic(in string) []string {
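The tracker in this file records changed paths in a Bloom filter (the github.com/willf/bloom import): adds are cheap, and membership tests can return false positives but never false negatives, which is acceptable for asking "did anything under this path change?". A minimal usage sketch; the filter parameters below are chosen arbitrarily for illustration:

package main

import (
    "fmt"

    "github.com/willf/bloom"
)

func main() {
    // m bits and k hash functions; sizing depends on the expected
    // number of entries and the tolerated false-positive rate.
    filter := bloom.New(20000000, 5)

    filter.Add([]byte("bucket/prefix/object1"))
    filter.Add([]byte("bucket/other/object2"))

    fmt.Println(filter.Test([]byte("bucket/prefix/object1"))) // true
    fmt.Println(filter.Test([]byte("bucket/never/seen")))     // false (almost surely)
}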
@ -32,6 +32,7 @@ import (
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/bucket/lifecycle"
    "github.com/minio/minio/pkg/hash"
    "github.com/minio/minio/pkg/madmin"
    "github.com/tinylib/msgp/msgp"
)

@ -45,15 +46,26 @@ type sizeHistogram [dataUsageBucketLen]uint64

//msgp:tuple dataUsageEntry
type dataUsageEntry struct {
    Children dataUsageHashMap
    // These fields do not include any children.
    Size                   int64
    ReplicatedSize         uint64
    ReplicationPendingSize uint64
    ReplicationFailedSize  uint64
    ReplicaSize            uint64
    Objects                uint64
    ObjSizes               sizeHistogram
    Children         dataUsageHashMap
    Size             int64
    Objects          uint64
    ObjSizes         sizeHistogram
    ReplicationStats replicationStats
}

//msgp:tuple replicationStats
type replicationStats struct {
    PendingSize          uint64
    ReplicatedSize       uint64
    FailedSize           uint64
    ReplicaSize          uint64
    FailedCount          uint64
    PendingCount         uint64
    MissedThresholdSize  uint64
    AfterThresholdSize   uint64
    MissedThresholdCount uint64
    AfterThresholdCount  uint64
}

//msgp:tuple dataUsageEntryV2

@ -65,20 +77,40 @@ type dataUsageEntryV2 struct {
    Children dataUsageHashMap
}

// dataUsageCache contains a cache of data usage entries latest version 3.
type dataUsageCache struct {
    Info  dataUsageCacheInfo
    Disks []string
    Cache map[string]dataUsageEntry
//msgp:tuple dataUsageEntryV3
type dataUsageEntryV3 struct {
    // These fields do not include any children.
    Size                   int64
    ReplicatedSize         uint64
    ReplicationPendingSize uint64
    ReplicationFailedSize  uint64
    ReplicaSize            uint64
    Objects                uint64
    ObjSizes               sizeHistogram
    Children               dataUsageHashMap
}

// dataUsageCache contains a cache of data usage entries version 2.
// dataUsageCache contains a cache of data usage entries latest version 4.
type dataUsageCache struct {
    Info  dataUsageCacheInfo
    Cache map[string]dataUsageEntry
    Disks []string
}

// dataUsageCacheV2 contains a cache of data usage entries version 2.
type dataUsageCacheV2 struct {
    Info  dataUsageCacheInfo
    Disks []string
    Cache map[string]dataUsageEntryV2
}

// dataUsageCacheV3 contains a cache of data usage entries version 3.
type dataUsageCacheV3 struct {
    Info  dataUsageCacheInfo
    Disks []string
    Cache map[string]dataUsageEntryV3
}

//msgp:ignore dataUsageEntryInfo
type dataUsageEntryInfo struct {
    Name string

@ -89,8 +121,8 @@ type dataUsageEntryInfo struct {
type dataUsageCacheInfo struct {
    // Name of the bucket. Also root element.
    Name       string
    LastUpdate time.Time
    NextCycle  uint32
    LastUpdate time.Time
    // indicates if the disk is being healed and scanner
    // should skip healing the disk
    SkipHealing bool

@ -100,20 +132,25 @@ type dataUsageCacheInfo struct {

func (e *dataUsageEntry) addSizes(summary sizeSummary) {
    e.Size += summary.totalSize
    e.ReplicatedSize += uint64(summary.replicatedSize)
    e.ReplicationFailedSize += uint64(summary.failedSize)
    e.ReplicationPendingSize += uint64(summary.pendingSize)
    e.ReplicaSize += uint64(summary.replicaSize)
    e.ReplicationStats.ReplicatedSize += uint64(summary.replicatedSize)
    e.ReplicationStats.FailedSize += uint64(summary.failedSize)
    e.ReplicationStats.PendingSize += uint64(summary.pendingSize)
    e.ReplicationStats.ReplicaSize += uint64(summary.replicaSize)
    e.ReplicationStats.PendingCount += uint64(summary.pendingCount)
    e.ReplicationStats.FailedCount += uint64(summary.failedCount)
}

// merge other data usage entry into this, excluding children.
func (e *dataUsageEntry) merge(other dataUsageEntry) {
    e.Objects += other.Objects
    e.Size += other.Size
    e.ReplicationPendingSize += other.ReplicationPendingSize
    e.ReplicationFailedSize += other.ReplicationFailedSize
    e.ReplicatedSize += other.ReplicatedSize
    e.ReplicaSize += other.ReplicaSize
    e.ReplicationStats.PendingSize += other.ReplicationStats.PendingSize
    e.ReplicationStats.FailedSize += other.ReplicationStats.FailedSize
    e.ReplicationStats.ReplicatedSize += other.ReplicationStats.ReplicatedSize
    e.ReplicationStats.ReplicaSize += other.ReplicationStats.ReplicaSize
    e.ReplicationStats.PendingCount += other.ReplicationStats.PendingCount
    e.ReplicationStats.FailedCount += other.ReplicationStats.FailedCount

    for i, v := range other.ObjSizes[:] {
        e.ObjSizes[i] += v

@ -238,25 +275,27 @@ func (d *dataUsageCache) keepRootChildren(list map[dataUsageHash]struct{}) {
    }
}

// dui converts the flattened version of the path to DataUsageInfo.
// dui converts the flattened version of the path to madmin.DataUsageInfo.
// As a side effect d will be flattened, use a clone if this is not ok.
func (d *dataUsageCache) dui(path string, buckets []BucketInfo) DataUsageInfo {
func (d *dataUsageCache) dui(path string, buckets []BucketInfo) madmin.DataUsageInfo {
    e := d.find(path)
    if e == nil {
        // No entry found, return empty.
        return DataUsageInfo{}
        return madmin.DataUsageInfo{}
    }
    flat := d.flatten(*e)
    return DataUsageInfo{
        LastUpdate:             d.Info.LastUpdate,
        ObjectsTotalCount:      flat.Objects,
        ObjectsTotalSize:       uint64(flat.Size),
        ReplicatedSize:         flat.ReplicatedSize,
        ReplicationFailedSize:  flat.ReplicationFailedSize,
        ReplicationPendingSize: flat.ReplicationPendingSize,
        ReplicaSize:            flat.ReplicaSize,
        BucketsCount:           uint64(len(e.Children)),
        BucketsUsage:           d.bucketsUsageInfo(buckets),
    return madmin.DataUsageInfo{
        LastUpdate:              d.Info.LastUpdate,
        ObjectsTotalCount:       flat.Objects,
        ObjectsTotalSize:        uint64(flat.Size),
        ReplicatedSize:          flat.ReplicationStats.ReplicatedSize,
        ReplicationFailedSize:   flat.ReplicationStats.FailedSize,
        ReplicationPendingSize:  flat.ReplicationStats.PendingSize,
        ReplicaSize:             flat.ReplicationStats.ReplicaSize,
        ReplicationPendingCount: flat.ReplicationStats.PendingCount,
        ReplicationFailedCount:  flat.ReplicationStats.FailedCount,
        BucketsCount:            uint64(len(e.Children)),
        BucketsUsage:            d.bucketsUsageInfo(buckets),
    }
}

@ -373,22 +412,24 @@ func (h *sizeHistogram) toMap() map[string]uint64 {

// bucketsUsageInfo returns the buckets usage info as a map, with
// key as bucket name
func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]BucketUsageInfo {
    var dst = make(map[string]BucketUsageInfo, len(buckets))
func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]madmin.BucketUsageInfo {
    var dst = make(map[string]madmin.BucketUsageInfo, len(buckets))
    for _, bucket := range buckets {
        e := d.find(bucket.Name)
        if e == nil {
            continue
        }
        flat := d.flatten(*e)
        dst[bucket.Name] = BucketUsageInfo{
            Size:                   uint64(flat.Size),
            ObjectsCount:           flat.Objects,
            ReplicationPendingSize: flat.ReplicationPendingSize,
            ReplicatedSize:         flat.ReplicatedSize,
            ReplicationFailedSize:  flat.ReplicationFailedSize,
            ReplicaSize:            flat.ReplicaSize,
            ObjectSizesHistogram:   flat.ObjSizes.toMap(),
        dst[bucket.Name] = madmin.BucketUsageInfo{
            Size:                    uint64(flat.Size),
            ObjectsCount:            flat.Objects,
            ReplicationPendingSize:  flat.ReplicationStats.PendingSize,
            ReplicatedSize:          flat.ReplicationStats.ReplicatedSize,
            ReplicationFailedSize:   flat.ReplicationStats.FailedSize,
            ReplicationPendingCount: flat.ReplicationStats.PendingCount,
            ReplicationFailedCount:  flat.ReplicationStats.FailedCount,
            ReplicaSize:             flat.ReplicationStats.ReplicaSize,
            ObjectSizesHistogram:    flat.ObjSizes.toMap(),
        }
    }
    return dst

@ -396,20 +437,22 @@ func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]Bucke

// bucketUsageInfo returns the buckets usage info.
// If not found all values returned are zero values.
func (d *dataUsageCache) bucketUsageInfo(bucket string) BucketUsageInfo {
func (d *dataUsageCache) bucketUsageInfo(bucket string) madmin.BucketUsageInfo {
    e := d.find(bucket)
    if e == nil {
        return BucketUsageInfo{}
        return madmin.BucketUsageInfo{}
    }
    flat := d.flatten(*e)
    return BucketUsageInfo{
        Size:                   uint64(flat.Size),
        ObjectsCount:           flat.Objects,
        ReplicationPendingSize: flat.ReplicationPendingSize,
        ReplicatedSize:         flat.ReplicatedSize,
        ReplicationFailedSize:  flat.ReplicationFailedSize,
        ReplicaSize:            flat.ReplicaSize,
        ObjectSizesHistogram:   flat.ObjSizes.toMap(),
    return madmin.BucketUsageInfo{
        Size:                    uint64(flat.Size),
        ObjectsCount:            flat.Objects,
        ReplicationPendingSize:  flat.ReplicationStats.PendingSize,
        ReplicationPendingCount: flat.ReplicationStats.PendingCount,
        ReplicatedSize:          flat.ReplicationStats.ReplicatedSize,
        ReplicationFailedSize:   flat.ReplicationStats.FailedSize,
        ReplicationFailedCount:  flat.ReplicationStats.FailedCount,
        ReplicaSize:             flat.ReplicationStats.ReplicaSize,
        ObjectSizesHistogram:    flat.ObjSizes.toMap(),
    }
}

@ -485,7 +528,7 @@ type objectIO interface {
// Only backend errors are returned as errors.
// If the object is not found or unable to deserialize d is cleared and nil error is returned.
func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
    r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, noLock, ObjectOptions{})
    r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, readLock, ObjectOptions{})
    if err != nil {
        switch err.(type) {
        case ObjectNotFound:

@ -522,7 +565,7 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
        dataUsageBucket,
        name,
        NewPutObjReader(r),
        ObjectOptions{NoLock: true})
        ObjectOptions{})
    if isErrBucketNotFound(err) {
        return nil
    }

@ -533,6 +576,7 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
// Bumping the cache version will drop data from previous versions
// and write new data with the new version.
const (
    dataUsageCacheVerV4 = 4
    dataUsageCacheVerV3 = 3
    dataUsageCacheVerV2 = 2
    dataUsageCacheVerV1 = 1

@ -541,7 +585,7 @@ const (
// serialize the contents of the cache.
func (d *dataUsageCache) serializeTo(dst io.Writer) error {
    // Add version and compress.
    _, err := dst.Write([]byte{dataUsageCacheVerV3})
    _, err := dst.Write([]byte{dataUsageCacheVerV4})
    if err != nil {
        return err
    }

@ -609,6 +653,35 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
            return err
        }
        defer dec.Close()
        dold := &dataUsageCacheV3{}
        if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
            return err
        }
        d.Info = dold.Info
        d.Disks = dold.Disks
        d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
        for k, v := range dold.Cache {
            d.Cache[k] = dataUsageEntry{
                Size:     v.Size,
                Objects:  v.Objects,
                ObjSizes: v.ObjSizes,
                Children: v.Children,
                ReplicationStats: replicationStats{
                    ReplicatedSize: v.ReplicatedSize,
                    ReplicaSize:    v.ReplicaSize,
                    FailedSize:     v.ReplicationFailedSize,
                    PendingSize:    v.ReplicationPendingSize,
                },
            }
        }
        return nil
    case dataUsageCacheVerV4:
        // Zstd compressed.
        dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
        if err != nil {
            return err
        }
        defer dec.Close()

        return d.DecodeMsg(msgp.NewReader(dec))
    }
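The cache above serializes as a single version byte followed by a zstd-compressed msgp payload; on read, older versions are migrated field by field (as the new V3 branch does) and only the current version decodes directly. A toy version of the same scheme, using gob instead of msgp and zstd purely to stay self-contained; the types and version constants are ours, not MinIO's:

package main

import (
    "bytes"
    "encoding/gob"
    "fmt"
)

type entryV1 struct{ Size int64 }

type entry struct {
    Size    int64
    Objects uint64 // field added in v2
}

const (
    verV1 = 1
    verV2 = 2
)

func serialize(e entry) ([]byte, error) {
    var buf bytes.Buffer
    buf.WriteByte(verV2) // always write the current version
    if err := gob.NewEncoder(&buf).Encode(e); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}

func deserialize(b []byte) (entry, error) {
    if len(b) == 0 {
        return entry{}, fmt.Errorf("empty input")
    }
    r := bytes.NewReader(b[1:])
    switch b[0] {
    case verV1: // migrate the old on-disk layout
        var old entryV1
        if err := gob.NewDecoder(r).Decode(&old); err != nil {
            return entry{}, err
        }
        return entry{Size: old.Size}, nil
    case verV2:
        var e entry
        err := gob.NewDecoder(r).Decode(&e)
        return e, err
    }
    return entry{}, fmt.Errorf("unknown version %d", b[0])
}

func main() {
    b, _ := serialize(entry{Size: 42, Objects: 7})
    fmt.Println(deserialize(b)) // {42 7} <nil>

    var old bytes.Buffer
    old.WriteByte(verV1)
    gob.NewEncoder(&old).Encode(entryV1{Size: 9})
    fmt.Println(deserialize(old.Bytes())) // {9 0} <nil>: migrated, Objects zeroed
}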
(File diff suppressed because it is too large.)
@ -348,6 +348,119 @@ func BenchmarkDecodedataUsageCacheV2(b *testing.B) {
    }
}

func TestMarshalUnmarshaldataUsageCacheV3(t *testing.T) {
    v := dataUsageCacheV3{}
    bts, err := v.MarshalMsg(nil)
    if err != nil {
        t.Fatal(err)
    }
    left, err := v.UnmarshalMsg(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
    }

    left, err = msgp.Skip(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
    }
}

func BenchmarkMarshalMsgdataUsageCacheV3(b *testing.B) {
    v := dataUsageCacheV3{}
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.MarshalMsg(nil)
    }
}

func BenchmarkAppendMsgdataUsageCacheV3(b *testing.B) {
    v := dataUsageCacheV3{}
    bts := make([]byte, 0, v.Msgsize())
    bts, _ = v.MarshalMsg(bts[0:0])
    b.SetBytes(int64(len(bts)))
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        bts, _ = v.MarshalMsg(bts[0:0])
    }
}

func BenchmarkUnmarshaldataUsageCacheV3(b *testing.B) {
    v := dataUsageCacheV3{}
    bts, _ := v.MarshalMsg(nil)
    b.ReportAllocs()
    b.SetBytes(int64(len(bts)))
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, err := v.UnmarshalMsg(bts)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func TestEncodeDecodedataUsageCacheV3(t *testing.T) {
    v := dataUsageCacheV3{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)

    m := v.Msgsize()
    if buf.Len() > m {
        t.Log("WARNING: TestEncodeDecodedataUsageCacheV3 Msgsize() is inaccurate")
    }

    vn := dataUsageCacheV3{}
    err := msgp.Decode(&buf, &vn)
    if err != nil {
        t.Error(err)
    }

    buf.Reset()
    msgp.Encode(&buf, &v)
    err = msgp.NewReader(&buf).Skip()
    if err != nil {
        t.Error(err)
    }
}

func BenchmarkEncodedataUsageCacheV3(b *testing.B) {
    v := dataUsageCacheV3{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    en := msgp.NewWriter(msgp.Nowhere)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.EncodeMsg(en)
    }
    en.Flush()
}

func BenchmarkDecodedataUsageCacheV3(b *testing.B) {
    v := dataUsageCacheV3{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    rd := msgp.NewEndlessReader(buf.Bytes(), b)
    dc := msgp.NewReader(rd)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        err := v.DecodeMsg(dc)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func TestMarshalUnmarshaldataUsageEntry(t *testing.T) {
    v := dataUsageEntry{}
    bts, err := v.MarshalMsg(nil)

@ -574,6 +687,232 @@ func BenchmarkDecodedataUsageEntryV2(b *testing.B) {
    }
}

func TestMarshalUnmarshaldataUsageEntryV3(t *testing.T) {
    v := dataUsageEntryV3{}
    bts, err := v.MarshalMsg(nil)
    if err != nil {
        t.Fatal(err)
    }
    left, err := v.UnmarshalMsg(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
    }

    left, err = msgp.Skip(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
    }
}

func BenchmarkMarshalMsgdataUsageEntryV3(b *testing.B) {
    v := dataUsageEntryV3{}
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.MarshalMsg(nil)
    }
}

func BenchmarkAppendMsgdataUsageEntryV3(b *testing.B) {
    v := dataUsageEntryV3{}
    bts := make([]byte, 0, v.Msgsize())
    bts, _ = v.MarshalMsg(bts[0:0])
    b.SetBytes(int64(len(bts)))
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        bts, _ = v.MarshalMsg(bts[0:0])
    }
}

func BenchmarkUnmarshaldataUsageEntryV3(b *testing.B) {
    v := dataUsageEntryV3{}
    bts, _ := v.MarshalMsg(nil)
    b.ReportAllocs()
    b.SetBytes(int64(len(bts)))
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, err := v.UnmarshalMsg(bts)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func TestEncodeDecodedataUsageEntryV3(t *testing.T) {
    v := dataUsageEntryV3{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)

    m := v.Msgsize()
    if buf.Len() > m {
        t.Log("WARNING: TestEncodeDecodedataUsageEntryV3 Msgsize() is inaccurate")
    }

    vn := dataUsageEntryV3{}
    err := msgp.Decode(&buf, &vn)
    if err != nil {
        t.Error(err)
    }

    buf.Reset()
    msgp.Encode(&buf, &v)
    err = msgp.NewReader(&buf).Skip()
    if err != nil {
        t.Error(err)
    }
}

func BenchmarkEncodedataUsageEntryV3(b *testing.B) {
    v := dataUsageEntryV3{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    en := msgp.NewWriter(msgp.Nowhere)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.EncodeMsg(en)
    }
    en.Flush()
}

func BenchmarkDecodedataUsageEntryV3(b *testing.B) {
    v := dataUsageEntryV3{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    rd := msgp.NewEndlessReader(buf.Bytes(), b)
    dc := msgp.NewReader(rd)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        err := v.DecodeMsg(dc)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func TestMarshalUnmarshalreplicationStats(t *testing.T) {
    v := replicationStats{}
    bts, err := v.MarshalMsg(nil)
    if err != nil {
        t.Fatal(err)
    }
    left, err := v.UnmarshalMsg(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
    }

    left, err = msgp.Skip(bts)
    if err != nil {
        t.Fatal(err)
    }
    if len(left) > 0 {
        t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
    }
}

func BenchmarkMarshalMsgreplicationStats(b *testing.B) {
    v := replicationStats{}
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.MarshalMsg(nil)
    }
}

func BenchmarkAppendMsgreplicationStats(b *testing.B) {
    v := replicationStats{}
    bts := make([]byte, 0, v.Msgsize())
    bts, _ = v.MarshalMsg(bts[0:0])
    b.SetBytes(int64(len(bts)))
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        bts, _ = v.MarshalMsg(bts[0:0])
    }
}

func BenchmarkUnmarshalreplicationStats(b *testing.B) {
    v := replicationStats{}
    bts, _ := v.MarshalMsg(nil)
    b.ReportAllocs()
    b.SetBytes(int64(len(bts)))
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, err := v.UnmarshalMsg(bts)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func TestEncodeDecodereplicationStats(t *testing.T) {
    v := replicationStats{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)

    m := v.Msgsize()
    if buf.Len() > m {
        t.Log("WARNING: TestEncodeDecodereplicationStats Msgsize() is inaccurate")
    }

    vn := replicationStats{}
    err := msgp.Decode(&buf, &vn)
    if err != nil {
        t.Error(err)
    }

    buf.Reset()
    msgp.Encode(&buf, &v)
    err = msgp.NewReader(&buf).Skip()
    if err != nil {
        t.Error(err)
    }
}

func BenchmarkEncodereplicationStats(b *testing.B) {
    v := replicationStats{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    en := msgp.NewWriter(msgp.Nowhere)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        v.EncodeMsg(en)
    }
    en.Flush()
}

func BenchmarkDecodereplicationStats(b *testing.B) {
    v := replicationStats{}
    var buf bytes.Buffer
    msgp.Encode(&buf, &v)
    b.SetBytes(int64(buf.Len()))
    rd := msgp.NewEndlessReader(buf.Bytes(), b)
    dc := msgp.NewReader(rd)
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        err := v.DecodeMsg(dc)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func TestMarshalUnmarshalsizeHistogram(t *testing.T) {
    v := sizeHistogram{}
    bts, err := v.MarshalMsg(nil)
@ -25,11 +25,10 @@ import (
    jsoniter "github.com/json-iterator/go"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/hash"
    "github.com/minio/minio/pkg/madmin"
)

const (
    envDataUsageScannerDebug = "MINIO_DISK_USAGE_SCANNER_DEBUG"

    dataUsageRoot   = SlashSeparator
    dataUsageBucket = minioMetaBucket + SlashSeparator + bucketMetaPrefix

@ -39,7 +38,7 @@ const (
)

// storeDataUsageInBackend will store all objects sent on the gui channel until closed.
func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan DataUsageInfo) {
func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan madmin.DataUsageInfo) {
    for dataUsageInfo := range dui {
        dataUsageJSON, err := json.Marshal(dataUsageInfo)
        if err != nil {

@ -59,27 +58,27 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
    }
}

func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) {
func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (madmin.DataUsageInfo, error) {
    r, err := objAPI.GetObjectNInfo(ctx, dataUsageBucket, dataUsageObjName, nil, http.Header{}, readLock, ObjectOptions{})
    if err != nil {
        if isErrObjectNotFound(err) || isErrBucketNotFound(err) {
            return DataUsageInfo{}, nil
            return madmin.DataUsageInfo{}, nil
        }
        return DataUsageInfo{}, toObjectErr(err, dataUsageBucket, dataUsageObjName)
        return madmin.DataUsageInfo{}, toObjectErr(err, dataUsageBucket, dataUsageObjName)
    }
    defer r.Close()

    var dataUsageInfo DataUsageInfo
    var dataUsageInfo madmin.DataUsageInfo
    var json = jsoniter.ConfigCompatibleWithStandardLibrary
    if err = json.NewDecoder(r).Decode(&dataUsageInfo); err != nil {
        return DataUsageInfo{}, err
        return madmin.DataUsageInfo{}, err
    }

    // For forward compatibility reasons, we need to add this code.
    if len(dataUsageInfo.BucketsUsage) == 0 {
        dataUsageInfo.BucketsUsage = make(map[string]BucketUsageInfo, len(dataUsageInfo.BucketSizes))
        dataUsageInfo.BucketsUsage = make(map[string]madmin.BucketUsageInfo, len(dataUsageInfo.BucketSizes))
        for bucket, size := range dataUsageInfo.BucketSizes {
            dataUsageInfo.BucketsUsage[bucket] = BucketUsageInfo{Size: size}
            dataUsageInfo.BucketsUsage[bucket] = madmin.BucketUsageInfo{Size: size}
        }
    }
@ -435,13 +435,13 @@ func (c *diskCache) Stat(ctx context.Context, bucket, object string) (oi ObjectI
// statCachedMeta returns metadata from cache - including ranges cached, partial to indicate
// if partial object is cached.
func (c *diskCache) statCachedMeta(ctx context.Context, cacheObjPath string) (meta *cacheMeta, partial bool, numHits int, err error) {

    cLock := c.NewNSLockFn(cacheObjPath)
    if ctx, err = cLock.GetRLock(ctx, globalOperationTimeout); err != nil {
    lkctx, err := cLock.GetRLock(ctx, globalOperationTimeout)
    if err != nil {
        return
    }

    defer cLock.RUnlock()
    ctx = lkctx.Context()
    defer cLock.RUnlock(lkctx.Cancel)
    return c.statCache(ctx, cacheObjPath)
}

@ -515,14 +515,14 @@ func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *c
// saves object metadata to disk cache
// incHitsOnly is true if metadata update is incrementing only the hit counter
func (c *diskCache) SaveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64, rs *HTTPRangeSpec, rsFileName string, incHitsOnly bool) error {
    var err error
    cachedPath := getCacheSHADir(c.dir, bucket, object)
    cLock := c.NewNSLockFn(cachedPath)
    ctx, err = cLock.GetLock(ctx, globalOperationTimeout)
    lkctx, err := cLock.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return err
    }
    defer cLock.Unlock()
    ctx = lkctx.Context()
    defer cLock.Unlock(lkctx.Cancel)
    return c.saveMetadata(ctx, bucket, object, meta, actualSize, rs, rsFileName, incHitsOnly)
}

@ -696,11 +696,12 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read
    }
    cachePath := getCacheSHADir(c.dir, bucket, object)
    cLock := c.NewNSLockFn(cachePath)
    ctx, err = cLock.GetLock(ctx, globalOperationTimeout)
    lkctx, err := cLock.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return oi, err
    }
    defer cLock.Unlock()
    ctx = lkctx.Context()
    defer cLock.Unlock(lkctx.Cancel)

    meta, _, numHits, err := c.statCache(ctx, cachePath)
    // Case where object not yet cached

@ -911,12 +912,13 @@ func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, of
func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, numHits int, err error) {
    cacheObjPath := getCacheSHADir(c.dir, bucket, object)
    cLock := c.NewNSLockFn(cacheObjPath)
    ctx, err = cLock.GetRLock(ctx, globalOperationTimeout)
    lkctx, err := cLock.GetRLock(ctx, globalOperationTimeout)
    if err != nil {
        return nil, numHits, err
    }
    ctx = lkctx.Context()
    defer cLock.RUnlock(lkctx.Cancel)

    defer cLock.RUnlock()
    var objInfo ObjectInfo
    var rngInfo RangeInfo
    if objInfo, rngInfo, numHits, err = c.statRange(ctx, bucket, object, rs); err != nil {

@ -976,11 +978,11 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
// Deletes the cached object
func (c *diskCache) delete(ctx context.Context, cacheObjPath string) (err error) {
    cLock := c.NewNSLockFn(cacheObjPath)
    _, err = cLock.GetLock(ctx, globalOperationTimeout)
    lkctx, err := cLock.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return err
    }
    defer cLock.Unlock()
    defer cLock.Unlock(lkctx.Cancel)
    return removeAll(cacheObjPath)
}
|
|
|
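Nearly every hunk in this file (and in the erasure/fs files below) performs the same mechanical conversion: GetLock/GetRLock no longer rewrite the caller's ctx in place, they return a lock context whose Cancel must be handed back to Unlock/RUnlock. A minimal sketch of the shape of that API, with hypothetical stand-in types (the real definitions live in MinIO's lock package and are not part of this diff):

package main

import (
    "context"
    "fmt"
    "time"
)

// Hypothetical stand-in for the lock context returned by the new API: it
// carries the lock-scoped context plus the cancel func that must be handed
// back to Unlock/RUnlock.
type LockContext struct {
    ctx    context.Context
    Cancel context.CancelFunc
}

func (l LockContext) Context() context.Context { return l.ctx }

type nsLock struct{}

// GetLock sketches the acquire side: the real code blocks until the lock is
// granted or the timeout expires; here we only derive the scoped context.
func (n *nsLock) GetLock(ctx context.Context, timeout time.Duration) (LockContext, error) {
    lockCtx, cancel := context.WithCancel(ctx)
    return LockContext{ctx: lockCtx, Cancel: cancel}, nil
}

// Unlock releases the lock and tears down the context derived at acquire time.
func (n *nsLock) Unlock(cancel context.CancelFunc) {
    if cancel != nil {
        cancel()
    }
}

func main() {
    lk := &nsLock{}
    lkctx, err := lk.GetLock(context.Background(), time.Second)
    if err != nil {
        return
    }
    ctx := lkctx.Context()        // continue under the lock-scoped context
    defer lk.Unlock(lkctx.Cancel) // the cancel travels with the unlock
    fmt.Println(ctx.Err())        // <nil> while the lock is held
}

Tying the cancel func to the unlock call is what lets the lock layer tear down its own resources even when a caller forgets to use the returned context.
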
@ -31,7 +31,6 @@ type parallelReader struct {
    readers       []io.ReaderAt
    orgReaders    []io.ReaderAt
    dataBlocks    int
    errs          []error
    offset        int64
    shardSize     int64
    shardFileSize int64

@ -48,7 +47,6 @@ func newParallelReader(readers []io.ReaderAt, e Erasure, offset, totalLength int
    return &parallelReader{
        readers:    readers,
        orgReaders: readers,
        errs:       make([]error, len(readers)),
        dataBlocks: e.dataBlocks,
        offset:     (offset / e.blockSize) * e.ShardSize(),
        shardSize:  e.ShardSize(),

@ -172,7 +170,6 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
            // This will be communicated upstream.
            p.orgReaders[bufIdx] = nil
            p.readers[i] = nil
            p.errs[i] = err

            // Since ReadAt returned error, trigger another read.
            readTriggerCh <- true

@ -197,7 +194,8 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
        return newBuf, nil
    }

    return nil, reduceReadQuorumErrs(context.Background(), p.errs, objectOpIgnoredErrs, p.dataBlocks)
    // If we cannot decode, just return read quorum error.
    return nil, errErasureReadQuorum
}

// Decode reads from readers, reconstructs data if needed and writes the data to the writer.

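The final hunk drops the per-error bookkeeping (the errs slice disappears from parallelReader) and returns errErasureReadQuorum directly once decoding is impossible. For reference, a hedged sketch of what quorum-style error reduction generally does; this is a simplification, not the actual reduceReadQuorumErrs helper:

package main

import (
    "errors"
    "fmt"
)

var errErasureReadQuorum = errors.New("Read failed. Insufficient number of disks online")

// reduceQuorumErrs is a simplified sketch of the pattern: if at least
// `quorum` drives agree on an outcome (including success, i.e. a nil
// error), surface that outcome; otherwise report a quorum failure.
func reduceQuorumErrs(errs []error, quorum int) error {
    counts := make(map[string]int)
    var nilCount int
    for _, err := range errs {
        if err == nil {
            nilCount++
            continue
        }
        counts[err.Error()]++
    }
    if nilCount >= quorum {
        return nil
    }
    for msg, n := range counts {
        if n >= quorum {
            return errors.New(msg)
        }
    }
    return errErasureReadQuorum
}

func main() {
    errs := []error{nil, errors.New("disk not found"), errors.New("disk not found"), nil}
    fmt.Println(reduceQuorumErrs(errs, 2)) // <nil>: two drives succeeded
}
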
@ -162,7 +162,6 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
    object string, scanMode madmin.HealScanMode) ([]StorageAPI, []error) {
    availableDisks := make([]StorageAPI, len(onlineDisks))
    dataErrs := make([]error, len(onlineDisks))

    inconsistent := 0
    for i, meta := range partsMetadata {
        if !meta.IsValid() {

@ -234,6 +233,9 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
        if dataErrs[i] == nil {
            // All parts verified, mark it as all data available.
            availableDisks[i] = onlineDisk
        } else {
            // upon errors just make that disk's fileinfo invalid
            partsMetadata[i] = FileInfo{}
        }
    }

@ -251,18 +251,36 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s

    if !opts.NoLock {
        lk := er.NewNSLock(bucket, object)
        if ctx, err = lk.GetLock(ctx, globalOperationTimeout); err != nil {
        lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
        if err != nil {
            return result, err
        }
        defer lk.Unlock()
        ctx = lkctx.Context()
        defer lk.Unlock(lkctx.Cancel)
    }

    // List of disks having latest version of the object er.meta
    // (by modtime).
    latestDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
    _, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

    // List of disks having all parts as per latest er.meta.
    availableDisks, dataErrs := disksWithAllParts(ctx, latestDisks, partsMetadata, errs, bucket, object, scanMode)
    // make sure all parts metadata dataDir is same as returned by listOnlineDisks()
    // the reason is its possible that some of the disks might have stale data, for those
    // we simply override them with maximally occurring 'dataDir' - this ensures that
    // disksWithAllParts() verifies same dataDir across all drives.
    for i := range partsMetadata {
        partsMetadata[i].DataDir = lfi.DataDir
    }

    // List of disks having all parts as per latest metadata.
    // NOTE: do not pass in latestDisks to diskWithAllParts since
    // the diskWithAllParts needs to reach the drive to ensure
    // validity of the metadata content, we should make sure that
    // we pass in disks as is for it to be verified. Once verified
    // the disksWithAllParts() returns the actual disks that can be
    // used here for reconstruction. This is done to ensure that
    // we do not skip drives that have inconsistent metadata to be
    // skipped from purging when they are stale.
    availableDisks, dataErrs := disksWithAllParts(ctx, storageDisks, partsMetadata, errs, bucket, object, scanMode)

    // Loop to find number of disks with valid data, per-drive
    // data state and a list of outdated disks on which data needs

@ -379,12 +397,12 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
        dataDir = migrateDataDir
    }

    if !latestMeta.Deleted || latestMeta.TransitionStatus != lifecycle.TransitionComplete {
    if !latestMeta.Deleted && latestMeta.TransitionStatus != lifecycle.TransitionComplete {
        result.DataBlocks = latestMeta.Erasure.DataBlocks
        result.ParityBlocks = latestMeta.Erasure.ParityBlocks

        // Reorder so that we have data disks first and parity disks next.
        latestDisks = shuffleDisks(availableDisks, latestMeta.Erasure.Distribution)
        latestDisks := shuffleDisks(availableDisks, latestMeta.Erasure.Distribution)
        outDatedDisks = shuffleDisks(outDatedDisks, latestMeta.Erasure.Distribution)
        partsMetadata = shufflePartsMetadata(partsMetadata, latestMeta.Erasure.Distribution)

@ -768,6 +786,7 @@ func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (valid
            corruptedErasureMeta++
        }
    }

    var notFoundParts int
    for i := range dataErrs {
        // Only count part errors, if the error is not

@ -135,7 +135,8 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, ve
            errFileVersionNotFound,
            errDiskNotFound,
        }...) {
            logger.LogOnceIf(ctx, fmt.Errorf("Drive %s returned an error (%w)", disks[index], err),
            logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)",
                disks[index], bucket, object, err),
                disks[index].String())
        }
    }

@ -322,8 +322,12 @@ func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix
            return errDiskNotFound
        }
        // Pick one FileInfo for a disk at index.
        files[index].Erasure.Index = index + 1
        return disks[index].WriteMetadata(ctx, bucket, prefix, files[index])
        fi := files[index]
        fi.Erasure.Index = index + 1
        if fi.IsValid() {
            return disks[index].WriteMetadata(ctx, bucket, prefix, fi)
        }
        return errCorruptedFormat
    }, index)
}

@ -344,12 +348,16 @@ func objectQuorumFromMeta(ctx context.Context, partsMetaData []FileInfo, errs []
        return 0, 0, err
    }

    dataBlocks := latestFileInfo.Erasure.DataBlocks
    if !latestFileInfo.IsValid() {
        return 0, 0, errErasureReadQuorum
    }

    parityBlocks := globalStorageClass.GetParityForSC(latestFileInfo.Metadata[xhttp.AmzStorageClass])
    if parityBlocks <= 0 {
        parityBlocks = defaultParityCount
    }

    dataBlocks := len(partsMetaData) - parityBlocks
    writeQuorum := dataBlocks
    if dataBlocks == parityBlocks {
        writeQuorum++

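The objectQuorumFromMeta hunk stops trusting Erasure.DataBlocks read from possibly invalid metadata: parity now comes from the storage class (or the default), and data is inferred as drives minus parity. The resulting arithmetic, worked through for a 16-drive set (a sketch of the calculation only, not the full function):

package main

import "fmt"

// writeQuorumFor mirrors the logic in the hunk above: data = N - parity,
// and when data == parity a bare majority needs one extra drive.
func writeQuorumFor(totalDrives, parityBlocks int) (dataBlocks, writeQuorum int) {
    dataBlocks = totalDrives - parityBlocks
    writeQuorum = dataBlocks
    if dataBlocks == parityBlocks {
        writeQuorum++
    }
    return dataBlocks, writeQuorum
}

func main() {
    // EC:4 on 16 drives: 12 data blocks, write quorum 12.
    fmt.Println(writeQuorumFor(16, 4))
    // EC:8 on 16 drives: data == parity, so the quorum bumps to 9.
    fmt.Println(writeQuorumFor(16, 8))
}
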
@ -384,21 +384,22 @@ func (er erasureObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObjec
// Implements S3 compatible Upload Part API.
func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, err error) {
    uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
    ctx, err = uploadIDLock.GetRLock(ctx, globalOperationTimeout)
    rlkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout)
    if err != nil {
        return PartInfo{}, err
    }
    rctx := rlkctx.Context()
    readLocked := true
    defer func() {
        if readLocked {
            uploadIDLock.RUnlock()
            uploadIDLock.RUnlock(rlkctx.Cancel)
        }
    }()

    data := r.Reader
    // Validate input data size and it can never be less than zero.
    if data.Size() < -1 {
        logger.LogIf(ctx, errInvalidArgument, logger.Application)
        logger.LogIf(rctx, errInvalidArgument, logger.Application)
        return pi, toObjectErr(errInvalidArgument)
    }

@ -407,23 +408,23 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
    uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)

    // Validates if upload ID exists.
    if err = er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
    if err = er.checkUploadIDExists(rctx, bucket, object, uploadID); err != nil {
        return pi, toObjectErr(err, bucket, object, uploadID)
    }

    storageDisks := er.getDisks()

    // Read metadata associated with the object from all disks.
    partsMetadata, errs = readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket,
    partsMetadata, errs = readAllFileInfo(rctx, storageDisks, minioMetaMultipartBucket,
        uploadIDPath, "", false)

    // get Quorum for this object
    _, writeQuorum, err := objectQuorumFromMeta(ctx, partsMetadata, errs, er.defaultParityCount)
    _, writeQuorum, err := objectQuorumFromMeta(rctx, partsMetadata, errs, er.defaultParityCount)
    if err != nil {
        return pi, toObjectErr(err, bucket, object)
    }

    reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
    reducedErr := reduceWriteQuorumErrs(rctx, errs, objectOpIgnoredErrs, writeQuorum)
    if reducedErr == errErasureWriteQuorum {
        return pi, toObjectErr(reducedErr, bucket, object)
    }

@ -432,7 +433,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
    onlineDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

    // Pick one from the first valid metadata.
    fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
    fi, err := pickValidFileInfo(rctx, partsMetadata, modTime, writeQuorum)
    if err != nil {
        return pi, err
    }

@ -454,7 +455,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
        }
    }()

    erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
    erasure, err := NewErasure(rctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
    if err != nil {
        return pi, toObjectErr(err, bucket, object)
    }

@ -491,9 +492,10 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
            erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
    }

    n, err := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
    n, err := erasure.Encode(rctx, data, writers, buffer, writeQuorum)
    closeBitrotWriters(writers)
    if err != nil {
        logger.LogIf(ctx, err)
        return pi, toObjectErr(err, bucket, object)
    }

@ -511,29 +513,31 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo

    // Unlock here before acquiring write locks all concurrent
    // PutObjectParts would serialize here updating `xl.meta`
    uploadIDLock.RUnlock()
    uploadIDLock.RUnlock(rlkctx.Cancel)
    readLocked = false
    ctx, err = uploadIDLock.GetLock(ctx, globalOperationTimeout)
    wlkctx, err := uploadIDLock.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return PartInfo{}, err
    }
    defer uploadIDLock.Unlock()
    wctx := wlkctx.Context()
    defer uploadIDLock.Unlock(wlkctx.Cancel)

    // Validates if upload ID exists.
    if err = er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
    if err = er.checkUploadIDExists(wctx, bucket, object, uploadID); err != nil {
        return pi, toObjectErr(err, bucket, object, uploadID)
    }

    // Rename temporary part file to its final location.
    partPath := pathJoin(uploadIDPath, fi.DataDir, partSuffix)
    onlineDisks, err = rename(ctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, false, writeQuorum, nil)
    onlineDisks, err = rename(wctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, false, writeQuorum, nil)
    if err != nil {
        logger.LogIf(ctx, err)
        return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
    }

    // Read metadata again because it might be updated with parallel upload of another part.
    partsMetadata, errs = readAllFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, "", false)
    reducedErr = reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
    partsMetadata, errs = readAllFileInfo(wctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, "", false)
    reducedErr = reduceWriteQuorumErrs(wctx, errs, objectOpIgnoredErrs, writeQuorum)
    if reducedErr == errErasureWriteQuorum {
        return pi, toObjectErr(reducedErr, bucket, object)
    }

@ -542,9 +546,10 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
    onlineDisks, modTime = listOnlineDisks(onlineDisks, partsMetadata, errs)

    // Pick one from the first valid metadata.
    fi, err = pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
    fi, err = pickValidFileInfo(wctx, partsMetadata, modTime, writeQuorum)
    if err != nil {
        return pi, err
        logger.LogIf(ctx, err)
        return pi, toObjectErr(err, bucket, object)
    }

    // Once part is successfully committed, proceed with updating erasure metadata.

@ -570,7 +575,8 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
    }

    // Writes update `xl.meta` format for each disk.
    if _, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
    if _, err = writeUniqueFileInfo(wctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
        logger.LogIf(ctx, err)
        return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
    }

@ -597,13 +603,13 @@ func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, u
        UploadID: uploadID,
    }

    var err error
    uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
    ctx, err = uploadIDLock.GetRLock(ctx, globalOperationTimeout)
    lkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout)
    if err != nil {
        return MultipartInfo{}, err
    }
    defer uploadIDLock.RUnlock()
    ctx = lkctx.Context()
    defer uploadIDLock.RUnlock(lkctx.Cancel)

    if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
        return result, toObjectErr(err, bucket, object, uploadID)

@ -648,11 +654,12 @@ func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, u
// replied back to the client.
func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) {
    uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
    ctx, err = uploadIDLock.GetRLock(ctx, globalOperationTimeout)
    lkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout)
    if err != nil {
        return ListPartsInfo{}, err
    }
    defer uploadIDLock.RUnlock()
    ctx = lkctx.Context()
    defer uploadIDLock.RUnlock(lkctx.Cancel)

    if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
        return result, toObjectErr(err, bucket, object, uploadID)

@ -742,19 +749,20 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
    // Hold read-locks to verify uploaded parts, also disallows
    // parallel part uploads as well.
    uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
    ctx, err = uploadIDLock.GetRLock(ctx, globalOperationTimeout)
    rlkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout)
    if err != nil {
        return oi, err
    }
    defer uploadIDLock.RUnlock()
    rctx := rlkctx.Context()
    defer uploadIDLock.RUnlock(rlkctx.Cancel)

    if err = er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
    if err = er.checkUploadIDExists(rctx, bucket, object, uploadID); err != nil {
        return oi, toObjectErr(err, bucket, object, uploadID)
    }

    // Check if an object is present as one of the parent dir.
    // -- FIXME. (needs a new kind of lock).
    if opts.ParentIsObject != nil && opts.ParentIsObject(ctx, bucket, path.Dir(object)) {
    if opts.ParentIsObject != nil && opts.ParentIsObject(rctx, bucket, path.Dir(object)) {
        return oi, toObjectErr(errFileParentIsFile, bucket, object)
    }

@ -768,15 +776,15 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
    storageDisks := er.getDisks()

    // Read metadata associated with the object from all disks.
    partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "", false)
    partsMetadata, errs := readAllFileInfo(rctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "", false)

    // get Quorum for this object
    _, writeQuorum, err := objectQuorumFromMeta(ctx, partsMetadata, errs, er.defaultParityCount)
    _, writeQuorum, err := objectQuorumFromMeta(rctx, partsMetadata, errs, er.defaultParityCount)
    if err != nil {
        return oi, toObjectErr(err, bucket, object)
    }

    reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
    reducedErr := reduceWriteQuorumErrs(rctx, errs, objectOpIgnoredErrs, writeQuorum)
    if reducedErr == errErasureWriteQuorum {
        return oi, toObjectErr(reducedErr, bucket, object)
    }

@ -878,8 +886,18 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
        }
    }

    // Hold namespace to complete the transaction
    lk := er.NewNSLock(bucket, object)
    lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return oi, err
    }
    ctx = lkctx.Context()
    defer lk.Unlock(lkctx.Cancel)

    // Write final `xl.meta` at uploadID location
    if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
    onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum)
    if err != nil {
        return oi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
    }

@ -896,14 +914,6 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
        }
    }

    // Hold namespace to complete the transaction
    lk := er.NewNSLock(bucket, object)
    ctx, err = lk.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return oi, err
    }
    defer lk.Unlock()

    // Rename the multipart object to final location.
    if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath,
        fi.DataDir, bucket, object, writeQuorum, nil); err != nil {

@ -941,11 +951,12 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
// operation.
func (er erasureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (err error) {
    lk := er.NewNSLock(bucket, pathJoin(object, uploadID))
    ctx, err = lk.GetLock(ctx, globalOperationTimeout)
    lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return err
    }
    defer lk.Unlock()
    ctx = lkctx.Context()
    defer lk.Unlock(lkctx.Cancel)

    // Validates if upload ID exists.
    if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {

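PutObjectPart above is the most involved conversion: the slow upload runs under a read lock (rctx), which is explicitly dropped before the write lock (wctx) is taken for the brief xl.meta commit. The same choreography reduced to a plain sync.RWMutex, including the readLocked flag that keeps the deferred RUnlock from firing after the upgrade:

package main

import (
    "fmt"
    "sync"
)

// Sketch of the read-then-write choreography in PutObjectPart: hold the
// shared lock for the slow part, release it, then take the exclusive lock
// only for the short commit.
func putPartSketch(mu *sync.RWMutex) {
    mu.RLock()
    readLocked := true
    defer func() {
        if readLocked { // skipped once we upgrade below
            mu.RUnlock()
        }
    }()

    // ... long-running work (upload + erasure-encode) under the read lock ...

    mu.RUnlock() // explicit downgrade point
    readLocked = false

    mu.Lock() // concurrent PutObjectParts serialize here
    defer mu.Unlock()
    // ... short critical section: re-read metadata and rewrite xl.meta ...
    fmt.Println("committed")
}

func main() {
    var mu sync.RWMutex
    putPartSketch(&mu)
}

The window between RUnlock and Lock is why the code re-reads all parts metadata after acquiring the write lock: another part upload may have committed in between.
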
@ -63,15 +63,16 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
    defer ObjectPathUpdated(pathJoin(dstBucket, dstObject))

    lk := er.NewNSLock(dstBucket, dstObject)
    ctx, err = lk.GetLock(ctx, globalOperationTimeout)
    lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return oi, err
    }
    defer lk.Unlock()
    ctx = lkctx.Context()
    defer lk.Unlock(lkctx.Cancel)

    // Read metadata associated with the object from all disks.
    storageDisks := er.getDisks()
    metaArr, errs := readAllFileInfo(ctx, storageDisks, srcBucket, srcObject, srcOpts.VersionID, false)
    metaArr, errs := readAllFileInfo(ctx, storageDisks, srcBucket, srcObject, srcOpts.VersionID, true)

    // get Quorum for this object
    readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)

@ -164,17 +165,19 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
        lock := er.NewNSLock(bucket, object)
        switch lockType {
        case writeLock:
            ctx, err = lock.GetLock(ctx, globalOperationTimeout)
            lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
            if err != nil {
                return nil, err
            }
            nsUnlocker = lock.Unlock
            ctx = lkctx.Context()
            nsUnlocker = func() { lock.Unlock(lkctx.Cancel) }
        case readLock:
            ctx, err = lock.GetRLock(ctx, globalOperationTimeout)
            lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
            if err != nil {
                return nil, err
            }
            nsUnlocker = lock.RUnlock
            ctx = lkctx.Context()
            nsUnlocker = func() { lock.RUnlock(lkctx.Cancel) }
        }
        unlockOnDefer = true
    }

@ -230,11 +233,12 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
func (er erasureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
    // Lock the object before reading.
    lk := er.NewNSLock(bucket, object)
    ctx, err = lk.GetRLock(ctx, globalOperationTimeout)
    lkctx, err := lk.GetRLock(ctx, globalOperationTimeout)
    if err != nil {
        return err
    }
    defer lk.RUnlock()
    ctx = lkctx.Context()
    defer lk.RUnlock(lkctx.Cancel)

    // Start offset cannot be negative.
    if startOffset < 0 {

@ -292,6 +296,11 @@ func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, obje
    }
    var healOnce sync.Once

    // once we have obtained a common FileInfo i.e latest, we should stick
    // to single dataDir to read the content to avoid reading from some other
    // dataDir that has stale FileInfo{} to ensure that we fail appropriately
    // during reads and expect the same dataDir everywhere.
    dataDir := fi.DataDir
    for ; partIndex <= lastPartIndex; partIndex++ {
        if length == totalBytesRead {
            break

@ -320,9 +329,8 @@ func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, obje
            continue
        }
        checksumInfo := metaArr[index].Erasure.GetChecksumInfo(partNumber)
        partPath := pathJoin(object, metaArr[index].DataDir, fmt.Sprintf("part.%d", partNumber))
        data := metaArr[index].Data
        readers[index] = newBitrotReader(disk, data, bucket, partPath, tillOffset,
        partPath := pathJoin(object, dataDir, fmt.Sprintf("part.%d", partNumber))
        readers[index] = newBitrotReader(disk, metaArr[index].Data, bucket, partPath, tillOffset,
            checksumInfo.Algorithm, checksumInfo.Hash, erasure.ShardSize())

        // Prefer local disks

@ -343,10 +351,12 @@ func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, obje
        var scan madmin.HealScanMode
        if errors.Is(err, errFileNotFound) {
            scan = madmin.HealNormalScan
            logger.Info("Healing required, attempting to heal missing shards for %s", pathJoin(bucket, object, fi.VersionID))
        } else if errors.Is(err, errFileCorrupt) {
            scan = madmin.HealDeepScan
            logger.Info("Healing required, attempting to heal bitrot for %s", pathJoin(bucket, object, fi.VersionID))
        }
        if scan != madmin.HealUnknownScan {
        if scan == madmin.HealNormalScan || scan == madmin.HealDeepScan {
            healOnce.Do(func() {
                if _, healing := er.getOnlineDisksWithHealing(); !healing {
                    go healObject(bucket, object, fi.VersionID, scan)

@ -395,11 +405,12 @@ func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object strin
    if !opts.NoLock {
        // Lock the object before reading.
        lk := er.NewNSLock(bucket, object)
        ctx, err = lk.GetRLock(ctx, globalOperationTimeout)
        lkctx, err := lk.GetRLock(ctx, globalOperationTimeout)
        if err != nil {
            return ObjectInfo{}, err
        }
        defer lk.RUnlock()
        ctx = lkctx.Context()
        defer lk.RUnlock(lkctx.Cancel)
    }

    return er.getObjectInfo(ctx, bucket, object, opts)

@ -447,6 +458,12 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
        return fi, nil, nil, err
    }

    // if one of the disk is offline, return right here no need
    // to attempt a heal on the object.
    if countErrs(errs, errDiskNotFound) > 0 {
        return fi, metaArr, onlineDisks, nil
    }

    var missingBlocks int
    for i, err := range errs {
        if err != nil && errors.Is(err, errFileNotFound) {

@ -527,7 +544,6 @@ func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry str
// Similar to rename but renames data from srcEntry to dstEntry at dataDir
func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dataDir, dstBucket, dstEntry string, writeQuorum int, ignoredErr []error) ([]StorageAPI, error) {
    dataDir = retainSlash(dataDir)
    defer ObjectPathUpdated(pathJoin(srcBucket, srcEntry))
    defer ObjectPathUpdated(pathJoin(dstBucket, dstEntry))

    g := errgroup.WithNErrs(len(disks))

@ -581,7 +597,6 @@ func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBuc
        dstEntry = retainSlash(dstEntry)
        srcEntry = retainSlash(srcEntry)
    }
    defer ObjectPathUpdated(pathJoin(srcBucket, srcEntry))
    defer ObjectPathUpdated(pathJoin(dstBucket, dstEntry))

    g := errgroup.WithNErrs(len(disks))

@ -625,14 +640,8 @@ func (er erasureObjects) PutObject(ctx context.Context, bucket string, object st

// putObject wrapper for erasureObjects PutObject
func (er erasureObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
    defer func() {
        ObjectPathUpdated(pathJoin(bucket, object))
    }()

    data := r.Reader

    uniqueID := mustGetUUID()
    tempObj := uniqueID
    // No metadata is set, allocate a new one.
    if opts.UserDefined == nil {
        opts.UserDefined = make(map[string]string)

@ -681,7 +690,10 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
            fi.VersionID = mustGetUUID()
        }
    }

    fi.DataDir = mustGetUUID()
    uniqueID := mustGetUUID()
    tempObj := uniqueID

    // Initialize erasure metadata.
    for index := range partsMetadata {

@ -756,13 +768,13 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
    }

    if !opts.NoLock {
        var err error
        lk := er.NewNSLock(bucket, object)
        ctx, err = lk.GetLock(ctx, globalOperationTimeout)
        lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
        if err != nil {
            return ObjectInfo{}, err
        }
        defer lk.Unlock()
        ctx = lkctx.Context()
        defer lk.Unlock(lkctx.Cancel)
    }

    for i, w := range writers {

@ -801,11 +813,13 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st

    // Write unique `xl.meta` for each disk.
    if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, writeQuorum); err != nil {
        logger.LogIf(ctx, err)
        return ObjectInfo{}, toObjectErr(err, bucket, object)
    }

    // Rename the successfully written temporary object to final location.
    if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, fi.DataDir, bucket, object, writeQuorum, nil); err != nil {
        logger.LogIf(ctx, err)
        return ObjectInfo{}, toObjectErr(err, bucket, object)
    }

@ -1069,11 +1083,12 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
    }
    // Acquire a write lock before deleting the object.
    lk := er.NewNSLock(bucket, object)
    ctx, err = lk.GetLock(ctx, globalDeleteOperationTimeout)
    lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout)
    if err != nil {
        return ObjectInfo{}, err
    }
    defer lk.Unlock()
    ctx = lkctx.Context()
    defer lk.Unlock(lkctx.Cancel)

    storageDisks := er.getDisks()
    writeQuorum := len(storageDisks)/2 + 1

@ -1179,19 +1194,19 @@ func (er erasureObjects) addPartial(bucket, object, versionID string) {

// PutObjectTags - replace or add tags to an existing object
func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
    var err error
    // Lock the object before updating tags.
    lk := er.NewNSLock(bucket, object)
    ctx, err = lk.GetLock(ctx, globalOperationTimeout)
    lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return ObjectInfo{}, err
    }
    defer lk.Unlock()
    ctx = lkctx.Context()
    defer lk.Unlock(lkctx.Cancel)

    disks := er.getDisks()

    // Read metadata associated with the object from all disks.
    metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)
    metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, true)

    readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
    if err != nil {

@ -1255,7 +1270,7 @@ func (er erasureObjects) updateObjectMeta(ctx context.Context, bucket, object st
    disks := er.getDisks()

    // Read metadata associated with the object from all disks.
    metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)
    metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, true)

    readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
    if err != nil {

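The nsUnlocker changes in GetObjectNInfo swap a bare method value (lock.Unlock) for a closure, since the new Unlock takes the cancel func as an argument; the closure also pins the lkctx acquired by that particular branch. A tiny self-contained illustration of why the wrapper is needed:

package main

import "fmt"

type unlocker struct{}

// Unlock now takes the cancel func, so a bare method value no longer fits
// a zero-argument nsUnlocker slot.
func (u *unlocker) Unlock(cancel func()) {
    if cancel != nil {
        cancel()
    }
    fmt.Println("unlocked")
}

func main() {
    u := &unlocker{}
    cancel := func() { fmt.Println("canceled") }

    // The closure both adapts the arity and captures this branch's cancel.
    nsUnlocker := func() { u.Unlock(cancel) }

    defer nsUnlocker()
    fmt.Println("working")
}
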
@ -318,8 +318,8 @@ func TestGetObjectNoQuorum(t *testing.T) {
    }

    err = xl.GetObject(ctx, bucket, object, 0, int64(len(buf)), ioutil.Discard, "", opts)
    if err != toObjectErr(errFileNotFound, bucket, object) {
        t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err)
    if err != toObjectErr(errErasureReadQuorum, bucket, object) {
        t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
    }

    // Test use case 2: Make 9 disks offline, which leaves less than quorum number of disks

@ -433,7 +433,7 @@ func (z *erasureServerPools) StorageInfo(ctx context.Context) (StorageInfo, []er
    return storageInfo, errs
}

func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo) error {
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

@ -448,7 +448,7 @@ func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, upd
    }

    if len(allBuckets) == 0 {
        updates <- DataUsageInfo{} // no buckets found update data usage to reflect latest state
        updates <- madmin.DataUsageInfo{} // no buckets found update data usage to reflect latest state
        return nil
    }

@ -614,17 +614,19 @@ func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object
        lock := z.NewNSLock(bucket, object)
        switch lockType {
        case writeLock:
            ctx, err = lock.GetLock(ctx, globalOperationTimeout)
            lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
            if err != nil {
                return nil, err
            }
            nsUnlocker = lock.Unlock
            ctx = lkctx.Context()
            nsUnlocker = func() { lock.Unlock(lkctx.Cancel) }
        case readLock:
            ctx, err = lock.GetRLock(ctx, globalOperationTimeout)
            lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
            if err != nil {
                return nil, err
            }
            nsUnlocker = lock.RUnlock
            ctx = lkctx.Context()
            nsUnlocker = func() { lock.RUnlock(lkctx.Cancel) }
        }
        unlockOnDefer = true
    }

@ -683,11 +685,12 @@ func (z *erasureServerPools) GetObjectInfo(ctx context.Context, bucket, object s

    // Lock the object before reading.
    lk := z.NewNSLock(bucket, object)
    ctx, err = lk.GetRLock(ctx, globalOperationTimeout)
    lkctx, err := lk.GetRLock(ctx, globalOperationTimeout)
    if err != nil {
        return ObjectInfo{}, err
    }
    defer lk.RUnlock()
    ctx = lkctx.Context()
    defer lk.RUnlock(lkctx.Cancel)

    errs := make([]error, len(z.serverPools))
    objInfos := make([]ObjectInfo, len(z.serverPools))

@ -799,17 +802,17 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o
        }
    }

    var err error
    // Acquire a bulk write lock across 'objects'
    multiDeleteLock := z.NewNSLock(bucket, objSets.ToSlice()...)
    ctx, err = multiDeleteLock.GetLock(ctx, globalOperationTimeout)
    lkctx, err := multiDeleteLock.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        for i := range derrs {
            derrs[i] = err
        }
        return dobjects, derrs
    }
    defer multiDeleteLock.Unlock()
    ctx = lkctx.Context()
    defer multiDeleteLock.Unlock(lkctx.Cancel)

    if z.SinglePool() {
        return z.serverPools[0].DeleteObjects(ctx, bucket, objects, opts)

@ -1364,14 +1367,14 @@ func (z *erasureServerPools) ListBuckets(ctx context.Context) (buckets []BucketI
}

func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
    var err error
    // Acquire lock on format.json
    formatLock := z.NewNSLock(minioMetaBucket, formatConfigFile)
    ctx, err = formatLock.GetLock(ctx, globalOperationTimeout)
    lkctx, err := formatLock.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return madmin.HealResultItem{}, err
    }
    defer formatLock.Unlock()
    ctx = lkctx.Context()
    defer formatLock.Unlock(lkctx.Cancel)

    var r = madmin.HealResultItem{
        Type: madmin.HealItemMetadata,

@ -250,8 +250,8 @@ func (s *erasureSets) connectDisks() {
            }
            disk.SetDiskLoc(s.poolIndex, setIndex, diskIndex)
            s.endpointStrings[setIndex*s.setDriveCount+diskIndex] = disk.String()
            s.erasureDisksMu.Unlock()
            setsJustConnected[setIndex] = true
            s.erasureDisksMu.Unlock()
        }(endpoint)
    }

@ -612,7 +612,7 @@ func (s *erasureSets) StorageInfo(ctx context.Context) (StorageInfo, []error) {
        storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
    }

    var errs []error
    errs := make([]error, 0, len(s.sets)*s.setDriveCount)
    for i := range s.sets {
        errs = append(errs, storageInfoErrs[i]...)
    }

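The connectDisks hunk is a small race fix: setsJustConnected[setIndex] = true used to execute after erasureDisksMu.Unlock(), outside the critical section, while sibling goroutines in the same loop could write the same slot concurrently. Moving the unlock below the write puts it under the lock. The general shape, sketched with a plain mutex:

package main

import (
    "fmt"
    "sync"
)

func main() {
    var mu sync.Mutex
    setsJustConnected := make([]bool, 4)

    var wg sync.WaitGroup
    for i := 0; i < 8; i++ {
        wg.Add(1)
        go func(diskIndex int) {
            defer wg.Done()
            setIndex := diskIndex % 4 // several disks map to one set
            mu.Lock()
            // ... update endpoint strings, disk maps, etc. ...
            setsJustConnected[setIndex] = true // now inside the critical section
            mu.Unlock()
        }(i)
    }
    wg.Wait()
    fmt.Println(setsJustConnected)
}
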
@ -435,6 +435,11 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
        }
    }()

    // Shuffle disks to ensure a total randomness of bucket/disk association to ensure
    // that objects that are not present in all disks are accounted and ILM applied.
    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    r.Shuffle(len(disks), func(i, j int) { disks[i], disks[j] = disks[j], disks[i] })

    // Start one scanner per disk
    var wg sync.WaitGroup
    wg.Add(len(disks))

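The scanner now randomizes disk order before fanning out, so bucket/disk pairing differs across scan cycles. rand.Shuffle performs an in-place Fisher–Yates shuffle through a swap callback; a standalone example:

package main

import (
    "fmt"
    "math/rand"
    "time"
)

func main() {
    disks := []string{"d0", "d1", "d2", "d3"}

    // Seeded source, as in the hunk above; rand.Shuffle swaps via the
    // callback, so it works on any slice type without reflection.
    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    r.Shuffle(len(disks), func(i, j int) { disks[i], disks[j] = disks[j], disks[i] })

    fmt.Println(disks) // e.g. [d2 d0 d3 d1]
}
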
@ -663,7 +663,7 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e
func initErasureMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatErasureV3) error {

    // Compute the local disks eligible for meta volumes (re)initialization
    var disksToInit []StorageAPI
    disksToInit := make([]StorageAPI, 0, len(storageDisks))
    for index := range storageDisks {
        if formats[index] == nil || storageDisks[index] == nil || !storageDisks[index].IsLocal() {
            // Ignore create meta volume on disks which are not found or not local.

@ -0,0 +1,28 @@
// Code generated by "stringer -type=format -trimprefix=format untar.go"; DO NOT EDIT.

package cmd

import "strconv"

func _() {
    // An "invalid array index" compiler error signifies that the constant values have changed.
    // Re-run the stringer command to generate them again.
    var x [1]struct{}
    _ = x[formatUnknown-0]
    _ = x[formatGzip-1]
    _ = x[formatZstd-2]
    _ = x[formatLZ4-3]
    _ = x[formatS2-4]
    _ = x[formatBZ2-5]
}

const _format_name = "UnknownGzipZstdLZ4S2BZ2"

var _format_index = [...]uint8{0, 7, 11, 15, 18, 20, 23}

func (i format) String() string {
    if i < 0 || i >= format(len(_format_index)-1) {
        return "format(" + strconv.FormatInt(int64(i), 10) + ")"
    }
    return _format_name[_format_index[i]:_format_index[i+1]]
}

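cmd/format_string.go above is entirely machine-written by the stringer tool named in its header comment. The format constants it ran against live in untar.go, which is not part of this diff; assuming they are a conventional iota enum, a self-contained reconstruction shows how the packed-name-plus-index trick behaves:

package main

import (
    "fmt"
    "strconv"
)

type format int

// Hypothetical reconstruction of the constants stringer ran against
// (untar.go itself is not shown in this diff).
const (
    formatUnknown format = iota
    formatGzip
    formatZstd
    formatLZ4
    formatS2
    formatBZ2
)

// Hand-rolled equivalent of the generated code: one packed string plus an
// index table, so String() allocates nothing for known values.
const _name = "UnknownGzipZstdLZ4S2BZ2"

var _index = [...]uint8{0, 7, 11, 15, 18, 20, 23}

func (i format) String() string {
    if i < 0 || i >= format(len(_index)-1) {
        return "format(" + strconv.FormatInt(int64(i), 10) + ")"
    }
    return _name[_index[i]:_index[i+1]]
}

func main() {
    fmt.Println(formatZstd) // Zstd
    fmt.Println(format(42)) // format(42)
}

The index checks inside the generated func _() break the build if the constant values ever drift, forcing a re-run of the generator.
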
@ -709,11 +709,12 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,

    // Hold write lock on the object.
    destLock := fs.NewNSLock(bucket, object)
    ctx, err = destLock.GetLock(ctx, globalOperationTimeout)
    lkctx, err := destLock.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return oi, err
    }
    defer destLock.Unlock()
    ctx = lkctx.Context()
    defer destLock.Unlock(lkctx.Cancel)

    bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix)
    fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fs.metaJSONFile)

39 cmd/fs-v1.go

@ -237,7 +237,7 @@ func (fs *FSObjects) StorageInfo(ctx context.Context) (StorageInfo, []error) {
}

// NSScanner returns data usage stats of the current FS deployment
func (fs *FSObjects) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
func (fs *FSObjects) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo) error {
    // Load bucket totals
    var totalCache dataUsageCache
    err := totalCache.load(ctx, fs, dataUsageCacheName)

@ -359,7 +359,7 @@ func (fs *FSObjects) scanBucket(ctx context.Context, bucket string, cache dataUs
    }

    oi := fsMeta.ToObjectInfo(bucket, object, fi)
    sz := item.applyActions(ctx, fs, actionMeta{oi: oi})
    sz := item.applyActions(ctx, fs, actionMeta{oi: oi}, &sizeSummary{})
    if sz >= 0 {
        return sizeSummary{totalSize: sz}, nil
    }

@ -609,11 +609,12 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu

    if !cpSrcDstSame {
        objectDWLock := fs.NewNSLock(dstBucket, dstObject)
        ctx, err = objectDWLock.GetLock(ctx, globalOperationTimeout)
        lkctx, err := objectDWLock.GetLock(ctx, globalOperationTimeout)
        if err != nil {
            return oi, err
        }
        defer objectDWLock.Unlock()
        ctx = lkctx.Context()
        defer objectDWLock.Unlock(lkctx.Cancel)
    }

    atomic.AddInt64(&fs.activeIOCount, 1)

@ -703,17 +704,19 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
    lock := fs.NewNSLock(bucket, object)
    switch lockType {
    case writeLock:
        ctx, err = lock.GetLock(ctx, globalOperationTimeout)
        lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
        if err != nil {
            return nil, err
        }
        nsUnlocker = lock.Unlock
        ctx = lkctx.Context()
        nsUnlocker = func() { lock.Unlock(lkctx.Cancel) }
    case readLock:
        ctx, err = lock.GetRLock(ctx, globalOperationTimeout)
        lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
        if err != nil {
            return nil, err
        }
        nsUnlocker = lock.RUnlock
        ctx = lkctx.Context()
        nsUnlocker = func() { lock.RUnlock(lkctx.Cancel) }
    }
}

@ -982,11 +985,12 @@ func (fs *FSObjects) getObjectInfo(ctx context.Context, bucket, object string) (
func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object string) (oi ObjectInfo, err error) {
    // Lock the object before reading.
    lk := fs.NewNSLock(bucket, object)
    ctx, err = lk.GetRLock(ctx, globalOperationTimeout)
    lkctx, err := lk.GetRLock(ctx, globalOperationTimeout)
    if err != nil {
        return oi, err
    }
    defer lk.RUnlock()
    ctx = lkctx.Context()
    defer lk.RUnlock(lkctx.Cancel)

    if err := checkGetObjArgs(ctx, bucket, object); err != nil {
        return oi, err

@ -1021,19 +1025,20 @@ func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, o
    oi, err := fs.getObjectInfoWithLock(ctx, bucket, object)
    if err == errCorruptedFormat || err == io.EOF {
        lk := fs.NewNSLock(bucket, object)
        ctx, err = lk.GetLock(ctx, globalOperationTimeout)
        lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
        if err != nil {
            return oi, toObjectErr(err, bucket, object)
        }

        fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
        err = fs.createFsJSON(object, fsMetaPath)
        lk.Unlock()
        lk.Unlock(lkctx.Cancel)
        if err != nil {
            return oi, toObjectErr(err, bucket, object)
        }

        oi, err = fs.getObjectInfoWithLock(ctx, bucket, object)
        return oi, toObjectErr(err, bucket, object)
    }
    return oi, toObjectErr(err, bucket, object)
}

@ -1073,12 +1078,13 @@ func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string

    // Lock the object.
    lk := fs.NewNSLock(bucket, object)
    ctx, err = lk.GetLock(ctx, globalOperationTimeout)
    lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        logger.LogIf(ctx, err)
        return objInfo, err
    }
    defer lk.Unlock()
    ctx = lkctx.Context()
    defer lk.Unlock(lkctx.Cancel)
    defer ObjectPathUpdated(path.Join(bucket, object))

    atomic.AddInt64(&fs.activeIOCount, 1)

@ -1249,11 +1255,12 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string, op

    // Acquire a write lock before deleting the object.
    lk := fs.NewNSLock(bucket, object)
    ctx, err = lk.GetLock(ctx, globalOperationTimeout)
    lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return objInfo, err
    }
    defer lk.Unlock()
    ctx = lkctx.Context()
    defer lk.Unlock(lkctx.Cancel)

    if err = checkDelObjArgs(ctx, bucket, object); err != nil {
        return objInfo, err

@ -47,7 +47,7 @@ func (a GatewayUnsupported) LocalStorageInfo(ctx context.Context) (StorageInfo,
}

// NSScanner - scanner is not implemented for gateway
func (a GatewayUnsupported) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
func (a GatewayUnsupported) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo) error {
    logger.CriticalIf(ctx, errors.New("not implemented"))
    return NotImplemented{}
}

@ -20,6 +20,7 @@ import (
    "context"
    "net/http"
    "strings"
    "sync/atomic"
    "time"

    "github.com/minio/minio-go/v7/pkg/set"

@ -63,6 +64,7 @@ func setRequestHeaderSizeLimitHandler(h http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if isHTTPHeaderSizeTooLarge(r.Header) {
            writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrMetadataTooLarge), r.URL, guessIsBrowserReq(r))
            atomic.AddUint64(&globalHTTPStats.rejectedRequestsHeader, 1)
            return
        }
        h.ServeHTTP(w, r)

@ -344,6 +346,7 @@ func setTimeValidityHandler(h http.Handler) http.Handler {
            // header, for all requests where Date header is not
            // present we will reject such clients.
            writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
            atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
            return
        }
        // Verify if the request date header is shifted by less than globalMaxSkewTime parameter in the past

@ -351,6 +354,7 @@ func setTimeValidityHandler(h http.Handler) http.Handler {
        curTime := UTCNow()
        if curTime.Sub(amzDate) > globalMaxSkewTime || amzDate.Sub(curTime) > globalMaxSkewTime {
            writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrRequestTimeTooSkewed), r.URL, guessIsBrowserReq(r))
            atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
            return
        }
    }

@ -358,105 +362,6 @@ func setTimeValidityHandler(h http.Handler) http.Handler {
    })
}

var supportedDummyBucketAPIs = map[string][]string{
    "acl":            {http.MethodPut, http.MethodGet},
    "cors":           {http.MethodGet},
    "website":        {http.MethodGet, http.MethodDelete},
    "logging":        {http.MethodGet},
    "accelerate":     {http.MethodGet},
    "requestPayment": {http.MethodGet},
}

// List of not implemented bucket queries
var notImplementedBucketResourceNames = map[string]struct{}{
    "cors":                {},
    "metrics":             {},
    "website":             {},
    "logging":             {},
    "inventory":           {},
    "accelerate":          {},
    "requestPayment":      {},
    "analytics":           {},
    "intelligent-tiering": {},
    "ownershipControls":   {},
    "publicAccessBlock":   {},
}

// Checks requests for not implemented Bucket resources
func ignoreNotImplementedBucketResources(req *http.Request) bool {
    for name := range req.URL.Query() {
        methods, ok := supportedDummyBucketAPIs[name]
        if ok {
            for _, method := range methods {
                if method == req.Method {
                    return false
                }
            }
        }

        if _, ok := notImplementedBucketResourceNames[name]; ok {
            return true
        }
    }
    return false
}

var supportedDummyObjectAPIs = map[string][]string{
    "acl": {http.MethodGet, http.MethodPut},
}

// List of not implemented object APIs
var notImplementedObjectResourceNames = map[string]struct{}{
    "torrent": {},
}

// Checks requests for not implemented Object resources
func ignoreNotImplementedObjectResources(req *http.Request) bool {
    for name := range req.URL.Query() {
        methods, ok := supportedDummyObjectAPIs[name]
        if ok {
            for _, method := range methods {
                if method == req.Method {
                    return false
                }
            }
        }
        if _, ok := notImplementedObjectResourceNames[name]; ok {
            return true
        }
    }
    return false
}

// setIgnoreResourcesHandler -
// Ignore resources handler is wrapper handler used for API request resource validation
// Since we do not support all the S3 queries, it is necessary for us to throw back a
// valid error message indicating that requested feature is not implemented.
func setIgnoreResourcesHandler(h http.Handler) http.Handler {
    // Resource handler ServeHTTP() wrapper
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        bucketName, objectName := request2BucketObjectName(r)

        // If bucketName is present and not objectName check for bucket level resource queries.
        if bucketName != "" && objectName == "" {
            if ignoreNotImplementedBucketResources(r) {
                writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
                return
            }
        }
        // If bucketName and objectName are present check for its resource queries.
        if bucketName != "" && objectName != "" {
            if ignoreNotImplementedObjectResources(r) {
                writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
                return
            }
        }

        // Serve HTTP.
        h.ServeHTTP(w, r)
    })
}

// setHttpStatsHandler sets a http Stats handler to gather HTTP statistics
func setHTTPStatsHandler(h http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

@ -517,6 +422,7 @@ func setRequestValidityHandler(h http.Handler) http.Handler {
        // Check for bad components in URL path.
        if hasBadPathComponent(r.URL.Path) {
            writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL, guessIsBrowserReq(r))
            atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
            return
        }
        // Check for bad components in URL query values.

@ -524,12 +430,14 @@ func setRequestValidityHandler(h http.Handler) http.Handler {
            for _, v := range vv {
                if hasBadPathComponent(v) {
                    writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL, guessIsBrowserReq(r))
                    atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
                    return
                }
            }
        }
        if hasMultipleAuth(r) {
            writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
            atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
            return
        }
        h.ServeHTTP(w, r)

@ -27,7 +27,7 @@ import (
    "github.com/minio/minio/pkg/bucket/bandwidth"
    "github.com/minio/minio/pkg/handlers"

    humanize "github.com/dustin/go-humanize"
    "github.com/dustin/go-humanize"
    "github.com/minio/minio/cmd/config/cache"
    "github.com/minio/minio/cmd/config/compress"
    "github.com/minio/minio/cmd/config/dns"

@ -254,7 +254,7 @@ var (
    globalCompressConfig compress.Config

    // Some standard object extensions which we strictly dis-allow for compression.
    standardExcludeCompressExtensions = []string{".gz", ".bz2", ".rar", ".zip", ".7z", ".xz", ".mp4", ".mkv", ".mov"}
    standardExcludeCompressExtensions = []string{".gz", ".bz2", ".rar", ".zip", ".7z", ".xz", ".mp4", ".mkv", ".mov", ".jpg", ".png", ".gif"}

    // Some standard content-types which we strictly dis-allow for compression.
    standardExcludeCompressContentTypes = []string{"video/*", "audio/*", "application/zip", "application/x-gzip", "application/x-zip-compressed", " application/x-compress", "application/x-spoon"}

@ -36,8 +36,9 @@ type apiConfig struct {
    extendListLife   time.Duration
    corsAllowOrigins []string
    // total drives per erasure set across pools.
    totalDriveCount    int
    replicationWorkers int
    totalDriveCount          int
    replicationWorkers       int
    replicationFailedWorkers int
}

func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {

@ -82,8 +83,10 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
    t.extendListLife = cfg.ExtendListLife
    if globalReplicationPool != nil &&
        cfg.ReplicationWorkers != t.replicationWorkers {
        globalReplicationPool.Resize(cfg.ReplicationWorkers)
        globalReplicationPool.ResizeFailedWorkers(cfg.ReplicationFailedWorkers)
        globalReplicationPool.ResizeWorkers(cfg.ReplicationWorkers)
    }
    t.replicationFailedWorkers = cfg.ReplicationFailedWorkers
    t.replicationWorkers = cfg.ReplicationWorkers
}

@ -165,6 +168,13 @@ func maxClients(f http.HandlerFunc) http.HandlerFunc {
    }
}

func (t *apiConfig) getReplicationFailedWorkers() int {
    t.mu.RLock()
    defer t.mu.RUnlock()

    return t.replicationFailedWorkers
}

func (t *apiConfig) getReplicationWorkers() int {
    t.mu.RLock()
    defer t.mu.RUnlock()

@ -233,10 +233,15 @@ func extractReqParams(r *http.Request) map[string]string {
|
|||
region := globalServerRegion
|
||||
cred := getReqAccessCred(r, region)
|
||||
|
||||
principalID := cred.AccessKey
|
||||
if cred.ParentUser != "" {
|
||||
principalID = cred.ParentUser
|
||||
}
|
||||
|
||||
// Success.
|
||||
m := map[string]string{
|
||||
"region": region,
|
||||
"accessKey": cred.AccessKey,
|
||||
"principalId": principalID,
|
||||
"sourceIPAddress": handlers.GetSourceIP(r),
|
||||
// Add more fields here.
|
||||
}
|
||||
|
|
|
@ -137,11 +137,15 @@ func (stats *HTTPAPIStats) Load() map[string]int {
|
|||
// HTTPStats holds statistics information about
|
||||
// HTTP requests made by all clients
|
||||
type HTTPStats struct {
|
||||
s3RequestsInQueue int32
|
||||
currentS3Requests HTTPAPIStats
|
||||
totalS3Requests HTTPAPIStats
|
||||
totalS3Errors HTTPAPIStats
|
||||
totalS3Canceled HTTPAPIStats
|
||||
s3RequestsInQueue int32
|
||||
currentS3Requests HTTPAPIStats
|
||||
totalS3Requests HTTPAPIStats
|
||||
totalS3Errors HTTPAPIStats
|
||||
totalS3Canceled HTTPAPIStats
|
||||
rejectedRequestsAuth uint64
|
||||
rejectedRequestsTime uint64
|
||||
rejectedRequestsHeader uint64
|
||||
rejectedRequestsInvalid uint64
|
||||
}
|
||||
|
||||
func (st *HTTPStats) addRequestsInQueue(i int32) {
|
||||
|
@ -152,6 +156,10 @@ func (st *HTTPStats) addRequestsInQueue(i int32) {
|
|||
func (st *HTTPStats) toServerHTTPStats() ServerHTTPStats {
|
||||
serverStats := ServerHTTPStats{}
|
||||
serverStats.S3RequestsInQueue = atomic.LoadInt32(&st.s3RequestsInQueue)
|
||||
serverStats.TotalS3RejectedAuth = atomic.LoadUint64(&st.rejectedRequestsAuth)
|
||||
serverStats.TotalS3RejectedTime = atomic.LoadUint64(&st.rejectedRequestsTime)
|
||||
serverStats.TotalS3RejectedHeader = atomic.LoadUint64(&st.rejectedRequestsHeader)
|
||||
serverStats.TotalS3RejectedInvalid = atomic.LoadUint64(&st.rejectedRequestsInvalid)
|
||||
serverStats.CurrentS3Requests = ServerHTTPAPIStats{
|
||||
APIStats: st.currentS3Requests.Load(),
|
||||
}
|
||||
|
|
|

@ -121,6 +121,8 @@ func NewDNSCache(freq time.Duration, lookupTimeout time.Duration, loggerOnce fun
doneCh: make(chan struct{}),
}

rnd := rand.New(rand.NewSource(time.Now().UnixNano()))

timer := time.NewTimer(freq)
go func() {
defer timer.Stop()

@ -128,7 +130,10 @@ func NewDNSCache(freq time.Duration, lookupTimeout time.Duration, loggerOnce fun
for {
select {
case <-timer.C:
timer.Reset(freq)
// Make sure that DNS refreshes are not all attempted
// at the same time; this reduces the load on the
// DNS servers.
timer.Reset(time.Duration(rnd.Float64() * float64(freq)))

r.Refresh()
case <-r.doneCh:
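The hunk above replaces a fixed `timer.Reset(freq)` with a reset to a random fraction of `freq`, so many nodes started together do not refresh DNS in lockstep. A minimal runnable sketch of that jittered-refresh loop, with hypothetical `refreshLoop`/`refresh` names rather than MinIO's:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// refreshLoop re-runs refresh() after a random interval in (0, freq),
// spreading load on the resolver across nodes.
func refreshLoop(freq time.Duration, refresh func(), done <-chan struct{}) {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	timer := time.NewTimer(freq)
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			// Jitter: wait a random fraction of freq before the next refresh.
			timer.Reset(time.Duration(rnd.Float64() * float64(freq)))
			refresh()
		case <-done:
			return
		}
	}
}

func main() {
	done := make(chan struct{})
	go refreshLoop(3*time.Second, func() { fmt.Println("refreshing DNS cache") }, done)
	time.Sleep(10 * time.Second)
	close(done)
}
```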

@ -88,6 +88,8 @@ const (
AmzObjectLockLegalHold = "X-Amz-Object-Lock-Legal-Hold"
AmzObjectLockBypassGovernance = "X-Amz-Bypass-Governance-Retention"
AmzBucketReplicationStatus = "X-Amz-Replication-Status"
AmzSnowballExtract = "X-Amz-Meta-Snowball-Auto-Extract"

// Multipart parts count
AmzMpPartsCount = "x-amz-mp-parts-count"

@ -55,7 +55,7 @@ func etcdKvsToSet(prefix string, kvs []*mvccpb.KeyValue) set.StringSet {
// suffix := "config.json"
// result is foo
func extractPathPrefixAndSuffix(s string, prefix string, suffix string) string {
return path.Clean(strings.TrimSuffix(strings.TrimPrefix(string(s), prefix), suffix))
return pathClean(strings.TrimSuffix(strings.TrimPrefix(string(s), prefix), suffix))
}

// IAMEtcdStore implements IAMStorageAPI

216
cmd/iam.go
216
cmd/iam.go

@ -30,7 +30,6 @@ import (

humanize "github.com/dustin/go-humanize"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"

@ -585,7 +584,8 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) {
for {
// let one of the servers acquire the lock; if not, let it time out,
// to be retried again by this loop.
if _, err := txnLk.GetLock(retryCtx, iamLockTimeout); err != nil {
lkctx, err := txnLk.GetLock(retryCtx, iamLockTimeout)
if err != nil {
logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. trying to acquire lock")
time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))
continue

@ -595,8 +595,8 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) {
// **** WARNING ****
// Migrating to encrypted backend on etcd should happen before initialization of
// IAM sub-system, make sure that we do not move the above codeblock elsewhere.
if err := migrateIAMConfigsEtcdToEncrypted(ctx, globalEtcdClient); err != nil {
txnLk.Unlock()
if err := migrateIAMConfigsEtcdToEncrypted(lkctx.Context(), globalEtcdClient); err != nil {
txnLk.Unlock(lkctx.Cancel)
logger.LogIf(ctx, fmt.Errorf("Unable to decrypt an encrypted ETCD backend for IAM users and policies: %w", err))
logger.LogIf(ctx, errors.New("IAM sub-system is partially initialized, some users may not be available"))
return

@ -609,8 +609,8 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) {
}

// Migrate IAM configuration, if necessary.
if err := sys.doIAMConfigMigration(ctx); err != nil {
txnLk.Unlock()
if err := sys.doIAMConfigMigration(lkctx.Context()); err != nil {
txnLk.Unlock(lkctx.Cancel)
if configRetriableErrors(err) {
logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. possible cause (%v)", err)
continue

@ -621,7 +621,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) {
}

// Successfully migrated, proceed to load the users.
txnLk.Unlock()
txnLk.Unlock(lkctx.Cancel)
break
}

@ -673,8 +673,10 @@ func (sys *IAMSys) DeletePolicy(policyName string) error {
if pset.Contains(policyName) {
cr, ok := sys.iamUsersMap[u]
if !ok {
// This case cannot happen
return errNoSuchUser
// This case can happen when a temporary account
// is deleted or expired; remove it from userPolicyMap.
delete(sys.iamUserPolicyMap, u)
continue
}
pset.Remove(policyName)
// User is from STS if the cred are temporary

@ -841,40 +843,31 @@ func (sys *IAMSys) SetTempUser(accessKey string, cred auth.Credentials, policyNa
return errServerNotInitialized
}

sys.store.lock()
defer sys.store.unlock()

ttl := int64(cred.Expiration.Sub(UTCNow()).Seconds())

// If OPA is not set we honor any policy claims for this
// temporary user which match with pre-configured canned
// policies for this server.
if globalPolicyOPA == nil && policyName != "" {
var availablePolicies []iampolicy.Policy
mp := newMappedPolicy(policyName)
for _, policy := range mp.toSlice() {
p, found := sys.iamPolicyDocsMap[policy]
if found {
availablePolicies = append(availablePolicies, p)
}
}

combinedPolicy := availablePolicies[0]
for i := 1; i < len(availablePolicies); i++ {
combinedPolicy.Statements = append(combinedPolicy.Statements,
availablePolicies[i].Statements...)
}
combinedPolicy := sys.GetCombinedPolicy(mp.toSlice()...)

if combinedPolicy.IsEmpty() {
delete(sys.iamUserPolicyMap, accessKey)
return nil
return fmt.Errorf("specified policy %s, not found %w", policyName, errNoSuchPolicy)
}

sys.store.lock()
defer sys.store.unlock()

if err := sys.store.saveMappedPolicy(context.Background(), accessKey, stsUser, false, mp, options{ttl: ttl}); err != nil {
sys.store.unlock()
return err
}

sys.iamUserPolicyMap[accessKey] = mp
} else {
sys.store.lock()
defer sys.store.unlock()
}

u := newUserIdentity(cred)

@ -1046,9 +1039,9 @@ func (sys *IAMSys) SetUserStatus(accessKey string, status madmin.AccountStatus)
SecretKey: cred.SecretKey,
Status: func() string {
if status == madmin.AccountEnabled {
return config.EnableOn
return auth.AccountOn
}
return config.EnableOff
return auth.AccountOff
}(),
})

@ -1060,19 +1053,25 @@ func (sys *IAMSys) SetUserStatus(accessKey string, status madmin.AccountStatus)
return nil
}

type newServiceAccountOpts struct {
sessionPolicy *iampolicy.Policy
accessKey string
secretKey string
}

// NewServiceAccount - create a new service account
func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, groups []string, sessionPolicy *iampolicy.Policy) (auth.Credentials, error) {
func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, groups []string, opts newServiceAccountOpts) (auth.Credentials, error) {
if !sys.Initialized() {
return auth.Credentials{}, errServerNotInitialized
}

var policyBuf []byte
if sessionPolicy != nil {
err := sessionPolicy.Validate()
if opts.sessionPolicy != nil {
err := opts.sessionPolicy.Validate()
if err != nil {
return auth.Credentials{}, err
}
policyBuf, err = json.Marshal(sessionPolicy)
policyBuf, err = json.Marshal(opts.sessionPolicy)
if err != nil {
return auth.Credentials{}, err
}

@ -1125,13 +1124,22 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro
m[iamPolicyClaimNameSA()] = "inherited-policy"
}

secret := globalActiveCred.SecretKey
cred, err := auth.GetNewCredentialsWithMetadata(m, secret)
var (
cred auth.Credentials
err error
)

if len(opts.accessKey) > 0 {
cred, err = auth.CreateNewCredentialsWithMetadata(opts.accessKey, opts.secretKey, m, globalActiveCred.SecretKey)
} else {
cred, err = auth.GetNewCredentialsWithMetadata(m, globalActiveCred.SecretKey)
}
if err != nil {
return auth.Credentials{}, err
}
cred.ParentUser = parentUser
cred.Groups = groups
cred.Status = string(auth.AccountOn)

u := newUserIdentity(cred)

@ -1144,8 +1152,69 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro
return cred, nil
}

type updateServiceAccountOpts struct {
sessionPolicy *iampolicy.Policy
secretKey string
status string
}

// UpdateServiceAccount - edit a service account
func (sys *IAMSys) UpdateServiceAccount(ctx context.Context, accessKey string, opts updateServiceAccountOpts) error {
if !sys.Initialized() {
return errServerNotInitialized
}

sys.store.lock()
defer sys.store.unlock()

cr, ok := sys.iamUsersMap[accessKey]
if !ok || !cr.IsServiceAccount() {
return errNoSuchServiceAccount
}

if opts.secretKey != "" {
cr.SecretKey = opts.secretKey
}

if opts.status != "" {
cr.Status = opts.status
}

if opts.sessionPolicy != nil {
m := make(map[string]interface{})
err := opts.sessionPolicy.Validate()
if err != nil {
return err
}
policyBuf, err := json.Marshal(opts.sessionPolicy)
if err != nil {
return err
}
if len(policyBuf) > 16*humanize.KiByte {
return fmt.Errorf("Session policy should not exceed 16 KiB characters")
}

m[iampolicy.SessionPolicyName] = base64.StdEncoding.EncodeToString(policyBuf)
m[iamPolicyClaimNameSA()] = "embedded-policy"
m[parentClaim] = cr.ParentUser
cr.SessionToken, err = auth.JWTSignWithAccessKey(accessKey, m, globalActiveCred.SecretKey)
if err != nil {
return err
}
}

u := newUserIdentity(cr)
if err := sys.store.saveUserIdentity(context.Background(), u.Credentials.AccessKey, srvAccUser, u); err != nil {
return err
}

sys.iamUsersMap[u.Credentials.AccessKey] = u.Credentials

return nil
}

// ListServiceAccounts - lists all services accounts associated to a specific user
func (sys *IAMSys) ListServiceAccounts(ctx context.Context, accessKey string) ([]string, error) {
func (sys *IAMSys) ListServiceAccounts(ctx context.Context, accessKey string) ([]auth.Credentials, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}

@ -1155,30 +1224,56 @@ func (sys *IAMSys) ListServiceAccounts(ctx context.Context, accessKey string) ([
sys.store.rlock()
defer sys.store.runlock()

var serviceAccounts []string
for k, v := range sys.iamUsersMap {
var serviceAccounts []auth.Credentials
for _, v := range sys.iamUsersMap {
if v.IsServiceAccount() && v.ParentUser == accessKey {
serviceAccounts = append(serviceAccounts, k)
// Hide secret key & session key here
v.SecretKey = ""
v.SessionToken = ""
serviceAccounts = append(serviceAccounts, v)
}
}

return serviceAccounts, nil
}

// GetServiceAccountParent - gets information about a service account
func (sys *IAMSys) GetServiceAccountParent(ctx context.Context, accessKey string) (string, error) {
// GetServiceAccount - gets information about a service account
func (sys *IAMSys) GetServiceAccount(ctx context.Context, accessKey string) (auth.Credentials, *iampolicy.Policy, error) {
if !sys.Initialized() {
return "", errServerNotInitialized
return auth.Credentials{}, nil, errServerNotInitialized
}

sys.store.rlock()
defer sys.store.runlock()

sa, ok := sys.iamUsersMap[accessKey]
if ok && sa.IsServiceAccount() {
return sa.ParentUser, nil
if !ok || !sa.IsServiceAccount() {
return auth.Credentials{}, nil, errNoSuchServiceAccount
}
return "", nil

var embeddedPolicy *iampolicy.Policy

jwtClaims, err := auth.ExtractClaims(sa.SessionToken, globalActiveCred.SecretKey)
if err == nil {
pt, ptok := jwtClaims.Lookup(iamPolicyClaimNameSA())
sp, spok := jwtClaims.Lookup(iampolicy.SessionPolicyName)
if ptok && spok && pt == "embedded-policy" {
policyBytes, err := base64.StdEncoding.DecodeString(sp)
if err == nil {
p, err := iampolicy.ParseConfig(bytes.NewReader(policyBytes))
if err == nil {
policy := iampolicy.Policy{}.Merge(*p)
embeddedPolicy = &policy
}
}
}
}

// Hide secret & session keys
sa.SecretKey = ""
sa.SessionToken = ""

return sa, embeddedPolicy, nil
}

// DeleteServiceAccount - delete a service account

@ -1231,7 +1326,12 @@ func (sys *IAMSys) CreateUser(accessKey string, uinfo madmin.UserInfo) error {
u := newUserIdentity(auth.Credentials{
AccessKey: accessKey,
SecretKey: uinfo.SecretKey,
Status: string(uinfo.Status),
Status: func() string {
if uinfo.Status == madmin.AccountEnabled {
return auth.AccountOn
}
return auth.AccountOff
}(),
})

if err := sys.store.saveUserIdentity(context.Background(), accessKey, regularUser, u); err != nil {

@ -1630,7 +1730,14 @@ func (sys *IAMSys) policyDBSet(name, policyName string, userType IAMUserType, is

// Handle policy mapping removal
if policyName == "" {
if err := sys.store.deleteMappedPolicy(context.Background(), name, userType, isGroup); err != nil && err != errNoSuchPolicy {
if sys.usersSysType == LDAPUsersSysType {
// Add a fallback removal of previous content that may come back
// as a ghost user due to lack of delete, a change
// introduced in PR #11840
sys.store.deleteMappedPolicy(context.Background(), name, regularUser, false)
}
err := sys.store.deleteMappedPolicy(context.Background(), name, userType, isGroup)
if err != nil && err != errNoSuchPolicy {
return err
}
if !isGroup {

@ -1704,7 +1811,7 @@ func (sys *IAMSys) PolicyDBGet(name string, isGroup bool, groups ...string) ([]s
// information in IAM (i.e sys.iam*Map) - this info is stored only in the STS
// generated credentials. Thus we skip looking up group memberships, user map,
// and group map and check the appropriate policy maps directly.
func (sys *IAMSys) policyDBGet(name string, isGroup bool) ([]string, error) {
func (sys *IAMSys) policyDBGet(name string, isGroup bool) (policies []string, err error) {
if isGroup {
if sys.usersSysType == MinIOUsersSysType {
g, ok := sys.iamGroupsMap[name]

@ -1719,8 +1826,7 @@ func (sys *IAMSys) policyDBGet(name string, isGroup bool) ([]string, error) {
}
}

mp := sys.iamGroupPolicyMap[name]
return mp.toSlice(), nil
return sys.iamGroupPolicyMap[name].toSlice(), nil
}

var u auth.Credentials

@ -1738,8 +1844,6 @@ func (sys *IAMSys) policyDBGet(name string, isGroup bool) ([]string, error) {
}
}

var policies []string

mp, ok := sys.iamUserPolicyMap[name]
if !ok {
if u.ParentUser != "" {

@ -1757,8 +1861,7 @@ func (sys *IAMSys) policyDBGet(name string, isGroup bool) ([]string, error) {
continue
}

p := sys.iamGroupPolicyMap[group]
policies = append(policies, p.toSlice()...)
policies = append(policies, sys.iamGroupPolicyMap[group].toSlice()...)
}

return policies, nil

@ -1788,8 +1891,9 @@ func (sys *IAMSys) IsAllowedServiceAccount(args iampolicy.Args, parent string) b
}

// Check policy for this service account.
svcPolicies, err := sys.PolicyDBGet(args.AccountName, false)
svcPolicies, err := sys.PolicyDBGet(parent, false, args.Groups...)
if err != nil {
logger.LogIf(GlobalContext, err)
return false
}

@ -2072,7 +2176,7 @@ func (sys *IAMSys) IsAllowed(args iampolicy.Args) bool {
}

// Continue with the assumption of a regular user
policies, err := sys.PolicyDBGet(args.AccountName, false)
policies, err := sys.PolicyDBGet(args.AccountName, false, args.Groups...)
if err != nil {
return false
}

@ -81,6 +81,15 @@ type MapClaims struct {
jwtgo.MapClaims
}

// GetAccessKey will return the access key.
// If nil an empty string will be returned.
func (c *MapClaims) GetAccessKey() string {
if c == nil {
return ""
}
return c.AccessKey
}

// NewStandardClaims - initializes standard claims
func NewStandardClaims() *StandardClaims {
return &StandardClaims{}
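The added `GetAccessKey` shows the defensive nil-receiver idiom: a Go method on a pointer receiver can be called through a nil pointer, so it guards before touching fields. A small runnable sketch of the same idiom, using a hypothetical `Claims` type rather than the real `MapClaims`:

```go
package main

import "fmt"

// Claims is a hypothetical stand-in for MapClaims above.
type Claims struct {
	AccessKey string
}

// GetAccessKey is safe to call through a nil pointer: guard the receiver
// before any field access.
func (c *Claims) GetAccessKey() string {
	if c == nil {
		return ""
	}
	return c.AccessKey
}

func main() {
	var c *Claims                        // nil pointer
	fmt.Printf("%q\n", c.GetAccessKey()) // prints "" instead of panicking
}
```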

@ -123,6 +123,31 @@ func (lrw *ResponseWriter) Size() int {
return lrw.bytesWritten
}

const contextAuditKey = contextKeyType("audit-entry")

// SetAuditEntry sets Audit info in the context.
func SetAuditEntry(ctx context.Context, audit *audit.Entry) context.Context {
if ctx == nil {
LogIf(context.Background(), fmt.Errorf("context is nil"))
return nil
}
return context.WithValue(ctx, contextAuditKey, audit)
}

// GetAuditEntry returns Audit entry if set.
func GetAuditEntry(ctx context.Context) *audit.Entry {
if ctx != nil {
r, ok := ctx.Value(contextAuditKey).(*audit.Entry)
if ok {
return r
}
r = &audit.Entry{}
SetAuditEntry(ctx, r)
return r
}
return nil
}
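The pair above stashes a pointer to an audit entry in the request context so later code can read and mutate the same entry. A hedged, self-contained sketch of that context-value pattern (the `Entry` type and key names here are hypothetical, not MinIO's):

```go
package main

import (
	"context"
	"fmt"
)

// Entry is a hypothetical stand-in for audit.Entry above.
type Entry struct{ Trigger string }

type ctxKey string

const auditKey ctxKey = "audit-entry"

// withAudit stores a pointer so later callers can mutate the same entry.
func withAudit(ctx context.Context, e *Entry) context.Context {
	return context.WithValue(ctx, auditKey, e)
}

// auditFrom returns the entry if one was attached, else nil.
func auditFrom(ctx context.Context) *Entry {
	e, _ := ctx.Value(auditKey).(*Entry)
	return e
}

func main() {
	ctx := withAudit(context.Background(), &Entry{Trigger: "internal"})
	if e := auditFrom(ctx); e != nil {
		fmt.Println(e.Trigger) // internal
	}
}
```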

// AuditLog - logs audit logs to all audit targets.
func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqClaims map[string]interface{}, filterKeys ...string) {
// Fast exit if there is no audit target configured

@ -130,41 +155,53 @@ func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqCl
return
}

var (
statusCode int
timeToResponse time.Duration
timeToFirstByte time.Duration
)
var entry audit.Entry

st, ok := w.(*ResponseWriter)
if ok {
statusCode = st.StatusCode
timeToResponse = time.Now().UTC().Sub(st.StartTime)
timeToFirstByte = st.TimeToFirstByte
}
if w != nil && r != nil {
reqInfo := GetReqInfo(ctx)
if reqInfo == nil {
return
}

reqInfo := GetReqInfo(ctx)
if reqInfo == nil {
return
}
entry = audit.ToEntry(w, r, reqClaims, globalDeploymentID)
entry.Trigger = "external-request"

entry := audit.ToEntry(w, r, reqClaims, globalDeploymentID)
for _, filterKey := range filterKeys {
delete(entry.ReqClaims, filterKey)
delete(entry.ReqQuery, filterKey)
delete(entry.ReqHeader, filterKey)
delete(entry.RespHeader, filterKey)
}
entry.API.Name = reqInfo.API
entry.API.Bucket = reqInfo.BucketName
entry.API.Object = reqInfo.ObjectName
entry.API.Status = http.StatusText(statusCode)
entry.API.StatusCode = statusCode
entry.API.TimeToResponse = strconv.FormatInt(timeToResponse.Nanoseconds(), 10) + "ns"
entry.Tags = reqInfo.GetTagsMap()
// ttfb will be recorded only for GET requests. Ignore such cases where ttfb will be empty.
if timeToFirstByte != 0 {
entry.API.TimeToFirstByte = strconv.FormatInt(timeToFirstByte.Nanoseconds(), 10) + "ns"
for _, filterKey := range filterKeys {
delete(entry.ReqClaims, filterKey)
delete(entry.ReqQuery, filterKey)
delete(entry.ReqHeader, filterKey)
delete(entry.RespHeader, filterKey)
}

var (
statusCode int
timeToResponse time.Duration
timeToFirstByte time.Duration
)

st, ok := w.(*ResponseWriter)
if ok {
statusCode = st.StatusCode
timeToResponse = time.Now().UTC().Sub(st.StartTime)
timeToFirstByte = st.TimeToFirstByte
}

entry.API.Name = reqInfo.API
entry.API.Bucket = reqInfo.BucketName
entry.API.Object = reqInfo.ObjectName
entry.API.Status = http.StatusText(statusCode)
entry.API.StatusCode = statusCode
entry.API.TimeToResponse = strconv.FormatInt(timeToResponse.Nanoseconds(), 10) + "ns"
entry.Tags = reqInfo.GetTagsMap()
// ttfb will be recorded only for GET requests. Ignore such cases where ttfb will be empty.
if timeToFirstByte != 0 {
entry.API.TimeToFirstByte = strconv.FormatInt(timeToFirstByte.Nanoseconds(), 10) + "ns"
}
} else {
auditEntry := GetAuditEntry(ctx)
if auditEntry != nil {
entry = *auditEntry
}
}

// Send audit logs only to http targets.

@ -33,6 +33,7 @@ type Entry struct {
Version string `json:"version"`
DeploymentID string `json:"deploymentid,omitempty"`
Time string `json:"time"`
Trigger string `json:"trigger"`
API struct {
Name string `json:"name,omitempty"`
Bucket string `json:"bucket,omitempty"`

@ -52,38 +53,48 @@ type Entry struct {
Tags map[string]interface{} `json:"tags,omitempty"`
}

// ToEntry - constructs an audit entry object.
// NewEntry - constructs an audit entry object with some fields filled
func NewEntry(deploymentID string) Entry {
return Entry{
Version: Version,
DeploymentID: deploymentID,
Time: time.Now().UTC().Format(time.RFC3339Nano),
}
}

// ToEntry - constructs an audit entry from a http request
func ToEntry(w http.ResponseWriter, r *http.Request, reqClaims map[string]interface{}, deploymentID string) Entry {

entry := NewEntry(deploymentID)

entry.RemoteHost = handlers.GetSourceIP(r)
entry.UserAgent = r.UserAgent()
entry.ReqClaims = reqClaims

q := r.URL.Query()
reqQuery := make(map[string]string, len(q))
for k, v := range q {
reqQuery[k] = strings.Join(v, ",")
}
entry.ReqQuery = reqQuery

reqHeader := make(map[string]string, len(r.Header))
for k, v := range r.Header {
reqHeader[k] = strings.Join(v, ",")
}
entry.ReqHeader = reqHeader

wh := w.Header()
entry.RequestID = wh.Get(xhttp.AmzRequestID)
respHeader := make(map[string]string, len(wh))
for k, v := range wh {
respHeader[k] = strings.Join(v, ",")
}
entry.RespHeader = respHeader

if etag := respHeader[xhttp.ETag]; etag != "" {
respHeader[xhttp.ETag] = strings.Trim(etag, `"`)
}

entry := Entry{
Version: Version,
DeploymentID: deploymentID,
RemoteHost: handlers.GetSourceIP(r),
RequestID: wh.Get(xhttp.AmzRequestID),
UserAgent: r.UserAgent(),
Time: time.Now().UTC().Format(time.RFC3339Nano),
ReqQuery: reqQuery,
ReqHeader: reqHeader,
ReqClaims: reqClaims,
RespHeader: respHeader,
}

return entry
}

@ -130,8 +130,8 @@ func (h *Target) startHTTPLogger() {
resp, err := h.client.Do(req)
cancel()
if err != nil {
logger.LogIf(ctx, fmt.Errorf("%s returned '%w', please check your endpoint configuration\n",
h.endpoint, err))
logger.LogOnceIf(ctx, fmt.Errorf("%s returned '%w', please check your endpoint configuration",
h.endpoint, err), h.endpoint)
continue
}

@ -141,11 +141,11 @@ func (h *Target) startHTTPLogger() {
if resp.StatusCode != http.StatusOK {
switch resp.StatusCode {
case http.StatusForbidden:
logger.LogIf(ctx, fmt.Errorf("%s returned '%s', please check if your auth token is correctly set",
h.endpoint, resp.Status))
logger.LogOnceIf(ctx, fmt.Errorf("%s returned '%s', please check if your auth token is correctly set",
h.endpoint, resp.Status), h.endpoint)
default:
logger.LogIf(ctx, fmt.Errorf("%s returned '%s', please check your endpoint configuration",
h.endpoint, resp.Status))
logger.LogOnceIf(ctx, fmt.Errorf("%s returned '%s', please check your endpoint configuration",
h.endpoint, resp.Status), h.endpoint)
}
}
}

@ -366,7 +366,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
retries = 1
}

const retryDelay = 500 * time.Millisecond
const retryDelay = 1 * time.Second
// Load first part metadata...
// All operations are performed without locks, so we must be careful and allow for failures.
// Read metadata associated with the object from a disk.

@ -811,6 +811,9 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
if len(disks) == 0 {
return fmt.Errorf("listPathRaw: 0 drives provided")
}
// Cancel upstream if we finish before we expect.
ctx, cancel := context.WithCancel(ctx)
defer cancel()

askDisks := len(disks)
readers := make([]*metacacheReader, askDisks)

@ -821,6 +824,8 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
if err != nil {
return err
}
// Make sure we close the pipe so blocked writes don't stay around.
defer r.CloseWithError(context.Canceled)
// Send request to each disk.
go func() {
werr := d.WalkDir(ctx, WalkDirOptions{

@ -832,7 +837,10 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
ForwardTo: opts.forwardTo,
}, w)
w.CloseWithError(werr)
if werr != io.EOF && werr != nil && werr.Error() != errFileNotFound.Error() && werr.Error() != errVolumeNotFound.Error() {
if werr != io.EOF && werr != nil &&
werr.Error() != errFileNotFound.Error() &&
werr.Error() != errVolumeNotFound.Error() &&
!errors.Is(werr, context.Canceled) {
logger.LogIf(ctx, werr)
}
}()
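The `defer r.CloseWithError(context.Canceled)` added above exists because a goroutine writing into an `io.Pipe` blocks until its bytes are read; if the reader abandons the stream, the writer would leak. A minimal runnable sketch of how failing the read end unblocks a stuck writer:

```go
package main

import (
	"context"
	"fmt"
	"io"
)

func main() {
	r, w := io.Pipe()
	done := make(chan struct{})

	go func() {
		// This Write blocks until everything is read or the pipe fails.
		_, err := w.Write(make([]byte, 1024))
		fmt.Println("writer unblocked:", err) // reports context.Canceled
		w.Close()
		close(done)
	}()

	buf := make([]byte, 4)
	r.Read(buf) // consume a little, then abandon the stream

	// As in the listing above: fail the pipe so the blocked writer
	// does not stay around.
	r.CloseWithError(context.Canceled)
	<-done
}
```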

@ -27,6 +27,7 @@ import (
"strings"

"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
xioutil "github.com/minio/minio/pkg/ioutil"
)

@ -107,6 +108,9 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
forward := opts.ForwardTo
var scanDir func(path string) error
scanDir = func(current string) error {
if contextCanceled(ctx) {
return ctx.Err()
}
entries, err := s.ListDir(ctx, opts.Bucket, current, -1)
if err != nil {
// Folder could have gone away in-between

@ -142,6 +146,9 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
// Do not retain the file.
entries[i] = ""

if contextCanceled(ctx) {
return ctx.Err()
}
// If root was an object return it as such.
if HasSuffix(entry, xlStorageFormatFile) {
var meta metaCacheEntry

@ -183,6 +190,9 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
if entry == "" {
continue
}
if contextCanceled(ctx) {
return ctx.Err()
}
meta := metaCacheEntry{name: PathJoin(current, entry)}

// If directory entry on stack before this, pop it now.

@ -282,6 +292,7 @@ func (client *storageRESTClient) WalkDir(ctx context.Context, opts WalkDirOption
logger.LogIf(ctx, err)
return err
}
defer xhttp.DrainBody(respBody)
return waitForHTTPStream(respBody, wr)
}

@ -44,53 +44,60 @@ const (
healMetricNamespace MetricNamespace = "minio_heal"
interNodeMetricNamespace MetricNamespace = "minio_inter_node"
nodeMetricNamespace MetricNamespace = "minio_node"
minIOMetricNamespace MetricNamespace = "minio"
minioMetricNamespace MetricNamespace = "minio"
s3MetricNamespace MetricNamespace = "minio_s3"
)

const (
cacheSubsystem MetricSubsystem = "cache"
capacityRawSubsystem MetricSubsystem = "capacity_raw"
capacityUsableSubsystem MetricSubsystem = "capacity_usable"
diskSubsystem MetricSubsystem = "disk"
fileDescriptorSubsystem MetricSubsystem = "file_descriptor"
goRoutines MetricSubsystem = "go_routine"
ioSubsystem MetricSubsystem = "io"
nodesSubsystem MetricSubsystem = "nodes"
objectsSubsystem MetricSubsystem = "objects"
processSubsystem MetricSubsystem = "process"
replicationSubsystem MetricSubsystem = "replication"
requestsSubsystem MetricSubsystem = "requests"
timeSubsystem MetricSubsystem = "time"
trafficSubsystem MetricSubsystem = "traffic"
softwareSubsystem MetricSubsystem = "software"
sysCallSubsystem MetricSubsystem = "syscall"
usageSubsystem MetricSubsystem = "usage"
cacheSubsystem MetricSubsystem = "cache"
capacityRawSubsystem MetricSubsystem = "capacity_raw"
capacityUsableSubsystem MetricSubsystem = "capacity_usable"
diskSubsystem MetricSubsystem = "disk"
fileDescriptorSubsystem MetricSubsystem = "file_descriptor"
goRoutines MetricSubsystem = "go_routine"
ioSubsystem MetricSubsystem = "io"
nodesSubsystem MetricSubsystem = "nodes"
objectsSubsystem MetricSubsystem = "objects"
processSubsystem MetricSubsystem = "process"
replicationSubsystem MetricSubsystem = "replication"
requestsSubsystem MetricSubsystem = "requests"
requestsRejectedSubsystem MetricSubsystem = "requests_rejected"
timeSubsystem MetricSubsystem = "time"
trafficSubsystem MetricSubsystem = "traffic"
softwareSubsystem MetricSubsystem = "software"
sysCallSubsystem MetricSubsystem = "syscall"
usageSubsystem MetricSubsystem = "usage"
)

// MetricName are the individual names for the metric.
type MetricName string

const (
total MetricName = "total"
errorsTotal MetricName = "error_total"
canceledTotal MetricName = "canceled_total"
healTotal MetricName = "heal_total"
hitsTotal MetricName = "hits_total"
inflightTotal MetricName = "inflight_total"
limitTotal MetricName = "limit_total"
missedTotal MetricName = "missed_total"
waitingTotal MetricName = "waiting_total"
objectTotal MetricName = "object_total"
offlineTotal MetricName = "offline_total"
onlineTotal MetricName = "online_total"
openTotal MetricName = "open_total"
readTotal MetricName = "read_total"
writeTotal MetricName = "write_total"
authTotal MetricName = "auth_total"
canceledTotal MetricName = "canceled_total"
errorsTotal MetricName = "errors_total"
headerTotal MetricName = "header_total"
healTotal MetricName = "heal_total"
hitsTotal MetricName = "hits_total"
inflightTotal MetricName = "inflight_total"
invalidTotal MetricName = "invalid_total"
limitTotal MetricName = "limit_total"
missedTotal MetricName = "missed_total"
waitingTotal MetricName = "waiting_total"
objectTotal MetricName = "object_total"
offlineTotal MetricName = "offline_total"
onlineTotal MetricName = "online_total"
openTotal MetricName = "open_total"
readTotal MetricName = "read_total"
timestampTotal MetricName = "timestamp_total"
writeTotal MetricName = "write_total"
total MetricName = "total"

failedCount MetricName = "failed_count"
failedBytes MetricName = "failed_bytes"
freeBytes MetricName = "free_bytes"
pendingBytes MetricName = "pending_bytes"
pendingCount MetricName = "pending_count"
readBytes MetricName = "read_bytes"
rcharBytes MetricName = "rchar_bytes"
receivedBytes MetricName = "received_bytes"

@ -351,6 +358,16 @@ func getNodeDiskTotalBytesMD() MetricDescription {
Type: gaugeMetric,
}
}
func getUsageLastScanActivityMD() MetricDescription {
return MetricDescription{
Namespace: minioMetricNamespace,
Subsystem: usageSubsystem,
Name: lastActivityTime,
Help: "Time elapsed (in nano seconds) since last scan activity. This is set to 0 until first scan cycle",
Type: gaugeMetric,
}
}

func getBucketUsageTotalBytesMD() MetricDescription {
return MetricDescription{
Namespace: bucketMetricNamespace,

@ -405,6 +422,24 @@ func getBucketRepReceivedBytesMD() MetricDescription {
Type: gaugeMetric,
}
}
func getBucketRepPendingOperationsMD() MetricDescription {
return MetricDescription{
Namespace: bucketMetricNamespace,
Subsystem: replicationSubsystem,
Name: pendingCount,
Help: "Total number of objects pending replication",
Type: gaugeMetric,
}
}
func getBucketRepFailedOperationsMD() MetricDescription {
return MetricDescription{
Namespace: bucketMetricNamespace,
Subsystem: replicationSubsystem,
Name: failedCount,
Help: "Total number of objects which failed replication",
Type: gaugeMetric,
}
}
func getBucketObjectDistributionMD() MetricDescription {
return MetricDescription{
Namespace: bucketMetricNamespace,

@ -505,6 +540,42 @@ func getS3RequestsCanceledMD() MetricDescription {
Type: counterMetric,
}
}
func getS3RejectedAuthRequestsTotalMD() MetricDescription {
return MetricDescription{
Namespace: s3MetricNamespace,
Subsystem: requestsRejectedSubsystem,
Name: authTotal,
Help: "Total number of S3 requests rejected for auth failure.",
Type: counterMetric,
}
}
func getS3RejectedHeaderRequestsTotalMD() MetricDescription {
return MetricDescription{
Namespace: s3MetricNamespace,
Subsystem: requestsRejectedSubsystem,
Name: headerTotal,
Help: "Total number of S3 requests rejected for invalid header.",
Type: counterMetric,
}
}
func getS3RejectedTimestampRequestsTotalMD() MetricDescription {
return MetricDescription{
Namespace: s3MetricNamespace,
Subsystem: requestsRejectedSubsystem,
Name: timestampTotal,
Help: "Total number of S3 requests rejected for invalid timestamp.",
Type: counterMetric,
}
}
func getS3RejectedInvalidRequestsTotalMD() MetricDescription {
return MetricDescription{
Namespace: s3MetricNamespace,
Subsystem: requestsRejectedSubsystem,
Name: invalidTotal,
Help: "Total number of invalid S3 requests.",
Type: counterMetric,
}
}
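The namespace/subsystem/name triples above compose into Prometheus series names such as `minio_s3_requests_rejected_auth_total`. As a hedged sketch (not MinIO code), the same metric expressed directly with the standard Prometheus Go client, exported on the conventional `/metrics` endpoint:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// rejectedAuth mirrors the description above: namespace "minio_s3",
// subsystem "requests_rejected", name "auth_total" — the exported series
// minio_s3_requests_rejected_auth_total.
var rejectedAuth = prometheus.NewCounter(prometheus.CounterOpts{
	Namespace: "minio_s3",
	Subsystem: "requests_rejected",
	Name:      "auth_total",
	Help:      "Total number of S3 requests rejected for auth failure.",
})

func main() {
	prometheus.MustRegister(rejectedAuth)
	rejectedAuth.Inc() // bump once per rejected request
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":2112", nil)
}
```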

func getCacheHitsTotalMD() MetricDescription {
return MetricDescription{
Namespace: minioNamespace,

@ -625,7 +696,7 @@ func getNodeOfflineTotalMD() MetricDescription {
}
func getMinIOVersionMD() MetricDescription {
return MetricDescription{
Namespace: minIOMetricNamespace,
Namespace: minioMetricNamespace,
Subsystem: softwareSubsystem,
Name: versionInfo,
Help: "MinIO Release tag for the server",

@ -634,7 +705,7 @@ func getMinIOVersionMD() MetricDescription {
}
func getMinIOCommitMD() MetricDescription {
return MetricDescription{
Namespace: minIOMetricNamespace,
Namespace: minioMetricNamespace,
Subsystem: softwareSubsystem,
Name: commitInfo,
Help: "Git commit hash for the MinIO release.",

@ -955,13 +1026,14 @@ func getMinioHealingMetrics() MetricsGroup {
if !exists {
return
}
var dur time.Duration
if !bgSeq.lastHealActivity.IsZero() {
dur = time.Since(bgSeq.lastHealActivity)

if bgSeq.lastHealActivity.IsZero() {
return
}

metrics = append(metrics, Metric{
Description: getHealLastActivityTimeMD(),
Value: float64(dur),
Value: float64(time.Since(bgSeq.lastHealActivity)),
})
metrics = append(metrics, getObjectsScanned(bgSeq)...)
metrics = append(metrics, getScannedItems(bgSeq)...)

@ -1072,6 +1144,22 @@ func getHTTPMetrics() MetricsGroup {
len(httpStats.CurrentS3Requests.APIStats)+
len(httpStats.TotalS3Requests.APIStats)+
len(httpStats.TotalS3Errors.APIStats))
metrics = append(metrics, Metric{
Description: getS3RejectedAuthRequestsTotalMD(),
Value: float64(httpStats.TotalS3RejectedAuth),
})
metrics = append(metrics, Metric{
Description: getS3RejectedTimestampRequestsTotalMD(),
Value: float64(httpStats.TotalS3RejectedTime),
})
metrics = append(metrics, Metric{
Description: getS3RejectedHeaderRequestsTotalMD(),
Value: float64(httpStats.TotalS3RejectedHeader),
})
metrics = append(metrics, Metric{
Description: getS3RejectedInvalidRequestsTotalMD(),
Value: float64(httpStats.TotalS3RejectedInvalid),
})
metrics = append(metrics, Metric{
Description: getS3RequestsInQueueMD(),
Value: float64(httpStats.S3RequestsInQueue),

@ -1167,7 +1255,14 @@ func getBucketUsageMetrics() MetricsGroup {
return
}

metrics = append(metrics, Metric{
Description: getUsageLastScanActivityMD(),
Value: float64(time.Since(dataUsageInfo.LastUpdate)),
})

for bucket, usage := range dataUsageInfo.BucketsUsage {
stat := getLatestReplicationStats(bucket, usage)

metrics = append(metrics, Metric{
Description: getBucketUsageTotalBytesMD(),
Value: float64(usage.Size),

@ -1180,25 +1275,35 @@ func getBucketUsageMetrics() MetricsGroup {
VariableLabels: map[string]string{"bucket": bucket},
})

if usage.hasReplicationUsage() {
if stat.hasReplicationUsage() {
metrics = append(metrics, Metric{
Description: getBucketRepPendingBytesMD(),
Value: float64(usage.ReplicationPendingSize),
Value: float64(stat.PendingSize),
VariableLabels: map[string]string{"bucket": bucket},
})
metrics = append(metrics, Metric{
Description: getBucketRepFailedBytesMD(),
Value: float64(usage.ReplicationFailedSize),
Value: float64(stat.FailedSize),
VariableLabels: map[string]string{"bucket": bucket},
})
metrics = append(metrics, Metric{
Description: getBucketRepSentBytesMD(),
Value: float64(usage.ReplicatedSize),
Value: float64(stat.ReplicatedSize),
VariableLabels: map[string]string{"bucket": bucket},
})
metrics = append(metrics, Metric{
Description: getBucketRepReceivedBytesMD(),
Value: float64(usage.ReplicaSize),
Value: float64(stat.ReplicaSize),
VariableLabels: map[string]string{"bucket": bucket},
})
metrics = append(metrics, Metric{
Description: getBucketRepPendingOperationsMD(),
Value: float64(stat.PendingCount),
VariableLabels: map[string]string{"bucket": bucket},
})
metrics = append(metrics, Metric{
Description: getBucketRepFailedOperationsMD(),
Value: float64(stat.FailedCount),
VariableLabels: map[string]string{"bucket": bucket},
})
}

@ -1315,13 +1420,6 @@ func getClusterStorageMetrics() MetricsGroup {
}
}

func (b *BucketUsageInfo) hasReplicationUsage() bool {
return b.ReplicationPendingSize > 0 ||
b.ReplicationFailedSize > 0 ||
b.ReplicatedSize > 0 ||
b.ReplicaSize > 0
}

type minioClusterCollector struct {
desc *prometheus.Desc
}

@ -23,6 +23,7 @@ import (
"time"

"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)

@ -430,6 +431,67 @@ func networkMetricsPrometheus(ch chan<- prometheus.Metric) {
)
}

// get the most current of in-memory replication stats and data usage info from crawler.
func getLatestReplicationStats(bucket string, u madmin.BucketUsageInfo) (s BucketReplicationStats) {
bucketStats := globalNotificationSys.GetClusterBucketStats(GlobalContext, bucket)

replStats := BucketReplicationStats{}
for _, bucketStat := range bucketStats {
replStats.FailedCount += bucketStat.ReplicationStats.FailedCount
replStats.FailedSize += bucketStat.ReplicationStats.FailedSize
replStats.PendingCount += bucketStat.ReplicationStats.PendingCount
replStats.PendingSize += bucketStat.ReplicationStats.PendingSize
replStats.ReplicaSize += bucketStat.ReplicationStats.ReplicaSize
replStats.ReplicatedSize += bucketStat.ReplicationStats.ReplicatedSize
}
usageStat := globalReplicationStats.GetInitialUsage(bucket)
replStats.FailedCount += usageStat.FailedCount
replStats.FailedSize += usageStat.FailedSize
replStats.PendingCount += usageStat.PendingCount
replStats.PendingSize += usageStat.PendingSize
replStats.ReplicaSize += usageStat.ReplicaSize
replStats.ReplicatedSize += usageStat.ReplicatedSize

// use in memory replication stats if it is ahead of usage info.
if replStats.ReplicatedSize >= u.ReplicatedSize {
s.ReplicatedSize = replStats.ReplicatedSize
} else {
s.ReplicatedSize = u.ReplicatedSize
}

if replStats.PendingSize > u.ReplicationPendingSize {
s.PendingSize = replStats.PendingSize
} else {
s.PendingSize = u.ReplicationPendingSize
}

if replStats.FailedSize > u.ReplicationFailedSize {
s.FailedSize = replStats.FailedSize
} else {
s.FailedSize = u.ReplicationFailedSize
}

if replStats.ReplicaSize > u.ReplicaSize {
s.ReplicaSize = replStats.ReplicaSize
} else {
s.ReplicaSize = u.ReplicaSize
}

if replStats.PendingCount > u.ReplicationPendingCount {
s.PendingCount = replStats.PendingCount
} else {
s.PendingCount = u.ReplicationPendingCount
}

if replStats.FailedCount > u.ReplicationFailedCount {
s.FailedCount = replStats.FailedCount
} else {
s.FailedCount = u.ReplicationFailedCount
}

return s
}

// Populates prometheus with bucket usage metrics, this metrics
// is only enabled if scanner is enabled.
func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {

@ -447,13 +509,13 @@ func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {
if err != nil {
return
}

// data usage has not captured any data yet.
if dataUsageInfo.LastUpdate.IsZero() {
return
}

for bucket, usageInfo := range dataUsageInfo.BucketsUsage {
stat := getLatestReplicationStats(bucket, usageInfo)
// Total space used by bucket
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(

@ -479,7 +541,7 @@ func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {
"Total capacity pending to be replicated",
[]string{"bucket"}, nil),
prometheus.GaugeValue,
float64(usageInfo.ReplicationPendingSize),
float64(stat.PendingSize),
bucket,
)
ch <- prometheus.MustNewConstMetric(

@ -488,7 +550,7 @@ func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {
"Total capacity failed to replicate at least once",
[]string{"bucket"}, nil),
prometheus.GaugeValue,
float64(usageInfo.ReplicationFailedSize),
float64(stat.FailedSize),
bucket,
)
ch <- prometheus.MustNewConstMetric(

@ -497,7 +559,7 @@ func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {
"Total capacity replicated to destination",
[]string{"bucket"}, nil),
prometheus.GaugeValue,
float64(usageInfo.ReplicatedSize),
float64(stat.ReplicatedSize),
bucket,
)
ch <- prometheus.MustNewConstMetric(

@ -506,7 +568,25 @@ func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {
"Total capacity replicated to this instance",
[]string{"bucket"}, nil),
prometheus.GaugeValue,
float64(usageInfo.ReplicaSize),
float64(stat.ReplicaSize),
bucket,
)
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("bucket", "replication", "pending_count"),
"Total replication operations pending",
[]string{"bucket"}, nil),
prometheus.GaugeValue,
float64(stat.PendingCount),
bucket,
)
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName("bucket", "replication", "failed_count"),
"Total replication operations failed",
[]string{"bucket"}, nil),
prometheus.GaugeValue,
float64(stat.FailedCount),
bucket,
)
for k, v := range usageInfo.ObjectSizesHistogram {

@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2016, 2017, 2018, 2019 MinIO, Inc.
* MinIO Cloud Storage, (C) 2016-2021 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

@ -38,10 +38,28 @@ var globalLockServer *localLocker

// RWLocker - locker interface to introduce GetRLock, RUnlock.
type RWLocker interface {
GetLock(ctx context.Context, timeout *dynamicTimeout) (newCtx context.Context, timedOutErr error)
Unlock()
GetRLock(ctx context.Context, timeout *dynamicTimeout) (newCtx context.Context, timedOutErr error)
RUnlock()
GetLock(ctx context.Context, timeout *dynamicTimeout) (lkCtx LockContext, timedOutErr error)
Unlock(cancel context.CancelFunc)
GetRLock(ctx context.Context, timeout *dynamicTimeout) (lkCtx LockContext, timedOutErr error)
RUnlock(cancel context.CancelFunc)
}

// LockContext holds the lock-backed context and the canceler for that context.
type LockContext struct {
ctx context.Context
cancel context.CancelFunc
}

// Context returns the lock context
func (l LockContext) Context() context.Context {
return l.ctx
}

// Cancel calls the underlying cancel() function
func (l LockContext) Cancel() {
if l.cancel != nil {
l.cancel()
}
}
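The new `RWLocker` API ties a context's lifetime to holding the lock: callers do their work under `lkctx.Context()` and hand `lkctx.Cancel` back to `Unlock`, exactly as the `iam.go` hunks earlier in this diff do with `txnLk`. A self-contained, hedged sketch of that pattern (the `getLock`/`unlock` helpers here are illustrative, not MinIO's):

```go
package main

import (
	"context"
	"fmt"
)

// LockContext mirrors the type introduced above: a context whose lifetime
// is tied to holding the lock, plus the canceler for that context.
type LockContext struct {
	ctx    context.Context
	cancel context.CancelFunc
}

func (l LockContext) Context() context.Context { return l.ctx }
func (l LockContext) Cancel() {
	if l.cancel != nil {
		l.cancel()
	}
}

// getLock hands out a lock-backed context; unlock takes the canceler so
// the context dies exactly when the lock is released.
func getLock(ctx context.Context) LockContext {
	ctx, cancel := context.WithCancel(ctx)
	return LockContext{ctx: ctx, cancel: cancel}
}

func unlock(lkctx LockContext) {
	lkctx.Cancel() // cancel first, then release the real lock here
}

func main() {
	lkctx := getLock(context.Background())
	// Work under the lock uses lkctx.Context(), as Init does above with
	// migrateIAMConfigsEtcdToEncrypted(lkctx.Context(), ...).
	fmt.Println(lkctx.Context().Err()) // <nil> while the lock is held
	unlock(lkctx)
	fmt.Println(lkctx.Context().Err()) // context.Canceled after release
}
```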
|
||||
|
||||
// newNSLock - return a new name space lock map.
|
||||
|
@ -142,7 +160,7 @@ type distLockInstance struct {
|
|||
}
|
||||
|
||||
// Lock - block until write lock is taken or timeout has occurred.
|
||||
func (di *distLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeout) (context.Context, error) {
|
||||
func (di *distLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeout) (LockContext, error) {
|
||||
lockSource := getSource(2)
|
||||
start := UTCNow()
|
||||
|
||||
|
@ -151,19 +169,23 @@ func (di *distLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeout
|
|||
Timeout: timeout.Timeout(),
|
||||
}) {
|
||||
timeout.LogFailure()
|
||||
return ctx, OperationTimedOut{}
|
||||
cancel()
|
||||
return LockContext{ctx: ctx, cancel: func() {}}, OperationTimedOut{}
|
||||
}
|
||||
timeout.LogSuccess(UTCNow().Sub(start))
|
||||
return newCtx, nil
|
||||
return LockContext{ctx: newCtx, cancel: cancel}, nil
|
||||
}
|
||||
|
||||
// Unlock - block until write lock is released.
|
||||
func (di *distLockInstance) Unlock() {
|
||||
func (di *distLockInstance) Unlock(cancel context.CancelFunc) {
|
||||
if cancel != nil {
|
||||
cancel()
|
||||
}
|
||||
di.rwMutex.Unlock()
|
||||
}
|
||||
|
||||
// RLock - block until read lock is taken or timeout has occurred.
|
||||
func (di *distLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeout) (context.Context, error) {
|
||||
func (di *distLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeout) (LockContext, error) {
|
||||
lockSource := getSource(2)
|
||||
start := UTCNow()
|
||||
|
||||
|
@ -172,14 +194,18 @@ func (di *distLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeou
|
|||
Timeout: timeout.Timeout(),
|
||||
}) {
|
||||
timeout.LogFailure()
|
||||
return ctx, OperationTimedOut{}
|
||||
cancel()
|
||||
+		return LockContext{ctx: ctx, cancel: func() {}}, OperationTimedOut{}
 	}
 	timeout.LogSuccess(UTCNow().Sub(start))
-	return newCtx, nil
+	return LockContext{ctx: newCtx, cancel: cancel}, nil
 }

 // RUnlock - block until read lock is released.
-func (di *distLockInstance) RUnlock() {
+func (di *distLockInstance) RUnlock(cancel context.CancelFunc) {
+	if cancel != nil {
+		cancel()
+	}
 	di.rwMutex.RUnlock()
 }

@@ -207,27 +233,32 @@ func (n *nsLockMap) NewNSLock(lockers func() ([]dsync.NetLocker, string), volume
 }

 // Lock - block until write lock is taken or timeout has occurred.
-func (li *localLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeout) (_ context.Context, timedOutErr error) {
+func (li *localLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeout) (_ LockContext, timedOutErr error) {
 	lockSource := getSource(2)
 	start := UTCNow()
 	const readLock = false
-	var success []int
+	success := make([]int, len(li.paths))
 	for i, path := range li.paths {
 		if !li.ns.lock(ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
 			timeout.LogFailure()
-			for _, sint := range success {
-				li.ns.unlock(li.volume, li.paths[sint], readLock)
+			for si, sint := range success {
+				if sint == 1 {
+					li.ns.unlock(li.volume, li.paths[si], readLock)
+				}
 			}
-			return nil, OperationTimedOut{}
+			return LockContext{}, OperationTimedOut{}
 		}
-		success = append(success, i)
+		success[i] = 1
 	}
 	timeout.LogSuccess(UTCNow().Sub(start))
-	return ctx, nil
+	return LockContext{ctx: ctx, cancel: func() {}}, nil
 }

 // Unlock - block until write lock is released.
-func (li *localLockInstance) Unlock() {
+func (li *localLockInstance) Unlock(cancel context.CancelFunc) {
+	if cancel != nil {
+		cancel()
+	}
 	const readLock = false
 	for _, path := range li.paths {
 		li.ns.unlock(li.volume, path, readLock)

@@ -235,27 +266,32 @@ func (li *localLockInstance) Unlock() {
 }

 // RLock - block until read lock is taken or timeout has occurred.
-func (li *localLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeout) (_ context.Context, timedOutErr error) {
+func (li *localLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeout) (_ LockContext, timedOutErr error) {
 	lockSource := getSource(2)
 	start := UTCNow()
 	const readLock = true
-	var success []int
+	success := make([]int, len(li.paths))
 	for i, path := range li.paths {
 		if !li.ns.lock(ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
 			timeout.LogFailure()
-			for _, sint := range success {
-				li.ns.unlock(li.volume, li.paths[sint], readLock)
+			for si, sint := range success {
+				if sint == 1 {
+					li.ns.unlock(li.volume, li.paths[si], readLock)
+				}
 			}
-			return nil, OperationTimedOut{}
+			return LockContext{}, OperationTimedOut{}
 		}
-		success = append(success, i)
+		success[i] = 1
 	}
 	timeout.LogSuccess(UTCNow().Sub(start))
-	return ctx, nil
+	return LockContext{ctx: ctx, cancel: func() {}}, nil
 }

 // RUnlock - block until read lock is released.
-func (li *localLockInstance) RUnlock() {
+func (li *localLockInstance) RUnlock(cancel context.CancelFunc) {
+	if cancel != nil {
+		cancel()
+	}
 	const readLock = true
 	for _, path := range li.paths {
 		li.ns.unlock(li.volume, path, readLock)
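Note: the hunks above change the locker API from returning a bare context.Context to returning a LockContext that carries the context together with its cancel function, and Unlock/RUnlock now take that cancel so releasing a lock also cancels any work scoped to it. A minimal sketch of the new calling convention, assuming a lock obtained via NewNSLock; the `lkctx.Cancel` accessor is the one used in cmd/server-main.go later in this diff, while `Context()` is an assumption not shown in these hunks:

```go
// Sketch only: how call sites adopt the LockContext API above.
// nsLock is any value implementing the RWLocker-style interface changed here.
func withNSLock(ctx context.Context, nsLock RWLocker, timeout *dynamicTimeout, work func(context.Context) error) error {
	lkctx, err := nsLock.GetLock(ctx, timeout)
	if err != nil {
		return err // typically OperationTimedOut{}
	}
	// Unlock now releases the lock and cancels the lock-scoped context together.
	defer nsLock.Unlock(lkctx.Cancel)
	return work(lkctx.Context()) // Context() accessor assumed, not shown in this hunk
}
```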
@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"

@@ -615,6 +616,8 @@ func (sys *NotificationSys) findEarliestCleanBloomFilter(ctx context.Context, di
 	return best
 }

+var errPeerNotReachable = errors.New("peer is not reachable")
+
 // GetLocks - makes GetLocks RPC call on all peers.
 func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*PeerLocks {
 	locksResp := make([]*PeerLocks, len(sys.peerClients))

@@ -623,7 +626,7 @@ func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*Pe
 		index := index
 		g.Go(func() error {
 			if client == nil {
-				return nil
+				return errPeerNotReachable
 			}
 			serverLocksResp, err := sys.peerClients[index].GetLocks()
 			if err != nil {

@@ -671,6 +674,7 @@ func (sys *NotificationSys) LoadBucketMetadata(ctx context.Context, bucketName s

 // DeleteBucketMetadata - calls DeleteBucketMetadata call on all peers
 func (sys *NotificationSys) DeleteBucketMetadata(ctx context.Context, bucketName string) {
+	globalReplicationStats.Delete(bucketName)
 	globalBucketMetadataSys.Remove(bucketName)
 	if localMetacacheMgr != nil {
 		localMetacacheMgr.deleteBucketCache(bucketName)

@@ -694,6 +698,37 @@ func (sys *NotificationSys) DeleteBucketMetadata(ctx context.Context, bucketName
 	}
 }

+// GetClusterBucketStats - calls GetClusterBucketStats call on all peers for a cluster statistics view.
+func (sys *NotificationSys) GetClusterBucketStats(ctx context.Context, bucketName string) []BucketStats {
+	ng := WithNPeers(len(sys.peerClients))
+	bucketStats := make([]BucketStats, len(sys.peerClients))
+	for index, client := range sys.peerClients {
+		index := index
+		client := client
+		ng.Go(ctx, func() error {
+			if client == nil {
+				return errPeerNotReachable
+			}
+			bs, err := client.GetBucketStats(bucketName)
+			if err != nil {
+				return err
+			}
+			bucketStats[index] = bs
+			return nil
+		}, index, *client.host)
+	}
+	for _, nErr := range ng.Wait() {
+		reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
+		if nErr.Err != nil {
+			logger.LogIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err)
+		}
+	}
+	bucketStats = append(bucketStats, BucketStats{
+		ReplicationStats: globalReplicationStats.Get(bucketName),
+	})
+	return bucketStats
+}
+
 // Loads notification policies for all buckets into NotificationSys.
 func (sys *NotificationSys) load(buckets []BucketInfo) {
 	for _, bucket := range buckets {

@@ -1368,7 +1403,7 @@ func (args eventArgs) ToEvent(escape bool) event.Event {
 		AwsRegion:    args.ReqParams["region"],
 		EventTime:    eventTime.Format(event.AMZTimeFormat),
 		EventName:    args.EventName,
-		UserIdentity: event.Identity{PrincipalID: args.ReqParams["accessKey"]},
+		UserIdentity: event.Identity{PrincipalID: args.ReqParams["principalId"]},
 		RequestParameters: args.ReqParams,
 		ResponseElements:  respElements,
 		S3: event.Metadata{

@@ -1376,7 +1411,7 @@ func (args eventArgs) ToEvent(escape bool) event.Event {
 			ConfigurationID: "Config",
 			Bucket: event.Bucket{
 				Name:          args.BucketName,
-				OwnerIdentity: event.Identity{PrincipalID: args.ReqParams["accessKey"]},
+				OwnerIdentity: event.Identity{PrincipalID: args.ReqParams["principalId"]},
 				ARN:           policy.ResourceARNPrefix + args.BucketName,
 			},
 			Object: event.Object{
@@ -69,57 +69,6 @@ var ObjectsHistogramIntervals = []objectHistogramInterval{
 	{"GREATER_THAN_512_MB", humanize.MiByte * 512, math.MaxInt64},
 }

-// BucketUsageInfo - bucket usage info provides
-// - total size of the bucket
-// - total objects in a bucket
-// - object size histogram per bucket
-type BucketUsageInfo struct {
-	Size                   uint64            `json:"size"`
-	ReplicationPendingSize uint64            `json:"objectsPendingReplicationTotalSize"`
-	ReplicationFailedSize  uint64            `json:"objectsFailedReplicationTotalSize"`
-	ReplicatedSize         uint64            `json:"objectsReplicatedTotalSize"`
-	ReplicaSize            uint64            `json:"objectReplicaTotalSize"`
-	ObjectsCount           uint64            `json:"objectsCount"`
-	ObjectSizesHistogram   map[string]uint64 `json:"objectsSizesHistogram"`
-}
-
-// DataUsageInfo represents data usage stats of the underlying Object API
-type DataUsageInfo struct {
-	// LastUpdate is the timestamp of when the data usage info was last updated.
-	// This does not indicate a full scan.
-	LastUpdate time.Time `json:"lastUpdate"`
-
-	// Objects total count across all buckets
-	ObjectsTotalCount uint64 `json:"objectsCount"`
-
-	// Objects total size across all buckets
-	ObjectsTotalSize uint64 `json:"objectsTotalSize"`
-
-	// Total Size for objects that have not yet been replicated
-	ReplicationPendingSize uint64 `json:"objectsPendingReplicationTotalSize"`
-
-	// Total size for objects that have witness one or more failures and will be retried
-	ReplicationFailedSize uint64 `json:"objectsFailedReplicationTotalSize"`
-
-	// Total size for objects that have been replicated to destination
-	ReplicatedSize uint64 `json:"objectsReplicatedTotalSize"`
-
-	// Total size for objects that are replicas
-	ReplicaSize uint64 `json:"objectsReplicaTotalSize"`
-
-	// Total number of buckets in this cluster
-	BucketsCount uint64 `json:"bucketsCount"`
-
-	// Buckets usage info provides following information across all buckets
-	// - total size of the bucket
-	// - total objects in a bucket
-	// - object size histogram per bucket
-	BucketsUsage map[string]BucketUsageInfo `json:"bucketsUsageInfo"`
-
-	// Deprecated kept here for backward compatibility reasons.
-	BucketSizes map[string]uint64 `json:"bucketsSizes"`
-}
-
 // BucketInfo - represents bucket metadata.
 type BucketInfo struct {
 	// Name of the bucket.

@@ -271,6 +220,13 @@ func (o ObjectInfo) Clone() (cinfo ObjectInfo) {
 	return cinfo
 }

+// ReplicateObjectInfo represents object info to be replicated
+type ReplicateObjectInfo struct {
+	ObjectInfo
+	OpType     replication.Type
+	RetryCount uint32
+}
+
 // MultipartInfo captures metadata information about the uploadId
 // this data structure is used primarily for some internal purposes
 // for verifying upload type such as was the upload
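Note: the BucketUsageInfo/DataUsageInfo definitions deleted above move out of package cmd (the ObjectLayer hunk below switches NSScanner to madmin.DataUsageInfo), so the JSON wire shape stays as documented by the tags shown. A minimal standalone decoding sketch against those tags, trimmed to a few fields and using an invented sample blob:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the JSON tags from the struct removed above (subset only).
type bucketUsage struct {
	Size         uint64 `json:"size"`
	ObjectsCount uint64 `json:"objectsCount"`
}

type dataUsage struct {
	ObjectsTotalCount uint64                 `json:"objectsCount"`
	ObjectsTotalSize  uint64                 `json:"objectsTotalSize"`
	BucketsCount      uint64                 `json:"bucketsCount"`
	BucketsUsage      map[string]bucketUsage `json:"bucketsUsageInfo"`
}

func main() {
	blob := []byte(`{"objectsCount":2,"objectsTotalSize":2048,"bucketsCount":1,
		"bucketsUsageInfo":{"photos":{"size":2048,"objectsCount":2}}}`)
	var du dataUsage
	if err := json.Unmarshal(blob, &du); err != nil {
		panic(err)
	}
	fmt.Println(du.BucketsUsage["photos"].ObjectsCount) // 2
}
```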
@@ -318,15 +318,6 @@ func (e BucketExists) Error() string {
 	return "Bucket exists: " + e.Bucket
 }

-// UnsupportedDelimiter - unsupported delimiter.
-type UnsupportedDelimiter struct {
-	Delimiter string
-}
-
-func (e UnsupportedDelimiter) Error() string {
-	return fmt.Sprintf("delimiter '%s' is not supported. Only '/' is supported", e.Delimiter)
-}
-
 // InvalidUploadIDKeyCombination - invalid upload id and key marker combination.
 type InvalidUploadIDKeyCombination struct {
 	UploadIDMarker, KeyMarker string

@@ -426,7 +417,7 @@ func (e BucketRemoteTargetNotFound) Error() string {
 type BucketRemoteConnectionErr GenericError

 func (e BucketRemoteConnectionErr) Error() string {
-	return "Remote service endpoint or target bucket not available: " + e.Bucket
+	return fmt.Sprintf("Remote service endpoint or target bucket not available: %s \n\t%s", e.Bucket, e.Err.Error())
 }

 // BucketRemoteAlreadyExists remote already exists for this target type.

@@ -624,14 +615,11 @@ func (e InvalidETag) Error() string {

 // NotImplemented If a feature is not implemented
 type NotImplemented struct {
-	API string
+	Message string
 }

 func (e NotImplemented) Error() string {
-	if e.API != "" {
-		return e.API + " is Not Implemented"
-	}
-	return "Not Implemented"
+	return e.Message
 }

 // UnsupportedMetadata - unsupported metadata
@@ -91,7 +91,7 @@ type ObjectLayer interface {

 	// Storage operations.
 	Shutdown(context.Context) error
-	NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error
+	NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo) error

 	BackendInfo() madmin.BackendInfo
 	StorageInfo(ctx context.Context) (StorageInfo, []error)

@@ -814,13 +814,7 @@ func (g *GetObjectReader) Close() error {

 // Read - to implement Reader interface.
 func (g *GetObjectReader) Read(p []byte) (n int, err error) {
-	n, err = g.pReader.Read(p)
-	if err != nil {
-		// Calling code may not Close() in case of error, so
-		// we ensure it.
-		g.Close()
-	}
-	return
+	return g.pReader.Read(p)
 }

 //SealMD5CurrFn seals md5sum with object encryption key and returns sealed
@@ -19,17 +19,18 @@ package cmd

 import (
 	"bufio"
 	"context"
 	"encoding/hex"
 	"encoding/xml"
 	"fmt"
 	"io"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
+	"os"
 	"sort"
 	"strconv"
 	"strings"
 	"sync"
 	"time"

 	"github.com/google/uuid"

@@ -113,6 +114,7 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL, guessIsBrowserReq(r))
 		return
 	}
+
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
 	object, err := unescapePath(vars["object"])

@@ -398,7 +400,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req

 	// Both 'bytes' and 'partNumber' cannot be specified at the same time
 	if rs != nil && opts.PartNumber > 0 {
-		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrBadRequest))
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRangePartNumber), r.URL, guessIsBrowserReq(r))
 		return
 	}

@@ -692,7 +694,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re

 	// Both 'bytes' and 'partNumber' cannot be specified at the same time
 	if rs != nil && opts.PartNumber > 0 {
-		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrBadRequest))
+		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInvalidRangePartNumber))
 		return
 	}

@@ -1271,6 +1273,11 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	// Ensure that metadata does not contain sensitive information
 	crypto.RemoveSensitiveEntries(srcInfo.UserDefined)

+	// If we see legacy source, metadataOnly we have to overwrite the content.
+	if srcInfo.Legacy {
+		srcInfo.metadataOnly = false
+	}
+
 	// Check if x-amz-metadata-directive or x-amz-tagging-directive was not set to REPLACE and source,
 	// destination are same objects. Apply this restriction also when
 	// metadataOnly is true indicating that we are not overwriting the object.

@@ -1339,7 +1346,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime)
 	encodedSuccessResponse := encodeResponse(response)
 	if replicate, sync := mustReplicate(ctx, r, dstBucket, dstObject, objInfo.UserDefined, objInfo.ReplicationStatus.String()); replicate {
-		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync)
+		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
 	}

 	setPutObjHeaders(w, objInfo, false)

@@ -1654,7 +1661,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		}
 	}
 	if replicate, sync := mustReplicate(ctx, r, bucket, object, metadata, ""); replicate {
-		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync)
+		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
 	}
 	setPutObjHeaders(w, objInfo, false)

@@ -1672,6 +1679,281 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 	})
 }

+// PutObjectExtractHandler - PUT Object extract is an extended API
+// based off from AWS Snowball feature to auto extract compressed
+// stream will be extracted in the same directory it is stored in
+// and the folder structures will be built out accordingly.
+func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "PutObjectExtract")
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
+
+	objectAPI := api.ObjectAPI()
+	if objectAPI == nil {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	if _, ok := crypto.IsRequested(r.Header); !objectAPI.IsEncryptionSupported() && ok {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	vars := mux.Vars(r)
+	bucket := vars["bucket"]
+	object, err := unescapePath(vars["object"])
+	if err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	// X-Amz-Copy-Source shouldn't be set for this call.
+	if _, ok := r.Header[xhttp.AmzCopySource]; ok {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	// Validate storage class metadata if present
+	sc := r.Header.Get(xhttp.AmzStorageClass)
+	if sc != "" {
+		if !storageclass.IsValid(sc) {
+			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL, guessIsBrowserReq(r))
+			return
+		}
+	}
+
+	clientETag, err := etag.FromContentMD5(r.Header)
+	if err != nil {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	/// if Content-Length is unknown/missing, deny the request
+	size := r.ContentLength
+	rAuthType := getRequestAuthType(r)
+	if rAuthType == authTypeStreamingSigned {
+		if sizeStr, ok := r.Header[xhttp.AmzDecodedContentLength]; ok {
+			if sizeStr[0] == "" {
+				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
+				return
+			}
+			size, err = strconv.ParseInt(sizeStr[0], 10, 64)
+			if err != nil {
+				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+				return
+			}
+		}
+	}
+
+	if size == -1 {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	/// maximum Upload size for objects in a single operation
+	if isMaxObjectSize(size) {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	var (
+		md5hex              = clientETag.String()
+		sha256hex           = ""
+		reader    io.Reader = r.Body
+		s3Err     APIErrorCode
+		putObject = objectAPI.PutObject
+	)
+
+	// Check if put is allowed
+	if s3Err = isPutActionAllowed(ctx, rAuthType, bucket, object, r, iampolicy.PutObjectAction); s3Err != ErrNone {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	switch rAuthType {
+	case authTypeStreamingSigned:
+		// Initialize stream signature verifier.
+		reader, s3Err = newSignV4ChunkedReader(r)
+		if s3Err != ErrNone {
+			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
+			return
+		}
+	case authTypeSignedV2, authTypePresignedV2:
+		s3Err = isReqAuthenticatedV2(r)
+		if s3Err != ErrNone {
+			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
+			return
+		}

+	case authTypePresigned, authTypeSigned:
+		if s3Err = reqSignatureV4Verify(r, globalServerRegion, serviceS3); s3Err != ErrNone {
+			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
+			return
+		}
+		if !skipContentSha256Cksum(r) {
+			sha256hex = getContentSha256Cksum(r, serviceS3)
+		}
+	}
+
+	hreader, err := hash.NewReader(reader, size, md5hex, sha256hex, size)
+	if err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	if err := enforceBucketQuota(ctx, bucket, size); err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+		return
+	}
+
+	// Check if bucket encryption is enabled
+	_, err = globalBucketSSEConfigSys.Get(bucket)
+	// This request header needs to be set prior to setting ObjectOptions
+	if (globalAutoEncryption || err == nil) && !crypto.SSEC.IsRequested(r.Header) {
+		r.Header.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
+	}
+
+	retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction)
+	holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectLegalHoldAction)
+
+	if api.CacheAPI() != nil {
+		putObject = api.CacheAPI().PutObject
+	}
+
+	getObjectInfo := objectAPI.GetObjectInfo
+	if api.CacheAPI() != nil {
+		getObjectInfo = api.CacheAPI().GetObjectInfo
+	}
+
+	putObjectTar := func(reader io.Reader, info os.FileInfo, object string) {
+		size := info.Size()
+		metadata := map[string]string{
+			xhttp.AmzStorageClass: sc,
+		}
+
+		actualSize := size
+		if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > 0 {
+			// Storing the compression metadata.
+			metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
+			metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)
+
+			actualReader, err := hash.NewReader(reader, size, "", "", actualSize)
+			if err != nil {
+				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+				return
+			}
+
+			// Set compression metrics.
+			s2c := newS2CompressReader(actualReader, actualSize)
+			defer s2c.Close()
+			reader = etag.Wrap(s2c, actualReader)
+			size = -1 // Since compressed size is un-predictable.
+		}
+
+		hashReader, err := hash.NewReader(reader, size, "", "", actualSize)
+		if err != nil {
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+			return
+		}
+
+		rawReader := hashReader
+		pReader := NewPutObjReader(rawReader)
+
+		// get encryption options
+		opts, err := putOpts(ctx, r, bucket, object, metadata)
+		if err != nil {
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+			return
+		}
+		opts.MTime = info.ModTime()
+
+		retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms)
+		if s3Err == ErrNone && retentionMode.Valid() {
+			metadata[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
+			metadata[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat)
+		}
+
+		if s3Err == ErrNone && legalHold.Status.Valid() {
+			metadata[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
+		}
+
+		if s3Err != ErrNone {
+			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
+			return
+		}
+
+		if ok, _ := mustReplicate(ctx, r, bucket, object, metadata, ""); ok {
+			metadata[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
+		}
+
+		if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() {
+			if s3Err = isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.ReplicateObjectAction); s3Err != ErrNone {
+				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
+				return
+			}
+		}
+
+		var objectEncryptionKey crypto.ObjectKey
+		if objectAPI.IsEncryptionSupported() {
+			if _, ok := crypto.IsRequested(r.Header); ok && !HasSuffix(object, SlashSeparator) { // handle SSE requests
+				if crypto.SSECopy.IsRequested(r.Header) {
+					writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r))
+					return
+				}
+
+				reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
+				if err != nil {
+					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+					return
+				}
+
+				wantSize := int64(-1)
+				if size >= 0 {
+					info := ObjectInfo{Size: size}
+					wantSize = info.EncryptedSize()
+				}
+
+				// do not try to verify encrypted content
+				hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
+				if err != nil {
+					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+					return
+				}
+
+				pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
+				if err != nil {
+					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+					return
+				}
+			}
+		}
+
+		// Ensure that metadata does not contain sensitive information
+		crypto.RemoveSensitiveEntries(metadata)
+
+		// Create the object..
+		objInfo, err := putObject(ctx, bucket, object, pReader, opts)
+		if err != nil {
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+			return
+		}
+
+		if replicate, sync := mustReplicate(ctx, r, bucket, object, metadata, ""); replicate {
+			scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
+		}
+
+	}
+
+	untar(hreader, putObjectTar)
+
+	w.Header()[xhttp.ETag] = []string{`"` + hex.EncodeToString(hreader.MD5Current()) + `"`}
+	writeSuccessResponseHeadersOnly(w)
+}
+
 /// Multipart objectAPIHandlers

 // NewMultipartUploadHandler - New multipart upload.
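Note: PutObjectExtractHandler above streams the request body into untar (the new file later in this diff) and re-issues one PutObject per archive entry via the putObjectTar callback. How the extract endpoint is routed is not shown in this excerpt; a client-side sketch of producing a gzip'd tar payload of the kind the handler would unpack, using only the standard library:

```go
package payload

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
)

// buildTarGz returns a .tar.gz body whose entries the handler above would
// extract into individual objects (entry paths become object names).
func buildTarGz(files map[string][]byte) (*bytes.Buffer, error) {
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	tw := tar.NewWriter(gz)
	for name, data := range files {
		hdr := &tar.Header{Name: name, Mode: 0600, Size: int64(len(data))}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write(data); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return &buf, gz.Close()
}
```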
@@ -2737,7 +3019,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite

 	setPutObjHeaders(w, objInfo, false)
 	if replicate, sync := mustReplicate(ctx, r, bucket, object, objInfo.UserDefined, objInfo.ReplicationStatus.String()); replicate {
-		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync)
+		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
 	}

 	// Write success response.

@@ -2817,7 +3099,8 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
 			VersionID: opts.VersionID,
 		})
 	}
-	_, replicateDel, replicateSync := checkReplicateDelete(ctx, bucket, ObjectToDelete{ObjectName: object, VersionID: opts.VersionID}, goi, gerr)
+
+	replicateDel, replicateSync := checkReplicateDelete(ctx, bucket, ObjectToDelete{ObjectName: object, VersionID: opts.VersionID}, goi, gerr)
 	if replicateDel {
 		if opts.VersionID != "" {
 			opts.VersionPurgeStatus = Pending

@@ -2825,6 +3108,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
 			opts.DeleteMarkerReplicationStatus = string(replication.Pending)
 		}
 	}
+
 	vID := opts.VersionID
 	if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() {
 		// check if replica has permission to be deleted.

@@ -3015,7 +3299,7 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r
 		return
 	}
 	if replicate {
-		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync)
+		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.MetadataReplicationType)
 	}
 	writeSuccessResponseHeadersOnly(w)

@@ -3188,7 +3472,7 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r
 		return
 	}
 	if replicate {
-		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync)
+		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.MetadataReplicationType)
 	}

 	writeSuccessNoContent(w)

@@ -3371,7 +3655,7 @@ func (api objectAPIHandlers) PutObjectTaggingHandler(w http.ResponseWriter, r *h
 	}

 	if replicate {
-		scheduleReplication(ctx, objInfo.Clone(), objAPI, sync)
+		scheduleReplication(ctx, objInfo.Clone(), objAPI, sync, replication.MetadataReplicationType)
 	}

 	if objInfo.VersionID != "" {

@@ -3445,7 +3729,7 @@ func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r
 	}

 	if replicate {
-		scheduleReplication(ctx, oi.Clone(), objAPI, sync)
+		scheduleReplication(ctx, oi.Clone(), objAPI, sync, replication.MetadataReplicationType)
 	}

 	if oi.VersionID != "" {
@@ -434,6 +434,20 @@ func (client *peerRESTClient) DownloadProfileData() (data map[string][]byte, err
 	return data, err
 }

+// GetBucketStats - load bucket statistics
+func (client *peerRESTClient) GetBucketStats(bucket string) (BucketStats, error) {
+	values := make(url.Values)
+	values.Set(peerRESTBucket, bucket)
+	respBody, err := client.call(peerRESTMethodGetBucketStats, values, nil, -1)
+	if err != nil {
+		return BucketStats{}, err
+	}
+
+	var bs BucketStats
+	defer http.DrainBody(respBody)
+	return bs, msgp.Decode(respBody, &bs)
+}
+
 // LoadBucketMetadata - load bucket metadata
 func (client *peerRESTClient) LoadBucketMetadata(bucket string) error {
 	values := make(url.Values)
@@ -17,7 +17,7 @@
 package cmd

 const (
-	peerRESTVersion       = "v12"
+	peerRESTVersion       = "v14" // Add GetBucketStats API
 	peerRESTVersionPrefix = SlashSeparator + peerRESTVersion
 	peerRESTPrefix        = minioReservedBucketPath + "/peer"
 	peerRESTPath          = peerRESTPrefix + peerRESTVersionPrefix

@@ -36,6 +36,7 @@ const (
 	peerRESTMethodDispatchNetInfo      = "/dispatchnetinfo"
 	peerRESTMethodDeleteBucketMetadata = "/deletebucketmetadata"
 	peerRESTMethodLoadBucketMetadata   = "/loadbucketmetadata"
+	peerRESTMethodGetBucketStats       = "/getbucketstats"
 	peerRESTMethodServerUpdate         = "/serverupdate"
 	peerRESTMethodSignalService        = "/signalservice"
 	peerRESTMethodBackgroundHealStatus = "/backgroundhealstatus"
@@ -532,12 +532,35 @@ func (s *peerRESTServer) DeleteBucketMetadataHandler(w http.ResponseWriter, r *h
 		return
 	}

+	globalReplicationStats.Delete(bucketName)
 	globalBucketMetadataSys.Remove(bucketName)
 	if localMetacacheMgr != nil {
 		localMetacacheMgr.deleteBucketCache(bucketName)
 	}
 }

+// GetBucketStatsHandler - fetches current in-memory bucket stats, currently only
+// returns BucketReplicationStatus
+func (s *peerRESTServer) GetBucketStatsHandler(w http.ResponseWriter, r *http.Request) {
+	if !s.IsValid(w, r) {
+		s.writeErrorResponse(w, errors.New("Invalid request"))
+		return
+	}
+
+	vars := mux.Vars(r)
+	bucketName := vars[peerRESTBucket]
+	if bucketName == "" {
+		s.writeErrorResponse(w, errors.New("Bucket name is missing"))
+		return
+	}
+
+	bs := BucketStats{
+		ReplicationStats: globalReplicationStats.Get(bucketName),
+	}
+	defer w.(http.Flusher).Flush()
+	logger.LogIf(r.Context(), msgp.Encode(w, &bs))
+}
+
 // LoadBucketMetadataHandler - reloads in memory bucket metadata
 func (s *peerRESTServer) LoadBucketMetadataHandler(w http.ResponseWriter, r *http.Request) {
 	if !s.IsValid(w, r) {

@@ -1069,6 +1092,7 @@ func registerPeerRESTHandlers(router *mux.Router) {
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodCycleBloom).HandlerFunc(httpTraceHdrs(server.CycleServerBloomFilterHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDeleteBucketMetadata).HandlerFunc(httpTraceHdrs(server.DeleteBucketMetadataHandler)).Queries(restQueries(peerRESTBucket)...)
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodLoadBucketMetadata).HandlerFunc(httpTraceHdrs(server.LoadBucketMetadataHandler)).Queries(restQueries(peerRESTBucket)...)
+	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetBucketStats).HandlerFunc(httpTraceHdrs(server.GetBucketStatsHandler)).Queries(restQueries(peerRESTBucket)...)
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSignalService).HandlerFunc(httpTraceHdrs(server.SignalServiceHandler)).Queries(restQueries(peerRESTSignal)...)
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodServerUpdate).HandlerFunc(httpTraceHdrs(server.ServerUpdateHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDeletePolicy).HandlerFunc(httpTraceAll(server.DeletePolicyHandler)).Queries(restQueries(peerRESTPolicy)...)
@@ -127,6 +127,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod
 	}
 	req.Header.Set("Authorization", "Bearer "+c.newAuthToken(req.URL.RawQuery))
 	req.Header.Set("X-Minio-Time", time.Now().UTC().Format(time.RFC3339))
+	req.Header.Set("Expect", "100-continue") // set expect continue to honor expect continue timeouts
 	if length > 0 {
 		req.ContentLength = length
 	}
@@ -48,9 +48,6 @@ var globalHandlers = []mux.MiddlewareFunc{
 	// routes them accordingly. Client receives a HTTP error for
 	// invalid/unsupported signatures.
 	setAuthHandler,
-	// Validates all incoming URL resources, for invalid/unsupported
-	// resources client receives a HTTP error.
-	setIgnoreResourcesHandler,
 	// Validates all incoming requests to have a valid date header.
 	setTimeValidityHandler,
 	// Adds cache control for all browser requests.
@@ -278,7 +278,6 @@ func initServer(ctx context.Context, newObject ObjectLayer) error {

 	lockTimeout := newDynamicTimeout(5*time.Second, 3*time.Second)

-	var err error
 	for {
 		select {
 		case <-ctx.Done():

@@ -289,7 +288,8 @@ func initServer(ctx context.Context, newObject ObjectLayer) error {

 		// let one of the server acquire the lock, if not let them timeout.
 		// which shall be retried again by this loop.
-		if _, err = txnLk.GetLock(ctx, lockTimeout); err != nil {
+		lkctx, err := txnLk.GetLock(ctx, lockTimeout)
+		if err != nil {
 			logger.Info("Waiting for all MinIO sub-systems to be initialized.. trying to acquire lock")

 			time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))

@@ -308,7 +308,7 @@ func initServer(ctx context.Context, newObject ObjectLayer) error {
 		// Upon success migrating the config, initialize all sub-systems
 		// if all sub-systems initialized successfully return right away
 		if err = initAllSubsystems(ctx, newObject); err == nil {
-			txnLk.Unlock()
+			txnLk.Unlock(lkctx.Cancel)
 			// All successful return.
 			if globalIsDistErasure {
 				// These messages only meant primarily for distributed setup, so only log during distributed setup.

@@ -318,7 +318,8 @@ func initServer(ctx context.Context, newObject ObjectLayer) error {
 			}
 		}

-		txnLk.Unlock() // Unlock the transaction lock and allow other nodes to acquire the lock if possible.
+		// Unlock the transaction lock and allow other nodes to acquire the lock if possible.
+		txnLk.Unlock(lkctx.Cancel)

 		if configRetriableErrors(err) {
 			logger.Info("Waiting for all MinIO sub-systems to be initialized.. possible cause (%v)", err)
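Note: the initServer hunks show the pattern every Unlock call site follows after the locker API change: the cancel func travels with the lock and is invoked on release, so the lock-scoped context cannot leak. A condensed sketch of that retry loop under stated assumptions (math/rand jitter stands in for the local rand source `r`; names simplified):

```go
// Sketch only: acquire the transaction lock, init, release with cancel.
func acquireAndInit(ctx context.Context, txnLk RWLocker, lockTimeout *dynamicTimeout, newObject ObjectLayer) error {
	for {
		lkctx, err := txnLk.GetLock(ctx, lockTimeout)
		if err != nil {
			// Lock held elsewhere: back off with jitter, then retry.
			time.Sleep(time.Duration(rand.Float64() * float64(5*time.Second)))
			continue
		}
		err = initAllSubsystems(ctx, newObject)
		// Release the lock and cancel its context together, on every path.
		txnLk.Unlock(lkctx.Cancel)
		if err == nil || !configRetriableErrors(err) {
			return err
		}
	}
}
```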
@@ -207,7 +207,7 @@ func (client *storageRESTClient) NSScanner(ctx context.Context, cache dataUsageC
 		pw.CloseWithError(cache.serializeTo(pw))
 	}()
 	respBody, err := client.call(ctx, storageRESTMethodNSScanner, url.Values{}, pr, -1)
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	if err != nil {
 		pr.Close()
 		return cache, err

@@ -221,7 +221,7 @@ func (client *storageRESTClient) NSScanner(ctx context.Context, cache dataUsageC
 	}()
 	err = newCache.deserialize(pr)
 	pr.CloseWithError(err)
-	return newCache, err
+	return newCache, toStorageErr(err)
 }

 func (client *storageRESTClient) GetDiskID() (string, error) {

@@ -238,6 +238,14 @@ func (client *storageRESTClient) SetDiskID(id string) {

 // DiskInfo - fetch disk information for a remote disk.
 func (client *storageRESTClient) DiskInfo(ctx context.Context) (info DiskInfo, err error) {
+	if !client.IsOnline() {
+		// make sure to check if the disk is offline, since the underlying
+		// value is cached we should attempt to invalidate it if such calls
+		// were attempted. This can lead to false success under certain conditions
+		// - this change attempts to avoid stale information if the underlying
+		// transport is already down.
+		return info, errDiskNotFound
+	}
 	client.diskInfoCache.Once.Do(func() {
 		client.diskInfoCache.TTL = time.Second
 		client.diskInfoCache.Update = func() (interface{}, error) {

@@ -247,7 +255,7 @@ func (client *storageRESTClient) DiskInfo(ctx context.Context) (info DiskInfo, e
 			if err != nil {
 				return info, err
 			}
-			defer http.DrainBody(respBody)
+			defer xhttp.DrainBody(respBody)
 			if err = msgp.Decode(respBody, &info); err != nil {
 				return info, err
 			}

@@ -270,7 +278,7 @@ func (client *storageRESTClient) MakeVolBulk(ctx context.Context, volumes ...str
 	values := make(url.Values)
 	values.Set(storageRESTVolumes, strings.Join(volumes, ","))
 	respBody, err := client.call(ctx, storageRESTMethodMakeVolBulk, values, nil, -1)
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	return err
 }

@@ -279,7 +287,7 @@ func (client *storageRESTClient) MakeVol(ctx context.Context, volume string) (er
 	values := make(url.Values)
 	values.Set(storageRESTVolume, volume)
 	respBody, err := client.call(ctx, storageRESTMethodMakeVol, values, nil, -1)
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	return err
 }

@@ -289,7 +297,7 @@ func (client *storageRESTClient) ListVols(ctx context.Context) (vols []VolInfo,
 	if err != nil {
 		return
 	}
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	vinfos := VolsInfo(vols)
 	err = msgp.Decode(respBody, &vinfos)
 	return vinfos, err

@@ -303,7 +311,7 @@ func (client *storageRESTClient) StatVol(ctx context.Context, volume string) (vo
 	if err != nil {
 		return
 	}
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	err = msgp.Decode(respBody, &vol)
 	return vol, err
 }

@@ -316,7 +324,7 @@ func (client *storageRESTClient) DeleteVol(ctx context.Context, volume string, f
 		values.Set(storageRESTForceDelete, "true")
 	}
 	respBody, err := client.call(ctx, storageRESTMethodDeleteVol, values, nil, -1)
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	return err
 }

@@ -327,7 +335,7 @@ func (client *storageRESTClient) AppendFile(ctx context.Context, volume string,
 	values.Set(storageRESTFilePath, path)
 	reader := bytes.NewReader(buf)
 	respBody, err := client.call(ctx, storageRESTMethodAppendFile, values, reader, -1)
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	return err
 }

@@ -337,12 +345,7 @@ func (client *storageRESTClient) CreateFile(ctx context.Context, volume, path st
 	values.Set(storageRESTFilePath, path)
 	values.Set(storageRESTLength, strconv.Itoa(int(size)))
 	respBody, err := client.call(ctx, storageRESTMethodCreateFile, values, ioutil.NopCloser(reader), size)
-	if err != nil {
-		return err
-	}
-	waitReader, err := waitForHTTPResponse(respBody)
-	defer http.DrainBody(ioutil.NopCloser(waitReader))
-	defer respBody.Close()
+	defer xhttp.DrainBody(respBody)
 	return err
 }

@@ -357,7 +360,7 @@ func (client *storageRESTClient) WriteMetadata(ctx context.Context, volume, path
 	}

 	respBody, err := client.call(ctx, storageRESTMethodWriteMetadata, values, &reader, -1)
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	return err
 }

@@ -373,7 +376,7 @@ func (client *storageRESTClient) DeleteVersion(ctx context.Context, volume, path
 	}

 	respBody, err := client.call(ctx, storageRESTMethodDeleteVersion, values, &buffer, -1)
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	return err
 }

@@ -383,7 +386,7 @@ func (client *storageRESTClient) WriteAll(ctx context.Context, volume string, pa
 	values.Set(storageRESTVolume, volume)
 	values.Set(storageRESTFilePath, path)
 	respBody, err := client.call(ctx, storageRESTMethodWriteAll, values, bytes.NewBuffer(b), int64(len(b)))
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	return err
 }

@@ -393,7 +396,7 @@ func (client *storageRESTClient) CheckFile(ctx context.Context, volume string, p
 	values.Set(storageRESTVolume, volume)
 	values.Set(storageRESTFilePath, path)
 	respBody, err := client.call(ctx, storageRESTMethodCheckFile, values, nil, -1)
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	return err
 }

@@ -410,7 +413,7 @@ func (client *storageRESTClient) CheckParts(ctx context.Context, volume string,
 	}

 	respBody, err := client.call(ctx, storageRESTMethodCheckParts, values, &reader, -1)
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	return err
 }

@@ -454,13 +457,13 @@ func (client *storageRESTClient) ReadVersion(ctx context.Context, volume, path,
 	if err != nil {
 		return fi, err
 	}
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)

 	dec := msgpNewReader(respBody)
 	defer readMsgpReaderPool.Put(dec)

 	err = fi.DecodeMsg(dec)
-	return fi, err
+	return fi, toStorageErr(err)
 }

 // ReadAll - reads all contents of a file.

@@ -472,7 +475,7 @@ func (client *storageRESTClient) ReadAll(ctx context.Context, volume string, pat
 	if err != nil {
 		return nil, err
 	}
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	return ioutil.ReadAll(respBody)
 }

@@ -483,11 +486,7 @@ func (client *storageRESTClient) ReadFileStream(ctx context.Context, volume, pat
 	values.Set(storageRESTFilePath, path)
 	values.Set(storageRESTOffset, strconv.Itoa(int(offset)))
 	values.Set(storageRESTLength, strconv.Itoa(int(length)))
-	respBody, err := client.call(ctx, storageRESTMethodReadFileStream, values, nil, -1)
-	if err != nil {
-		return nil, err
-	}
-	return respBody, nil
+	return client.call(ctx, storageRESTMethodReadFileStream, values, nil, -1)
 }

 // ReadFile - reads section of a file.

@@ -508,9 +507,9 @@ func (client *storageRESTClient) ReadFile(ctx context.Context, volume string, pa
 	if err != nil {
 		return 0, err
 	}
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	n, err := io.ReadFull(respBody, buf)
-	return int64(n), err
+	return int64(n), toStorageErr(err)
 }

 // ListDir - lists a directory.

@@ -523,9 +522,9 @@ func (client *storageRESTClient) ListDir(ctx context.Context, volume, dirPath st
 	if err != nil {
 		return nil, err
 	}
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	err = gob.NewDecoder(respBody).Decode(&entries)
-	return entries, err
+	return entries, toStorageErr(err)
 }

 // DeleteFile - deletes a file.

@@ -536,7 +535,7 @@ func (client *storageRESTClient) Delete(ctx context.Context, volume string, path
 	values.Set(storageRESTRecursive, strconv.FormatBool(recursive))

 	respBody, err := client.call(ctx, storageRESTMethodDeleteFile, values, nil, -1)
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	return err
 }

@@ -560,7 +559,7 @@ func (client *storageRESTClient) DeleteVersions(ctx context.Context, volume stri
 	errs = make([]error, len(versions))

 	respBody, err := client.call(ctx, storageRESTMethodDeleteVersions, values, &buffer, -1)
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	if err != nil {
 		for i := range errs {
 			errs[i] = err

@@ -571,7 +570,7 @@ func (client *storageRESTClient) DeleteVersions(ctx context.Context, volume stri
 	reader, err := waitForHTTPResponse(respBody)
 	if err != nil {
 		for i := range errs {
-			errs[i] = err
+			errs[i] = toStorageErr(err)
 		}
 		return errs
 	}

@@ -599,7 +598,7 @@ func (client *storageRESTClient) RenameFile(ctx context.Context, srcVolume, srcP
 	values.Set(storageRESTDstVolume, dstVolume)
 	values.Set(storageRESTDstPath, dstPath)
 	respBody, err := client.call(ctx, storageRESTMethodRenameFile, values, nil, -1)
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	return err
 }

@@ -614,19 +613,19 @@ func (client *storageRESTClient) VerifyFile(ctx context.Context, volume, path st
 	}

 	respBody, err := client.call(ctx, storageRESTMethodVerifyFile, values, &reader, -1)
-	defer http.DrainBody(respBody)
+	defer xhttp.DrainBody(respBody)
 	if err != nil {
 		return err
 	}

 	respReader, err := waitForHTTPResponse(respBody)
 	if err != nil {
-		return err
+		return toStorageErr(err)
 	}

 	verifyResp := &VerifyFileResp{}
 	if err = gob.NewDecoder(respReader).Decode(verifyResp); err != nil {
-		return err
+		return toStorageErr(err)
 	}

 	return toStorageErr(verifyResp.Err)
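Note: the DiskInfo change above layers an offline check in front of a small TTL cache (the `diskInfoCache` value with its `Once`/`TTL`/`Update` fields). A standalone sketch of that cache shape, assuming nothing beyond the standard library; the real one also guards one-time initialization with sync.Once:

```go
package cache

import (
	"sync"
	"time"
)

// timedValue caches the result of Update for TTL (sketch only).
type timedValue struct {
	TTL    time.Duration
	Update func() (interface{}, error)

	mu    sync.Mutex
	value interface{}
	last  time.Time
}

func (t *timedValue) Get() (interface{}, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.value != nil && time.Since(t.last) < t.TTL {
		return t.value, nil // serve the cached value within TTL
	}
	v, err := t.Update()
	if err != nil {
		return nil, err
	}
	t.value, t.last = v, time.Now()
	return v, nil
}
```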
@@ -17,7 +17,7 @@
 package cmd

 const (
-	storageRESTVersion       = "v29" // Removed WalkVersions()
+	storageRESTVersion       = "v30" // CreateFile is back to non-streaming
 	storageRESTVersionPrefix = SlashSeparator + storageRESTVersion
 	storageRESTPrefix        = minioReservedBucketPath + "/storage"
 )
@@ -288,8 +288,10 @@ func (s *storageRESTServer) CreateFileHandler(w http.ResponseWriter, r *http.Req
 		return
 	}

-	done := keepHTTPResponseAlive(w)
-	done(s.storage.CreateFile(r.Context(), volume, filePath, int64(fileSize), r.Body))
+	err = s.storage.CreateFile(r.Context(), volume, filePath, int64(fileSize), r.Body)
+	if err != nil {
+		s.writeErrorResponse(w, err)
+	}
 }

 // DeleteVersion delete updated metadata.

@@ -693,6 +695,7 @@ func keepHTTPResponseAlive(w http.ResponseWriter) func(error) {
 		if doneCh == nil {
 			return
 		}
+
 		// Indicate we are ready to write.
 		doneCh <- err

@@ -732,23 +735,6 @@ func waitForHTTPResponse(respBody io.Reader) (io.Reader, error) {
 	}
 }

-// drainCloser can be used for wrapping an http response.
-// It will drain the body before closing.
-type drainCloser struct {
-	rc io.ReadCloser
-}
-
-// Read forwards the read operation.
-func (f drainCloser) Read(p []byte) (n int, err error) {
-	return f.rc.Read(p)
-}
-
-// Close drains the body and closes the upstream.
-func (f drainCloser) Close() error {
-	xhttp.DrainBody(f.rc)
-	return nil
-}
-
 // httpStreamResponse allows streaming a response, but still send an error.
 type httpStreamResponse struct {
 	done chan error

@@ -840,7 +826,6 @@ func waitForHTTPStream(respBody io.ReadCloser, w io.Writer) error {
 		case 0:
 			// 0 is unbuffered, copy the rest.
 			_, err := io.Copy(w, respBody)
-			respBody.Close()
 			if err == io.EOF {
 				return nil
 			}

@@ -850,18 +835,7 @@ func waitForHTTPStream(respBody io.ReadCloser, w io.Writer) error {
 			if err != nil {
 				return err
 			}
-			respBody.Close()
 			return errors.New(string(errorText))
-		case 3:
-			// gob style is already deprecated, we can remove this when
-			// storage API version will be greater or equal to 23.
-			defer respBody.Close()
-			dec := gob.NewDecoder(respBody)
-			var err error
-			if de := dec.Decode(&err); de == nil {
-				return err
-			}
-			return errors.New("rpc error")
 		case 2:
 			// Block of data
 			var tmp [4]byte

@@ -878,7 +852,6 @@ func waitForHTTPStream(respBody io.ReadCloser, w io.Writer) error {
 		case 32:
 			continue
 		default:
-			go xhttp.DrainBody(respBody)
 			return fmt.Errorf("unexpected filler byte: %d", tmp[0])
 		}
 	}
@@ -77,6 +77,9 @@ var errInvalidDecompressedSize = errors.New("Invalid Decompressed Size")
 // error returned in IAM subsystem when user doesn't exist.
 var errNoSuchUser = errors.New("Specified user does not exist")

+// error returned when service account is not found
+var errNoSuchServiceAccount = errors.New("Specified service account does not exist")
+
 // error returned in IAM subsystem when groups doesn't exist.
 var errNoSuchGroup = errors.New("Specified group does not exist")

@@ -0,0 +1,156 @@
+/*
+ * MinIO Cloud Storage, (C) 2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import (
+	"archive/tar"
+	"bufio"
+	"bytes"
+	"compress/bzip2"
+	"fmt"
+	"io"
+	"os"
+	"path"
+
+	"github.com/klauspost/compress/s2"
+	"github.com/klauspost/compress/zstd"
+	gzip "github.com/klauspost/pgzip"
+	"github.com/pierrec/lz4"
+)
+
+func detect(r *bufio.Reader) format {
+	z, err := r.Peek(4)
+	if err != nil {
+		return formatUnknown
+	}
+	for _, f := range magicHeaders {
+		if bytes.Equal(f.header, z[:len(f.header)]) {
+			return f.f
+		}
+	}
+	return formatUnknown
+}
+
+//go:generate stringer -type=format -trimprefix=format $GOFILE
+type format int
+
+const (
+	formatUnknown format = iota
+	formatGzip
+	formatZstd
+	formatLZ4
+	formatS2
+	formatBZ2
+)
+
+var magicHeaders = []struct {
+	header []byte
+	f      format
+}{
+	{
+		header: []byte{0x1f, 0x8b, 8},
+		f:      formatGzip,
+	},
+	{
+		// Zstd default header.
+		header: []byte{0x28, 0xb5, 0x2f, 0xfd},
+		f:      formatZstd,
+	},
+	{
+		// Zstd skippable frame header.
+		header: []byte{0x2a, 0x4d, 0x18},
+		f:      formatZstd,
+	},
+	{
+		// LZ4
+		header: []byte{0x4, 0x22, 0x4d, 0x18},
+		f:      formatLZ4,
+	},
+	{
+		// Snappy/S2 stream
+		header: []byte{0xff, 0x06, 0x00, 0x00},
+		f:      formatS2,
+	},
+	{
+		header: []byte{0x42, 0x5a, 'h'},
+		f:      formatBZ2,
+	},
+}
+
+func untar(r io.Reader, putObject func(reader io.Reader, info os.FileInfo, name string)) error {
+	bf := bufio.NewReader(r)
+	switch f := detect(bf); f {
+	case formatGzip:
+		gz, err := gzip.NewReader(bf)
+		if err != nil {
+			return err
+		}
+		defer gz.Close()
+		r = gz
+	case formatS2:
+		r = s2.NewReader(bf)
+	case formatZstd:
+		dec, err := zstd.NewReader(bf)
+		if err != nil {
+			return err
+		}
+		defer dec.Close()
+		r = dec
+	case formatBZ2:
+		r = bzip2.NewReader(bf)
+	case formatLZ4:
+		r = lz4.NewReader(bf)
+	case formatUnknown:
+		r = bf
+	default:
+		return fmt.Errorf("Unsupported format %s", f)
+	}
+	tarReader := tar.NewReader(r)
+	for {
+		header, err := tarReader.Next()
+
+		switch {
+
+		// if no more files are found return
+		case err == io.EOF:
+			return nil
+
+		// return any other error
+		case err != nil:
+			return err
+
+		// if the header is nil, just skip it (not sure how this happens)
+		case header == nil:
+			continue
+		}
+
+		name := header.Name
+		if name == slashSeparator {
+			continue
+		}
+
+		switch header.Typeflag {
+		case tar.TypeDir: // = directory
+			putObject(tarReader, header.FileInfo(), trimLeadingSlash(pathJoin(name, slashSeparator)))
+		case tar.TypeReg, tar.TypeChar, tar.TypeBlock, tar.TypeFifo, tar.TypeGNUSparse: // = regular
+			putObject(tarReader, header.FileInfo(), trimLeadingSlash(path.Clean(name)))
+		default:
+			// ignore symlink'ed
+			continue
+		}
+	}
+}
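Note: untar above sniffs the compression format from the first four bytes, wraps the stream in the matching decompressor (falling back to a plain tar when no magic header matches), and invokes the callback once per archive entry. A minimal usage sketch, assuming it runs inside package cmd so the unexported name is reachable:

```go
package cmd

import (
	"fmt"
	"io"
	"os"
)

// Sketch: list the entries of an archive via untar (any of the formats
// in magicHeaders above, or a plain tar when detection falls through).
func listArchive(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return untar(f, func(r io.Reader, fi os.FileInfo, name string) {
		// PutObjectExtractHandler's callback issues a PutObject here instead.
		fmt.Printf("entry %q (%d bytes)\n", name, fi.Size())
	})
}
```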
@@ -219,6 +219,11 @@ func IsSourceBuild() bool {
 	return err != nil
 }

+// IsPCFTile returns if server is running in PCF
+func IsPCFTile() bool {
+	return env.Get("MINIO_PCF_TILE_VERSION", "") != ""
+}
+
 // DO NOT CHANGE USER AGENT STYLE.
 // The style should be
 //

@@ -284,9 +289,11 @@ func getUserAgent(mode string) string {
 		}
 	}

-	pcfTileVersion := env.Get("MINIO_PCF_TILE_VERSION", "")
-	if pcfTileVersion != "" {
-		uaAppend(" MinIO/pcf-tile-", pcfTileVersion)
+	if IsPCFTile() {
+		pcfTileVersion := env.Get("MINIO_PCF_TILE_VERSION", "")
+		if pcfTileVersion != "" {
+			uaAppend(" MinIO/pcf-tile-", pcfTileVersion)
+		}
 	}

 	return strings.Join(userAgentParts, "")
cmd/utils.go

@@ -455,7 +455,7 @@ func newInternodeHTTPTransport(tlsConfig *tls.Config, dialTimeout time.Duration)
 		WriteBufferSize:       32 << 10, // 32KiB moving up from 4KiB default
 		ReadBufferSize:        32 << 10, // 32KiB moving up from 4KiB default
 		IdleConnTimeout:       15 * time.Second,
-		ResponseHeaderTimeout: 3 * time.Minute, // Set conservative timeouts for MinIO internode.
+		ResponseHeaderTimeout: 15 * time.Minute, // Set conservative timeouts for MinIO internode.
 		TLSHandshakeTimeout:   15 * time.Second,
 		ExpectContinueTimeout: 15 * time.Second,
 		TLSClientConfig:       tlsConfig,

@@ -687,6 +687,16 @@ func ceilFrac(numerator, denominator int64) (ceil int64) {
 	return
 }

+// pathClean is like path.Clean but does not return "." for
+// empty inputs, instead returns "empty" as is.
+func pathClean(p string) string {
+	cp := path.Clean(p)
+	if cp == "." {
+		return ""
+	}
+	return cp
+}
+
 func trimLeadingSlash(ep string) string {
 	if len(ep) > 0 && ep[0] == '/' {
 		// Path ends with '/' preserve it
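Note: pathClean above differs from path.Clean only on empty input, which matters for callers that sanitize user-supplied path segments and want a missing value to stay missing. A quick check of the behaviors, as a sketch inside package cmd:

```go
package cmd

import "fmt"

// Sketch: pathClean collapses dot segments and duplicate slashes like
// path.Clean, but maps the empty string to "" instead of ".".
func examplePathClean() {
	fmt.Println(pathClean(""))            // "" (path.Clean would return ".")
	fmt.Println(pathClean("a/b/../c//d")) // "a/c/d"
	fmt.Println(pathClean("./bucket/"))   // "bucket"
}
```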
@ -226,7 +226,7 @@ func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, rep
|
|||
reply.UIVersion = Version
|
||||
|
||||
reqParams := extractReqParams(r)
|
||||
reqParams["accessKey"] = claims.AccessKey
|
||||
reqParams["accessKey"] = claims.GetAccessKey()
|
||||
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.BucketCreated,
|
||||
|
@ -723,7 +723,7 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs,
|
|||
)
|
||||
|
||||
reqParams := extractReqParams(r)
|
||||
reqParams["accessKey"] = claims.AccessKey
|
||||
reqParams["accessKey"] = claims.GetAccessKey()
|
||||
sourceIP := handlers.GetSourceIP(r)
|
||||
|
||||
next:
|
||||
|
@ -767,7 +767,7 @@ next:
|
|||
}
|
||||
if hasReplicationRules(ctx, args.BucketName, []ObjectToDelete{{ObjectName: objectName}}) || hasLifecycleConfig {
|
||||
goi, gerr = getObjectInfoFn(ctx, args.BucketName, objectName, opts)
|
||||
if _, replicateDel, replicateSync = checkReplicateDelete(ctx, args.BucketName, ObjectToDelete{
|
||||
if replicateDel, replicateSync = checkReplicateDelete(ctx, args.BucketName, ObjectToDelete{
|
||||
ObjectName: objectName,
|
||||
VersionID: goi.VersionID,
|
||||
}, goi, gerr); replicateDel {
|
||||
|
@ -903,7 +903,7 @@ next:
|
|||
}
|
||||
}
|
||||
}
|
||||
_, replicateDel, _ := checkReplicateDelete(ctx, args.BucketName, ObjectToDelete{ObjectName: obj.Name, VersionID: obj.VersionID}, obj, nil)
|
||||
replicateDel, _ := checkReplicateDelete(ctx, args.BucketName, ObjectToDelete{ObjectName: obj.Name, VersionID: obj.VersionID}, obj, nil)
|
||||
// since versioned delete is not available on web browser, yet - this is a simple DeleteMarker replication
|
||||
objToDel := ObjectToDelete{ObjectName: obj.Name}
|
||||
if replicateDel {
|
||||
|
@ -1336,11 +1336,11 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
}
|
||||
if mustReplicate {
|
||||
scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync)
|
||||
scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
|
||||
}
|
||||
|
||||
reqParams := extractReqParams(r)
|
||||
reqParams["accessKey"] = claims.AccessKey
|
||||
reqParams["accessKey"] = claims.GetAccessKey()
|
||||
|
||||
// Notify object created event.
|
||||
sendEvent(eventArgs{
|
||||
|
@@ -1529,7 +1529,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
     }
 
     reqParams := extractReqParams(r)
-    reqParams["accessKey"] = claims.AccessKey
+    reqParams["accessKey"] = claims.GetAccessKey()
 
     // Notify object accessed via a GET request.
     sendEvent(eventArgs{
@@ -1684,10 +1684,13 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
     defer archive.Close()
 
     reqParams := extractReqParams(r)
-    reqParams["accessKey"] = claims.AccessKey
+    reqParams["accessKey"] = claims.GetAccessKey()
+    respElements := extractRespElements(w)
 
     for i, object := range args.Objects {
+        if contextCanceled(ctx) {
+            return
+        }
         // Writes compressed object file to the response.
         zipit := func(objectName string) error {
             var opts ObjectOptions
@@ -1706,38 +1709,26 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
                 return err
             }
             header := &zip.FileHeader{
-                Name:     strings.TrimPrefix(objectName, args.Prefix),
-                Method:   zip.Deflate,
-                Flags:    1 << 11,
-                Modified: info.ModTime,
+                Name:               strings.TrimPrefix(objectName, args.Prefix),
+                Method:             zip.Deflate,
+                Flags:              1 << 11,
+                Modified:           info.ModTime,
+                UncompressedSize64: uint64(info.Size),
             }
-            if hasStringSuffixInSlice(info.Name, standardExcludeCompressExtensions) || hasPattern(standardExcludeCompressContentTypes, info.ContentType) {
+            if info.Size < 20 || hasStringSuffixInSlice(info.Name, standardExcludeCompressExtensions) || hasPattern(standardExcludeCompressContentTypes, info.ContentType) {
                 // We strictly disable compression for standard extensions/content-types.
                 header.Method = zip.Store
             }
             writer, err := archive.CreateHeader(header)
             if err != nil {
                 writeWebErrorResponse(w, errUnexpected)
                 return err
             }
-            httpWriter := ioutil.WriteOnClose(writer)
-
-            // Write object content to response body
-            if _, err = io.Copy(httpWriter, gr); err != nil {
-                httpWriter.Close()
-                if !httpWriter.HasWritten() { // write error response only if no data or headers has been written to client yet
-                    writeWebErrorResponse(w, err)
-                }
+            if _, err = io.Copy(writer, gr); err != nil {
                 return err
             }
 
-            if err = httpWriter.Close(); err != nil {
-                if !httpWriter.HasWritten() { // write error response only if no data has been written to client yet
-                    writeWebErrorResponse(w, err)
-                    return err
-                }
-            }
-
             // Notify object accessed via a GET request.
             sendEvent(eventArgs{
                 EventName: event.ObjectAccessedGet,
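The hunk above leans on two archive/zip details: pre-declaring UncompressedSize64 in the file header, and switching to Store when deflating would be wasted effort. A self-contained sketch of the same pattern — the 20-byte cutoff mirrors the diff, while the file name and payload are invented:

package main

import (
	"archive/zip"
	"bytes"
	"io"
	"strings"
	"time"
)

func main() {
	var out bytes.Buffer
	archive := zip.NewWriter(&out)

	payload := "hello, zip"
	header := &zip.FileHeader{
		Name:               "object.txt",
		Method:             zip.Deflate,
		Modified:           time.Now(),
		UncompressedSize64: uint64(len(payload)),
	}
	// Tiny payloads gain nothing from Deflate; store them verbatim,
	// as the hunk above does for sizes under 20 bytes.
	if len(payload) < 20 {
		header.Method = zip.Store
	}
	w, err := archive.CreateHeader(header)
	if err != nil {
		panic(err)
	}
	if _, err = io.Copy(w, strings.NewReader(payload)); err != nil {
		panic(err)
	}
	if err = archive.Close(); err != nil {
		panic(err)
	}
}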
@@ -438,6 +438,44 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
     return fi, nil
 }
 
+func (z *xlMetaV2) SharedDataDirCountStr(versionID, dataDir string) int {
+    var (
+        uv   uuid.UUID
+        ddir uuid.UUID
+        err  error
+    )
+    if versionID == nullVersionID {
+        versionID = ""
+    }
+    if versionID != "" {
+        uv, err = uuid.Parse(versionID)
+        if err != nil {
+            return 0
+        }
+    }
+    ddir, err = uuid.Parse(dataDir)
+    if err != nil {
+        return 0
+    }
+    return z.SharedDataDirCount(uv, ddir)
+}
+
+func (z *xlMetaV2) SharedDataDirCount(versionID [16]byte, dataDir [16]byte) int {
+    var sameDataDirCount int
+    for _, version := range z.Versions {
+        switch version.Type {
+        case ObjectType:
+            if version.ObjectV2.VersionID == versionID {
+                continue
+            }
+            if version.ObjectV2.DataDir == dataDir {
+                sameDataDirCount++
+            }
+        }
+    }
+    return sameDataDirCount
+}
+
 // DeleteVersion deletes the version specified by version id.
 // returns to the caller which dataDir to delete, also
 // indicates if this is the last version.
@@ -545,19 +583,6 @@ func (z *xlMetaV2) DeleteVersion(fi FileInfo) (string, bool, error) {
         }
     }
 
-    findDataDir := func(dataDir [16]byte, versions []xlMetaV2Version) int {
-        var sameDataDirCount int
-        for _, version := range versions {
-            switch version.Type {
-            case ObjectType:
-                if bytes.Equal(version.ObjectV2.DataDir[:], dataDir[:]) {
-                    sameDataDirCount++
-                }
-            }
-        }
-        return sameDataDirCount
-    }
-
     for i, version := range z.Versions {
         if !version.Valid() {
             return "", false, errFileCorrupt
@@ -570,7 +595,7 @@ func (z *xlMetaV2) DeleteVersion(fi FileInfo) (string, bool, error) {
                 return uuid.UUID(version.ObjectV2.DataDir).String(), len(z.Versions) == 0, nil
             }
             z.Versions = append(z.Versions[:i], z.Versions[i+1:]...)
-            if findDataDir(version.ObjectV2.DataDir, z.Versions) > 0 {
+            if z.SharedDataDirCount(version.ObjectV2.VersionID, version.ObjectV2.DataDir) > 0 {
                 if fi.Deleted {
                     z.Versions = append(z.Versions, ventry)
                 }
@@ -612,7 +637,7 @@ func (z xlMetaV2) TotalSize() int64 {
 // showPendingDeletes is set to true if ListVersions needs to list objects marked deleted
 // but waiting to be replicated
 func (z xlMetaV2) ListVersions(volume, path string) ([]FileInfo, time.Time, error) {
-    var versions []FileInfo
+    versions := make([]FileInfo, 0, len(z.Versions))
     var err error
 
     for _, version := range z.Versions {
@@ -703,7 +728,6 @@ func (z xlMetaV2) ToFileInfo(volume, path, versionID string) (fi FileInfo, err e
             fi.IsLatest = true
             fi.NumVersions = len(orderedVersions)
             return fi, err
-
         }
     }
     return FileInfo{}, errFileNotFound
 }
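Several hunks in this file, and the RenameData change further below, hinge on SharedDataDirCount: a data directory may be referenced by more than one version (for example after a metadata-only update), so it is only safe to purge when no other version points at it. A simplified, self-contained model of that counting rule — the types here are stand-ins for the real xlMetaV2 structures:

package main

import "fmt"

// Stand-ins for the real xl.meta structures.
type version struct {
	versionID [16]byte
	dataDir   [16]byte
}

type meta struct{ versions []version }

// sharedDataDirCount mirrors the logic of the added SharedDataDirCount:
// count versions other than versionID that reference the same dataDir.
func (m meta) sharedDataDirCount(versionID, dataDir [16]byte) int {
	n := 0
	for _, v := range m.versions {
		if v.versionID == versionID {
			continue // skip the version being inspected/deleted
		}
		if v.dataDir == dataDir {
			n++
		}
	}
	return n
}

func main() {
	dir := [16]byte{1}
	m := meta{versions: []version{
		{versionID: [16]byte{0xa}, dataDir: dir},
		{versionID: [16]byte{0xb}, dataDir: dir}, // shares the data dir
	}}
	// Deleting version 0xa: one other version still needs dir, so keep it.
	fmt.Println(m.sharedDataDirCount([16]byte{0xa}, dir)) // 1
}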
@@ -287,7 +287,7 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
     _, _ = rand.Read(rnd[:])
     tmpFile := ".writable-check-" + hex.EncodeToString(rnd[:]) + ".tmp"
     filePath := pathJoin(p.diskPath, minioMetaTmpBucket, tmpFile)
-    w, err := disk.OpenFileDirectIO(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
+    w, err := OpenFileDirectIO(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
     if err != nil {
         return p, err
     }
@@ -347,6 +347,8 @@ func (s *xlStorage) IsLocal() bool {
 
 // Retrieve location indexes.
 func (s *xlStorage) GetDiskLoc() (poolIdx, setIdx, diskIdx int) {
+    s.RLock()
+    defer s.RUnlock()
     // If unset, see if we can locate it.
     if s.poolIndex < 0 || s.setIndex < 0 || s.diskIndex < 0 {
         return getXLDiskLoc(s.diskID)
@@ -391,6 +393,10 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache) (dataUs
 
     // return initialized object layer
     objAPI := newObjectLayerFn()
+    // object layer not initialized, return.
+    if objAPI == nil {
+        return cache, errServerNotInitialized
+    }
 
     globalHealConfigMu.Lock()
     healOpts := globalHealConfig
@@ -428,13 +434,10 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache) (dataUs
         sizeS := sizeSummary{}
         for _, version := range fivs.Versions {
             oi := version.ToObjectInfo(item.bucket, item.objectPath())
-            if objAPI != nil {
-                totalSize += item.applyActions(ctx, objAPI, actionMeta{
-                    oi:         oi,
-                    bitRotScan: healOpts.Bitrot,
-                })
-                item.healReplication(ctx, objAPI, oi.Clone(), &sizeS)
-            }
+            totalSize += item.applyActions(ctx, objAPI, actionMeta{
+                oi:         oi,
+                bitRotScan: healOpts.Bitrot,
+            }, &sizeS)
         }
         sizeS.totalSize = totalSize
         return sizeS, nil
@@ -902,6 +905,7 @@ func (s *xlStorage) WriteMetadata(ctx context.Context, volume, path string, fi F
         if err = xlMeta.Load(buf); err != nil {
             return err
         }
+
         if err = xlMeta.AddVersion(fi); err != nil {
             return err
         }
@@ -1032,11 +1036,13 @@ func (s *xlStorage) ReadVersion(ctx context.Context, volume, path, versionID str
 }
 
 func (s *xlStorage) readAllData(volumeDir string, filePath string, requireDirectIO bool) (buf []byte, err error) {
-    var f *os.File
+    var r io.ReadCloser
     if requireDirectIO {
-        f, err = disk.OpenFileDirectIO(filePath, readMode, 0666)
+        var f *os.File
+        f, err = OpenFileDirectIO(filePath, readMode, 0666)
+        r = &odirectReader{f, nil, nil, true, true, s, nil}
     } else {
-        f, err = OpenFile(filePath, readMode, 0)
+        r, err = OpenFile(filePath, readMode, 0)
     }
     if err != nil {
         if osIsNotExist(err) {
@@ -1071,10 +1077,8 @@ func (s *xlStorage) readAllData(volumeDir string, filePath string, requireDirect
         return nil, err
     }
 
-    or := &odirectReader{f, nil, nil, true, true, s, nil}
-    defer or.Close()
-
-    buf, err = ioutil.ReadAll(or)
+    defer r.Close()
+    buf, err = ioutil.ReadAll(r)
     if err != nil {
         err = osErrToFileErr(err)
     }
@@ -1247,7 +1251,7 @@ type odirectReader struct {
 
 // Read - Implements Reader interface.
 func (o *odirectReader) Read(buf []byte) (n int, err error) {
-    if o.err != nil {
+    if o.err != nil && (len(o.buf) == 0 || o.freshRead) {
         return 0, o.err
     }
     if o.buf == nil {
@@ -1274,20 +1278,22 @@ func (o *odirectReader) Read(buf []byte) (n int, err error) {
         }
     }
     if n == 0 {
-        // err is io.EOF
+        // err is likely io.EOF
         o.err = err
         return n, err
     }
     o.err = err
     o.buf = o.buf[:n]
+    o.freshRead = false
 }
 if len(buf) >= len(o.buf) {
     n = copy(buf, o.buf)
-    return n, nil
+    o.freshRead = true
+    return n, o.err
 }
 n = copy(buf, o.buf)
 o.buf = o.buf[n:]
 // There is more left in buffer, do not return any EOF yet.
 return n, nil
 }
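The fix above changes when the reader surfaces a stored error: buffered bytes must be drained first, and EOF may only escape once a fresh read would return nothing. A self-contained sketch of that contract — a generic buffered wrapper, not the O_DIRECT reader itself:

package main

import (
	"fmt"
	"io"
	"strings"
)

// bufferedReader illustrates the drain-before-EOF rule the hunk above
// enforces: a stored read error is only returned once the internal
// buffer is empty.
type bufferedReader struct {
	src io.Reader
	buf []byte
	err error
}

func (b *bufferedReader) Read(p []byte) (int, error) {
	if len(b.buf) == 0 {
		if b.err != nil {
			return 0, b.err // buffer drained; now surface the error
		}
		block := make([]byte, 8) // tiny block size, for illustration
		n, err := b.src.Read(block)
		b.err = err // remember EOF/error for after the drain
		if n == 0 {
			return 0, err
		}
		b.buf = block[:n]
	}
	n := copy(p, b.buf)
	b.buf = b.buf[n:]
	return n, nil // never EOF while data remains buffered
}

func main() {
	r := &bufferedReader{src: strings.NewReader("0123456789")}
	out, err := io.ReadAll(r)
	fmt.Println(string(out), err) // 0123456789 <nil>
}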
@@ -1321,7 +1327,7 @@ func (s *xlStorage) ReadFileStream(ctx context.Context, volume, path string, off
     var file *os.File
     // O_DIRECT only supported if offset is zero
     if offset == 0 && globalStorageClass.GetDMA() == storageclass.DMAReadWrite {
-        file, err = disk.OpenFileDirectIO(filePath, readMode, 0666)
+        file, err = OpenFileDirectIO(filePath, readMode, 0666)
     } else {
         // Open the file for reading.
         file, err = OpenFile(filePath, readMode, 0666)
@@ -1441,7 +1447,7 @@ func (s *xlStorage) CreateFile(ctx context.Context, volume, path string, fileSiz
     if fileSize >= 0 && fileSize <= smallFileThreshold {
         // For streams smaller than 128KiB we simply write them as O_DSYNC (fdatasync)
         // and not O_DIRECT to avoid the complexities of aligned I/O.
-        w, err := s.openFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC)
+        w, err := s.openFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL)
         if err != nil {
             return err
         }
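The single-flag change above has real semantics: O_TRUNC silently overwrites an existing file, while O_EXCL makes creation fail if the path already exists, turning a concurrent double-write into a detectable error. A tiny illustration with os.OpenFile — the temp-file name is arbitrary:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	p := filepath.Join(os.TempDir(), "excl-demo.tmp")
	os.Remove(p) // ensure a clean slate for the demo
	defer os.Remove(p)

	f, err := os.OpenFile(p, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
	if err != nil {
		panic(err)
	}
	f.Close()

	// A second O_EXCL create on the same path fails instead of truncating.
	_, err = os.OpenFile(p, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
	fmt.Println(os.IsExist(err)) // true
}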
@@ -1615,6 +1621,9 @@ func (s *xlStorage) CheckFile(ctx context.Context, volume string, path string) e
     if err != nil {
         return err
     }
+    s.RLock()
+    formatLegacy := s.formatLegacy
+    s.RUnlock()
 
     var checkFile func(p string) error
     checkFile = func(p string) error {
@@ -1626,10 +1635,10 @@ func (s *xlStorage) CheckFile(ctx context.Context, volume string, path string) e
         if err := checkPathLength(filePath); err != nil {
             return err
         }
 
         st, _ := Lstat(filePath)
         if st == nil {
-            if !s.formatLegacy {
+            if !formatLegacy {
                 return errPathNotFound
             }
 
@@ -1880,10 +1889,13 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath, dataDir,
             legacyPreserved = true
         }
     } else {
+        s.RLock()
+        formatLegacy := s.formatLegacy
+        s.RUnlock()
         // It is possible that some drives may not have `xl.meta` file
         // in such scenarios verify if atleast `part.1` files exist
         // to verify for legacy version.
-        if s.formatLegacy {
+        if formatLegacy {
             // We only need this code if we are moving
             // from `xl.json` to `xl.meta`, we can avoid
             // one extra readdir operation here for all
@@ -1946,9 +1958,11 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath, dataDir,
             // return the latest "null" versionId info
             ofi, err := xlMeta.ToFileInfo(dstVolume, dstPath, nullVersionID)
             if err == nil && !ofi.Deleted {
-                // Purge the destination path as we are not preserving anything
-                // versioned object was not requested.
-                oldDstDataPath = pathJoin(dstVolumeDir, dstPath, ofi.DataDir)
+                if xlMeta.SharedDataDirCountStr(nullVersionID, ofi.DataDir) == 0 {
+                    // Purge the destination path as we are not preserving anything
+                    // versioned object was not requested.
+                    oldDstDataPath = pathJoin(dstVolumeDir, dstPath, ofi.DataDir)
+                }
             }
         }
@@ -1967,9 +1981,11 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath, dataDir,
 
     // Commit data
     if srcDataPath != "" {
-        tmpuuid := mustGetUUID()
-        renameAll(oldDstDataPath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, tmpuuid))
-        tmpuuid = mustGetUUID()
+        if oldDstDataPath != "" {
+            tmpuuid := mustGetUUID()
+            renameAll(oldDstDataPath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, tmpuuid))
+        }
+        tmpuuid := mustGetUUID()
         renameAll(dstDataPath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, tmpuuid))
         if err = renameAll(srcDataPath, dstDataPath); err != nil {
             return osErrToFileErr(err)

|
|||
|`minio_bucket_replication_pending_bytes` |Total bytes pending to replicate. |
|
||||
|`minio_bucket_replication_received_bytes` |Total number of bytes replicated to this bucket from another source bucket. |
|
||||
|`minio_bucket_replication_sent_bytes` |Total number of bytes replicated to the target bucket. |
|
||||
|`minio_bucket_replication_pending_count` |Total number of replication operations pending for this bucket. |
|
||||
|`minio_bucket_usage_object_total` |Total number of objects |
|
||||
|`minio_bucket_usage_total_bytes` |Total bucket size in bytes |
|
||||
|`minio_cache_hits_total` |Total number of disk cache hits |
|
||||
|
|
|
@@ -28,6 +28,7 @@ import (
     "time"
 
     jwtgo "github.com/dgrijalva/jwt-go"
+    "github.com/minio/minio/cmd/jwt"
 )
 
 const (
@@ -83,6 +84,13 @@ var (
     }
 )
 
+const (
+    // AccountOn indicates that credentials are enabled
+    AccountOn = "on"
+    // AccountOff indicates that credentials are disabled
+    AccountOff = "off"
+)
+
 // Credentials holds access and secret keys.
 type Credentials struct {
     AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
@@ -203,16 +211,34 @@ func GetNewCredentialsWithMetadata(m map[string]interface{}, tokenSecret string)
     for i := 0; i < accessKeyMaxLen; i++ {
         keyBytes[i] = alphaNumericTable[keyBytes[i]%alphaNumericTableLen]
     }
-    cred.AccessKey = string(keyBytes)
+    accessKey := string(keyBytes)
 
     // Generate secret key.
     keyBytes, err = readBytes(secretKeyMaxLen)
     if err != nil {
         return cred, err
     }
-    cred.SecretKey = strings.Replace(string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen]),
+
+    secretKey := strings.Replace(string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen]),
         "/", "+", -1)
-    cred.Status = "on"
 
+    return CreateNewCredentialsWithMetadata(accessKey, secretKey, m, tokenSecret)
+}
+
+// CreateNewCredentialsWithMetadata - creates new credentials using the specified access & secret keys
+// and generate a session token if a secret token is provided.
+func CreateNewCredentialsWithMetadata(accessKey, secretKey string, m map[string]interface{}, tokenSecret string) (cred Credentials, err error) {
+    if len(accessKey) < accessKeyMinLen || len(accessKey) > accessKeyMaxLen {
+        return Credentials{}, fmt.Errorf("access key length should be between %d and %d", accessKeyMinLen, accessKeyMaxLen)
+    }
+
+    if len(secretKey) < secretKeyMinLen || len(secretKey) > secretKeyMaxLen {
+        return Credentials{}, fmt.Errorf("secret key length should be between %d and %d", secretKeyMinLen, secretKeyMaxLen)
+    }
+
+    cred.AccessKey = accessKey
+    cred.SecretKey = secretKey
+    cred.Status = AccountOn
+
     if tokenSecret == "" {
         cred.Expiration = timeSentinel
@@ -223,12 +249,9 @@ func GetNewCredentialsWithMetadata(m map[string]interface{}, tokenSecret string)
     if err != nil {
         return cred, err
     }
-
-    m["accessKey"] = cred.AccessKey
-    jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims(m))
-
     cred.Expiration = time.Unix(expiry, 0).UTC()
-    cred.SessionToken, err = jwt.SignedString([]byte(tokenSecret))
+
+    cred.SessionToken, err = JWTSignWithAccessKey(cred.AccessKey, m, tokenSecret)
     if err != nil {
         return cred, err
     }
@@ -236,6 +259,31 @@ func GetNewCredentialsWithMetadata(m map[string]interface{}, tokenSecret string)
     return cred, nil
 }
 
+// JWTSignWithAccessKey - generates a session token.
+func JWTSignWithAccessKey(accessKey string, m map[string]interface{}, tokenSecret string) (string, error) {
+    m["accessKey"] = accessKey
+    jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims(m))
+    return jwt.SignedString([]byte(tokenSecret))
+}
+
+// ExtractClaims extracts JWT claims from a security token using a secret key
+func ExtractClaims(token, secretKey string) (*jwt.MapClaims, error) {
+    if token == "" || secretKey == "" {
+        return nil, errors.New("invalid argument")
+    }
+
+    claims := jwt.NewMapClaims()
+    stsTokenCallback := func(claims *jwt.MapClaims) ([]byte, error) {
+        return []byte(secretKey), nil
+    }
+
+    if err := jwt.ParseWithClaims(token, claims, stsTokenCallback); err != nil {
+        return nil, err
+    }
+
+    return claims, nil
+}
+
 // GetNewCredentials generates and returns new credential.
 func GetNewCredentials() (cred Credentials, err error) {
     return GetNewCredentialsWithMetadata(map[string]interface{}{}, "")
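The round trip introduced above (sign with JWTSignWithAccessKey, read back with ExtractClaims) is plain HS512 over a claim map. A self-contained sketch using the same dgrijalva/jwt-go API the file imports — the sample access key, claims, and secret are made up:

package main

import (
	"fmt"
	"time"

	jwtgo "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := "token-signing-secret" // illustrative only
	claims := map[string]interface{}{
		"exp": time.Now().Add(time.Hour).Unix(),
	}

	// Mirrors JWTSignWithAccessKey: inject the access key, sign HS512.
	claims["accessKey"] = "AKIAEXAMPLE"
	token := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims(claims))
	signed, err := token.SignedString([]byte(secret))
	if err != nil {
		panic(err)
	}

	// Verification half, analogous to what ExtractClaims does through
	// cmd/jwt: parse with the shared secret and read the claims back.
	parsed, err := jwtgo.Parse(signed, func(t *jwtgo.Token) (interface{}, error) {
		return []byte(secret), nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Claims.(jwtgo.MapClaims)["accessKey"]) // AKIAEXAMPLE
}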
@@ -22,7 +22,6 @@ import (
     "time"
 
     "github.com/minio/minio/pkg/bandwidth"
-    "github.com/minio/minio/pkg/pubsub"
 )
 
 // throttleBandwidth gets the throttle for bucket with the configured value
@@ -39,26 +38,6 @@ func (m *Monitor) throttleBandwidth(ctx context.Context, bucket string, bandwidt
     return throttle
 }
 
-// SubscribeToBuckets subscribes to buckets. Empty array for monitoring all buckets.
-func (m *Monitor) SubscribeToBuckets(subCh chan interface{}, doneCh <-chan struct{}, buckets []string) {
-    m.pubsub.Subscribe(subCh, doneCh, func(f interface{}) bool {
-        if buckets != nil || len(buckets) == 0 {
-            return true
-        }
-        report, ok := f.(*bandwidth.Report)
-        if !ok {
-            return false
-        }
-        for _, b := range buckets {
-            _, ok := report.BucketStats[b]
-            if ok {
-                return true
-            }
-        }
-        return false
-    })
-}
-
 // Monitor implements the monitoring for bandwidth measurements.
 type Monitor struct {
     lock sync.Mutex // lock for all updates
@@ -67,12 +46,8 @@ type Monitor struct {
 
     bucketMovingAvgTicker *time.Ticker // Ticker for calculating moving averages
 
-    pubsub *pubsub.PubSub // PubSub for reporting bandwidths.
-
     bucketThrottle map[string]*throttle
 
-    startProcessing sync.Once
-
     doneCh <-chan struct{}
 }
@@ -81,10 +56,10 @@ func NewMonitor(doneCh <-chan struct{}) *Monitor {
     m := &Monitor{
         activeBuckets:         make(map[string]*bucketMeasurement),
         bucketMovingAvgTicker: time.NewTicker(2 * time.Second),
-        pubsub:                pubsub.New(),
         bucketThrottle:        make(map[string]*throttle),
         doneCh:                doneCh,
     }
+    go m.trackEWMA()
     return m
 }
@@ -123,20 +98,24 @@ func (m *Monitor) getReport(selectBucket SelectionFunction) *bandwidth.Report {
         if !selectBucket(bucket) {
             continue
         }
+        bucketThrottle, ok := m.bucketThrottle[bucket]
+        if !ok {
+            continue
+        }
         report.BucketStats[bucket] = bandwidth.Details{
-            LimitInBytesPerSecond:            m.bucketThrottle[bucket].clusterBandwidth,
+            LimitInBytesPerSecond:            bucketThrottle.clusterBandwidth,
             CurrentBandwidthInBytesPerSecond: bucketMeasurement.getExpMovingAvgBytesPerSecond(),
         }
     }
     return report
 }
 
-func (m *Monitor) process(doneCh <-chan struct{}) {
+func (m *Monitor) trackEWMA() {
     for {
         select {
         case <-m.bucketMovingAvgTicker.C:
-            m.processAvg()
-        case <-doneCh:
+            m.updateMovingAvg()
+        case <-m.doneCh:
             return
         }
     }
@@ -151,24 +130,19 @@ func (m *Monitor) getBucketMeasurement(bucket string, initTime time.Time) *bucke
     return bucketTracker
 }
 
-func (m *Monitor) processAvg() {
+func (m *Monitor) updateMovingAvg() {
     m.lock.Lock()
     defer m.lock.Unlock()
     for _, bucketMeasurement := range m.activeBuckets {
         bucketMeasurement.updateExponentialMovingAverage(time.Now())
     }
-    m.pubsub.Publish(m.getReport(SelectBuckets()))
 }
 
 // track returns the measurement object for bucket and object
-func (m *Monitor) track(bucket string, object string, timeNow time.Time) *bucketMeasurement {
+func (m *Monitor) track(bucket string, object string) *bucketMeasurement {
     m.lock.Lock()
     defer m.lock.Unlock()
-    m.startProcessing.Do(func() {
-        go m.process(m.doneCh)
-    })
-    b := m.getBucketMeasurement(bucket, timeNow)
-    return b
+    return m.getBucketMeasurement(bucket, time.Now())
 }
 
 // DeleteBucket deletes monitoring the 'bucket'
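For readers unfamiliar with the moving average being ticked every two seconds above: each tick folds the bytes observed since the last update into an exponentially weighted average of the transfer rate. A toy version of that update rule — the smoothing factor 0.2 is an arbitrary assumption here, not MinIO's constant:

package main

import (
	"fmt"
	"time"
)

// updateEWMA blends the rate observed over the last interval into the
// running average; larger alpha reacts faster, smaller alpha smooths more.
func updateEWMA(avgBytesPerSec float64, bytesSinceTick uint64, elapsed time.Duration) float64 {
	const alpha = 0.2 // assumed smoothing factor, for illustration
	instantRate := float64(bytesSinceTick) / elapsed.Seconds()
	return alpha*instantRate + (1-alpha)*avgBytesPerSec
}

func main() {
	avg := 0.0
	for _, b := range []uint64{4 << 20, 6 << 20, 2 << 20} {
		avg = updateEWMA(avg, b, 2*time.Second) // 2s tick, as in the hunk above
		fmt.Printf("%.0f bytes/sec\n", avg)
	}
}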
@@ -20,67 +20,61 @@ package bandwidth
 import (
     "context"
     "io"
-    "time"
 )
 
 // MonitoredReader monitors the bandwidth
 type MonitoredReader struct {
-    bucket            string             // Token to track bucket
+    opts              *MonitorReaderOptions
     bucketMeasurement *bucketMeasurement // bucket measurement object
-    object            string             // Token to track object
-    reader            io.ReadCloser      // Reader to wrap
-    lastStop          time.Time          // Last timestamp for a measurement
-    headerSize        int                // Size of the header not captured by reader
+    reader            io.Reader          // Reader to wrap
     throttle          *throttle          // throttle the rate at which replication occur
     monitor           *Monitor           // Monitor reference
-    closed            bool               // Reader is closed
+    lastErr           error              // last error reported, if this non-nil all reads will fail.
 }
 
-// NewMonitoredReader returns a io.ReadCloser that reports bandwidth details.
-// The supplied reader will be closed.
-func NewMonitoredReader(ctx context.Context, monitor *Monitor, bucket string, object string, reader io.ReadCloser, headerSize int, bandwidthBytesPerSecond int64, clusterBandwidth int64) *MonitoredReader {
-    timeNow := time.Now()
-    b := monitor.track(bucket, object, timeNow)
+// MonitorReaderOptions provides configurable options for monitor reader implementation.
+type MonitorReaderOptions struct {
+    Bucket               string
+    Object               string
+    HeaderSize           int
+    BandwidthBytesPerSec int64
+    ClusterBandwidth     int64
+}
+
+// NewMonitoredReader returns a io.Reader that reports bandwidth details.
+func NewMonitoredReader(ctx context.Context, monitor *Monitor, reader io.Reader, opts *MonitorReaderOptions) *MonitoredReader {
     return &MonitoredReader{
-        bucket:            bucket,
-        object:            object,
-        bucketMeasurement: b,
+        opts:              opts,
+        bucketMeasurement: monitor.track(opts.Bucket, opts.Object),
         reader:            reader,
-        lastStop:          timeNow,
-        headerSize:        headerSize,
-        throttle:          monitor.throttleBandwidth(ctx, bucket, bandwidthBytesPerSecond, clusterBandwidth),
+        throttle:          monitor.throttleBandwidth(ctx, opts.Bucket, opts.BandwidthBytesPerSec, opts.ClusterBandwidth),
        monitor:           monitor,
     }
 }
 
 // Read wraps the read reader
 func (m *MonitoredReader) Read(p []byte) (n int, err error) {
-    if m.closed {
-        err = io.ErrClosedPipe
+    if m.lastErr != nil {
+        err = m.lastErr
         return
     }
 
     p = p[:m.throttle.GetLimitForBytes(int64(len(p)))]
 
     n, err = m.reader.Read(p)
-    stop := time.Now()
-    update := uint64(n + m.headerSize)
+    if err != nil {
+        m.lastErr = err
+    }
 
-    m.bucketMeasurement.incrementBytes(update)
-    m.lastStop = stop
-    unused := len(p) - (n + m.headerSize)
-    m.headerSize = 0 // Set to 0 post first read
+    update := n + m.opts.HeaderSize
+    unused := len(p) - update
+
+    m.bucketMeasurement.incrementBytes(uint64(update))
+    m.opts.HeaderSize = 0 // Set to 0 post first read
+
     if unused > 0 {
         m.throttle.ReleaseUnusedBandwidth(int64(unused))
     }
 
     return
 }
-
-// Close stops tracking the io
-func (m *MonitoredReader) Close() error {
-    if m.closed {
-        return nil
-    }
-    m.closed = true
-    return m.reader.Close()
-}
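Hypothetical usage of the reworked constructor, assuming the package lives at github.com/minio/minio/pkg/bucket/bandwidth as in this tree; the bucket name, payload size, and limits are invented for the example:

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"

	"github.com/minio/minio/pkg/bucket/bandwidth"
)

func main() {
	doneCh := make(chan struct{})
	defer close(doneCh)

	monitor := bandwidth.NewMonitor(doneCh)
	src := bytes.NewReader(make([]byte, 256<<10)) // 256KiB payload

	// The options struct replaces the old long positional parameter list.
	r := bandwidth.NewMonitoredReader(context.Background(), monitor, src, &bandwidth.MonitorReaderOptions{
		Bucket:               "src-bucket",
		Object:               "obj",
		HeaderSize:           0,
		BandwidthBytesPerSec: 1 << 20, // throttle to ~1MiB/s
		ClusterBandwidth:     4 << 20,
	})

	n, err := io.Copy(io.Discard, r)
	fmt.Println(n, err)
}

Note the design shift the diff makes alongside the options struct: the wrapper now takes a plain io.Reader and no longer owns closing it, and a sticky lastErr replaces the closed flag.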
@@ -18,6 +18,7 @@ package lifecycle
 
 import (
     "encoding/xml"
+    "fmt"
     "io"
     "strings"
     "time"

@@ -71,7 +72,8 @@ func (lc *Lifecycle) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err e
     switch start.Name.Local {
     case "LifecycleConfiguration", "BucketLifecycleConfiguration":
     default:
-        return errUnknownXMLTag
+        return xml.UnmarshalError(fmt.Sprintf("expected element type <LifecycleConfiguration>/<BucketLifecycleConfiguration> but have <%s>",
+            start.Name.Local))
     }
     for {
         // Read tokens from the XML document in a stream.

@@ -93,7 +95,7 @@ func (lc *Lifecycle) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err e
             }
             lc.Rules = append(lc.Rules, r)
         default:
-            return errUnknownXMLTag
+            return xml.UnmarshalError(fmt.Sprintf("expected element type <Rule> but have <%s>", se.Name.Local))
         }
     }
 }

@@ -489,6 +489,41 @@ type ObjectLegalHold struct {
     Status LegalHoldStatus `xml:"Status,omitempty"`
 }
 
+// UnmarshalXML - decodes XML data.
+func (l *ObjectLegalHold) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
+    switch start.Name.Local {
+    case "LegalHold", "ObjectLockLegalHold":
+    default:
+        return xml.UnmarshalError(fmt.Sprintf("expected element type <LegalHold>/<ObjectLockLegalHold> but have <%s>",
+            start.Name.Local))
+    }
+    for {
+        // Read tokens from the XML document in a stream.
+        t, err := d.Token()
+        if err != nil {
+            if err == io.EOF {
+                break
+            }
+            return err
+        }
+
+        switch se := t.(type) {
+        case xml.StartElement:
+            switch se.Name.Local {
+            case "Status":
+                var st LegalHoldStatus
+                if err = d.DecodeElement(&st, &se); err != nil {
+                    return err
+                }
+                l.Status = st
+            default:
+                return xml.UnmarshalError(fmt.Sprintf("expected element type <Status> but have <%s>", se.Name.Local))
+            }
+        }
+    }
+    return nil
+}
+
 // IsEmpty returns true if struct is empty
 func (l *ObjectLegalHold) IsEmpty() bool {
     return !l.Status.Valid()
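Both this hunk and the lifecycle change above follow the same strict-decoding pattern: validate the root element name, then reject any unexpected child with a descriptive xml.UnmarshalError. A self-contained demonstration of the pattern on a simplified legal-hold type — the type here is a stand-in, not the package's own:

package main

import (
	"encoding/xml"
	"fmt"
	"io"
)

// legalHold is a stand-in for ObjectLegalHold, just to demonstrate the
// strict-root + strict-child decoding pattern used in the hunks above.
type legalHold struct {
	Status string
}

func (l *legalHold) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	switch start.Name.Local {
	case "LegalHold", "ObjectLockLegalHold":
	default:
		return xml.UnmarshalError(fmt.Sprintf("expected element type <LegalHold>/<ObjectLockLegalHold> but have <%s>", start.Name.Local))
	}
	for {
		t, err := d.Token()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if se, ok := t.(xml.StartElement); ok {
			if se.Name.Local != "Status" {
				return xml.UnmarshalError(fmt.Sprintf("expected element type <Status> but have <%s>", se.Name.Local))
			}
			if err := d.DecodeElement(&l.Status, &se); err != nil {
				return err
			}
		}
	}
}

func main() {
	var lh legalHold
	err := xml.Unmarshal([]byte(`<ObjectLockLegalHold><Status>ON</Status></ObjectLockLegalHold>`), &lh)
	fmt.Println(lh.Status, err) // ON <nil>

	err = xml.Unmarshal([]byte(`<UnknownLegalHold><Status>ON</Status></UnknownLegalHold>`), &lh)
	fmt.Println(err) // expected element type <LegalHold>/<ObjectLockLegalHold> but have <UnknownLegalHold>
}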
@@ -18,6 +18,7 @@ package lock
 
 import (
     "encoding/xml"
+    "errors"
     "fmt"
     "net/http"
     "reflect"

@@ -467,6 +468,23 @@ func TestParseObjectLegalHold(t *testing.T) {
             expectedErr: nil,
             expectErr:   false,
         },
+        {
+            value:       `<?xml version="1.0" encoding="UTF-8"?><ObjectLockLegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>ON</Status></ObjectLockLegalHold>`,
+            expectedErr: nil,
+            expectErr:   false,
+        },
+        // invalid Status key
+        {
+            value:       `<?xml version="1.0" encoding="UTF-8"?><ObjectLockLegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><MyStatus>ON</MyStatus></ObjectLockLegalHold>`,
+            expectedErr: errors.New("expected element type <Status> but have <MyStatus>"),
+            expectErr:   true,
+        },
+        // invalid XML attr
+        {
+            value:       `<?xml version="1.0" encoding="UTF-8"?><UnknownLegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>ON</Status></UnknownLegalHold>`,
+            expectedErr: errors.New("expected element type <LegalHold>/<ObjectLockLegalHold> but have <UnknownLegalHold>"),
+            expectErr:   true,
+        },
         {
             value:       `<?xml version="1.0" encoding="UTF-8"?><LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>On</Status></LegalHold>`,
             expectedErr: ErrMalformedXML,
Some files were not shown because too many files have changed in this diff.