Merge branch 'master' into nats-creds-login
This commit is contained in: commit 8d673eeb93
.github/workflows/go-cross.yml (vendored): 2 changes

@@ -11,7 +11,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.16.x]
+        go-version: [1.16.x, 1.17.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v2

.github/workflows/go-lint.yml (vendored): 2 changes

@@ -11,7 +11,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.16.x]
+        go-version: [1.16.x, 1.17.x]
        os: [ubuntu-latest, windows-latest]
     steps:
       - uses: actions/checkout@v2

.github/workflows/go.yml (vendored): 2 changes

@@ -11,7 +11,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.16.x]
+        go-version: [1.16.x, 1.17.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v2

@@ -56,7 +56,7 @@ $ git push origin my-new-feature
 Pull requests can be created via GitHub. Refer to [this document](https://help.github.com/articles/creating-a-pull-request/) for detailed steps on how to create a pull request. After a Pull Request gets peer reviewed and approved, it will be merged.
 
 ## FAQs
-### How does ``MinIO`` manages dependencies?
+### How does ``MinIO`` manage dependencies?
 ``MinIO`` uses `go mod` to manage its dependencies.
 - Run `go get foo/bar` in the source folder to add the dependency to `go.mod` file.
 

@@ -21,10 +21,8 @@ for more complete documentation.
 Run the following command to run the latest stable image of MinIO as a container using an ephemeral data volume:
 
 ```sh
-podman run \
-  -p 9000:9000 \
-  -p 9001:9001 \
-  minio/minio server /data --console-address ":9001"
+podman run -p 9000:9000 -p 9001:9001 \
+  quay.io/minio/minio server /data --console-address ":9001"
 ```
 
 The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded

@@ -259,5 +257,5 @@ mc admin update <minio alias, e.g., myminio>
 Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
 
 # License
-MinIO source is licensed under the GNU AGPLv3 license that can be found in the [LICENSE](https://github.com/minio/minio/blob/master/LICENSE) file.
+MinIO source is licensed under the GNU AGPLv3 license that can be found in the [LICENSE](https://github.com/minio/minio/blob/master/LICENSE) file.
 MinIO [Documentation](https://github.com/minio/minio/tree/master/docs) © 2021 by MinIO, Inc is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).

@@ -1,3 +1,4 @@
+//go:build ignore
 // +build ignore
 
 // Copyright (c) 2015-2021 MinIO, Inc.

@@ -1,7 +1,6 @@
-#!/bin/bash
+#!/bin/bash -e
 #
 
-set -e
 set -E
 set -o pipefail

@@ -14,8 +13,6 @@ WORK_DIR="$PWD/.verify-$RANDOM"
 MINIO_CONFIG_DIR="$WORK_DIR/.minio"
 MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
 
-export GOGC=25
-
 function start_minio_3_node() {
 	export MINIO_ROOT_USER=minio
 	export MINIO_ROOT_PASSWORD=minio123

@@ -88,22 +85,22 @@ function __init__()
 }
 
 function perform_test() {
-	start_minio_3_node 60
+	start_minio_3_node 120
 
 	echo "Testing Distributed Erasure setup healing of drives"
 	echo "Remove the contents of the disks belonging to '${1}' erasure set"
 
 	rm -rf ${WORK_DIR}/${1}/*/
 
-	start_minio_3_node 60
+	start_minio_3_node 120
 
 	rv=$(check_online)
 	if [ "$rv" == "1" ]; then
-		pkill -9 minio
 		for i in $(seq 1 3); do
 			echo "server$i log:"
 			cat "${WORK_DIR}/dist-minio-server$i.log"
 		done
+		pkill -9 minio
 		echo "FAILED"
 		purge "$WORK_DIR"
 		exit 1

@@ -85,7 +85,7 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
 	if objectAPI == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return
@@ -122,7 +122,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 	vars := mux.Vars(r)
 	bucket := pathClean(vars["bucket"])
-	update := r.URL.Query().Get("update") == "true"
+	update := r.Form.Get("update") == "true"
 
 	if !globalIsErasure {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
@@ -130,7 +130,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
 	}
 
 	// Get current object layer instance.
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketTargetAction)
 	if objectAPI == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return
@@ -169,7 +169,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
 	target.SourceBucket = bucket
 	var ops []madmin.TargetUpdateType
 	if update {
-		ops = madmin.GetTargetUpdateOps(r.URL.Query())
+		ops = madmin.GetTargetUpdateOps(r.Form)
 	} else {
 		target.Arn = globalBucketTargetSys.getRemoteARN(bucket, &target)
 	}
@@ -258,7 +258,7 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt
 		return
 	}
 	// Get current object layer instance.
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketTargetAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetBucketTargetAction)
 	if objectAPI == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return
@@ -298,7 +298,7 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
 		return
 	}
 	// Get current object layer instance.
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.SetBucketTargetAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketTargetAction)
 	if objectAPI == nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
 		return
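Many hunks in this merge replace `r.URL.Query()` with `r.Form`. For background, a minimal standalone sketch (plain net/http, not MinIO code) of the difference: `r.Form` is parsed once and merges the URL query with an urlencoded request body, while `r.URL.Query()` reparses the raw query string on every call and never sees the body. `r.Form` is only populated after `ParseForm` runs (or, as the auth-handler hunk near the end of this diff shows, after the parsed query is assigned directly).

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	// POST with one parameter in the URL query and one in the form body.
	body := strings.NewReader("update=true")
	r := httptest.NewRequest(http.MethodPost, "/target?bucket=photos", body)
	r.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	// ParseForm populates r.Form from both the query string and the body.
	if err := r.ParseForm(); err != nil {
		panic(err)
	}

	fmt.Println(r.URL.Query().Get("update")) // "": query string only
	fmt.Println(r.Form.Get("update"))        // "true": merged form data
	fmt.Println(r.Form.Get("bucket"))        // "photos": query values included too
}
```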
cmd/admin-handler-utils.go (new file): 172 lines added

@@ -0,0 +1,172 @@
+// Copyright (c) 2015-2021 MinIO, Inc.
+//
+// This file is part of MinIO Object Storage stack
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package cmd
+
+import (
+	"context"
+	"errors"
+	"net/http"
+
+	"github.com/minio/kes"
+	"github.com/minio/madmin-go"
+	"github.com/minio/minio/internal/auth"
+	"github.com/minio/minio/internal/config"
+	iampolicy "github.com/minio/pkg/iam/policy"
+)
+
+func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
+	// Get current object layer instance.
+	objectAPI := newObjectLayerFn()
+	if objectAPI == nil || globalNotificationSys == nil {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
+		return nil, auth.Credentials{}
+	}
+
+	// Validate request signature.
+	cred, adminAPIErr := checkAdminRequestAuth(ctx, r, action, "")
+	if adminAPIErr != ErrNone {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
+		return nil, cred
+	}
+
+	return objectAPI, cred
+}
+
+// AdminError - is a generic error for all admin APIs.
+type AdminError struct {
+	Code       string
+	Message    string
+	StatusCode int
+}
+
+func (ae AdminError) Error() string {
+	return ae.Message
+}
+
+func toAdminAPIErr(ctx context.Context, err error) APIError {
+	if err == nil {
+		return noError
+	}
+
+	var apiErr APIError
+	switch e := err.(type) {
+	case iampolicy.Error:
+		apiErr = APIError{
+			Code:           "XMinioMalformedIAMPolicy",
+			Description:    e.Error(),
+			HTTPStatusCode: http.StatusBadRequest,
+		}
+	case config.Error:
+		apiErr = APIError{
+			Code:           "XMinioConfigError",
+			Description:    e.Error(),
+			HTTPStatusCode: http.StatusBadRequest,
+		}
+	case AdminError:
+		apiErr = APIError{
+			Code:           e.Code,
+			Description:    e.Message,
+			HTTPStatusCode: e.StatusCode,
+		}
+	default:
+		switch {
+		case errors.Is(err, errConfigNotFound):
+			apiErr = APIError{
+				Code:           "XMinioConfigError",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusNotFound,
+			}
+		case errors.Is(err, errIAMActionNotAllowed):
+			apiErr = APIError{
+				Code:           "XMinioIAMActionNotAllowed",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusForbidden,
+			}
+		case errors.Is(err, errIAMNotInitialized):
+			apiErr = APIError{
+				Code:           "XMinioIAMNotInitialized",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusServiceUnavailable,
+			}
+		case errors.Is(err, kes.ErrKeyExists):
+			apiErr = APIError{
+				Code:           "XMinioKMSKeyExists",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusConflict,
+			}
+
+		// Tier admin API errors
+		case errors.Is(err, madmin.ErrTierNameEmpty):
+			apiErr = APIError{
+				Code:           "XMinioAdminTierNameEmpty",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
+		case errors.Is(err, madmin.ErrTierInvalidConfig):
+			apiErr = APIError{
+				Code:           "XMinioAdminTierInvalidConfig",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
+		case errors.Is(err, madmin.ErrTierInvalidConfigVersion):
+			apiErr = APIError{
+				Code:           "XMinioAdminTierInvalidConfigVersion",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
+		case errors.Is(err, madmin.ErrTierTypeUnsupported):
+			apiErr = APIError{
+				Code:           "XMinioAdminTierTypeUnsupported",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
+		case errors.Is(err, errTierBackendInUse):
+			apiErr = APIError{
+				Code:           "XMinioAdminTierBackendInUse",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusConflict,
+			}
+		case errors.Is(err, errTierInsufficientCreds):
+			apiErr = APIError{
+				Code:           "XMinioAdminTierInsufficientCreds",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
+		case errIsTierPermError(err):
+			apiErr = APIError{
+				Code:           "XMinioAdminTierInsufficientPermissions",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
+		default:
+			apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
+		}
+	}
+	return apiErr
+}
+
+// toAdminAPIErrCode - converts errErasureWriteQuorum error to admin API
+// specific error.
+func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode {
+	switch err {
+	case errErasureWriteQuorum:
+		return ErrAdminConfigNoQuorum
+	default:
+		return toAPIErrorCode(ctx, err)
+	}
+}
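The new helper above becomes the single validation entry point for admin handlers; the hunks below rewrite every `validateAdminReqConfigKV` and `validateAdminUsersReq` call to it. A standalone sketch of that calling convention with stand-in types (the placeholder auth check and names are illustrative, not MinIO's): on failure the validator writes the error response itself, so callers only test for a nil layer and return.

```go
package main

import (
	"fmt"
	"net/http"
)

// Stand-ins for the ObjectLayer and auth.Credentials types used above.
type objectLayer interface{ Name() string }
type credentials struct{ AccessKey string }

type dummyLayer struct{}

func (dummyLayer) Name() string { return "dummy" }

// validateExample mirrors validateAdminReq's shape: it writes the error
// response itself and signals failure to the caller with a nil layer.
func validateExample(w http.ResponseWriter, r *http.Request) (objectLayer, credentials) {
	if r.Header.Get("Authorization") == "" { // placeholder auth check
		http.Error(w, "access denied", http.StatusForbidden)
		return nil, credentials{}
	}
	return dummyLayer{}, credentials{AccessKey: "admin"}
}

func handler(w http.ResponseWriter, r *http.Request) {
	// The pattern used by every admin handler in the hunks below:
	objectAPI, cred := validateExample(w, r)
	if objectAPI == nil {
		return // response already written by the validator
	}
	fmt.Fprintf(w, "hello %s via %s\n", cred.AccessKey, objectAPI.Name())
}

func main() {
	http.HandleFunc("/admin", handler)
	http.ListenAndServe("localhost:8080", nil)
}
```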
@@ -19,7 +19,6 @@ package cmd
 
 import (
 	"bytes"
-	"context"
 	"encoding/json"
 	"io"
 	"net/http"
@@ -28,7 +27,6 @@ import (
 
 	"github.com/gorilla/mux"
 	"github.com/minio/madmin-go"
-	"github.com/minio/minio/internal/auth"
 	"github.com/minio/minio/internal/config"
 	"github.com/minio/minio/internal/config/cache"
 	"github.com/minio/minio/internal/config/etcd"
@@ -40,31 +38,13 @@ import (
 	iampolicy "github.com/minio/pkg/iam/policy"
 )
 
-func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
-	// Get current object layer instance.
-	objectAPI := newObjectLayerFn()
-	if objectAPI == nil {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
-		return auth.Credentials{}, nil
-	}
-
-	// Validate request signature.
-	cred, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
-	if adminAPIErr != ErrNone {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
-		return cred, nil
-	}
-
-	return cred, objectAPI
-}
-
 // DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
 func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "DeleteConfigKV")
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -106,7 +86,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -137,7 +117,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
+	if err = validateConfig(cfg); err != nil {
 		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
 		return
 	}
@@ -173,7 +153,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -202,7 +182,7 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -239,7 +219,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -268,7 +248,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
 		return
 	}
 
-	if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
+	if err = validateConfig(cfg); err != nil {
 		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
 		return
 	}
@@ -287,7 +267,7 @@ func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *h
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -327,7 +307,7 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -337,7 +317,7 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Req
 	subSys := vars["subSys"]
 	key := vars["key"]
 
-	_, envOnly := r.URL.Query()["env"]
+	_, envOnly := r.Form["env"]
 
 	rd, err := GetHelp(subSys, key, envOnly)
 	if err != nil {
@@ -355,7 +335,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -380,7 +360,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
 		return
 	}
 
-	if err = validateConfig(cfg, objectAPI.SetDriveCounts()); err != nil {
+	if err = validateConfig(cfg); err != nil {
 		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
 		return
 	}
@@ -407,7 +387,7 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
 	if objectAPI == nil {
 		return
 	}

@@ -19,7 +19,6 @@ package cmd
 
 import (
 	"bytes"
-	"context"
 	"encoding/json"
 	"errors"
 	"io"
@@ -29,40 +28,18 @@ import (
 
 	"github.com/gorilla/mux"
 	"github.com/minio/madmin-go"
-	"github.com/minio/minio/internal/auth"
 	"github.com/minio/minio/internal/config/dns"
 	"github.com/minio/minio/internal/logger"
 	iampolicy "github.com/minio/pkg/iam/policy"
 )
 
-func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
-	var cred auth.Credentials
-	var adminAPIErr APIErrorCode
-
-	// Get current object layer instance.
-	objectAPI := newObjectLayerFn()
-	if objectAPI == nil || globalNotificationSys == nil {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
-		return nil, cred
-	}
-
-	// Validate request signature.
-	cred, adminAPIErr = checkAdminRequestAuth(ctx, r, action, "")
-	if adminAPIErr != ErrNone {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
-		return nil, cred
-	}
-
-	return objectAPI, cred
-}
-
 // RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
 func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "RemoveUser")
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -100,7 +77,7 @@ func (a adminAPIHandlers) ListBucketUsers(w http.ResponseWriter, r *http.Request
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ListUsersAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -136,7 +113,7 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
+	objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ListUsersAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -234,7 +211,7 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -279,7 +256,7 @@ func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetGroupAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -308,7 +285,7 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -334,7 +311,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -371,7 +348,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.EnableUserAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -851,7 +828,7 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
 
 	var targetAccount string
 
-	user := r.URL.Query().Get("user")
+	user := r.Form.Get("user")
 	if user != "" {
 		if !globalIAMSys.IsAllowed(iampolicy.Args{
 			AccountName: cred.AccessKey,
@@ -997,7 +974,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
 	r.Header.Set("delimiter", SlashSeparator)
 
 	// Check if we are asked to return prefix usage
-	enablePrefixUsage := r.URL.Query().Get("prefix-usage") == "true"
+	enablePrefixUsage := r.Form.Get("prefix-usage") == "true"
 
 	isAllowedAccess := func(bucketName string) (rd, wr bool) {
 		if globalIAMSys.IsAllowed(iampolicy.Args{
@@ -1144,7 +1121,7 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -1169,7 +1146,7 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -1205,7 +1182,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -1239,7 +1216,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -1267,7 +1244,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
 	if objectAPI == nil {
 		return
 	}
@@ -1319,7 +1296,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
 
 	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
 
-	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
 	if objectAPI == nil {
 		return
 	}

@@ -23,7 +23,6 @@ import (
 	"crypto/subtle"
 	"crypto/tls"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"hash/crc32"
 	"io"
@@ -42,10 +41,7 @@ import (
 	humanize "github.com/dustin/go-humanize"
 	"github.com/gorilla/mux"
 	"github.com/klauspost/compress/zip"
-	"github.com/minio/kes"
 	"github.com/minio/madmin-go"
-	"github.com/minio/minio/internal/auth"
-	"github.com/minio/minio/internal/config"
 	"github.com/minio/minio/internal/dsync"
 	"github.com/minio/minio/internal/handlers"
 	xhttp "github.com/minio/minio/internal/http"
@@ -373,10 +369,10 @@ func topLockEntries(peerLocks []*PeerLocks, stale bool) madmin.LockEntries {
 		}
 		for k, v := range peerLock.Locks {
 			for _, lockReqInfo := range v {
-				if val, ok := entryMap[lockReqInfo.UID]; ok {
+				if val, ok := entryMap[lockReqInfo.Name]; ok {
 					val.ServerList = append(val.ServerList, peerLock.Addr)
 				} else {
-					entryMap[lockReqInfo.UID] = lriToLockEntry(lockReqInfo, k, peerLock.Addr)
+					entryMap[lockReqInfo.Name] = lriToLockEntry(lockReqInfo, k, peerLock.Addr)
 				}
 			}
 		}
@@ -450,7 +446,7 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request
 	}
 
 	count := 10 // by default list only top 10 entries
-	if countStr := r.URL.Query().Get("count"); countStr != "" {
+	if countStr := r.Form.Get("count"); countStr != "" {
 		var err error
 		count, err = strconv.Atoi(countStr)
 		if err != nil {
@@ -458,7 +454,7 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request
 			return
 		}
 	}
-	stale := r.URL.Query().Get("stale") == "true" // list also stale locks
+	stale := r.Form.Get("stale") == "true" // list also stale locks
 
 	peerLocks := globalNotificationSys.GetLocks(ctx, r)
 
@@ -713,7 +709,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	hip, errCode := extractHealInitParams(mux.Vars(r), r.URL.Query(), r.Body)
+	hip, errCode := extractHealInitParams(mux.Vars(r), r.Form, r.Body)
 	if errCode != ErrNone {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
 		return
@@ -926,9 +922,9 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
 		return
 	}
 
-	sizeStr := r.URL.Query().Get(peerRESTSize)
-	durationStr := r.URL.Query().Get(peerRESTDuration)
-	concurrentStr := r.URL.Query().Get(peerRESTConcurrent)
+	sizeStr := r.Form.Get(peerRESTSize)
+	durationStr := r.Form.Get(peerRESTDuration)
+	concurrentStr := r.Form.Get(peerRESTConcurrent)
 
 	size, err := strconv.Atoi(sizeStr)
 	if err != nil {
@@ -957,37 +953,6 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
 	w.(http.Flusher).Flush()
 }
 
-func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
-	var cred auth.Credentials
-	var adminAPIErr APIErrorCode
-	// Get current object layer instance.
-	objectAPI := newObjectLayerFn()
-	if objectAPI == nil || globalNotificationSys == nil {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
-		return nil, cred
-	}
-
-	// Validate request signature.
-	cred, adminAPIErr = checkAdminRequestAuth(ctx, r, action, "")
-	if adminAPIErr != ErrNone {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
-		return nil, cred
-	}
-
-	return objectAPI, cred
-}
-
-// AdminError - is a generic error for all admin APIs.
-type AdminError struct {
-	Code       string
-	Message    string
-	StatusCode int
-}
-
-func (ae AdminError) Error() string {
-	return ae.Message
-}
-
 // Admin API errors
 const (
 	AdminUpdateUnexpectedFailure = "XMinioAdminUpdateUnexpectedFailure"
@@ -995,119 +960,6 @@ const (
 	AdminUpdateApplyFailure = "XMinioAdminUpdateApplyFailure"
 )
 
-// toAdminAPIErrCode - converts errErasureWriteQuorum error to admin API
-// specific error.
-func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode {
-	switch err {
-	case errErasureWriteQuorum:
-		return ErrAdminConfigNoQuorum
-	default:
-		return toAPIErrorCode(ctx, err)
-	}
-}
-
-func toAdminAPIErr(ctx context.Context, err error) APIError {
-	if err == nil {
-		return noError
-	}
-
-	var apiErr APIError
-	switch e := err.(type) {
-	case iampolicy.Error:
-		apiErr = APIError{
-			Code:           "XMinioMalformedIAMPolicy",
-			Description:    e.Error(),
-			HTTPStatusCode: http.StatusBadRequest,
-		}
-	case config.Error:
-		apiErr = APIError{
-			Code:           "XMinioConfigError",
-			Description:    e.Error(),
-			HTTPStatusCode: http.StatusBadRequest,
-		}
-	case AdminError:
-		apiErr = APIError{
-			Code:           e.Code,
-			Description:    e.Message,
-			HTTPStatusCode: e.StatusCode,
-		}
-	default:
-		switch {
-		case errors.Is(err, errConfigNotFound):
-			apiErr = APIError{
-				Code:           "XMinioConfigError",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusNotFound,
-			}
-		case errors.Is(err, errIAMActionNotAllowed):
-			apiErr = APIError{
-				Code:           "XMinioIAMActionNotAllowed",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusForbidden,
-			}
-		case errors.Is(err, errIAMNotInitialized):
-			apiErr = APIError{
-				Code:           "XMinioIAMNotInitialized",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusServiceUnavailable,
-			}
-		case errors.Is(err, kes.ErrKeyExists):
-			apiErr = APIError{
-				Code:           "XMinioKMSKeyExists",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusConflict,
-			}
-
-		// Tier admin API errors
-		case errors.Is(err, madmin.ErrTierNameEmpty):
-			apiErr = APIError{
-				Code:           "XMinioAdminTierNameEmpty",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusBadRequest,
-			}
-		case errors.Is(err, madmin.ErrTierInvalidConfig):
-			apiErr = APIError{
-				Code:           "XMinioAdminTierInvalidConfig",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusBadRequest,
-			}
-		case errors.Is(err, madmin.ErrTierInvalidConfigVersion):
-			apiErr = APIError{
-				Code:           "XMinioAdminTierInvalidConfigVersion",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusBadRequest,
-			}
-		case errors.Is(err, madmin.ErrTierTypeUnsupported):
-			apiErr = APIError{
-				Code:           "XMinioAdminTierTypeUnsupported",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusBadRequest,
-			}
-		case errors.Is(err, errTierBackendInUse):
-			apiErr = APIError{
-				Code:           "XMinioAdminTierBackendInUse",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusConflict,
-			}
-		case errors.Is(err, errTierInsufficientCreds):
-			apiErr = APIError{
-				Code:           "XMinioAdminTierInsufficientCreds",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusBadRequest,
-			}
-		case errIsTierPermError(err):
-			apiErr = APIError{
-				Code:           "XMinioAdminTierInsufficientPermissions",
-				Description:    err.Error(),
-				HTTPStatusCode: http.StatusBadRequest,
-			}
-		default:
-			apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
-		}
-	}
-	return apiErr
-}
-
 // Returns true if the madmin.TraceInfo should be traced,
 // false if certain conditions are not met.
 // - input entry is not of the type *madmin.TraceInfo*
@@ -1157,7 +1009,7 @@ func mustTrace(entry interface{}, opts madmin.ServiceTraceOpts) (shouldTrace boo
 }
 
 func extractTraceOptions(r *http.Request) (opts madmin.ServiceTraceOpts, err error) {
-	q := r.URL.Query()
+	q := r.Form
 
 	opts.OnlyErrors = q.Get("err") == "true"
 	opts.S3 = q.Get("s3") == "true"
@@ -1259,15 +1111,15 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
 	if objectAPI == nil {
 		return
 	}
-	node := r.URL.Query().Get("node")
+	node := r.Form.Get("node")
 	// limit buffered console entries if client requested it.
-	limitStr := r.URL.Query().Get("limit")
+	limitStr := r.Form.Get("limit")
 	limitLines, err := strconv.Atoi(limitStr)
 	if err != nil {
 		limitLines = 10
 	}
 
-	logKind := r.URL.Query().Get("logType")
+	logKind := r.Form.Get("logType")
 	if logKind == "" {
 		logKind = string(logger.All)
 	}
@@ -1342,7 +1194,7 @@ func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Req
 		return
 	}
 
-	if err := GlobalKMS.CreateKey(r.URL.Query().Get("key-id")); err != nil {
+	if err := GlobalKMS.CreateKey(r.Form.Get("key-id")); err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
@@ -1409,7 +1261,7 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
 		return
 	}
 
-	keyID := r.URL.Query().Get("key-id")
+	keyID := r.Form.Get("key-id")
 	if keyID == "" {
 		keyID = stat.DefaultKey
 	}
@@ -1576,7 +1428,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 		return
 	}
 
-	query := r.URL.Query()
+	query := r.Form
 	healthInfo := madmin.HealthInfo{Version: madmin.HealthInfoVersion}
 	healthInfoCh := make(chan madmin.HealthInfo)
 
@@ -1600,7 +1452,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 	}
 
 	deadline := 1 * time.Hour
-	if dstr := r.URL.Query().Get("deadline"); dstr != "" {
+	if dstr := r.Form.Get("deadline"); dstr != "" {
 		var err error
 		deadline, err = time.ParseDuration(dstr)
 		if err != nil {
@@ -1715,6 +1567,38 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 		}
 	}
 
+	getAndWriteSysConfig := func() {
+		if query.Get(string(madmin.HealthDataTypeSysConfig)) == "true" {
+			localSysConfig := madmin.GetSysConfig(deadlinedCtx, globalLocalNodeName)
+			anonymizeAddr(&localSysConfig)
+			healthInfo.Sys.SysConfig = append(healthInfo.Sys.SysConfig, localSysConfig)
+			partialWrite(healthInfo)
+
+			peerSysConfig := globalNotificationSys.GetSysConfig(deadlinedCtx)
+			for _, sc := range peerSysConfig {
+				anonymizeAddr(&sc)
+				healthInfo.Sys.SysConfig = append(healthInfo.Sys.SysConfig, sc)
+			}
+			partialWrite(healthInfo)
+		}
+	}
+
+	getAndWriteSysServices := func() {
+		if query.Get(string(madmin.HealthDataTypeSysServices)) == "true" {
+			localSysServices := madmin.GetSysServices(deadlinedCtx, globalLocalNodeName)
+			anonymizeAddr(&localSysServices)
+			healthInfo.Sys.SysServices = append(healthInfo.Sys.SysServices, localSysServices)
+			partialWrite(healthInfo)
+
+			peerSysServices := globalNotificationSys.GetSysServices(deadlinedCtx)
+			for _, ss := range peerSysServices {
+				anonymizeAddr(&ss)
+				healthInfo.Sys.SysServices = append(healthInfo.Sys.SysServices, ss)
+			}
+			partialWrite(healthInfo)
+		}
+	}
+
 	anonymizeCmdLine := func(cmdLine string) string {
 		if !globalIsDistErasure {
 			// FS mode - single server - hard code to `server1`
@@ -1889,6 +1773,8 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 		getAndWriteDrivePerfInfo()
 		getAndWriteNetPerfInfo()
 		getAndWriteSysErrors()
+		getAndWriteSysServices()
+		getAndWriteSysConfig()
 
 		if query.Get("minioinfo") == "true" {
 			infoMessage := getServerInfo(ctx, r)
@@ -1975,7 +1861,7 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http
 	reportCh := make(chan madmin.BucketBandwidthReport)
 	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
 	defer keepAliveTicker.Stop()
-	bucketsRequestedString := r.URL.Query().Get("buckets")
+	bucketsRequestedString := r.Form.Get("buckets")
 	bucketsRequested := strings.Split(bucketsRequestedString, ",")
 	go func() {
 		defer close(reportCh)
@@ -2233,8 +2119,8 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	volume := r.URL.Query().Get("volume")
-	file := r.URL.Query().Get("file")
+	volume := r.Form.Get("volume")
+	file := r.Form.Get("file")
 	if len(volume) == 0 {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketName), r.URL)
 		return
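HealthInfoHandler assembles its report section by section and calls `partialWrite` after each one; `partialWrite` itself is outside this diff, but the visible `w.(http.Flusher).Flush()` calls indicate progressive output. A standalone sketch of that streaming pattern, assuming `partialWrite` encodes-and-flushes roughly like this (the payload and timings here are stand-ins):

```go
package main

import (
	"encoding/json"
	"net/http"
	"time"
)

func healthHandler(w http.ResponseWriter, r *http.Request) {
	enc := json.NewEncoder(w)
	flusher, _ := w.(http.Flusher)

	for i := 0; i < 3; i++ {
		snapshot := map[string]int{"sectionsCollected": i + 1} // stand-in payload
		if err := enc.Encode(snapshot); err != nil {
			return // client went away
		}
		if flusher != nil {
			flusher.Flush() // push the partial result to the client now
		}
		time.Sleep(100 * time.Millisecond) // simulates gathering the next section
	}
}

func main() {
	http.HandleFunc("/health", healthHandler)
	http.ListenAndServe("localhost:8080", nil)
}
```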
@@ -20,6 +20,7 @@ package cmd
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"net/http"
 	"sort"
@@ -396,9 +397,6 @@ type healSequence struct {
 	// bucket, and object on which heal seq. was initiated
 	bucket, object string
 
-	// A channel of entities (format, buckets, objects) to heal
-	sourceCh chan healSource
-
 	// A channel of entities with heal result
 	respCh chan healResult
 
@@ -648,11 +646,7 @@ func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
 	h.currentStatus.StartTime = UTCNow()
 	h.mutex.Unlock()
 
-	if h.sourceCh == nil {
-		go h.traverseAndHeal(objAPI)
-	} else {
-		go h.healFromSourceCh()
-	}
+	go h.traverseAndHeal(objAPI)
 
 	select {
 	case err, ok := <-h.traverseAndHealDoneCh:
@@ -696,45 +690,35 @@ func (h *healSequence) logHeal(healType madmin.HealItemType) {
 }
 
 func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
-	globalHealConfigMu.Lock()
-	opts := globalHealConfig
-	globalHealConfigMu.Unlock()
-
 	// Send heal request
 	task := healTask{
-		bucket:     source.bucket,
-		object:     source.object,
-		versionID:  source.versionID,
-		opts:       h.settings,
-		responseCh: h.respCh,
+		bucket:    source.bucket,
+		object:    source.object,
+		versionID: source.versionID,
+		opts:      h.settings,
+		respCh:    h.respCh,
 	}
 	if source.opts != nil {
 		task.opts = *source.opts
 	}
-	if opts.Bitrot {
-		task.opts.ScanMode = madmin.HealDeepScan
-	}
-
-	// Wait and proceed if there are active requests
-	waitForLowHTTPReq(opts.IOCount, opts.Sleep)
+	task.opts.ScanMode = globalHealConfig.ScanMode()
 
 	h.mutex.Lock()
 	h.scannedItemsMap[healType]++
 	h.lastHealActivity = UTCNow()
 	h.mutex.Unlock()
 
-	globalBackgroundHealRoutine.queueHealTask(task)
+	select {
+	case globalBackgroundHealRoutine.tasks <- task:
+	case <-h.ctx.Done():
+		return nil
+	}
 
 	select {
 	case res := <-h.respCh:
 		if !h.reportProgress {
-			// Object might have been deleted, by the time heal
-			// was attempted, we should ignore this object and
-			// return the error and not calculate this object
-			// as part of the metrics.
-			if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
-				// Return the error so that caller can handle it.
-				return res.err
+			if errors.Is(res.err, errSkipFile) { // this is only sent usually by nopHeal
+				return nil
 			}
 
 			h.mutex.Lock()
@@ -759,11 +743,6 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
 		}
 		res.result.Type = healType
 		if res.err != nil {
-			// Object might have been deleted, by the time heal
-			// was attempted, we should ignore this object and return success.
-			if isErrObjectNotFound(res.err) || isErrVersionNotFound(res.err) {
-				return nil
-			}
 			// Only report object error
 			if healType != madmin.HealItemObject {
 				return res.err
@@ -776,48 +755,6 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
 	}
 }
 
-func (h *healSequence) healItemsFromSourceCh() error {
-	for {
-		select {
-		case source, ok := <-h.sourceCh:
-			if !ok {
-				return nil
-			}
-
-			var itemType madmin.HealItemType
-			switch source.bucket {
-			case nopHeal:
-				continue
-			case SlashSeparator:
-				itemType = madmin.HealItemMetadata
-			default:
-				if source.object == "" {
-					itemType = madmin.HealItemBucket
-				} else {
-					itemType = madmin.HealItemObject
-				}
-			}
-
-			if err := h.queueHealTask(source, itemType); err != nil {
-				switch err.(type) {
-				case ObjectExistsAsDirectory:
-				case ObjectNotFound:
-				case VersionNotFound:
-				default:
-					logger.LogIf(h.ctx, fmt.Errorf("Heal attempt failed for %s: %w",
-						pathJoin(source.bucket, source.object), err))
-				}
-			}
-		case <-h.ctx.Done():
-			return nil
-		}
-	}
-}
-
-func (h *healSequence) healFromSourceCh() {
-	h.healItemsFromSourceCh()
-}
-
 func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
 	// Start healing the config prefix.
 	return h.healMinioSysMeta(objAPI, minioConfigPrefix)()
@@ -862,11 +799,6 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
 			object:    object,
 			versionID: versionID,
 		}, madmin.HealItemBucketMetadata)
-		// Object might have been deleted, by the time heal
-		// was attempted we ignore this object an move on.
-		if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
-			return nil
-		}
 		return err
 	})
 }
@@ -915,9 +847,7 @@ func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
 // healBucket - traverses and heals given bucket
 func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly bool) error {
 	if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
-		if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
-			return err
-		}
+		return err
 	}
 
 	if bucketsOnly {
@@ -931,9 +861,6 @@ func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly
 	oi, err := objAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
 	if err == nil {
 		if err = h.healObject(bucket, h.object, oi.VersionID); err != nil {
-			if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
-				return nil
-			}
 			return err
 		}
 	}
@@ -943,11 +870,7 @@ func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly
 	}
 
 	if err := objAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
-		// Object might have been deleted, by the time heal
-		// was attempted we ignore this object an move on.
-		if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
-			return errFnHealFromAPIErr(h.ctx, err)
-		}
+		return errFnHealFromAPIErr(h.ctx, err)
 	}
 	return nil
 }
@@ -963,5 +886,9 @@ func (h *healSequence) healObject(bucket, object, versionID string) error {
 		object:    object,
 		versionID: versionID,
 	}, madmin.HealItemObject)
+
+	// Wait and proceed if there are active requests
+	waitForLowHTTPReq()
+
 	return err
 }
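The reworked `queueHealTask` above sends directly on `globalBackgroundHealRoutine.tasks` inside a `select`, so a cancelled heal sequence can never block forever on a full queue. A standalone sketch of that enqueue-or-cancel pattern (names are stand-ins, not MinIO's):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type task struct{ id int }

// enqueue blocks trying to hand the task to a bounded channel, but gives up
// cleanly if the caller's context is cancelled first.
func enqueue(ctx context.Context, tasks chan<- task, t task) error {
	select {
	case tasks <- t: // a background worker drains this channel
		return nil
	case <-ctx.Done(): // caller cancelled; do not block forever
		return ctx.Err()
	}
}

func main() {
	tasks := make(chan task, 1) // capacity 1, and nobody is draining it here
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	fmt.Println(enqueue(ctx, tasks, task{id: 1})) // <nil>: buffer had room
	fmt.Println(enqueue(ctx, tasks, task{id: 2})) // context deadline exceeded: buffer full
}
```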
@@ -24,6 +24,7 @@ import (
 	"github.com/klauspost/compress/gzhttp"
 	"github.com/klauspost/compress/gzip"
 	"github.com/minio/madmin-go"
+	"github.com/minio/minio/internal/logger"
 )
 
 const (
@@ -48,16 +49,12 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		adminAPIVersionPrefix,
 	}
 
-	gz := func(h http.HandlerFunc) http.HandlerFunc {
-		return h
+	gz, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
+	if err != nil {
+		// Static params, so this is very unlikely.
+		logger.Fatal(err, "Unable to initialize server")
 	}
 
-	wrapper, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
-	if err == nil {
-		gz = func(h http.HandlerFunc) http.HandlerFunc {
-			return wrapper(h).(http.HandlerFunc)
-		}
-	}
 	for _, adminVersion := range adminVersions {
 		// Restart and stop MinIO service.
 		adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(gz(httpTraceAll(adminAPI.ServiceHandler))).Queries("action", "{action:.*}")
@@ -207,7 +204,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/speedtest").HandlerFunc(httpTraceHdrs(adminAPI.SpeedtestHandler))
 
 		// HTTP Trace
-		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(gz(adminAPI.TraceHandler))
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(gz(http.HandlerFunc(adminAPI.TraceHandler)))
 
 		// Console Logs
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/log").HandlerFunc(gz(httpTraceAll(adminAPI.ConsoleLogHandler)))
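Both routers now build every route as a composition like `gz(httpTraceAll(handler))`. A standalone sketch of that middleware-chaining idea using only the standard library (`gzipMiddleware` is a much-simplified stand-in for gzhttp's wrapper, and `trace` for httpTraceAll):

```go
package main

import (
	"compress/gzip"
	"fmt"
	"net/http"
	"strings"
)

// trace logs the request, then delegates to the wrapped handler.
func trace(h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Printf("%s %s\n", r.Method, r.URL.Path)
		h(w, r)
	}
}

type gzipResponseWriter struct {
	http.ResponseWriter
	zw *gzip.Writer
}

func (g gzipResponseWriter) Write(b []byte) (int, error) { return g.zw.Write(b) }

// gzipMiddleware compresses the response when the client accepts gzip.
func gzipMiddleware(h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
			h(w, r)
			return
		}
		w.Header().Set("Content-Encoding", "gzip")
		zw := gzip.NewWriter(w)
		defer zw.Close()
		h(gzipResponseWriter{ResponseWriter: w, zw: zw}, r)
	}
}

func main() {
	handler := func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("ok")) }
	// Same shape as gz(httpTraceAll(h)) in the router hunks above.
	http.HandleFunc("/log", gzipMiddleware(trace(handler)))
	http.ListenAndServe("localhost:8080", nil)
}
```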
@@ -187,7 +187,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
 	if objInfo.IsRemote() {
 		// Check if object is being restored. For more information on x-amz-restore header see
 		// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseSyntax
-		w.Header()[xhttp.AmzStorageClass] = []string{objInfo.TransitionTier}
+		w.Header()[xhttp.AmzStorageClass] = []string{objInfo.TransitionedObject.Tier}
 	}
 
 	if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {

@@ -29,6 +29,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/minio/minio/internal/crypto"
 	"github.com/minio/minio/internal/handlers"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
@@ -36,11 +37,11 @@ import (
 
 const (
 	// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
-	iso8601TimeFormat = "2006-01-02T15:04:05.000Z"                     // Reply date format with nanosecond precision.
-	maxObjectList     = metacacheBlockSize - (metacacheBlockSize / 10) // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
-	maxDeleteList     = 10000                                          // Limit number of objects deleted in a delete call.
-	maxUploadsList    = 10000                                          // Limit number of uploads in a listUploadsResponse.
-	maxPartsList      = 10000                                          // Limit number of parts in a listPartsResponse.
+	iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
+	maxObjectList     = 1000                       // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
+	maxDeleteList     = 10000                      // Limit number of objects deleted in a delete call.
+	maxUploadsList    = 10000                      // Limit number of uploads in a listUploadsResponse.
+	maxPartsList      = 10000                      // Limit number of parts in a listPartsResponse.
 )
 
 // LocationResponse - format for location response.
@@ -570,6 +571,14 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
 		content.Owner = owner
 		if metadata {
 			content.UserMetadata = make(StringMap)
+			switch kind, _ := crypto.IsEncrypted(object.UserDefined); kind {
+			case crypto.S3:
+				content.UserMetadata[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionAES
+			case crypto.S3KMS:
+				content.UserMetadata[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionKMS
+			case crypto.SSEC:
+				content.UserMetadata[xhttp.AmzServerSideEncryptionCustomerAlgorithm] = xhttp.AmzEncryptionAES
+			}
 			for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) {
 				if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
 					// Do not need to send any internal metadata
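The listing change above maps each object's detected encryption kind to the matching S3 response headers. A standalone sketch of that mapping with stand-in types (the real code derives the kind from `crypto.IsEncrypted(object.UserDefined)`; the header names and values below are the standard S3 ones):

```go
package main

import "fmt"

type sseKind int

const (
	sseNone  sseKind = iota
	sseS3            // SSE-S3: server-managed keys
	sseS3KMS         // SSE-KMS: keys held in an external KMS
	sseC             // SSE-C: client-provided keys
)

// sseHeaders returns the response headers a listing entry advertises for
// the given encryption kind, mirroring the switch added in the hunk above.
func sseHeaders(kind sseKind) map[string]string {
	h := map[string]string{}
	switch kind {
	case sseS3:
		h["X-Amz-Server-Side-Encryption"] = "AES256"
	case sseS3KMS:
		h["X-Amz-Server-Side-Encryption"] = "aws:kms"
	case sseC:
		h["X-Amz-Server-Side-Encryption-Customer-Algorithm"] = "AES256"
	}
	return h
}

func main() {
	fmt.Println(sseHeaders(sseS3))  // map[X-Amz-Server-Side-Encryption:AES256]
	fmt.Println(sseHeaders(sseC))   // map[X-Amz-Server-Side-Encryption-Customer-Algorithm:AES256]
	fmt.Println(sseHeaders(sseNone)) // map[]
}
```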
@@ -25,6 +25,7 @@ import (
 	"github.com/gorilla/mux"
 	"github.com/klauspost/compress/gzhttp"
 	xhttp "github.com/minio/minio/internal/http"
+	"github.com/minio/minio/internal/logger"
 	"github.com/minio/pkg/wildcard"
 	"github.com/rs/cors"
 )
@@ -208,15 +209,10 @@ func registerAPIRouter(router *mux.Router) {
 	}
 	routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
 
-	gz := func(h http.HandlerFunc) http.HandlerFunc {
-		return h
-	}
-
-	wrapper, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
-	if err == nil {
-		gz = func(h http.HandlerFunc) http.HandlerFunc {
-			return wrapper(h).(http.HandlerFunc)
-		}
+	gz, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
+	if err != nil {
+		// Static params, so this is very unlikely.
+		logger.Fatal(err, "Unable to initialize server")
 	}
 
 	for _, router := range routers {
@@ -245,7 +241,7 @@ func registerAPIRouter(router *mux.Router) {
 		collectAPIStats("listobjectparts", maxClients(gz(httpTraceAll(api.ListObjectPartsHandler))))).Queries("uploadId", "{uploadId:.*}")
 	// CompleteMultipartUpload
 	router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
-		collectAPIStats("completemutipartupload", maxClients(gz(httpTraceAll(api.CompleteMultipartUploadHandler))))).Queries("uploadId", "{uploadId:.*}")
+		collectAPIStats("completemultipartupload", maxClients(gz(httpTraceAll(api.CompleteMultipartUploadHandler))))).Queries("uploadId", "{uploadId:.*}")
 	// NewMultipartUpload
 	router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
 		collectAPIStats("newmultipartupload", maxClients(gz(httpTraceAll(api.NewMultipartUploadHandler))))).Queries("uploads", "")

@ -27,6 +27,7 @@ import (
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"sync/atomic"

@ -61,13 +62,13 @@ func isRequestSignatureV2(r *http.Request) bool {

// Verify if request has AWS PreSign Version '4'.
func isRequestPresignedSignatureV4(r *http.Request) bool {
	_, ok := r.URL.Query()[xhttp.AmzCredential]
	_, ok := r.Form[xhttp.AmzCredential]
	return ok
}

// Verify request has AWS PreSign Version '2'.
func isRequestPresignedSignatureV2(r *http.Request) bool {
	_, ok := r.URL.Query()[xhttp.AmzAccessKeyID]
	_, ok := r.Form[xhttp.AmzAccessKeyID]
	return ok
}

@ -102,6 +103,14 @@ const (

// Get request authentication type.
func getRequestAuthType(r *http.Request) authType {
	if r.URL != nil {
		var err error
		r.Form, err = url.ParseQuery(r.URL.RawQuery)
		if err != nil {
			logger.LogIf(r.Context(), err)
			return authTypeUnknown
		}
	}
	if isRequestSignatureV2(r) {
		return authTypeSignedV2
	} else if isRequestPresignedSignatureV2(r) {

@ -116,7 +125,7 @@ func getRequestAuthType(r *http.Request) authType {
		return authTypeJWT
	} else if isRequestPostPolicySignatureV4(r) {
		return authTypePostPolicy
	} else if _, ok := r.URL.Query()[xhttp.Action]; ok {
	} else if _, ok := r.Form[xhttp.Action]; ok {
		return authTypeSTS
	} else if _, ok := r.Header[xhttp.Authorization]; !ok {
		return authTypeAnonymous

@ -146,12 +155,7 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
		return cred, nil, owner, s3Err
	}

	claims, s3Err := checkClaimsFromToken(r, cred)
	if s3Err != ErrNone {
		return cred, nil, owner, s3Err
	}

	return cred, claims, owner, ErrNone
	return cred, cred.Claims, owner, ErrNone
}

// checkAdminRequestAuth checks for authentication and authorization for the incoming

@ -183,7 +187,7 @@ func getSessionToken(r *http.Request) (token string) {
	if token != "" {
		return token
	}
	return r.URL.Query().Get(xhttp.AmzSecurityToken)
	return r.Form.Get(xhttp.AmzSecurityToken)
}

// Fetch claims in the security token returned by the client, doesn't return

@ -309,12 +313,6 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
		return cred, owner, s3Err
	}

	var claims map[string]interface{}
	claims, s3Err = checkClaimsFromToken(r, cred)
	if s3Err != ErrNone {
		return cred, owner, s3Err
	}

	// LocationConstraint is valid only for CreateBucketAction.
	var locationConstraint string
	if action == policy.CreateBucketAction {

@ -379,10 +377,10 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
			Groups:          cred.Groups,
			Action:          iampolicy.Action(action),
			BucketName:      bucketName,
			ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
			ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
			ObjectName:      objectName,
			IsOwner:         owner,
			Claims:          claims,
			Claims:          cred.Claims,
		}) {
			// Request is allowed return the appropriate access key.
			return cred, owner, ErrNone

@ -396,10 +394,10 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
			Groups:          cred.Groups,
			Action:          iampolicy.ListBucketAction,
			BucketName:      bucketName,
			ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
			ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
			ObjectName:      objectName,
			IsOwner:         owner,
			Claims:          claims,
			Claims:          cred.Claims,
		}) {
			// Request is allowed return the appropriate access key.
			return cred, owner, ErrNone

@ -444,7 +442,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
	// Do not verify 'X-Amz-Content-Sha256' if skipSHA256.
	var contentSHA256 []byte
	if skipSHA256 := skipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) {
		if sha256Sum, ok := r.URL.Query()[xhttp.AmzContentSha256]; ok && len(sha256Sum) > 0 {
		if sha256Sum, ok := r.Form[xhttp.AmzContentSha256]; ok && len(sha256Sum) > 0 {
			contentSHA256, err = hex.DecodeString(sha256Sum[0])
			if err != nil {
				return ErrContentSHA256Mismatch

@ -489,20 +487,7 @@ func setAuthHandler(h http.Handler) http.Handler {
	// handler for validating incoming authorization headers.
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		aType := getRequestAuthType(r)
		if isSupportedS3AuthType(aType) {
			// Let top level caller validate for anonymous and known signed requests.
			h.ServeHTTP(w, r)
			return
		} else if aType == authTypeJWT {
			// Validate Authorization header if its valid for JWT request.
			if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
				w.WriteHeader(http.StatusUnauthorized)
				w.Write([]byte(authErr.Error()))
				return
			}
			h.ServeHTTP(w, r)
			return
		} else if aType == authTypeSTS {
		if isSupportedS3AuthType(aType) || aType == authTypeJWT || aType == authTypeSTS {
			h.ServeHTTP(w, r)
			return
		}

@ -511,75 +496,39 @@ func setAuthHandler(h http.Handler) http.Handler {
	})
}

func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, map[string]interface{}, APIErrorCode) {
func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, APIErrorCode) {
	var cred auth.Credentials
	var owner bool
	var s3Err APIErrorCode
	switch atype {
	case authTypeUnknown, authTypeStreamingSigned:
		return cred, owner, nil, ErrSignatureVersionNotSupported
		return cred, owner, ErrSignatureVersionNotSupported
	case authTypeSignedV2, authTypePresignedV2:
		if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
			return cred, owner, nil, s3Err
			return cred, owner, s3Err
		}
		cred, owner, s3Err = getReqAccessKeyV2(r)
	case authTypePresigned, authTypeSigned:
		region := globalServerRegion
		if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
			return cred, owner, nil, s3Err
			return cred, owner, s3Err
		}
		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
	}
	if s3Err != ErrNone {
		return cred, owner, nil, s3Err
		return cred, owner, s3Err
	}

	claims, s3Err := checkClaimsFromToken(r, cred)
	if s3Err != ErrNone {
		return cred, owner, nil, s3Err
	}

	return cred, owner, claims, ErrNone
	return cred, owner, ErrNone
}

func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool, claims map[string]interface{}) (s3Err APIErrorCode) {
func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool) (s3Err APIErrorCode) {
	var retSet bool
	if cred.AccessKey == "" {
		conditions := getConditionValues(r, "", "", nil)
		conditions["object-lock-mode"] = []string{string(retMode)}
		conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
		if retDays > 0 {
			conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
		}
		if retMode == objectlock.RetGovernance && byPassSet {
			byPassSet = globalPolicySys.IsAllowed(policy.Args{
				AccountName:     cred.AccessKey,
				Groups:          cred.Groups,
				Action:          policy.BypassGovernanceRetentionAction,
				BucketName:      bucketName,
				ConditionValues: conditions,
				IsOwner:         false,
				ObjectName:      objectName,
			})
		}
		if globalPolicySys.IsAllowed(policy.Args{
			AccountName:     cred.AccessKey,
			Groups:          cred.Groups,
			Action:          policy.PutObjectRetentionAction,
			BucketName:      bucketName,
			ConditionValues: conditions,
			IsOwner:         false,
			ObjectName:      objectName,
		}) {
			retSet = true
		}
		if byPassSet || retSet {
			return ErrNone
		}
		return ErrAccessDenied
	}

	conditions := getConditionValues(r, "", cred.AccessKey, claims)
	conditions := getConditionValues(r, "", cred.AccessKey, cred.Claims)
	conditions["object-lock-mode"] = []string{string(retMode)}
	conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
	if retDays > 0 {

@ -594,7 +543,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
			ObjectName:      objectName,
			ConditionValues: conditions,
			IsOwner:         owner,
			Claims:          claims,
			Claims:          cred.Claims,
		})
	}
	if globalIAMSys.IsAllowed(iampolicy.Args{

@ -605,7 +554,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
		ConditionValues: conditions,
		ObjectName:      objectName,
		IsOwner:         owner,
		Claims:          claims,
		Claims:          cred.Claims,
	}) {
		retSet = true
	}

@ -634,11 +583,6 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
		return s3Err
	}

	claims, s3Err := checkClaimsFromToken(r, cred)
	if s3Err != ErrNone {
		return s3Err
	}

	if cred.AccessKey != "" {
		logger.GetReqInfo(ctx).AccessKey = cred.AccessKey
	}

@ -672,10 +616,10 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
		Groups:          cred.Groups,
		Action:          action,
		BucketName:      bucketName,
		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
		ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
		ObjectName:      objectName,
		IsOwner:         owner,
		Claims:          claims,
		Claims:          cred.Claims,
	}) {
		return ErrNone
	}
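The broader change in this file replaces per-call `r.URL.Query()` parsing with a single `url.ParseQuery` into `r.Form` at the top of `getRequestAuthType`, so every subsequent auth-type check is a plain map lookup instead of a reparse of the raw query. A small sketch of the idea, with a hypothetical stand-in for the presign check:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// hasAmzCredential is a stand-in for checks like isRequestPresignedSignatureV4.
// Reading r.Form is a map lookup; r.URL.Query() would reparse RawQuery each call.
func hasAmzCredential(r *http.Request) bool {
	_, ok := r.Form["X-Amz-Credential"]
	return ok
}

func main() {
	r, _ := http.NewRequest(http.MethodGet, "/bucket/object?X-Amz-Credential=abc", nil)
	// Parse once up front, as getRequestAuthType now does.
	form, err := url.ParseQuery(r.URL.RawQuery)
	if err != nil {
		fmt.Println("unparseable query, treat auth type as unknown")
		return
	}
	r.Form = form
	fmt.Println(hasAmzCredential(r)) // true
}
```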
@ -38,6 +38,7 @@ func TestGetRequestAuthType(t *testing.T) {
		req   *http.Request
		authT authType
	}
	nopCloser := ioutil.NopCloser(io.LimitReader(&nullReader{}, 1024))
	testCases := []testCase{
		// Test case - 1
		// Check for generic signature v4 header.

@ -54,6 +55,7 @@ func TestGetRequestAuthType(t *testing.T) {
					"Content-Encoding": []string{streamingContentEncoding},
				},
				Method: http.MethodPut,
				Body:   nopCloser,
			},
			authT: authTypeStreamingSigned,
		},

@ -113,6 +115,7 @@ func TestGetRequestAuthType(t *testing.T) {
					"Content-Type": []string{"multipart/form-data"},
				},
				Method: http.MethodPost,
				Body:   nopCloser,
			},
			authT: authTypePostPolicy,
		},

@ -220,6 +223,7 @@ func TestIsRequestPresignedSignatureV2(t *testing.T) {
		q := inputReq.URL.Query()
		q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
		inputReq.URL.RawQuery = q.Encode()
		inputReq.ParseForm()

		actualResult := isRequestPresignedSignatureV2(inputReq)
		if testCase.expectedResult != actualResult {

@ -254,6 +258,7 @@ func TestIsRequestPresignedSignatureV4(t *testing.T) {
		q := inputReq.URL.Query()
		q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
		inputReq.URL.RawQuery = q.Encode()
		inputReq.ParseForm()

		actualResult := isRequestPresignedSignatureV4(inputReq)
		if testCase.expectedResult != actualResult {
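The tests gain `inputReq.ParseForm()` because the functions under test now read `r.Form`, which stays nil until something parses the request. A minimal sketch of why the extra call matters after mutating `RawQuery`:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "http://example.com/bucket", nil)
	q := req.URL.Query()
	q.Add("X-Amz-Credential", "value")
	req.URL.RawQuery = q.Encode()

	_, before := req.Form["X-Amz-Credential"] // false: Form not populated yet
	req.ParseForm()                           // parses RawQuery (and the body for POST forms)
	_, after := req.Form["X-Amz-Credential"] // true

	fmt.Println(before, after) // false true
}
```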
@ -19,10 +19,9 @@ package cmd

import (
	"context"
	"time"
	"runtime"

	"github.com/minio/madmin-go"
	"github.com/minio/minio/internal/logger"
)

// healTask represents what to heal along with options

@ -35,7 +34,7 @@ type healTask struct {
	versionID string
	opts      madmin.HealOpts
	// Healing response will be sent here
	responseCh chan healResult
	respCh chan healResult
}

// healResult represents a healing result with a possible error

@ -46,54 +45,37 @@ type healResult struct {

// healRoutine receives heal tasks, to heal buckets, objects and format.json
type healRoutine struct {
	tasks  chan healTask
	doneCh chan struct{}
	tasks   chan healTask
	workers int
}

// Add a new task in the tasks queue
func (h *healRoutine) queueHealTask(task healTask) {
	h.tasks <- task
}

func waitForLowHTTPReq(maxIO int, maxWait time.Duration) {
	// No need to wait run at full speed.
	if maxIO <= 0 {
		return
	}

	// At max 10 attempts to wait with 100 millisecond interval before proceeding
	waitTick := 100 * time.Millisecond

func systemIO() int {
	// Bucket notification and http trace are not costly, it is okay to ignore them
	// while counting the number of concurrent connections
	maxIOFn := func() int {
		return maxIO + int(globalHTTPListen.NumSubscribers()) + int(globalTrace.NumSubscribers())
	return int(globalHTTPListen.NumSubscribers()) + int(globalTrace.NumSubscribers())
	}
}

func waitForLowHTTPReq() {
	var currentIO func() int
	if httpServer := newHTTPServerFn(); httpServer != nil {
		currentIO = httpServer.GetRequestCount
	}

	tmpMaxWait := maxWait
	if httpServer := newHTTPServerFn(); httpServer != nil {
		// Any requests in progress, delay the heal.
		for httpServer.GetRequestCount() >= maxIOFn() {
			if tmpMaxWait > 0 {
				if tmpMaxWait < waitTick {
					time.Sleep(tmpMaxWait)
				} else {
					time.Sleep(waitTick)
				}
				tmpMaxWait = tmpMaxWait - waitTick
			}
			if tmpMaxWait <= 0 {
				if intDataUpdateTracker.debug {
					logger.Info("waitForLowHTTPReq: waited max %s, resuming", maxWait)
				}
				break
			}
		}
	globalHealConfig.Wait(currentIO, systemIO)
}

func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
	// Run the background healer
	globalBackgroundHealRoutine = newHealRoutine()
	for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
		go globalBackgroundHealRoutine.AddWorker(ctx, objAPI)
	}

	globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
}

// Wait for heal requests and process them
func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {
	for {
		select {
		case task, ok := <-h.tasks:

@ -105,6 +87,7 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
			var err error
			switch task.bucket {
			case nopHeal:
				task.respCh <- healResult{err: errSkipFile}
				continue
			case SlashSeparator:
				res, err = healDiskFormat(ctx, objAPI, task.opts)

@ -115,10 +98,8 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
				res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
			}
		}
			task.responseCh <- healResult{result: res, err: err}

		case <-h.doneCh:
			return
			task.respCh <- healResult{result: res, err: err}
		case <-ctx.Done():
			return
		}

@ -126,9 +107,13 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
}

func newHealRoutine() *healRoutine {
	workers := runtime.GOMAXPROCS(0) / 2
	if workers == 0 {
		workers = 4
	}
	return &healRoutine{
		tasks:  make(chan healTask),
		doneCh: make(chan struct{}),
		tasks:   make(chan healTask),
		workers: workers,
	}
}
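The heal routine thus moves from a single `run` goroutine plus a `doneCh` to a worker pool sized from `runtime.GOMAXPROCS(0)/2` (minimum 4) whose workers exit on context cancellation. A generic sketch of that shape (the task type and names are hypothetical):

```go
package main

import (
	"context"
	"fmt"
	"runtime"
	"sync"
)

type task struct{ id int }

func worker(ctx context.Context, tasks <-chan task, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case t, ok := <-tasks:
			if !ok {
				return
			}
			fmt.Println("healed", t.id)
		case <-ctx.Done(): // context cancellation replaces the old doneCh
			return
		}
	}
}

func main() {
	workers := runtime.GOMAXPROCS(0) / 2
	if workers == 0 {
		workers = 4
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tasks := make(chan task)
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go worker(ctx, tasks, &wg)
	}
	for i := 0; i < 10; i++ {
		tasks <- task{id: i}
	}
	close(tasks)
	wg.Wait()
}
```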
@ -308,14 +308,6 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {

}

func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
	// Run the background healer
	globalBackgroundHealRoutine = newHealRoutine()
	go globalBackgroundHealRoutine.run(ctx, objAPI)

	globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
}

// monitorLocalDisksAndHeal - ensures that detected new disks are healed
//  1. Only the concerned erasure set will be listed and healed
//  2. Only the node hosting the disk is responsible to perform the heal

@ -337,10 +329,10 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
		healDisks := globalBackgroundHealState.getHealLocalDiskEndpoints()
		if len(healDisks) > 0 {
			// Reformat disks
			bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
			bgSeq.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)

			// Ensure that reformatting disks is finished
			bgSeq.sourceCh <- healSource{bucket: nopHeal}
			bgSeq.queueHealTask(healSource{bucket: nopHeal}, madmin.HealItemMetadata)

			logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
				len(healDisks)))
@ -57,6 +57,10 @@ func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
		b.closeWithErr(err)
		return n, err
	}
	if n != len(p) {
		err = io.ErrShortWrite
		b.closeWithErr(err)
	}
	return n, err
}
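The added branch enforces the `io.Writer` contract: returning `n < len(p)` without an error is illegal, so the writer reports `io.ErrShortWrite` and tears itself down. A tiny writer showing the convention:

```go
package main

import (
	"fmt"
	"io"
)

// truncWriter accepts at most max bytes per Write, to exercise the short-write path.
type truncWriter struct{ max int }

func (w *truncWriter) Write(p []byte) (int, error) {
	n := len(p)
	if n > w.max {
		n = w.max
	}
	if n != len(p) {
		// Per the io.Writer contract, n < len(p) must come with an error.
		return n, io.ErrShortWrite
	}
	return n, nil
}

func main() {
	_, err := (&truncWriter{max: 4}).Write([]byte("hello"))
	fmt.Println(err) // short write
}
```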
@ -24,6 +24,7 @@ import (
	"io"
	"net/http"
	"net/url"
	"reflect"
	"runtime"
	"time"

@ -32,6 +33,7 @@ import (
	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/minio/internal/rest"
	"github.com/minio/pkg/env"
)

const (

@ -52,8 +54,8 @@ type bootstrapRESTServer struct{}
// ServerSystemConfig - captures information about server configuration.
type ServerSystemConfig struct {
	MinioPlatform  string
	MinioRuntime   string
	MinioEndpoints EndpointServerPools
	MinioEnv       map[string]string
}

// Diff - returns error on first difference found in two configs.

@ -82,15 +84,35 @@ func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
				s2.MinioEndpoints[i].Endpoints[j])
			}
		}

	}
	if !reflect.DeepEqual(s1.MinioEnv, s2.MinioEnv) {
		return fmt.Errorf("Expected same MINIO_ environment variables and values")
	}
	return nil
}

var skipEnvs = map[string]struct{}{
	"MINIO_OPTS":        {},
	"MINIO_CERT_PASSWD": {},
}

func getServerSystemCfg() ServerSystemConfig {
	envs := env.List("MINIO_")
	envValues := make(map[string]string, len(envs))
	for _, envK := range envs {
		// Skip environment variables that are allowed to be
		// configured differently on each node; add to the
		// skipEnvs map if more such variables exist.
		if _, ok := skipEnvs[envK]; ok {
			continue
		}
		envValues[envK] = env.Get(envK, "")
	}
	return ServerSystemConfig{
		MinioPlatform:  fmt.Sprintf("OS: %s | Arch: %s", runtime.GOOS, runtime.GOARCH),
		MinioEndpoints: globalEndpoints,
		MinioEnv:       envValues,
	}
}

@ -188,7 +210,9 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
		// after 5 retries start logging that servers are not reachable yet
		if retries >= 5 {
			logger.Info(fmt.Sprintf("Waiting for at least %d remote servers to be online for bootstrap check", len(clnts)/2))
			logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
			if len(offlineEndpoints) > 0 {
				logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
			}
			retries = 0 // reset to log again after 5 retries.
		}
		offlineEndpoints = nil
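With `MinioEnv` in the config, the bootstrap `Diff` can catch nodes started with inconsistent MINIO_* environments while tolerating a whitelist of values that may legitimately differ per node. A sketch of that comparison (the skip set and helper are illustrative):

```go
package main

import (
	"fmt"
	"reflect"
)

var skip = map[string]struct{}{
	"MINIO_OPTS":        {},
	"MINIO_CERT_PASSWD": {},
}

// filtered drops whitelisted keys before comparing node configs.
func filtered(env map[string]string) map[string]string {
	out := make(map[string]string, len(env))
	for k, v := range env {
		if _, ok := skip[k]; ok {
			continue
		}
		out[k] = v
	}
	return out
}

func main() {
	node1 := map[string]string{"MINIO_ROOT_USER": "minio", "MINIO_OPTS": "-v"}
	node2 := map[string]string{"MINIO_ROOT_USER": "minio"}
	// Equal once the per-node values are filtered out.
	fmt.Println(reflect.DeepEqual(filtered(node1), filtered(node2))) // true
}
```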
@ -19,7 +19,6 @@ package cmd

import (
	"bytes"
	"crypto/subtle"
	"encoding/base64"
	"encoding/json"
	"encoding/xml"

@ -246,7 +245,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
		return
	}

	prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode := getBucketMultipartResources(r.URL.Query())
	prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode := getBucketMultipartResources(r.Form)
	if errCode != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
		return

@ -345,9 +344,6 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
	// Set delimiter value for "s3:delimiter" policy conditionals.
	r.Header.Set("delimiter", SlashSeparator)

	// err will be nil here as we already called this function
	// earlier in this request.
	claims, _ := getClaimsFromToken(getSessionToken(r))
	n := 0
	// Use the following trick to filter in place
	// https://github.com/golang/go/wiki/SliceTricks#filter-in-place

@ -357,10 +353,10 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
			Groups:          cred.Groups,
			Action:          iampolicy.ListBucketAction,
			BucketName:      bucketInfo.Name,
			ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
			ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
			IsOwner:         owner,
			ObjectName:      "",
			Claims:          claims,
			Claims:          cred.Claims,
		}) {
			bucketsInfo[n] = bucketInfo
			n++

@ -454,6 +450,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
	if api.CacheAPI() != nil {
		getObjectInfoFn = api.CacheAPI().GetObjectInfo
	}

	var (
		hasLockEnabled, replicateSync bool
		goi                           ObjectInfo

@ -464,6 +461,9 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
		hasLockEnabled = true
	}

	versioned := globalBucketVersioningSys.Enabled(bucket)
	suspended := globalBucketVersioningSys.Suspended(bucket)

	dErrs := make([]DeleteError, len(deleteObjects.Objects))
	oss := make([]*objSweeper, len(deleteObjects.Objects))
	for index, object := range deleteObjects.Objects {

@ -495,16 +495,24 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
			}
		}

		oss[index] = newObjSweeper(bucket, object.ObjectName).WithVersion(multiDelete(object))
		// Mutations of objects on versioning suspended buckets
		// affect its null version. Through opts below we select
		// the null version's remote object to delete if
		// transitioned.
		opts := oss[index].GetOpts()
		goi, gerr = getObjectInfoFn(ctx, bucket, object.ObjectName, opts)
		if gerr == nil {
			oss[index].SetTransitionState(goi)
		opts := ObjectOptions{
			VersionID:        object.VersionID,
			Versioned:        versioned,
			VersionSuspended: suspended,
		}

		if replicateDeletes || object.VersionID != "" && hasLockEnabled || !globalTierConfigMgr.Empty() {
			if !globalTierConfigMgr.Empty() && object.VersionID == "" && opts.VersionSuspended {
				opts.VersionID = nullVersionID
			}
			goi, gerr = getObjectInfoFn(ctx, bucket, object.ObjectName, opts)
		}

		if !globalTierConfigMgr.Empty() {
			oss[index] = newObjSweeper(bucket, object.ObjectName).WithVersion(opts.VersionID).WithVersioning(versioned, suspended)
			oss[index].SetTransitionState(goi.TransitionedObject)
		}

		if replicateDeletes {
			replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
				ObjectName: object.ObjectName,

@ -526,18 +534,16 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
				}
			}
		}
		if object.VersionID != "" {
			if hasLockEnabled {
				if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, goi, gerr); apiErrCode != ErrNone {
					apiErr := errorCodes.ToAPIErr(apiErrCode)
					dErrs[index] = DeleteError{
						Code:      apiErr.Code,
						Message:   apiErr.Description,
						Key:       object.ObjectName,
						VersionID: object.VersionID,
					}
					continue
		if object.VersionID != "" && hasLockEnabled {
			if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, goi, gerr); apiErrCode != ErrNone {
				apiErr := errorCodes.ToAPIErr(apiErrCode)
				dErrs[index] = DeleteError{
					Code:      apiErr.Code,
					Message:   apiErr.Description,
					Key:       object.ObjectName,
					VersionID: object.VersionID,
				}
				continue
			}
		}

@ -559,8 +565,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

	deleteList := toNames(objectsToDelete)
	dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
		Versioned:        globalBucketVersioningSys.Enabled(bucket),
		VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
		Versioned:        versioned,
		VersionSuspended: suspended,
	})
	deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
	for i := range errs {

@ -622,16 +628,12 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

	}

	// Clean up transitioned objects from remote tier
	for _, os := range oss {
		if os == nil { // skip objects that weren't deleted due to invalid versionID etc.
			continue
		}
		logger.LogIf(ctx, os.Sweep())
	}

	// Notify deleted event for objects.
	for _, dobj := range deletedObjects {
		if dobj.ObjectName == "" {
			continue
		}

		eventName := event.ObjectRemovedDelete
		objInfo := ObjectInfo{
			Name: dobj.ObjectName,

@ -654,6 +656,14 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
			Host: handlers.GetSourceIP(r),
		})
	}

	// Clean up transitioned objects from remote tier
	for _, os := range oss {
		if os == nil { // skip objects that weren't deleted due to invalid versionID etc.
			continue
		}
		logger.LogIf(ctx, os.Sweep())
	}
}

// PutBucketHandler - PUT Bucket

@ -899,41 +909,17 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

	// Once signature is validated, check if the user has
	// explicit permissions for the user.
	{
		token := formValues.Get(xhttp.AmzSecurityToken)
		if token != "" && cred.AccessKey == "" {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoAccessKey), r.URL)
			return
		}

		if cred.IsServiceAccount() && token == "" {
			token = cred.SessionToken
		}

		if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidToken), r.URL)
			return
		}

		// Extract claims if any.
		claims, err := getClaimsFromToken(token)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}

		if !globalIAMSys.IsAllowed(iampolicy.Args{
			AccountName:     cred.AccessKey,
			Action:          iampolicy.PutObjectAction,
			ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
			BucketName:      bucket,
			ObjectName:      object,
			IsOwner:         globalActiveCred.AccessKey == cred.AccessKey,
			Claims:          claims,
		}) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
	if !globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     cred.AccessKey,
		Action:          iampolicy.PutObjectAction,
		ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
		BucketName:      bucket,
		ObjectName:      object,
		IsOwner:         globalActiveCred.AccessKey == cred.AccessKey,
		Claims:          cred.Claims,
	}) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
	}

	policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))

@ -1699,7 +1685,7 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	durationStr := r.URL.Query().Get("older-than")
	durationStr := r.Form.Get("older-than")
	var (
		days time.Duration
		err  error
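The `n := 0` counter kept by `ListBucketsHandler` drives the filter-in-place slice idiom the comment links to; since the index bookkeeping is easy to misread, here is the pattern in isolation:

```go
package main

import "fmt"

func main() {
	buckets := []string{"public", "private", "logs", "tmp"}
	allowed := func(b string) bool { return b != "private" }

	// Filter in place: keep a write index n, copy survivors forward,
	// then truncate. No second slice is allocated.
	n := 0
	for _, b := range buckets {
		if allowed(b) {
			buckets[n] = b
			n++
		}
	}
	buckets = buckets[:n]
	fmt.Println(buckets) // [public logs tmp]
}
```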
@ -24,9 +24,9 @@ import (
	"fmt"
	"io"
	"net/http"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/google/uuid"

@ -75,8 +75,9 @@ func NewLifecycleSys() *LifecycleSys {
}

type expiryTask struct {
	objInfo       ObjectInfo
	versionExpiry bool
	objInfo        ObjectInfo
	versionExpiry  bool
	restoredObject bool
}

type expiryState struct {

@ -84,13 +85,18 @@ type expiryState struct {
	expiryCh chan expiryTask
}

func (es *expiryState) queueExpiryTask(oi ObjectInfo, rmVersion bool) {
// PendingTasks returns the number of pending ILM expiry tasks.
func (es *expiryState) PendingTasks() int {
	return len(es.expiryCh)
}

func (es *expiryState) queueExpiryTask(oi ObjectInfo, restoredObject bool, rmVersion bool) {
	select {
	case <-GlobalContext.Done():
		es.once.Do(func() {
			close(es.expiryCh)
		})
	case es.expiryCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion}:
	case es.expiryCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion, restoredObject: restoredObject}:
	default:
	}
}

@ -109,15 +115,26 @@ func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
	globalExpiryState = newExpiryState()
	go func() {
		for t := range globalExpiryState.expiryCh {
			applyExpiryRule(ctx, objectAPI, t.objInfo, false, t.versionExpiry)
			if t.objInfo.TransitionedObject.Status != "" {
				applyExpiryOnTransitionedObject(ctx, objectAPI, t.objInfo, t.restoredObject)
			} else {
				applyExpiryOnNonTransitionedObjects(ctx, objectAPI, t.objInfo, t.versionExpiry)
			}
		}
	}()
}

type transitionState struct {
	once sync.Once
	// add future metrics here
	once         sync.Once
	transitionCh chan ObjectInfo

	ctx        context.Context
	objAPI     ObjectLayer
	mu         sync.Mutex
	numWorkers int
	killCh     chan struct{}

	activeTasks int32
}

func (t *transitionState) queueTransitionTask(oi ObjectInfo) {

@ -132,50 +149,71 @@ func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
}

var (
	globalTransitionState      *transitionState
	globalTransitionConcurrent = runtime.GOMAXPROCS(0) / 2
	globalTransitionState *transitionState
)

func newTransitionState() *transitionState {
	// fix minimum concurrent transition to 1 for single CPU setup
	if globalTransitionConcurrent == 0 {
		globalTransitionConcurrent = 1
	}
func newTransitionState(ctx context.Context, objAPI ObjectLayer) *transitionState {
	return &transitionState{
		transitionCh: make(chan ObjectInfo, 10000),
		ctx:          ctx,
		objAPI:       objAPI,
		killCh:       make(chan struct{}),
	}
}

// addWorker creates a new worker to process tasks
func (t *transitionState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
	// Add a new worker.
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case oi, ok := <-t.transitionCh:
				if !ok {
					return
				}
// PendingTasks returns the number of ILM transition tasks waiting for a worker
// goroutine.
func (t *transitionState) PendingTasks() int {
	return len(globalTransitionState.transitionCh)
}

				if err := transitionObject(ctx, objectAPI, oi); err != nil {
					logger.LogIf(ctx, fmt.Errorf("Transition failed for %s/%s version:%s with %w", oi.Bucket, oi.Name, oi.VersionID, err))
				}
// ActiveTasks returns the number of active (ongoing) ILM transition tasks.
func (t *transitionState) ActiveTasks() int {
	return int(atomic.LoadInt32(&t.activeTasks))
}

// worker waits for transition tasks
func (t *transitionState) worker(ctx context.Context, objectAPI ObjectLayer) {
	for {
		select {
		case <-t.killCh:
			return
		case <-ctx.Done():
			return
		case oi, ok := <-t.transitionCh:
			if !ok {
				return
			}
			atomic.AddInt32(&t.activeTasks, 1)
			if err := transitionObject(ctx, objectAPI, oi); err != nil {
				logger.LogIf(ctx, fmt.Errorf("Transition failed for %s/%s version:%s with %w", oi.Bucket, oi.Name, oi.VersionID, err))
			}
			atomic.AddInt32(&t.activeTasks, -1)
		}
	}()
}
	}
}

// UpdateWorkers at the end of this function leaves n goroutines waiting for
// transition tasks
func (t *transitionState) UpdateWorkers(n int) {
	t.mu.Lock()
	defer t.mu.Unlock()

	for t.numWorkers < n {
		go t.worker(t.ctx, t.objAPI)
		t.numWorkers++
	}

	for t.numWorkers > n {
		go func() { t.killCh <- struct{}{} }()
		t.numWorkers--
	}
}

func initBackgroundTransition(ctx context.Context, objectAPI ObjectLayer) {
	if globalTransitionState == nil {
		return
	}

	// Start with globalTransitionConcurrent.
	for i := 0; i < globalTransitionConcurrent; i++ {
		globalTransitionState.addWorker(ctx, objectAPI)
	}
	globalTransitionState = newTransitionState(ctx, objectAPI)
	n := globalAPIConfig.getTransitionWorkers()
	globalTransitionState.UpdateWorkers(n)
}

var errInvalidStorageClass = errors.New("invalid storage class")

@ -214,14 +252,15 @@ func expireTransitionedObject(ctx context.Context, objectAPI ObjectLayer, oi *Ob
	var opts ObjectOptions
	opts.Versioned = globalBucketVersioningSys.Enabled(oi.Bucket)
	opts.VersionID = lcOpts.VersionID
	opts.Expiration = ExpirationOptions{Expire: true}
	switch action {
	case expireObj:
		// When an object is past expiry or when a transitioned object is being
		// deleted, 'mark' the data in the remote tier for delete.
		entry := jentry{
			ObjName:   oi.transitionedObjName,
			VersionID: oi.transitionVersionID,
			TierName:  oi.TransitionTier,
			ObjName:   oi.TransitionedObject.Name,
			VersionID: oi.TransitionedObject.VersionID,
			TierName:  oi.TransitionedObject.Tier,
		}
		if err := globalTierJournal.AddEntry(entry); err != nil {
			logger.LogIf(ctx, err)

@ -294,16 +333,17 @@ func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo)
			Tier: lc.TransitionTier(oi.ToLifecycleOpts()),
			ETag: oi.ETag,
		},
		VersionID: oi.VersionID,
		Versioned: globalBucketVersioningSys.Enabled(oi.Bucket),
		MTime:     oi.ModTime,
		VersionID:        oi.VersionID,
		Versioned:        globalBucketVersioningSys.Enabled(oi.Bucket),
		VersionSuspended: globalBucketVersioningSys.Suspended(oi.Bucket),
		MTime:            oi.ModTime,
	}
	return objectAPI.TransitionObject(ctx, oi.Bucket, oi.Name, opts)
}

// getTransitionedObjectReader returns a reader from the transitioned tier.
func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, oi ObjectInfo, opts ObjectOptions) (gr *GetObjectReader, err error) {
	tgtClient, err := globalTierConfigMgr.getDriver(oi.TransitionTier)
	tgtClient, err := globalTierConfigMgr.getDriver(oi.TransitionedObject.Tier)
	if err != nil {
		return nil, fmt.Errorf("transition storage class not configured")
	}

@ -320,7 +360,7 @@ func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs
		gopts.length = length
	}

	reader, err := tgtClient.Get(ctx, oi.transitionedObjName, remoteVersionID(oi.transitionVersionID), gopts)
	reader, err := tgtClient.Get(ctx, oi.TransitionedObject.Name, remoteVersionID(oi.TransitionedObject.VersionID), gopts)
	if err != nil {
		return nil, err
	}

@ -458,7 +498,7 @@ func (r *RestoreObjectRequest) validate(ctx context.Context, objAPI ObjectLayer)
func postRestoreOpts(ctx context.Context, r *http.Request, bucket, object string) (opts ObjectOptions, err error) {
	versioned := globalBucketVersioningSys.Enabled(bucket)
	versionSuspended := globalBucketVersioningSys.Suspended(bucket)
	vid := strings.TrimSpace(r.URL.Query().Get(xhttp.VersionID))
	vid := strings.TrimSpace(r.Form.Get(xhttp.VersionID))
	if vid != "" && vid != nullVersionID {
		_, err := uuid.Parse(vid)
		if err != nil {

@ -544,7 +584,7 @@ func (fi FileInfo) IsRemote() bool {
// IsRemote returns true if this object version's contents are in its remote
// tier.
func (oi ObjectInfo) IsRemote() bool {
	if oi.TransitionStatus != lifecycle.TransitionComplete {
	if oi.TransitionedObject.Status != lifecycle.TransitionComplete {
		return false
	}
	return !isRestoredObjectOnDisk(oi.UserDefined)

@ -672,7 +712,7 @@ func (oi ObjectInfo) ToLifecycleOpts() lifecycle.ObjectOpts {
		SuccessorModTime: oi.SuccessorModTime,
		RestoreOngoing:   oi.RestoreOngoing,
		RestoreExpires:   oi.RestoreExpires,
		TransitionStatus: oi.TransitionStatus,
		TransitionStatus: oi.TransitionedObject.Status,
		RemoteTiersImmediately: globalDebugRemoteTiersImmediately,
	}
}
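`UpdateWorkers` grows the transition pool by spawning workers and shrinks it by sending on `killCh`, where each send retires exactly one worker. A self-contained sketch of just the resize mechanics (task handling omitted; names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type pool struct {
	mu     sync.Mutex
	n      int
	killCh chan struct{}
}

func (p *pool) worker(id int) {
	<-p.killCh // one receive retires exactly one worker
	fmt.Println("worker", id, "exiting")
}

// UpdateWorkers leaves exactly n workers running, as in transitionState.
func (p *pool) UpdateWorkers(n int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for p.n < n {
		go p.worker(p.n)
		p.n++
	}
	for p.n > n {
		go func() { p.killCh <- struct{}{} }()
		p.n--
	}
}

func main() {
	p := &pool{killCh: make(chan struct{})}
	p.UpdateWorkers(4)
	p.UpdateWorkers(2) // retires two workers
	time.Sleep(100 * time.Millisecond)
}
```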
@ -92,7 +92,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
		return
	}

	urlValues := r.URL.Query()
	urlValues := r.Form

	// Extract all the listBucketVersions query params to their native values.
	prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode := getListBucketObjectVersionsArgs(urlValues)

@ -128,7 +128,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r

// ListObjectsV2MHandler - GET Bucket (List Objects) Version 2 with metadata.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//

@ -153,7 +153,7 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
		return
	}

	urlValues := r.URL.Query()
	urlValues := r.Form

	// Extract all the listObjectsV2 query params to their native values.
	prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues)

@ -195,7 +195,7 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt

// ListObjectsV2Handler - GET Bucket (List Objects) Version 2.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//

@ -220,7 +220,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
		return
	}

	urlValues := r.URL.Query()
	urlValues := r.Form

	// Extract all the listObjectsV2 query params to their native values.
	prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues)

@ -305,7 +305,7 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http

// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//

@ -329,7 +329,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
	}

	// Extract all the litsObjectsV1 query params to their native values.
	prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.URL.Query())
	prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.Form)
	if s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
@ -175,7 +175,7 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke
// For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR
// governance bypass headers are set and user has governance bypass permissions.
// Objects in compliance mode can be overwritten only if retention date is being extended. No mode change is permitted.
func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, objRetention *objectlock.ObjectRetention, cred auth.Credentials, owner bool, claims map[string]interface{}) (ObjectInfo, APIErrorCode) {
func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, objRetention *objectlock.ObjectRetention, cred auth.Credentials, owner bool) (ObjectInfo, APIErrorCode) {
	byPassSet := objectlock.IsObjectLockGovernanceBypassSet(r.Header)
	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {

@ -203,7 +203,7 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket,
		perm := isPutRetentionAllowed(bucket, object,
			days, objRetention.RetainUntilDate.Time,
			objRetention.Mode, byPassSet, r, cred,
			owner, claims)
			owner)
		return oi, perm
	}

@ -211,7 +211,7 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket,
	case objectlock.RetGovernance:
		govPerm := isPutRetentionAllowed(bucket, object, days,
			objRetention.RetainUntilDate.Time, objRetention.Mode,
			byPassSet, r, cred, owner, claims)
			byPassSet, r, cred, owner)
		// Governance mode retention period cannot be shortened, if x-amz-bypass-governance is not set.
		if !byPassSet {
			if objRetention.Mode != objectlock.RetGovernance || objRetention.RetainUntilDate.Before((ret.RetainUntilDate.Time)) {

@ -227,7 +227,7 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket,
		}
		compliancePerm := isPutRetentionAllowed(bucket, object,
			days, objRetention.RetainUntilDate.Time, objRetention.Mode,
			false, r, cred, owner, claims)
			false, r, cred, owner)
		return oi, compliancePerm
	}
	return oi, ErrNone

@ -235,7 +235,7 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket,

	perm := isPutRetentionAllowed(bucket, object,
		days, objRetention.RetainUntilDate.Time,
		objRetention.Mode, byPassSet, r, cred, owner, claims)
		objRetention.Mode, byPassSet, r, cred, owner)
	return oi, perm
}
@ -26,6 +26,7 @@ import (
	"net/http/httptest"
	"reflect"
	"strings"
	"sync"
	"testing"

	"github.com/minio/minio/internal/auth"

@ -93,6 +94,52 @@ func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.Policy {
	}
}

// Wrapper for calling Create Bucket and ensure we get one and only one success.
func TestCreateBucket(t *testing.T) {
	ExecObjectLayerAPITest(t, testCreateBucket, []string{"MakeBucketWithLocation"})
}

// testCreateBucket - Test for calling Create Bucket and ensure we get one and only one success.
func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
	credentials auth.Credentials, t *testing.T) {
	bucketName1 := fmt.Sprintf("%s-1", bucketName)

	const n = 100
	var start = make(chan struct{})
	var ok, errs int
	var wg sync.WaitGroup
	var mu sync.Mutex
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			defer wg.Done()
			// Sync start.
			<-start
			if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil {
				if _, ok := err.(BucketExists); !ok {
					t.Logf("unexpected error: %T: %v", err, err)
					return
				}
				mu.Lock()
				errs++
				mu.Unlock()
				return
			}
			mu.Lock()
			ok++
			mu.Unlock()
		}()
	}
	close(start)
	wg.Wait()
	if ok != 1 {
		t.Fatalf("want 1 ok, got %d", ok)
	}
	if errs != n-1 {
		t.Fatalf("want %d errors, got %d", n-1, errs)
	}
}

// Wrapper for calling Put Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestPutBucketPolicyHandler(t *testing.T) {
	ExecObjectLayerAPITest(t, testPutBucketPolicyHandler, []string{"PutBucketPolicy"})
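The new `TestCreateBucket` releases 100 goroutines off a single closed channel and asserts that exactly one `MakeBucketWithLocation` succeeds while the rest fail with `BucketExists`. The same start-channel idiom against a hypothetical create function:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		created sync.Map
		create  = func(name string) error {
			if _, loaded := created.LoadOrStore(name, true); loaded {
				return fmt.Errorf("bucket exists")
			}
			return nil
		}
	)

	const n = 100
	start := make(chan struct{}) // closed to release all goroutines at once
	var wg sync.WaitGroup
	var mu sync.Mutex
	var ok, errs int

	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			defer wg.Done()
			<-start
			err := create("bucket-1")
			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				errs++
			} else {
				ok++
			}
		}()
	}
	close(start)
	wg.Wait()
	fmt.Println(ok, errs) // 1 99
}
```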
@ -77,10 +77,10 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
		}
	}

	vid := r.URL.Query().Get("versionId")
	vid := r.Form.Get(xhttp.VersionID)
	if vid == "" {
		if u, err := url.Parse(r.Header.Get(xhttp.AmzCopySource)); err == nil {
			vid = u.Query().Get("versionId")
			vid = u.Query().Get(xhttp.VersionID)
		}
	}

@ -143,8 +143,8 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
		}
	}

	var cloneURLValues = url.Values{}
	for k, v := range r.URL.Query() {
	cloneURLValues := make(url.Values, len(r.Form))
	for k, v := range r.Form {
		cloneURLValues[k] = v
	}
@ -28,7 +28,7 @@ import (
	"time"

	"github.com/minio/madmin-go"
	minio "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7"
	miniogo "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/encrypt"
	"github.com/minio/minio-go/v7/pkg/tags"

@ -40,7 +40,6 @@ import (
	"github.com/minio/minio/internal/hash"
	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/logger"
	iampolicy "github.com/minio/pkg/iam/policy"
)

const throttleDeadline = 1 * time.Hour

@ -134,16 +133,7 @@ func getMustReplicateOptions(o ObjectInfo, op replication.Type) mustReplicateOpt

// mustReplicate returns 2 booleans - true if object meets replication criteria and true if replication is to be done in
// a synchronous manner.
func mustReplicate(ctx context.Context, r *http.Request, bucket, object string, opts mustReplicateOptions) (replicate bool, sync bool) {
	if s3Err := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, "", r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
		return
	}
	return mustReplicater(ctx, bucket, object, opts)
}

// mustReplicater returns 2 booleans - true if object meets replication criteria and true if replication is to be done in
// a synchronous manner.
func mustReplicater(ctx context.Context, bucket, object string, mopts mustReplicateOptions) (replicate bool, sync bool) {
func mustReplicate(ctx context.Context, bucket, object string, mopts mustReplicateOptions) (replicate bool, sync bool) {
	if globalIsGateway {
		return replicate, sync
	}

@ -316,6 +306,39 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
		return
	}

	// Lock the object name before starting replication operation.
	// Use separate lock that doesn't collide with regular objects.
	lk := objectAPI.NewNSLock(bucket, "/[replicate]/"+dobj.ObjectName)
	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("failed to get lock for object: %s bucket:%s arn:%s", dobj.ObjectName, bucket, rcfg.RoleArn))
		sendEvent(eventArgs{
			BucketName: bucket,
			Object: ObjectInfo{
				Bucket:       bucket,
				Name:         dobj.ObjectName,
				VersionID:    versionID,
				DeleteMarker: dobj.DeleteMarker,
			},
			Host:      "Internal: [Replication]",
			EventName: event.ObjectReplicationNotTracked,
		})
		return
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)
	// early return if already replicated delete marker for existing object replication
	if dobj.DeleteMarkerVersionID != "" && dobj.OpType == replication.ExistingObjectReplicationType {
		_, err := tgt.StatObject(ctx, rcfg.GetDestination().Bucket, dobj.ObjectName, miniogo.StatObjectOptions{
			VersionID: versionID,
			Internal: miniogo.AdvancedGetOptions{
				ReplicationProxyRequest: "false",
			}})
		if isErrMethodNotAllowed(ErrorRespToObjectError(err, dobj.Bucket, dobj.ObjectName)) {
			return
		}
	}

	rmErr := tgt.RemoveObject(ctx, rcfg.GetDestination().Bucket, dobj.ObjectName, miniogo.RemoveObjectOptions{
		VersionID: versionID,
		Internal: miniogo.AdvancedRemoveOptions{

@ -669,6 +692,24 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
		})
		return
	}

	// Lock the object name before starting replication.
	// Use separate lock that doesn't collide with regular objects.
	lk := objectAPI.NewNSLock(bucket, "/[replicate]/"+object)
	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		sendEvent(eventArgs{
			EventName:  event.ObjectReplicationNotTracked,
			BucketName: bucket,
			Object:     objInfo,
			Host:       "Internal: [Replication]",
		})
		logger.LogIf(ctx, fmt.Errorf("failed to get lock for object: %s bucket:%s arn:%s", object, bucket, cfg.RoleArn))
		return
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	var closeOnDefer bool
	gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
		VersionID: objInfo.VersionID,

@ -726,7 +767,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
	if rtype == replicateNone {
		// object with same VersionID already exists, replication kicked off by
		// PutObject might have completed
		if objInfo.ReplicationStatus == replication.Pending || objInfo.ReplicationStatus == replication.Failed {
		if objInfo.ReplicationStatus == replication.Pending || objInfo.ReplicationStatus == replication.Failed || ri.OpType == replication.ExistingObjectReplicationType {
			// if metadata is not updated for some reason after replication, such as
			// 503 encountered while updating metadata - make sure to set ReplicationStatus
			// as Completed.

@ -742,6 +783,9 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
			for k, v := range objInfo.UserDefined {
				popts.UserDefined[k] = v
			}
			if ri.OpType == replication.ExistingObjectReplicationType {
				popts.UserDefined[xhttp.MinIOReplicationResetStatus] = fmt.Sprintf("%s;%s", UTCNow().Format(http.TimeFormat), ri.ResetID)
			}
			popts.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Completed.String()
			if objInfo.UserTags != "" {
				popts.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags

@ -896,16 +940,16 @@ func replicateObjectWithMultipart(ctx context.Context, c *miniogo.Core, bucket,
	)

	for _, partInfo := range objInfo.Parts {
		hr, err = hash.NewReader(r, partInfo.Size, "", "", partInfo.Size)
		hr, err = hash.NewReader(r, partInfo.ActualSize, "", "", partInfo.ActualSize)
		if err != nil {
			return err
		}
		pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.Size, "", "", opts.ServerSideEncryption)
		pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.ActualSize, "", "", opts.ServerSideEncryption)
		if err != nil {
			return err
		}
		if pInfo.Size != partInfo.Size {
			return fmt.Errorf("Part size mismatch: got %d, want %d", pInfo.Size, partInfo.Size)
		if pInfo.Size != partInfo.ActualSize {
			return fmt.Errorf("Part size mismatch: got %d, want %d", pInfo.Size, partInfo.ActualSize)
		}
		uploadedParts = append(uploadedParts, miniogo.CompletePart{
			PartNumber: pInfo.PartNumber,

@ -1386,7 +1430,7 @@ func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo) bool {
		// Ignore previous replication status when deciding if object can be re-replicated
		objInfo := oi.Clone()
		objInfo.ReplicationStatus = replication.StatusType("")
		replicate, _ = mustReplicater(ctx, oi.Bucket, oi.Name, getMustReplicateOptions(objInfo, replication.ExistingObjectReplicationType))
		replicate, _ = mustReplicate(ctx, oi.Bucket, oi.Name, getMustReplicateOptions(objInfo, replication.ExistingObjectReplicationType))
	}
	return c.resync(oi, replicate)
}
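Both replication paths now take a namespace lock under a "/[replicate]/" prefix, so concurrent replication attempts on one object serialize without contending with the object's regular lock. A sketch of the idea with a simple keyed mutex (the locker here is illustrative, not MinIO's NSLock API):

```go
package main

import (
	"fmt"
	"sync"
)

// nsLocker hands out one mutex per key; the "/[replicate]/" prefix keeps
// replication locks from colliding with regular object locks on the same name.
type nsLocker struct{ m sync.Map }

func (n *nsLocker) lock(key string) *sync.Mutex {
	v, _ := n.m.LoadOrStore(key, &sync.Mutex{})
	return v.(*sync.Mutex)
}

func main() {
	var locks nsLocker
	object := "photos/cat.png"

	replLock := locks.lock("/[replicate]/" + object) // replication path
	objLock := locks.lock(object)                    // regular I/O path

	replLock.Lock()
	// Regular reads/writes are not blocked while replication holds its lock.
	objLock.Lock()
	fmt.Println("object lock acquired while replication lock held")
	objLock.Unlock()
	replLock.Unlock()
}
```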
@ -92,8 +92,6 @@ func init() {
	},
	})

	globalTransitionState = newTransitionState()

	console.SetColor("Debug", fcolor.New())

	gob.Register(StorageErr(""))

@ -154,6 +152,9 @@ func minioConfigToConsoleFeatures() {
	}
	os.Setenv("CONSOLE_MINIO_REGION", globalServerRegion)
	os.Setenv("CONSOLE_CERT_PASSWD", env.Get("MINIO_CERT_PASSWD", ""))
	if globalSubnetLicense != "" {
		os.Setenv("CONSOLE_SUBNET_LICENSE", globalSubnetLicense)
	}
}

func initConsoleServer() (*restapi.Server, error) {

@ -215,11 +216,6 @@ func initConsoleServer() (*restapi.Server, error) {
}

func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
	if (GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
		logger.Fatal(errInvalidArgument,
			"Encryption support is requested but '%s' does not support encryption", name)
	}

	if strings.HasPrefix(name, "gateway") {
		if GlobalGatewaySSE.IsSet() && GlobalKMS == nil {
			uiErr := config.ErrInvalidGWSSEEnvValue(nil).Msg("MINIO_GATEWAY_SSE set but KMS is not configured")

@ -603,6 +599,8 @@ func handleCommonEnvVars() {
	if tiers := env.Get("_MINIO_DEBUG_REMOTE_TIERS_IMMEDIATELY", ""); tiers != "" {
		globalDebugRemoteTiersImmediately = strings.Split(tiers, ",")
	}

	globalSubnetLicense = env.Get(config.EnvMinIOSubnetLicense, "")
}

func logStartupMessage(msg string) {
@ -227,7 +227,9 @@ var (
|
|||
globalServerConfigMu sync.RWMutex
|
||||
)
|
||||
|
||||
func validateConfig(s config.Config, setDriveCounts []int) error {
|
||||
func validateConfig(s config.Config) error {
|
||||
objAPI := newObjectLayerFn()
|
||||
|
||||
// We must have a global lock for this so nobody else modifies env while we do.
|
||||
defer env.LockSetEnv()()
|
||||
|
||||
|
@ -250,7 +252,10 @@ func validateConfig(s config.Config, setDriveCounts []int) error {
|
|||
}
|
||||
|
||||
if globalIsErasure {
|
||||
for _, setDriveCount := range setDriveCounts {
|
||||
if objAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
for _, setDriveCount := range objAPI.SetDriveCounts() {
|
||||
if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -265,7 +270,7 @@ func validateConfig(s config.Config, setDriveCounts []int) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
objAPI := newObjectLayerFn()
|
||||
|
||||
if objAPI != nil {
|
||||
if compCfg.Enabled && !objAPI.IsCompressionSupported() {
|
||||
return fmt.Errorf("Backend does not support compression")
|
||||
|
@ -325,7 +330,7 @@ func validateConfig(s config.Config, setDriveCounts []int) error {
|
|||
return notify.TestNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), globalNotificationSys.ConfiguredTargetIDs())
|
||||
}
|
||||
|
||||
func lookupConfigs(s config.Config, setDriveCounts []int) {
|
||||
func lookupConfigs(s config.Config, objAPI ObjectLayer) {
|
||||
ctx := GlobalContext
|
||||
|
||||
var err error
|
||||
|
@ -337,7 +342,15 @@ func lookupConfigs(s config.Config, setDriveCounts []int) {
|
|||
}
|
||||
}
|
||||
|
||||
if dnsURL, dnsUser, dnsPass, ok := env.LookupEnv(config.EnvDNSWebhook); ok {
|
||||
dnsURL, dnsUser, dnsPass, err := env.LookupEnv(config.EnvDNSWebhook)
|
||||
if err != nil {
|
||||
if globalIsGateway {
|
||||
logger.FatalIf(err, "Unable to initialize remote webhook DNS config")
|
||||
} else {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
|
||||
}
|
||||
}
|
||||
if err == nil && dnsURL != "" {
|
||||
globalDNSConfig, err = dns.NewOperatorDNS(dnsURL,
|
||||
dns.Authentication(dnsUser, dnsPass),
|
||||
dns.RootCAs(globalRootCAs))
|
||||
|
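The hunk above switches the webhook-DNS lookup from a boolean `ok` to an error return, and treats that error as fatal only in gateway mode while a plain server merely logs it and continues. A minimal standalone sketch of that severity-by-mode pattern (the `isGateway` flag and `initWebhookDNS` helper are hypothetical stand-ins, not MinIO APIs):

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// initWebhookDNS is a hypothetical initializer standing in for the
// webhook-DNS setup in the hunk above.
func initWebhookDNS(url string) error {
	if url == "" {
		return errors.New("no webhook DNS endpoint configured")
	}
	return nil
}

func main() {
	isGateway := false // in MinIO this comes from the process mode

	if err := initWebhookDNS(""); err != nil {
		if isGateway {
			// Gateways cannot run without this config: abort.
			log.Fatalf("unable to initialize remote webhook DNS config: %v", err)
		}
		// A regular server can proceed; record and continue.
		fmt.Printf("warning: unable to initialize remote webhook DNS config: %v\n", err)
	}
}
```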
@@ -412,14 +425,13 @@ func lookupConfigs(s config.Config, setDriveCounts []int) {
        logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
    }

    globalAPIConfig.init(apiConfig, setDriveCounts)

    // Initialize remote instance transport once.
    getRemoteInstanceTransportOnce.Do(func() {
        getRemoteInstanceTransport = newGatewayHTTPTransport(apiConfig.RemoteTransportDeadline)
    })

    if globalIsErasure {
    if globalIsErasure && objAPI != nil {
        setDriveCounts := objAPI.SetDriveCounts()
        for i, setDriveCount := range setDriveCounts {
            sc, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)
            if err != nil {

@@ -531,16 +543,18 @@ func lookupConfigs(s config.Config, setDriveCounts []int) {
    }

    // Apply dynamic config values
    logger.LogIf(ctx, applyDynamicConfig(ctx, newObjectLayerFn(), s))
    if err := applyDynamicConfig(ctx, objAPI, s); err != nil {
        if globalIsGateway {
            logger.FatalIf(err, "Unable to initialize dynamic configuration")
        } else {
            logger.LogIf(ctx, err)
        }
    }
}

// applyDynamicConfig will apply dynamic config values.
// Dynamic systems should be in config.SubSystemsDynamic as well.
func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config) error {
    if objAPI == nil {
        return nil
    }

    // Read all dynamic configs.
    // API
    apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])

@@ -555,8 +569,10 @@ func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config
    }

    // Validate if the object layer supports compression.
    if cmpCfg.Enabled && !objAPI.IsCompressionSupported() {
        return fmt.Errorf("Backend does not support compression")
    if objAPI != nil {
        if cmpCfg.Enabled && !objAPI.IsCompressionSupported() {
            return fmt.Errorf("Backend does not support compression")
        }
    }

    // Heal

@@ -573,15 +589,17 @@ func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config

    // Apply configurations.
    // We should not fail after this.
    globalAPIConfig.init(apiConfig, objAPI.SetDriveCounts())
    var setDriveCounts []int
    if objAPI != nil {
        setDriveCounts = objAPI.SetDriveCounts()
    }
    globalAPIConfig.init(apiConfig, setDriveCounts)

    globalCompressConfigMu.Lock()
    globalCompressConfig = cmpCfg
    globalCompressConfigMu.Unlock()

    globalHealConfigMu.Lock()
    globalHealConfig = healCfg
    globalHealConfigMu.Unlock()
    globalHealConfig.Update(healCfg)

    // update dynamic scanner values.
    scannerCycle.Update(scannerCfg.Cycle)
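Note how the hunk above replaces the direct `globalHealConfig = healCfg` assignment (guarded by its own mutex) with a `globalHealConfig.Update(healCfg)` call, while `globalCompressConfig` keeps the lock-then-swap shape. The common idea: dynamically reloaded configuration is swapped under a lock so concurrent readers never observe a half-written value. A small sketch of that shape with illustrative types (not MinIO's actual config structs):

```go
package main

import (
	"fmt"
	"sync"
)

// compressConfig is a stand-in for a dynamically reloadable config section.
type compressConfig struct {
	Enabled    bool
	Extensions []string
}

var (
	configMu sync.RWMutex
	current  compressConfig
)

// apply swaps in a new config atomically with respect to readers.
func apply(c compressConfig) {
	configMu.Lock()
	current = c
	configMu.Unlock()
}

// snapshot returns a consistent copy for a request to use.
func snapshot() compressConfig {
	configMu.RLock()
	defer configMu.RUnlock()
	return current
}

func main() {
	apply(compressConfig{Enabled: true, Extensions: []string{".txt"}})
	fmt.Printf("%+v\n", snapshot())
}
```

Moving the mutex inside an `Update` method, as the heal config does here, keeps callers from forgetting the lock.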
@@ -706,7 +724,7 @@ func loadConfig(objAPI ObjectLayer) error {
    }

    // Override any values from ENVs.
    lookupConfigs(srvCfg, objAPI.SetDriveCounts())
    lookupConfigs(srvCfg, objAPI)

    // hold the mutex lock before a new config is assigned.
    globalServerConfigMu.Lock()

@@ -155,7 +155,7 @@ func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, e
    data, err := readConfig(ctx, objAPI, configFile)
    if err != nil {
        if errors.Is(err, errConfigNotFound) {
            lookupConfigs(srvCfg, objAPI.SetDriveCounts())
            lookupConfigs(srvCfg, objAPI)
            return srvCfg, nil
        }
        return nil, err

@@ -166,7 +166,7 @@ func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, e
        minioMetaBucket: path.Join(minioMetaBucket, configFile),
    })
    if err != nil {
        lookupConfigs(srvCfg, objAPI.SetDriveCounts())
        lookupConfigs(srvCfg, objAPI)
        return nil, err
    }
}

@@ -28,7 +28,7 @@ import (
// Test cross domain xml handler.
func TestCrossXMLHandler(t *testing.T) {
    // Server initialization.
    router := mux.NewRouter().SkipClean(true)
    router := mux.NewRouter().SkipClean(true).UseEncodedPath()
    handler := setCrossDomainPolicy(router)
    srv := httptest.NewServer(handler)

@@ -57,8 +57,7 @@ const (
)

var (
    globalHealConfig   heal.Config
    globalHealConfigMu sync.Mutex
    globalHealConfig   heal.Config

    dataScannerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
    // Sleeper values are updated when config is loaded.

@@ -147,7 +146,7 @@ func runDataScanner(pctx context.Context, objAPI ObjectLayer) {
        go storeDataUsageInBackend(ctx, objAPI, results)
        bf, err := globalNotificationSys.updateBloomFilter(ctx, nextBloomCycle)
        logger.LogIf(ctx, err)
        err = objAPI.NSScanner(ctx, bf, results)
        err = objAPI.NSScanner(ctx, bf, results, uint32(nextBloomCycle))
        logger.LogIf(ctx, err)
        if err == nil {
            // Store new cycle...

@@ -320,7 +319,7 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
        console.Debugf(logPrefix+"Finished scanner, %v entries (%+v) %s \n", len(s.newCache.Cache), *s.newCache.sizeRecursive(s.newCache.Info.Name), logSuffix)
    }
    s.newCache.Info.LastUpdate = UTCNow()
    s.newCache.Info.NextCycle++
    s.newCache.Info.NextCycle = cache.Info.NextCycle
    return s.newCache, nil
}

@@ -638,6 +637,13 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
        console.Debugf(scannerLogPrefix+" checking disappeared folder: %v/%v\n", bucket, prefix)
    }

    if bucket != resolver.bucket {
        // Bucket might be missing as well with abandoned children.
        // make sure it is created first otherwise healing won't proceed
        // for objects.
        _, _ = objAPI.HealBucket(ctx, bucket, madmin.HealOpts{})
    }

    resolver.bucket = bucket

    foundObjs := false

@@ -668,13 +674,8 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int

    entry, ok := entries.resolve(&resolver)
    if !ok {
        for _, err := range errs {
            if err != nil {
                return
            }
        }

        // If no errors, queue it for healing.
        // check if we can get one entry atleast
        // proceed to heal nonetheless.
        entry, _ = entries.firstFound()
    }

@@ -698,9 +699,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
            object:    entry.name,
            versionID: "",
        }, madmin.HealItemObject)
        if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
            logger.LogIf(ctx, err)
        }
        logger.LogIf(ctx, err)
        foundObjs = foundObjs || err == nil
        return
    }

@@ -715,9 +714,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
            object:    fiv.Name,
            versionID: ver.VersionID,
        }, madmin.HealItemObject)
        if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
            logger.LogIf(ctx, err)
        }
        logger.LogIf(ctx, err)
        foundObjs = foundObjs || err == nil
    }
},

@@ -856,30 +853,21 @@ func (i *scannerItem) transformMetaDir() {
    i.objectName = split[len(split)-1]
}

// actionMeta contains information used to apply actions.
type actionMeta struct {
    oi         ObjectInfo
    bitRotScan bool // indicates if bitrot check was requested.
}

var applyActionsLogPrefix = color.Green("applyActions:")

func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, meta actionMeta) (size int64) {
func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi ObjectInfo) (size int64) {
    if i.debug {
        if meta.oi.VersionID != "" {
            console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), meta.oi.VersionID)
        if oi.VersionID != "" {
            console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), oi.VersionID)
        } else {
            console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
        }
    }
    healOpts := madmin.HealOpts{Remove: healDeleteDangling}
    if meta.bitRotScan {
        healOpts.ScanMode = madmin.HealDeepScan
    }
    res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, healOpts)
    if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
        return 0
    healOpts := madmin.HealOpts{
        Remove:   healDeleteDangling,
        ScanMode: globalHealConfig.ScanMode(),
    }
    res, err := o.HealObject(ctx, i.bucket, i.objectPath(), oi.VersionID, healOpts)
    if err != nil && !errors.Is(err, NotImplemented{}) {
        logger.LogIf(ctx, err)
        return 0

@@ -887,8 +875,8 @@ func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, meta acti
    return res.ObjectSize
}

func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, meta actionMeta) (applied bool, size int64) {
    size, err := meta.oi.GetActualSize()
func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi ObjectInfo) (applied bool, size int64) {
    size, err := oi.GetActualSize()
    if i.debug {
        logger.LogIf(ctx, err)
    }

@@ -900,20 +888,20 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, meta ac
        return false, size
    }

    versionID := meta.oi.VersionID
    versionID := oi.VersionID
    action := i.lifeCycle.ComputeAction(
        lifecycle.ObjectOpts{
            Name:             i.objectPath(),
            UserTags:         meta.oi.UserTags,
            ModTime:          meta.oi.ModTime,
            VersionID:        meta.oi.VersionID,
            DeleteMarker:     meta.oi.DeleteMarker,
            IsLatest:         meta.oi.IsLatest,
            NumVersions:      meta.oi.NumVersions,
            SuccessorModTime: meta.oi.SuccessorModTime,
            RestoreOngoing:   meta.oi.RestoreOngoing,
            RestoreExpires:   meta.oi.RestoreExpires,
            TransitionStatus: meta.oi.TransitionStatus,
            UserTags:         oi.UserTags,
            ModTime:          oi.ModTime,
            VersionID:        oi.VersionID,
            DeleteMarker:     oi.DeleteMarker,
            IsLatest:         oi.IsLatest,
            NumVersions:      oi.NumVersions,
            SuccessorModTime: oi.SuccessorModTime,
            RestoreOngoing:   oi.RestoreOngoing,
            RestoreExpires:   oi.RestoreExpires,
            TransitionStatus: oi.TransitionedObject.Status,
            RemoteTiersImmediately: globalDebugRemoteTiersImmediately,
        })
    if i.debug {

@@ -924,59 +912,20 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, meta ac
        }
    }
    switch action {
    case lifecycle.DeleteAction, lifecycle.DeleteVersionAction:
    case lifecycle.DeleteAction, lifecycle.DeleteVersionAction, lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
        return applyLifecycleAction(action, oi), 0
    case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
    case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
        return applyLifecycleAction(action, oi), size
    default:
        // No action.
        if i.debug {
            console.Debugf(applyActionsLogPrefix+" object not expirable: %q\n", i.objectPath())
        }
        return false, size
    }

    obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{
        VersionID: versionID,
    })
    if err != nil {
        switch err.(type) {
        case MethodNotAllowed: // This happens usually for a delete marker
            if !obj.DeleteMarker { // if this is not a delete marker log and return
                // Do nothing - heal in the future.
                logger.LogIf(ctx, err)
                return false, size
            }
        case ObjectNotFound, VersionNotFound:
            // object not found or version not found return 0
            return false, 0
        default:
            // All other errors proceed.
            logger.LogIf(ctx, err)
            return false, size
        }
    }

    action = evalActionFromLifecycle(ctx, *i.lifeCycle, obj, i.debug)
    if action != lifecycle.NoneAction {
        applied = applyLifecycleAction(ctx, action, o, obj)
    }

    if applied {
        switch action {
        case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
            return true, size
        }
        // For all other lifecycle actions that remove data
        return true, 0
    }

    return false, size
}

// applyTierObjSweep removes remote object pending deletion and the free-version
// tracking this information.
func (i *scannerItem) applyTierObjSweep(ctx context.Context, o ObjectLayer, meta actionMeta) {
    if !meta.oi.tierFreeVersion {
func (i *scannerItem) applyTierObjSweep(ctx context.Context, o ObjectLayer, oi ObjectInfo) {
    if !oi.TransitionedObject.FreeVersion {
        // nothing to be done
        return
    }

@@ -989,18 +938,18 @@ func (i *scannerItem) applyTierObjSweep(ctx context.Context, o ObjectLayer, meta
        return err
    }
    // Remove the remote object
    err := deleteObjectFromRemoteTier(ctx, meta.oi.transitionedObjName, meta.oi.transitionVersionID, meta.oi.TransitionTier)
    err := deleteObjectFromRemoteTier(ctx, oi.TransitionedObject.Name, oi.TransitionedObject.VersionID, oi.TransitionedObject.Tier)
    if ignoreNotFoundErr(err) != nil {
        logger.LogIf(ctx, err)
        return
    }

    // Remove this free version
    opts := ObjectOptions{}
    opts.VersionID = meta.oi.VersionID
    _, err = o.DeleteObject(ctx, meta.oi.Bucket, meta.oi.Name, opts)
    _, err = o.DeleteObject(ctx, oi.Bucket, oi.Name, ObjectOptions{
        VersionID: oi.VersionID,
    })
    if err == nil {
        auditLogLifecycle(ctx, meta.oi, ILMFreeVersionDelete)
        auditLogLifecycle(ctx, oi, ILMFreeVersionDelete)
    }
    if ignoreNotFoundErr(err) != nil {
        logger.LogIf(ctx, err)

@@ -1012,19 +961,19 @@ func (i *scannerItem) applyTierObjSweep(ctx context.Context, o ObjectLayer, meta
// The resulting size on disk will always be returned.
// The metadata will be compared to consensus on the object layer before any changes are applied.
// If no metadata is supplied, -1 is returned if no action is taken.
func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta actionMeta, sizeS *sizeSummary) int64 {
    i.applyTierObjSweep(ctx, o, meta)
func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) int64 {
    i.applyTierObjSweep(ctx, o, oi)

    applied, size := i.applyLifecycle(ctx, o, meta)
    applied, size := i.applyLifecycle(ctx, o, oi)
    // For instance, an applied lifecycle means we remove/transitioned an object
    // from the current deployment, which means we don't have to call healing
    // routine even if we are asked to do via heal flag.
    if !applied {
        if i.heal {
            size = i.applyHealing(ctx, o, meta)
            size = i.applyHealing(ctx, o, oi)
        }
        // replicate only if lifecycle rules are not applied.
        i.healReplication(ctx, o, meta.oi.Clone(), sizeS)
        i.healReplication(ctx, o, oi.Clone(), sizeS)
    }
    return size
}

@@ -1063,17 +1012,9 @@ func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, obj Ob
    return action
}

func applyTransitionAction(ctx context.Context, action lifecycle.Action, objLayer ObjectLayer, obj ObjectInfo) bool {
    srcOpts := ObjectOptions{}
    if obj.TransitionStatus == "" {
        srcOpts.Versioned = globalBucketVersioningSys.Enabled(obj.Bucket)
        srcOpts.VersionID = obj.VersionID
        // mark transition as pending
        obj.UserDefined[ReservedMetadataPrefixLower+TransitionStatus] = lifecycle.TransitionPending
        obj.metadataOnly = true // Perform only metadata updates.
        if obj.DeleteMarker {
            return false
        }
func applyTransitionRule(obj ObjectInfo) bool {
    if obj.DeleteMarker {
        return false
    }
    globalTransitionState.queueTransitionTask(obj)
    return true

@@ -1097,7 +1038,9 @@ func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer,
}

func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, applyOnVersion bool) bool {
    opts := ObjectOptions{}
    opts := ObjectOptions{
        Expiration: ExpirationOptions{Expire: true},
    }

    if applyOnVersion {
        opts.VersionID = obj.VersionID

@@ -1136,22 +1079,20 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
}

// Apply object, object version, restored object or restored object version action on the given object
func applyExpiryRule(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, restoredObject, applyOnVersion bool) bool {
    if obj.TransitionStatus != "" {
        return applyExpiryOnTransitionedObject(ctx, objLayer, obj, restoredObject)
    }
    return applyExpiryOnNonTransitionedObjects(ctx, objLayer, obj, applyOnVersion)
func applyExpiryRule(obj ObjectInfo, restoredObject, applyOnVersion bool) bool {
    globalExpiryState.queueExpiryTask(obj, restoredObject, applyOnVersion)
    return true
}

// Perform actions (removal or transitioning of objects), return true the action is successfully performed
func applyLifecycleAction(ctx context.Context, action lifecycle.Action, objLayer ObjectLayer, obj ObjectInfo) (success bool) {
func applyLifecycleAction(action lifecycle.Action, obj ObjectInfo) (success bool) {
    switch action {
    case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
        success = applyExpiryRule(ctx, objLayer, obj, false, action == lifecycle.DeleteVersionAction)
        success = applyExpiryRule(obj, false, action == lifecycle.DeleteVersionAction)
    case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
        success = applyExpiryRule(ctx, objLayer, obj, true, action == lifecycle.DeleteRestoredVersionAction)
        success = applyExpiryRule(obj, true, action == lifecycle.DeleteRestoredVersionAction)
    case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
        success = applyTransitionAction(ctx, action, objLayer, obj)
        success = applyTransitionRule(obj)
    }
    return
}
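The refactor above strips the synchronous `ctx`/`objLayer` plumbing out of the scanner's action path: `applyExpiryRule` and `applyTransitionRule` now only enqueue work (`queueExpiryTask`, `queueTransitionTask`) for background workers instead of deleting or transitioning inline. A rough sketch of that producer/worker shape, with hypothetical task types rather than MinIO's actual expiry state:

```go
package main

import (
	"fmt"
	"sync"
)

type expiryTask struct {
	object         string
	restoredObject bool
	applyOnVersion bool
}

type expiryState struct {
	tasks chan expiryTask
}

// queueExpiryTask hands the object to the background worker; callers
// return immediately instead of deleting inline.
func (es *expiryState) queueExpiryTask(t expiryTask) {
	es.tasks <- t
}

func main() {
	es := &expiryState{tasks: make(chan expiryTask, 16)}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // background worker drains the queue
		defer wg.Done()
		for t := range es.tasks {
			fmt.Printf("expiring %s (version-level: %v)\n", t.object, t.applyOnVersion)
		}
	}()
	es.queueExpiryTask(expiryTask{object: "bucket/obj", applyOnVersion: true})
	close(es.tasks)
	wg.Wait()
}
```

Decoupling the scan loop from the (potentially slow) delete/transition work keeps scanner cycles predictable.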
@@ -1173,7 +1114,7 @@ func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi Obj
        return
    }
    // if replication status is Complete on DeleteMarker and existing object resync required
    if existingObjResync && oi.ReplicationStatus == replication.Completed {
    if existingObjResync && (oi.ReplicationStatus == replication.Completed) {
        i.healReplicationDeletes(ctx, o, oi, existingObjResync)
        return
    }

@@ -46,7 +46,7 @@ const (
    dataUpdateTrackerQueueSize = 0

    dataUpdateTrackerFilename     = dataUsageBucket + SlashSeparator + ".tracker.bin"
    dataUpdateTrackerVersion      = 6
    dataUpdateTrackerVersion      = 7
    dataUpdateTrackerSaveInterval = 5 * time.Minute
)

@@ -397,7 +397,7 @@ func (d *dataUpdateTracker) deserialize(src io.Reader, newerThan time.Time) erro
        return err
    }
    switch tmp[0] {
    case 1, 2, 3, 4, 5:
    case 1, 2, 3, 4, 5, 6:
        if intDataUpdateTracker.debug {
            console.Debugln(color.Green("dataUpdateTracker: ") + "deprecated data version, updating.")
        }
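Bumping `dataUpdateTrackerVersion` from 6 to 7 while extending the `case 1, 2, 3, 4, 5, 6:` arm keeps older on-disk snapshots loadable: a known-but-deprecated version is discarded and rebuilt, rather than treated as corruption. A compact sketch of that convention, with the format details invented for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

const trackerVersion = 7

var errUnexpectedVersion = errors.New("unexpected tracker version")

// deserialize inspects the leading version byte before decoding.
func deserialize(data []byte) error {
	if len(data) == 0 {
		return errors.New("empty input")
	}
	switch v := data[0]; v {
	case 1, 2, 3, 4, 5, 6:
		// Known older formats: drop the state and start fresh.
		return nil
	case trackerVersion:
		// Current format: decode the remainder here.
		return nil
	default:
		return fmt.Errorf("%w: %d", errUnexpectedVersion, v)
	}
}

func main() {
	fmt.Println(deserialize([]byte{6}))  // <nil>: deprecated, rebuilt
	fmt.Println(deserialize([]byte{42})) // unexpected tracker version: 42
}
```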
@@ -238,6 +238,24 @@ func (e *dataUsageEntry) removeChild(hash dataUsageHash) {
    }
}

// Create a clone of the entry.
func (e dataUsageEntry) clone() dataUsageEntry {
    // We operate on a copy from the receiver.
    if e.Children != nil {
        ch := make(dataUsageHashMap, len(e.Children))
        for k, v := range e.Children {
            ch[k] = v
        }
        e.Children = ch
    }
    if e.ReplicationStats != nil {
        // Copy to new struct
        r := *e.ReplicationStats
        e.ReplicationStats = &r
    }
    return e
}

// find a path in the cache.
// Returns nil if not found.
func (d *dataUsageCache) find(path string) *dataUsageEntry {

@@ -672,7 +690,7 @@ func (d *dataUsageCache) clone() dataUsageCache {
        Cache: make(map[string]dataUsageEntry, len(d.Cache)),
    }
    for k, v := range d.Cache {
        clone.Cache[k] = v
        clone.Cache[k] = v.clone()
    }
    return clone
}
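`dataUsageEntry.clone` leans on Go's value receiver: the method already operates on a shallow copy, so only the reference-typed fields (the `Children` map and the `ReplicationStats` pointer) need explicit copying before the entry is returned. The cache `clone()` then calls `v.clone()` per entry so a snapshot can no longer alias live state. A self-contained sketch of the same technique with a simplified entry type:

```go
package main

import "fmt"

type entry struct {
	Size     int64
	Children map[string]struct{} // reference type: must be copied
	Stats    *int64              // pointer: must be copied
}

// clone uses a value receiver, so e is already a shallow copy;
// only the shared reference fields are replaced.
func (e entry) clone() entry {
	if e.Children != nil {
		ch := make(map[string]struct{}, len(e.Children))
		for k, v := range e.Children {
			ch[k] = v
		}
		e.Children = ch
	}
	if e.Stats != nil {
		s := *e.Stats
		e.Stats = &s
	}
	return e
}

func main() {
	n := int64(1)
	a := entry{Size: 10, Children: map[string]struct{}{"x": {}}, Stats: &n}
	b := a.clone()
	b.Children["y"] = struct{}{}
	*b.Stats = 99
	fmt.Println(len(a.Children), *a.Stats) // 1 1 — original untouched
}
```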
@@ -84,7 +84,8 @@ func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket
    }

    for id, usageInfo := range cache.flattenChildrens(*root) {
        prefix := strings.TrimPrefix(id, bucket+slashSeparator)
        // decodeDirObject to avoid any __XL_DIR__ objects
        prefix := decodeDirObject(strings.TrimPrefix(id, bucket+slashSeparator))
        m[prefix] += uint64(usageInfo.Size)
    }
}

@@ -179,6 +179,7 @@ func TestDataUsageUpdate(t *testing.T) {
    // Changed dir must be picked up in this many cycles.
    for i := 0; i < dataUsageUpdateDirCycles; i++ {
        got, err = scanDataFolder(context.Background(), base, got, getSize)
        got.Info.NextCycle++
        if err != nil {
            t.Fatal(err)
        }

@@ -423,6 +424,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
    // Changed dir must be picked up in this many cycles.
    for i := 0; i < dataUsageUpdateDirCycles; i++ {
        got, err = scanDataFolder(context.Background(), base, got, getSize)
        got.Info.NextCycle++
        if err != nil {
            t.Fatal(err)
        }

@@ -1,3 +1,4 @@
//go:build windows
// +build windows

/*

@@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

// Copyright (c) 2015-2021 MinIO, Inc.

@@ -24,7 +24,6 @@ import (
    "fmt"
    "io"
    "sync"
    "time"

    "github.com/minio/madmin-go"
    "github.com/minio/minio/internal/logger"

@@ -204,7 +203,7 @@ func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets

// Only heal on disks where we are sure that healing is needed. We can expand
// this list as and when we figure out more errors can be added to this list safely.
func shouldHealObjectOnDisk(erErr, dataErr error, meta FileInfo, quorumModTime time.Time) bool {
func shouldHealObjectOnDisk(erErr, dataErr error, meta FileInfo, latestMeta FileInfo) bool {
    switch {
    case errors.Is(erErr, errFileNotFound) || errors.Is(erErr, errFileVersionNotFound):
        return true

@@ -222,7 +221,16 @@ func shouldHealObjectOnDisk(erErr, dataErr error, meta FileInfo, quorumModTime t
            return true
        }
    }
    if !quorumModTime.Equal(meta.ModTime) {
    if !latestMeta.MetadataEquals(meta) {
        return true
    }
    if !latestMeta.TransitionInfoEquals(meta) {
        return true
    }
    if !latestMeta.ReplicationInfoEquals(meta) {
        return true
    }
    if !latestMeta.ModTime.Equal(meta.ModTime) {
        return true
    }
    if meta.XLV1 {
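Instead of comparing only `quorumModTime` against each drive's `ModTime`, the heal decision above walks a series of equality checks against the quorum copy `latestMeta` (user metadata, transition state, replication state, then mod-time), healing on the first mismatch. A stripped-down sketch of that short-circuit style with a toy metadata type (not MinIO's `FileInfo`):

```go
package main

import (
	"fmt"
	"time"
)

type fileMeta struct {
	ModTime    time.Time
	UserMeta   map[string]string
	Transition string
}

func metadataEquals(a, b map[string]string) bool {
	if len(a) != len(b) {
		return false
	}
	for k, v := range a {
		if bv, ok := b[k]; !ok || bv != v {
			return false
		}
	}
	return true
}

// shouldHeal reports whether a drive's copy drifted from the quorum copy.
// Each check returns early, so the first mismatch decides.
func shouldHeal(latest, meta fileMeta) bool {
	if !metadataEquals(latest.UserMeta, meta.UserMeta) {
		return true
	}
	if latest.Transition != meta.Transition {
		return true
	}
	return !latest.ModTime.Equal(meta.ModTime)
}

func main() {
	now := time.Now()
	latest := fileMeta{ModTime: now, UserMeta: map[string]string{"etag": "a"}}
	stale := fileMeta{ModTime: now, UserMeta: map[string]string{"etag": "b"}}
	fmt.Println(shouldHeal(latest, stale)) // true
}
```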
@@ -295,6 +303,13 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
    availableDisks, dataErrs := disksWithAllParts(ctx, storageDisks, partsMetadata,
        errs, bucket, object, scanMode)

    // Latest FileInfo for reference. If a valid metadata is not
    // present, it is as good as object not found.
    latestMeta, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, result.DataBlocks)
    if err != nil {
        return result, toObjectErr(err, bucket, object, versionID)
    }

    // Loop to find number of disks with valid data, per-drive
    // data state and a list of outdated disks on which data needs
    // to be healed.

@@ -325,7 +340,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
            driveState = madmin.DriveStateCorrupt
        }

        if shouldHealObjectOnDisk(errs[i], dataErrs[i], partsMetadata[i], modTime) {
        if shouldHealObjectOnDisk(errs[i], dataErrs[i], partsMetadata[i], latestMeta) {
            outDatedDisks[i] = storageDisks[i]
            disksToHealCount++
            result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{

@@ -379,20 +394,12 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
        return result, nil
    }

    // Latest FileInfo for reference. If a valid metadata is not
    // present, it is as good as object not found.
    latestMeta, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, result.DataBlocks)
    if err != nil {
        return result, toObjectErr(err, bucket, object, versionID)
    }

    cleanFileInfo := func(fi FileInfo) FileInfo {
        // Returns a copy of the 'fi' with checksums and parts nil'ed.
        nfi := fi
        nfi.Erasure.Index = 0
        nfi.Erasure.Checksums = nil
        if fi.IsRemote() {
            nfi.Parts = nil
        if !fi.IsRemote() {
            nfi.Erasure.Index = 0
            nfi.Erasure.Checksums = nil
        }
        return nfi
    }

@@ -425,16 +432,16 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
        inlineBuffers = make([]*bytes.Buffer, len(outDatedDisks))
    }

    // Reorder so that we have data disks first and parity disks next.
    latestDisks := shuffleDisks(availableDisks, latestMeta.Erasure.Distribution)
    outDatedDisks = shuffleDisks(outDatedDisks, latestMeta.Erasure.Distribution)
    partsMetadata = shufflePartsMetadata(partsMetadata, latestMeta.Erasure.Distribution)
    copyPartsMetadata = shufflePartsMetadata(copyPartsMetadata, latestMeta.Erasure.Distribution)

    if !latestMeta.Deleted && !latestMeta.IsRemote() {
        result.DataBlocks = latestMeta.Erasure.DataBlocks
        result.ParityBlocks = latestMeta.Erasure.ParityBlocks

        // Reorder so that we have data disks first and parity disks next.
        latestDisks := shuffleDisks(availableDisks, latestMeta.Erasure.Distribution)
        outDatedDisks = shuffleDisks(outDatedDisks, latestMeta.Erasure.Distribution)
        partsMetadata = shufflePartsMetadata(partsMetadata, latestMeta.Erasure.Distribution)
        copyPartsMetadata = shufflePartsMetadata(copyPartsMetadata, latestMeta.Erasure.Distribution)

        // Heal each part. erasureHealFile() will write the healed
        // part to .minio/tmp/uuid/ which needs to be renamed later to
        // the final location.

@@ -531,22 +538,21 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
        if disk == OfflineDisk {
            continue
        }

        // record the index of the updated disks
        partsMetadata[i].Erasure.Index = i + 1

        // dataDir should be empty when
        // - transitionStatus is complete and not in restored state
        if partsMetadata[i].IsRemote() {
            partsMetadata[i].DataDir = ""
        }

        // Attempt a rename now from healed data to final location.
        if err = disk.RenameData(ctx, minioMetaTmpBucket, tmpID, partsMetadata[i], bucket, object); err != nil {
            logger.LogIf(ctx, err)
            return result, toObjectErr(err, bucket, object)
        }

        // Remove any remaining parts from outdated disks from before transition.
        if partsMetadata[i].IsRemote() {
            rmDataDir := partsMetadata[i].DataDir
            disk.DeleteVol(ctx, pathJoin(bucket, encodeDirObject(object), rmDataDir), true)
        }

        for i, v := range result.Before.Drives {
            if v.Endpoint == disk.String() {
                result.After.Drives[i].State = madmin.DriveStateOk

@@ -879,6 +885,12 @@ func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (valid

// HealObject - heal the given object, automatically deletes the object if stale/corrupted if `remove` is true.
func (er erasureObjects) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (hr madmin.HealResultItem, err error) {
    defer func() {
        if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
            err = nil
        }
    }()

    // Create context that also contains information about the object and bucket.
    // The top level handler might not have this information.
    reqInfo := logger.GetReqInfo(ctx)

@@ -24,7 +24,7 @@ import (
    "os"
    "path"
    "reflect"
    "runtime"
    "testing"
    "time"

@@ -145,6 +146,205 @@ func TestHealing(t *testing.T) {
    }
}

func TestHealingDanglingObject(t *testing.T) {
    if runtime.GOOS == "windows" {
        t.Skip()
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    resetGlobalHealState()
    defer resetGlobalHealState()

    nDisks := 16
    fsDirs, err := getRandomDisks(nDisks)
    if err != nil {
        t.Fatal(err)
    }

    //defer removeRoots(fsDirs)

    // Everything is fine, should return nil
    objLayer, disks, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...))
    if err != nil {
        t.Fatal(err)
    }

    bucket := getRandomBucketName()
    object := getRandomObjectName()
    data := bytes.Repeat([]byte("a"), 128*1024)

    err = objLayer.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
    if err != nil {
        t.Fatalf("Failed to make a bucket - %v", err)
    }

    // Enable versioning.
    globalBucketMetadataSys.Update(bucket, bucketVersioningConfig, []byte(`<VersioningConfiguration><Status>Enabled</Status></VersioningConfiguration>`))

    _, err = objLayer.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{
        Versioned: true,
    })
    if err != nil {
        t.Fatal(err)
    }

    for _, fsDir := range fsDirs[:4] {
        if err = os.Chmod(fsDir, 0400); err != nil {
            t.Fatal(err)
        }
    }

    // Create delete marker under quorum.
    objInfo, err := objLayer.DeleteObject(ctx, bucket, object, ObjectOptions{Versioned: true})
    if err != nil {
        t.Fatal(err)
    }

    for _, fsDir := range fsDirs[:4] {
        if err = os.Chmod(fsDir, 0755); err != nil {
            t.Fatal(err)
        }
    }

    fileInfoPreHeal, err := disks[0].ReadVersion(context.Background(), bucket, object, "", false)
    if err != nil {
        t.Fatal(err)
    }

    if fileInfoPreHeal.NumVersions != 1 {
        t.Fatalf("Expected versions 1, got %d", fileInfoPreHeal.NumVersions)
    }

    if err = objLayer.HealObjects(ctx, bucket, "", madmin.HealOpts{Remove: true},
        func(bucket, object, vid string) error {
            _, err := objLayer.HealObject(ctx, bucket, object, vid, madmin.HealOpts{Remove: true})
            return err
        }); err != nil {
        t.Fatal(err)
    }

    fileInfoPostHeal, err := disks[0].ReadVersion(context.Background(), bucket, object, "", false)
    if err != nil {
        t.Fatal(err)
    }

    if fileInfoPostHeal.NumVersions != 2 {
        t.Fatalf("Expected versions 2, got %d", fileInfoPreHeal.NumVersions)
    }

    if objInfo.DeleteMarker {
        if _, err = objLayer.DeleteObject(ctx, bucket, object, ObjectOptions{
            Versioned: true,
            VersionID: objInfo.VersionID,
        }); err != nil {
            t.Fatal(err)
        }
    }

    for _, fsDir := range fsDirs[:4] {
        if err = os.Chmod(fsDir, 0400); err != nil {
            t.Fatal(err)
        }
    }

    rd := mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", "")
    _, err = objLayer.PutObject(ctx, bucket, object, rd, ObjectOptions{
        Versioned: true,
    })
    if err != nil {
        t.Fatal(err)
    }

    for _, fsDir := range fsDirs[:4] {
        if err = os.Chmod(fsDir, 0755); err != nil {
            t.Fatal(err)
        }
    }

    fileInfoPreHeal, err = disks[0].ReadVersion(context.Background(), bucket, object, "", false)
    if err != nil {
        t.Fatal(err)
    }

    if fileInfoPreHeal.NumVersions != 1 {
        t.Fatalf("Expected versions 1, got %d", fileInfoPreHeal.NumVersions)
    }

    if err = objLayer.HealObjects(ctx, bucket, "", madmin.HealOpts{Remove: true},
        func(bucket, object, vid string) error {
            _, err := objLayer.HealObject(ctx, bucket, object, vid, madmin.HealOpts{Remove: true})
            return err
        }); err != nil {
        t.Fatal(err)
    }

    fileInfoPostHeal, err = disks[0].ReadVersion(context.Background(), bucket, object, "", false)
    if err != nil {
        t.Fatal(err)
    }

    if fileInfoPostHeal.NumVersions != 2 {
        t.Fatalf("Expected versions 2, got %d", fileInfoPreHeal.NumVersions)
    }

    rd = mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", "")
    objInfo, err = objLayer.PutObject(ctx, bucket, object, rd, ObjectOptions{
        Versioned: true,
    })
    if err != nil {
        t.Fatal(err)
    }

    for _, fsDir := range fsDirs[:4] {
        if err = os.Chmod(fsDir, 0400); err != nil {
            t.Fatal(err)
        }
    }

    // Create delete marker under quorum.
    _, err = objLayer.DeleteObject(ctx, bucket, object, ObjectOptions{
        Versioned: true,
        VersionID: objInfo.VersionID,
    })
    if err != nil {
        t.Fatal(err)
    }

    for _, fsDir := range fsDirs[:4] {
        if err = os.Chmod(fsDir, 0755); err != nil {
            t.Fatal(err)
        }
    }

    fileInfoPreHeal, err = disks[0].ReadVersion(context.Background(), bucket, object, "", false)
    if err != nil {
        t.Fatal(err)
    }

    if fileInfoPreHeal.NumVersions != 3 {
        t.Fatalf("Expected versions 3, got %d", fileInfoPreHeal.NumVersions)
    }

    if err = objLayer.HealObjects(ctx, bucket, "", madmin.HealOpts{Remove: true},
        func(bucket, object, vid string) error {
            _, err := objLayer.HealObject(ctx, bucket, object, vid, madmin.HealOpts{Remove: true})
            return err
        }); err != nil {
        t.Fatal(err)
    }

    fileInfoPostHeal, err = disks[0].ReadVersion(context.Background(), bucket, object, "", false)
    if err != nil {
        t.Fatal(err)
    }

    if fileInfoPostHeal.NumVersions != 2 {
        t.Fatalf("Expected versions 2, got %d", fileInfoPreHeal.NumVersions)
    }
}

func TestHealObjectCorrupted(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

@@ -166,8 +366,8 @@ func TestHealObjectCorrupted(t *testing.T) {
        t.Fatal(err)
    }

    bucket := "bucket"
    object := "object"
    bucket := getRandomBucketName()
    object := getRandomObjectName()
    data := bytes.Repeat([]byte("a"), 5*1024*1024)
    var opts ObjectOptions

@@ -162,11 +162,13 @@ func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
        objInfo.ReplicationStatus = replication.StatusType(fi.DeleteMarkerReplicationStatus)
    }

    objInfo.TransitionStatus = fi.TransitionStatus
    objInfo.transitionedObjName = fi.TransitionedObjName
    objInfo.transitionVersionID = fi.TransitionVersionID
    objInfo.tierFreeVersion = fi.TierFreeVersion()
    objInfo.TransitionTier = fi.TransitionTier
    objInfo.TransitionedObject = TransitionedObject{
        Name:        fi.TransitionedObjName,
        VersionID:   fi.TransitionVersionID,
        Status:      fi.TransitionStatus,
        FreeVersion: fi.TierFreeVersion(),
        Tier:        fi.TransitionTier,
    }

    // etag/md5Sum has already been extracted. We need to
    // remove to avoid it from appearing as part of

@@ -205,6 +207,43 @@ func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
    return objInfo
}

// TransitionInfoEquals returns true if transition related information are equal, false otherwise.
func (fi FileInfo) TransitionInfoEquals(ofi FileInfo) bool {
    switch {
    case fi.TransitionStatus != ofi.TransitionStatus,
        fi.TransitionTier != ofi.TransitionTier,
        fi.TransitionedObjName != ofi.TransitionedObjName,
        fi.TransitionVersionID != ofi.TransitionVersionID:
        return false
    }
    return true
}

// MetadataEquals returns true if FileInfos Metadata maps are equal, false otherwise.
func (fi FileInfo) MetadataEquals(ofi FileInfo) bool {
    if len(fi.Metadata) != len(ofi.Metadata) {
        return false
    }
    for k, v := range fi.Metadata {
        if ov, ok := ofi.Metadata[k]; !ok || ov != v {
            return false
        }
    }
    return true
}

// ReplicationInfoEquals returns true if server-side replication related fields are equal, false otherwise.
func (fi FileInfo) ReplicationInfoEquals(ofi FileInfo) bool {
    switch {
    case fi.MarkDeleted != ofi.MarkDeleted,
        fi.DeleteMarkerReplicationStatus != ofi.DeleteMarkerReplicationStatus,
        fi.VersionPurgeStatus != ofi.VersionPurgeStatus,
        fi.Metadata[xhttp.AmzBucketReplicationStatus] != ofi.Metadata[xhttp.AmzBucketReplicationStatus]:
        return false
    }
    return true
}

// objectPartIndex - returns the index of matching object part number.
func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
    for i, part := range parts {

@@ -276,6 +315,18 @@ func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.
        h.Write([]byte(fmt.Sprintf("%v", meta.Erasure.Distribution)))
        // make sure that length of Data is same
        h.Write([]byte(fmt.Sprintf("%v", len(meta.Data))))

        // ILM transition fields
        h.Write([]byte(meta.TransitionStatus))
        h.Write([]byte(meta.TransitionTier))
        h.Write([]byte(meta.TransitionedObjName))
        h.Write([]byte(meta.TransitionVersionID))

        // Server-side replication fields
        h.Write([]byte(fmt.Sprintf("%v", meta.MarkDeleted)))
        h.Write([]byte(meta.DeleteMarkerReplicationStatus))
        h.Write([]byte(meta.VersionPurgeStatus))
        h.Write([]byte(meta.Metadata[xhttp.AmzBucketReplicationStatus]))
        metaHashes[i] = hex.EncodeToString(h.Sum(nil))
        h.Reset()
    }
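The `findFileInfoInQuorum` change folds the ILM transition and replication fields into each per-drive hash, so two copies only count toward the same quorum bucket when those fields agree too. The underlying trick: reduce each candidate to a digest, count identical digests, and require at least `quorum` agreeing drives. A small sketch of that counting scheme, assuming a simplified `fileInfo`:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

type fileInfo struct {
	ModTime          string
	TransitionStatus string
	ReplStatus       string
}

// digest reduces the fields that must agree to a single comparable key.
func digest(fi fileInfo) string {
	h := sha256.New()
	h.Write([]byte(fi.ModTime))
	h.Write([]byte(fi.TransitionStatus))
	h.Write([]byte(fi.ReplStatus))
	return hex.EncodeToString(h.Sum(nil))
}

// findInQuorum returns a fileInfo shared by at least quorum drives.
func findInQuorum(metas []fileInfo, quorum int) (fileInfo, bool) {
	counts := map[string]int{}
	for _, m := range metas {
		counts[digest(m)]++
	}
	for _, m := range metas {
		if counts[digest(m)] >= quorum {
			return m, true
		}
	}
	return fileInfo{}, false
}

func main() {
	metas := []fileInfo{
		{ModTime: "t1", TransitionStatus: "complete"},
		{ModTime: "t1", TransitionStatus: "complete"},
		{ModTime: "t1", TransitionStatus: ""}, // drifted drive
	}
	fi, ok := findInQuorum(metas, 2)
	fmt.Println(ok, fi.TransitionStatus) // true complete
}
```

Adding a field to the digest, as this hunk does, tightens what "agreement" means without touching the counting logic.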
@@ -215,3 +215,62 @@ func TestFindFileInfoInQuorum(t *testing.T) {
        })
    }
}

func TestTransitionInfoEquals(t *testing.T) {
    inputs := []struct {
        tier            string
        remoteObjName   string
        remoteVersionID string
        status          string
    }{
        {
            tier:            "S3TIER-1",
            remoteObjName:   mustGetUUID(),
            remoteVersionID: mustGetUUID(),
            status:          "complete",
        },
        {
            tier:            "S3TIER-2",
            remoteObjName:   mustGetUUID(),
            remoteVersionID: mustGetUUID(),
            status:          "complete",
        },
    }

    var i uint
    for i = 0; i < 8; i++ {
        fi := FileInfo{
            TransitionTier:      inputs[0].tier,
            TransitionedObjName: inputs[0].remoteObjName,
            TransitionVersionID: inputs[0].remoteVersionID,
            TransitionStatus:    inputs[0].status,
        }
        ofi := fi
        if i&(1<<0) != 0 {
            ofi.TransitionTier = inputs[1].tier
        }
        if i&(1<<1) != 0 {
            ofi.TransitionedObjName = inputs[1].remoteObjName
        }
        if i&(1<<2) != 0 {
            ofi.TransitionVersionID = inputs[1].remoteVersionID
        }
        actual := fi.TransitionInfoEquals(ofi)
        if i == 0 && !actual {
            t.Fatalf("Test %d: Expected FileInfo's transition info to be equal: fi %v ofi %v", i, fi, ofi)
        }
        if i != 0 && actual {
            t.Fatalf("Test %d: Expected FileInfo's transition info to be inequal: fi %v ofi %v", i, fi, ofi)
        }
    }
    fi := FileInfo{
        TransitionTier:      inputs[0].tier,
        TransitionedObjName: inputs[0].remoteObjName,
        TransitionVersionID: inputs[0].remoteVersionID,
        TransitionStatus:    inputs[0].status,
    }
    ofi := FileInfo{}
    if fi.TransitionInfoEquals(ofi) {
        t.Fatalf("Expected to be inequal: fi %v ofi %v", fi, ofi)
    }
}

@@ -122,7 +122,7 @@ func (er erasureObjects) renameAll(ctx context.Context, bucket, prefix string) {
        wg.Add(1)
        go func(disk StorageAPI) {
            defer wg.Done()
            disk.RenameFile(ctx, bucket, prefix, minioMetaTmpBucket, mustGetUUID())
            disk.RenameFile(ctx, bucket, prefix, minioMetaTmpDeletedBucket, mustGetUUID())
        }(disk)
    }
    wg.Wait()

@@ -347,8 +347,9 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
    // Fill all the necessary metadata.
    // Update `xl.meta` content on each disks.
    for index := range partsMetadata {
        partsMetadata[index].Metadata = opts.UserDefined
        partsMetadata[index].Fresh = true
        partsMetadata[index].ModTime = modTime
        partsMetadata[index].Metadata = opts.UserDefined
    }

    uploadID := mustGetUUID()

@@ -256,26 +256,6 @@ func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, obje
        return toObjectErr(err, bucket, object)
    }

    // This hack is needed to avoid a bug found when overwriting
    // the inline data object with a un-inlined version, for the
    // time being we need this as we have released inline-data
    // version already and this bug is already present in newer
    // releases.
    //
    // This mainly happens with objects < smallFileThreshold when
    // they are overwritten with un-inlined objects >= smallFileThreshold,
    // due to a bug in RenameData() the fi.Data is not niled leading to
    // GetObject thinking that fi.Data is valid while fi.Size has
    // changed already.
    if fi.InlineData() {
        shardFileSize := erasure.ShardFileSize(fi.Size)
        if shardFileSize >= 0 && shardFileSize >= smallFileThreshold {
            for i := range metaArr {
                metaArr[i].Data = nil
            }
        }
    }

    var healOnce sync.Once

    // once we have obtained a common FileInfo i.e latest, we should stick

@@ -461,7 +441,7 @@ func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object strin

    }
    objInfo = fi.ToObjectInfo(bucket, object)
    if !fi.VersionPurgeStatus.Empty() {
    if !fi.VersionPurgeStatus.Empty() && opts.VersionID != "" {
        // Make sure to return object info to provide extra information.
        return objInfo, toObjectErr(errMethodNotAllowed, bucket, object)
    }

@@ -477,6 +457,35 @@ func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object strin
    return objInfo, nil
}

// getObjectInfoAndQuroum - wrapper for reading object metadata and constructs ObjectInfo, additionally returns write quorum for the object.
func (er erasureObjects) getObjectInfoAndQuorum(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, wquorum int, err error) {
    fi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false)
    if err != nil {
        return objInfo, getWriteQuorum(len(er.getDisks())), toObjectErr(err, bucket, object)
    }

    wquorum = fi.Erasure.DataBlocks
    if fi.Erasure.DataBlocks == fi.Erasure.ParityBlocks {
        wquorum++
    }

    objInfo = fi.ToObjectInfo(bucket, object)
    if !fi.VersionPurgeStatus.Empty() && opts.VersionID != "" {
        // Make sure to return object info to provide extra information.
        return objInfo, wquorum, toObjectErr(errMethodNotAllowed, bucket, object)
    }

    if fi.Deleted {
        if opts.VersionID == "" || opts.DeleteMarker {
            return objInfo, wquorum, toObjectErr(errFileNotFound, bucket, object)
        }
        // Make sure to return object info to provide extra information.
        return objInfo, wquorum, toObjectErr(errMethodNotAllowed, bucket, object)
    }

    return objInfo, wquorum, nil
}

func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, errs []error) {
    // Undo rename object on disks where RenameFile succeeded.
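`getObjectInfoAndQuorum` derives the write quorum from the object's own erasure layout rather than a blanket drive count: quorum is `DataBlocks`, bumped by one when data and parity are split evenly so two disjoint halves of the drive set cannot both claim quorum. In short (illustrative helper, not the MinIO function itself):

```go
package main

import "fmt"

// writeQuorum returns the minimum drives a write must reach.
// With N = data + parity, reaching the data-block count is enough
// unless data == parity, where two disjoint halves could each claim
// quorum; the +1 breaks that tie.
func writeQuorum(dataBlocks, parityBlocks int) int {
	q := dataBlocks
	if dataBlocks == parityBlocks {
		q++
	}
	return q
}

func main() {
	fmt.Println(writeQuorum(8, 4)) // 8 of 12 drives
	fmt.Println(writeQuorum(6, 6)) // 7 of 12 drives
}
```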
@@ -582,6 +591,142 @@ func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBuc
    return evalDisks(disks, errs), err
}

func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
    data := r.Reader

    // No metadata is set, allocate a new one.
    if opts.UserDefined == nil {
        opts.UserDefined = make(map[string]string)
    }

    storageDisks := er.getDisks()
    // Get parity and data drive count based on storage class metadata
    parityDrives := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass])
    if parityDrives <= 0 {
        parityDrives = er.defaultParityCount
    }
    dataDrives := len(storageDisks) - parityDrives

    // we now know the number of blocks this object needs for data and parity.
    // writeQuorum is dataBlocks + 1
    writeQuorum := dataDrives
    if dataDrives == parityDrives {
        writeQuorum++
    }

    // Validate input data size and it can never be less than zero.
    if data.Size() < -1 {
        logger.LogIf(ctx, errInvalidArgument, logger.Application)
        return ObjectInfo{}, toObjectErr(errInvalidArgument)
    }

    // Initialize parts metadata
    partsMetadata := make([]FileInfo, len(storageDisks))

    fi := newFileInfo(pathJoin(minioMetaBucket, key), dataDrives, parityDrives)
    fi.DataDir = mustGetUUID()

    // Initialize erasure metadata.
    for index := range partsMetadata {
        partsMetadata[index] = fi
    }

    // Order disks according to erasure distribution
    var onlineDisks []StorageAPI
    onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(storageDisks, partsMetadata, fi)

    erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
    if err != nil {
        return ObjectInfo{}, toObjectErr(err, minioMetaBucket, key)
    }

    // Fetch buffer for I/O, returns from the pool if not allocates a new one and returns.
    var buffer []byte
    switch size := data.Size(); {
    case size == 0:
        buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
    case size >= fi.Erasure.BlockSize:
        buffer = er.bp.Get()
        defer er.bp.Put(buffer)
    case size < fi.Erasure.BlockSize:
        // No need to allocate fully blockSizeV1 buffer if the incoming data is smaller.
        buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
    }

    if len(buffer) > int(fi.Erasure.BlockSize) {
        buffer = buffer[:fi.Erasure.BlockSize]
    }

    shardFileSize := erasure.ShardFileSize(data.Size())
    writers := make([]io.Writer, len(onlineDisks))
    inlineBuffers := make([]*bytes.Buffer, len(onlineDisks))
    for i, disk := range onlineDisks {
        if disk == nil {
            continue
        }

        inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, shardFileSize))
        writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
    }

    n, erasureErr := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
    closeBitrotWriters(writers)
    if erasureErr != nil {
        return ObjectInfo{}, toObjectErr(erasureErr, minioMetaBucket, key)
    }

    // Should return IncompleteBody{} error when reader has fewer bytes
    // than specified in request header.
    if n < data.Size() {
        return ObjectInfo{}, IncompleteBody{Bucket: minioMetaBucket, Object: key}
    }

    for i, w := range writers {
        if w == nil {
            onlineDisks[i] = nil
            continue
        }
        partsMetadata[i].Data = inlineBuffers[i].Bytes()
        partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
        partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
            PartNumber: 1,
            Algorithm:  DefaultBitrotAlgorithm,
            Hash:       bitrotWriterSum(w),
        })
    }

    modTime := UTCNow()

    // Fill all the necessary metadata.
    // Update `xl.meta` content on each disks.
    for index := range partsMetadata {
        partsMetadata[index].Size = n
        partsMetadata[index].Fresh = true
        partsMetadata[index].ModTime = modTime
        partsMetadata[index].Metadata = opts.UserDefined
    }

    // Set an additional header when data is inlined.
    for index := range partsMetadata {
        partsMetadata[index].SetInlineData()
    }

    for i := 0; i < len(onlineDisks); i++ {
        if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
            // Object info is the same in all disks, so we can pick
            // the first meta from online disk
            fi = partsMetadata[i]
            break
        }
    }

    if _, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaBucket, key, partsMetadata, writeQuorum); err != nil {
        return ObjectInfo{}, toObjectErr(err, minioMetaBucket, key)
    }

    return fi.ToObjectInfo(minioMetaBucket, key), nil
}

// PutObject - creates an object upon reading from the input stream
// until EOF, erasure codes the data across all disk and additionally
// writes `xl.meta` which carries the necessary metadata for future
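`putMetacacheObject` above sizes its I/O buffer by the incoming payload: a single byte just to reach EOF for empty objects, a pooled block-sized buffer for large ones, and a right-sized allocation for anything smaller than one erasure block. A sketch of that three-way selection, with the pool type simplified from MinIO's byte-pool:

```go
package main

import (
	"fmt"
	"sync"
)

const blockSize = 1 << 20 // illustrative erasure block size

var pool = sync.Pool{New: func() interface{} { return make([]byte, blockSize) }}

// pickBuffer chooses an I/O buffer for an object of the given size.
func pickBuffer(size int64) []byte {
	switch {
	case size == 0:
		return make([]byte, 1) // at least one byte so Read can hit EOF
	case size >= blockSize:
		return pool.Get().([]byte) // big object: reuse a pooled block
	default:
		return make([]byte, size) // small object: exact-size allocation
	}
}

func main() {
	b := pickBuffer(512)
	fmt.Println(len(b)) // 512
	big := pickBuffer(4 << 20)
	fmt.Println(len(big)) // 1048576
	pool.Put(big) // return pooled buffers when done
}
```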
@@ -733,6 +878,15 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
        } else if shardFileSize < smallFileThreshold/8 {
            inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
        }
    } else {
        // If compressed, use actual size to determine.
        if sz := erasure.ShardFileSize(data.ActualSize()); sz > 0 {
            if !opts.Versioned && sz < smallFileThreshold {
                inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
            } else if sz < smallFileThreshold/8 {
                inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
            }
        }
    }
    for i, disk := range onlineDisks {
        if disk == nil {

@@ -740,7 +894,11 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
        }

        if len(inlineBuffers) > 0 {
            inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, shardFileSize))
            sz := shardFileSize
            if sz < 0 {
                sz = data.ActualSize()
            }
            inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, sz))
            writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
            continue
        }

@@ -936,6 +1094,7 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
            DeleteMarkerReplicationStatus: objects[i].DeleteMarkerReplicationStatus,
            VersionPurgeStatus:            objects[i].VersionPurgeStatus,
        }
        versions[i].SetTierFreeVersionID(mustGetUUID())
        if opts.Versioned {
            versions[i].VersionID = uuid
        }

@@ -1061,9 +1220,40 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
        return ObjectInfo{}, toObjectErr(er.deletePrefix(ctx, bucket, object), bucket, object)
    }

    var lc *lifecycle.Lifecycle
    if opts.Expiration.Expire {
        // Check if the current bucket has a configured lifecycle policy
        lc, _ = globalLifecycleSys.Get(bucket)
    }

    // expiration attempted on a bucket with no lifecycle
    // rules shall be rejected.
    if lc == nil && opts.Expiration.Expire {
        if opts.VersionID != "" {
            return objInfo, VersionNotFound{
                Bucket:    bucket,
                Object:    object,
                VersionID: opts.VersionID,
            }
        }
        return objInfo, ObjectNotFound{
            Bucket: bucket,
            Object: object,
        }
    }

    // Acquire a write lock before deleting the object.
    lk := er.NewNSLock(bucket, object)
    lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout)
    if err != nil {
        return ObjectInfo{}, err
    }
    ctx = lkctx.Context()
    defer lk.Unlock(lkctx.Cancel)

    versionFound := true
    objInfo = ObjectInfo{VersionID: opts.VersionID} // version id needed in Delete API response.
    goi, gerr := er.GetObjectInfo(ctx, bucket, object, opts)
    goi, writeQuorum, gerr := er.getObjectInfoAndQuorum(ctx, bucket, object, opts)
    if gerr != nil && goi.Name == "" {
        switch gerr.(type) {
        case InsufficientReadQuorum:

@@ -1077,19 +1267,34 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
        }
    }

    if opts.Expiration.Expire {
        action := evalActionFromLifecycle(ctx, *lc, goi, false)
        var isErr bool
        switch action {
        case lifecycle.NoneAction:
            isErr = true
        case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
            isErr = true
        }
        if isErr {
            if goi.VersionID != "" {
                return goi, VersionNotFound{
                    Bucket:    bucket,
                    Object:    object,
                    VersionID: goi.VersionID,
                }
            }
            return goi, ObjectNotFound{
                Bucket: bucket,
                Object: object,
            }
        }
    }

    defer NSUpdated(bucket, object)

    // Acquire a write lock before deleting the object.
    lk := er.NewNSLock(bucket, object)
    lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout)
    if err != nil {
        return ObjectInfo{}, err
    }
    ctx = lkctx.Context()
    defer lk.Unlock(lkctx.Cancel)

    storageDisks := er.getDisks()
    writeQuorum := len(storageDisks)/2 + 1

    var markDelete bool
    // Determine whether to mark object deleted for replication
    if goi.VersionID != "" {
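The DeleteObject changes above make `opts.Expiration.Expire` a guarded path: the bucket must actually carry a lifecycle configuration, and the action computed for the object must be an expiry (not `NoneAction` or a transition), otherwise the request is answered with Object/VersionNotFound. A condensed sketch of that gate with hypothetical action constants:

```go
package main

import (
	"errors"
	"fmt"
)

type action int

const (
	noneAction action = iota
	deleteAction
	transitionAction
)

var errNotFound = errors.New("object not found")

// allowExpiryDelete rejects expiration-triggered deletes unless a
// lifecycle rule exists and evaluates to an expiry action.
func allowExpiryDelete(hasLifecycle bool, evaluated action) error {
	if !hasLifecycle {
		return errNotFound // no rules: nothing is expirable
	}
	switch evaluated {
	case noneAction, transitionAction:
		return errNotFound // not an expiry: refuse the delete
	}
	return nil
}

func main() {
	fmt.Println(allowExpiryDelete(false, deleteAction)) // object not found
	fmt.Println(allowExpiryDelete(true, deleteAction))  // <nil>
}
```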
@ -1346,7 +1551,6 @@ func (er erasureObjects) TransitionObject(ctx context.Context, bucket, object st
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer NSUpdated(bucket, object)
|
||||
|
||||
// Acquire write lock before starting to transition the object.
|
||||
lk := er.NewNSLock(bucket, object)
|
||||
|
@ -1376,6 +1580,8 @@ func (er erasureObjects) TransitionObject(ctx context.Context, bucket, object st
|
|||
if fi.TransitionStatus == lifecycle.TransitionComplete {
|
||||
return nil
|
||||
}
|
||||
defer NSUpdated(bucket, object)
|
||||
|
||||
if fi.XLV1 {
|
||||
if _, err = er.HealObject(ctx, bucket, object, "", madmin.HealOpts{NoLock: true}); err != nil {
|
||||
return err
|
||||
|
@ -1414,10 +1620,17 @@ func (er erasureObjects) TransitionObject(ctx context.Context, bucket, object st
|
|||
eventName := event.ObjectTransitionComplete
|
||||
|
||||
storageDisks := er.getDisks()
|
||||
writeQuorum := len(storageDisks)/2 + 1
|
||||
// we now know the number of blocks this object needs for data and parity.
|
||||
// writeQuorum is dataBlocks + 1
|
||||
writeQuorum := fi.Erasure.DataBlocks
|
||||
if fi.Erasure.DataBlocks == fi.Erasure.ParityBlocks {
|
||||
writeQuorum++
|
||||
}
|
||||
|
||||
if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, false); err != nil {
|
||||
eventName = event.ObjectTransitionFailed
|
||||
}
|
||||
|
||||
for _, disk := range storageDisks {
|
||||
if disk != nil && disk.IsOnline() {
|
||||
continue
|
||||
|
|
|
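The transition hunk above replaces the naive `len(disks)/2 + 1` majority with a quorum derived from the object's own erasure layout: the write quorum is the data-block count, plus one extra drive only when data and parity counts are equal. A minimal standalone sketch of that rule (function name and example values are mine, not from the diff):

```go
package main

import "fmt"

// writeQuorumFor mirrors the rule in the hunk above: a write is durable once
// dataBlocks drives hold it; when dataBlocks == parityBlocks an even split is
// possible, so one extra drive is required to break the tie.
func writeQuorumFor(dataBlocks, parityBlocks int) int {
	wq := dataBlocks
	if dataBlocks == parityBlocks {
		wq++
	}
	return wq
}

func main() {
	fmt.Println(writeQuorumFor(8, 4)) // 8 of 12 drives
	fmt.Println(writeQuorumFor(6, 6)) // 7 of 12 drives: equal split needs the tie-breaker
}
```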
@@ -273,12 +273,7 @@ type poolObjInfo struct {
	Err error
}

// getPoolIdxExisting returns the (first) found object pool index containing an object.
// If the object exists, but the latest version is a delete marker, the index with it is still returned.
// If the object does not exist ObjectNotFound error is returned.
// If any other error is found, it is returned.
// The check is skipped if there is only one zone, and 0, nil is always returned in that case.
func (z *erasureServerPools) getPoolIdxExisting(ctx context.Context, bucket, object string) (idx int, err error) {
func (z *erasureServerPools) getPoolIdxExistingWithOpts(ctx context.Context, bucket, object string, opts ObjectOptions) (idx int, err error) {
	if z.SinglePool() {
		return 0, nil
	}

@@ -294,7 +289,7 @@ func (z *erasureServerPools) getPoolIdxExisting(ctx context.Context, bucket, obj
	pinfo := poolObjInfo{
		PoolIndex: i,
	}
	pinfo.ObjInfo, pinfo.Err = pool.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
	pinfo.ObjInfo, pinfo.Err = pool.GetObjectInfo(ctx, bucket, object, opts)
	poolObjInfos[i] = pinfo
}(i, pool)
}

@@ -331,6 +326,19 @@ func (z *erasureServerPools) getPoolIdxExisting(ctx context.Context, bucket, obj
	return -1, toObjectErr(errFileNotFound, bucket, object)
}

func (z *erasureServerPools) getPoolIdxExistingNoLock(ctx context.Context, bucket, object string) (idx int, err error) {
	return z.getPoolIdxExistingWithOpts(ctx, bucket, object, ObjectOptions{NoLock: true})
}

// getPoolIdxExisting returns the (first) found object pool index containing an object.
// If the object exists, but the latest version is a delete marker, the index with it is still returned.
// If the object does not exist ObjectNotFound error is returned.
// If any other error is found, it is returned.
// The check is skipped if there is only one zone, and 0, nil is always returned in that case.
func (z *erasureServerPools) getPoolIdxExisting(ctx context.Context, bucket, object string) (idx int, err error) {
	return z.getPoolIdxExistingWithOpts(ctx, bucket, object, ObjectOptions{})
}

// getPoolIdx returns the found previous object and its corresponding pool idx,
// if none are found falls back to most available space pool.
func (z *erasureServerPools) getPoolIdx(ctx context.Context, bucket, object string, size int64) (idx int, err error) {

@@ -448,7 +456,7 @@ func (z *erasureServerPools) StorageInfo(ctx context.Context) (StorageInfo, []er
	return storageInfo, errs
}

func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo) error {
func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo, wantCycle uint32) error {
	// Updates must be closed before we return.
	defer close(updates)

@@ -493,7 +501,7 @@ func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, upd
	}
}()
// Start scanner. Blocks until done.
err := erObj.nsScanner(ctx, allBuckets, bf, updates)
err := erObj.nsScanner(ctx, allBuckets, bf, wantCycle, updates)
if err != nil {
	logger.LogIf(ctx, err)
	mu.Lock()

@@ -574,6 +582,15 @@ func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, upd
func (z *erasureServerPools) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {
	g := errgroup.WithNErrs(len(z.serverPools))

	// Lock the bucket name before creating.
	lk := z.NewNSLock(minioMetaTmpBucket, bucket+".lck")
	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	// Create buckets in parallel across all sets.
	for index := range z.serverPools {
		index := index

@@ -837,27 +854,6 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o
	objSets.Add(objects[i].ObjectName)
}

poolObjIdxMap := map[int][]ObjectToDelete{}
origIndexMap := map[int][]int{}
if !z.SinglePool() {
	for j, obj := range objects {
		idx, err := z.getPoolIdxExisting(ctx, bucket, obj.ObjectName)
		if isErrObjectNotFound(err) {
			derrs[j] = err
			continue
		}
		if err != nil {
			// Unhandled errors return right here.
			for i := range derrs {
				derrs[i] = err
			}
			return dobjects, derrs
		}
		poolObjIdxMap[idx] = append(poolObjIdxMap[idx], obj)
		origIndexMap[idx] = append(origIndexMap[idx], j)
	}
}

// Acquire a bulk write lock across 'objects'
multiDeleteLock := z.NewNSLock(bucket, objSets.ToSlice()...)
lkctx, err := multiDeleteLock.GetLock(ctx, globalOperationTimeout)

@@ -874,17 +870,65 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o
	return z.serverPools[0].DeleteObjects(ctx, bucket, objects, opts)
}

for idx, pool := range z.serverPools {
	objs := poolObjIdxMap[idx]
	orgIndexes := origIndexMap[idx]
	deletedObjects, errs := pool.DeleteObjects(ctx, bucket, objs, opts)
	for i, derr := range errs {
		if derr != nil {
			derrs[orgIndexes[i]] = derr
// Fetch location of up to 10 objects concurrently.
poolObjIdxMap := map[int][]ObjectToDelete{}
origIndexMap := map[int][]int{}

var mu sync.Mutex
eg := errgroup.WithNErrs(len(objects)).WithConcurrency(10)
cctx, cancel := eg.WithCancelOnError(ctx)
defer cancel()
for j, obj := range objects {
	j := j
	obj := obj
	eg.Go(func() error {
		idx, err := z.getPoolIdxExistingNoLock(cctx, bucket, obj.ObjectName)
		if isErrObjectNotFound(err) {
			derrs[j] = err
			return nil
		}
		dobjects[orgIndexes[i]] = deletedObjects[i]
	}
		if err != nil {
			// unhandled errors return right here.
			return err
		}
		mu.Lock()
		poolObjIdxMap[idx] = append(poolObjIdxMap[idx], obj)
		origIndexMap[idx] = append(origIndexMap[idx], j)
		mu.Unlock()
		return nil
	}, j)
}

if err := eg.WaitErr(); err != nil {
	for i := range derrs {
		derrs[i] = err
	}
	return dobjects, derrs
}

// Delete concurrently in all server pools.
var wg sync.WaitGroup
wg.Add(len(z.serverPools))
for idx, pool := range z.serverPools {
	go func(idx int, pool *erasureSets) {
		defer wg.Done()
		objs := poolObjIdxMap[idx]
		if len(objs) > 0 {
			orgIndexes := origIndexMap[idx]
			deletedObjects, errs := pool.DeleteObjects(ctx, bucket, objs, opts)
			mu.Lock()
			for i, derr := range errs {
				if derr != nil {
					derrs[orgIndexes[i]] = derr
				}
				dobjects[orgIndexes[i]] = deletedObjects[i]
			}
			mu.Unlock()
		}
	}(idx, pool)
}
wg.Wait()

return dobjects, derrs
}
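The DeleteObjects hunk fans out the pool-index lookups through MinIO's internal errgroup fork (`WithNErrs(...).WithConcurrency(10)`). A rough equivalent with the stock `golang.org/x/sync/errgroup`, whose `SetLimit` caps in-flight goroutines, as a hedged sketch (`lookupPool` is a hypothetical stand-in for `getPoolIdxExistingNoLock`):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// lookupPool stands in for getPoolIdxExistingNoLock: it reports which pool
// holds the named object. Placeholder logic, for illustration only.
func lookupPool(ctx context.Context, object string) (int, error) {
	return len(object) % 2, nil
}

func main() {
	objects := []string{"a/1", "a/2", "b/3"}
	idxs := make([]int, len(objects))

	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(10) // cap concurrent lookups, as WithConcurrency(10) does in the hunk
	for j, obj := range objects {
		j, obj := j, obj // capture loop variables (pre-Go 1.22 idiom, as in the diff)
		g.Go(func() error {
			idx, err := lookupPool(ctx, obj)
			if err != nil {
				return err // first error cancels ctx for the remaining lookups
			}
			idxs[j] = idx // distinct indices, so no extra locking needed here
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(idxs)
}
```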
@@ -984,6 +1028,7 @@ func (z *erasureServerPools) ListObjectVersions(ctx context.Context, bucket, pre
	if err != nil && err != io.EOF {
		return loi, err
	}
	defer merged.truncate(0) // Release when returning
	if versionMarker == "" {
		o := listPathOptions{Marker: marker}
		// If we are not looking for a specific version skip it.

@@ -1024,6 +1069,22 @@ func maxKeysPlusOne(maxKeys int, addOne bool) int {

func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
	var loi ListObjectsInfo

	if len(prefix) > 0 && maxKeys == 1 && delimiter == "" && marker == "" {
		// Optimization for certain applications like
		// - Cohesity
		// - Actifio, Splunk etc.
		// which send ListObjects requests where the actual object
		// itself is the prefix and max-keys=1 in such scenarios
		// we can simply verify locally if such an object exists
		// to avoid the need for ListObjects().
		objInfo, err := z.GetObjectInfo(ctx, bucket, prefix, ObjectOptions{NoLock: true})
		if err == nil {
			loi.Objects = append(loi.Objects, objInfo)
			return loi, nil
		}
	}

	opts := listPathOptions{
		Bucket: bucket,
		Prefix: prefix,

@@ -1040,6 +1101,7 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma
	}

	merged.forwardPast(opts.Marker)
	defer merged.truncate(0) // Release when returning

	// Default is recursive, if delimiter is set then list non recursive.
	objects := merged.fileInfos(bucket, prefix, delimiter)

@@ -1368,20 +1430,8 @@ func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, fo
	return nil
}

// deleteAll will delete a bucket+prefix unconditionally across all disks.
// Note that set distribution is ignored so it should only be used in cases where
// data is not distributed across sets.
// Errors are logged but individual disk failures are not returned.
func (z *erasureServerPools) deleteAll(ctx context.Context, bucket, prefix string) {
	for _, servers := range z.serverPools {
		for _, set := range servers.sets {
			set.deleteAll(ctx, bucket, prefix)
		}
	}
}

// renameAll will rename bucket+prefix unconditionally across all disks to
// minioMetaTmpBucket + unique uuid,
// minioMetaTmpDeletedBucket + unique uuid,
// Note that set distribution is ignored so it should only be used in cases where
// data is not distributed across sets. Errors are logged but individual
// disk failures are not returned.

@@ -1618,11 +1668,14 @@ func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix str
	}
	fivs, err := entry.fileInfoVersions(bucket)
	if err != nil {
		errCh <- err
		cancel()
		if err := healObject(bucket, entry.name, ""); err != nil {
			errCh <- err
			cancel()
			return
		}
		return
	}
	waitForLowHTTPReq(globalHealConfig.IOCount, globalHealConfig.Sleep)

	for _, version := range fivs.Versions {
		if err := healObject(bucket, version.Name, version.VersionID); err != nil {
			errCh <- err

@@ -1655,9 +1708,13 @@ func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix str
	agreed: healEntry,
	partial: func(entries metaCacheEntries, nAgreed int, errs []error) {
		entry, ok := entries.resolve(&resolver)
		if ok {
			healEntry(*entry)
		if !ok {
			// check if we can get at least one entry,
			// proceed to heal nonetheless.
			entry, _ = entries.firstFound()
		}

		healEntry(*entry)
	},
	finished: nil,
}

@@ -1681,9 +1738,6 @@ func (z *erasureServerPools) HealObject(ctx context.Context, bucket, object, ver
	result, err := pool.HealObject(ctx, bucket, object, versionID, opts)
	result.Object = decodeDirObject(result.Object)
	if err != nil {
		if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
			continue
		}
		return result, err
	}
	return result, nil
@@ -326,7 +326,7 @@ func (er erasureObjects) cleanupDeletedObjects(ctx context.Context) {

// nsScanner will start scanning buckets and send updated totals as they are traversed.
// Updates are sent on a regular basis and the caller *must* consume them.
func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf *bloomFilter, updates chan<- dataUsageCache) error {
func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf *bloomFilter, wantCycle uint32, updates chan<- dataUsageCache) error {
	if len(buckets) == 0 {
		return nil
	}

@@ -419,7 +419,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
case v, ok := <-bucketResults:
	if !ok {
		// Save final state...
		cache.Info.NextCycle++
		cache.Info.NextCycle = wantCycle
		cache.Info.LastUpdate = time.Now()
		logger.LogIf(ctx, cache.save(ctx, er, dataUsageCacheName))
		updates <- cache

@@ -461,12 +461,13 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
cache.Info.BloomFilter = bloom
cache.Info.SkipHealing = healing
cache.Disks = allDiskIDs
cache.Info.NextCycle = wantCycle
if cache.Info.Name != bucket.Name {
	logger.LogIf(ctx, fmt.Errorf("cache name mismatch: %s != %s", cache.Info.Name, bucket.Name))
	cache.Info = dataUsageCacheInfo{
		Name:       bucket.Name,
		LastUpdate: time.Time{},
		NextCycle:  0,
		NextCycle:  wantCycle,
	}
}
// Collect updates.
@@ -705,8 +705,10 @@ func saveUnformattedFormat(ctx context.Context, storageDisks []StorageAPI, forma
	if format == nil {
		continue
	}
	if err := saveFormatErasure(storageDisks[index], format, true); err != nil {
		return err
	if storageDisks[index] != nil {
		if err := saveFormatErasure(storageDisks[index], format, true); err != nil {
			return err
		}
	}
}
return nil
@@ -268,6 +268,7 @@ func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadClos
// Stat to get the size of the file at path.
st, err := fr.Stat()
if err != nil {
	fr.Close()
	err = osErrToFileErr(err)
	if err != errFileNotFound {
		logger.LogIf(ctx, err)

@@ -277,6 +278,7 @@ func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadClos

// Verify if its not a regular file, since subsequent Seek is undefined.
if !st.Mode().IsRegular() {
	fr.Close()
	return nil, 0, errIsNotRegular
}

@@ -284,6 +286,7 @@ func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadClos
if offset > 0 {
	_, err = fr.Seek(offset, io.SeekStart)
	if err != nil {
		fr.Close()
		logger.LogIf(ctx, err)
		return nil, 0, err
	}
@@ -240,7 +240,7 @@ func (fs *FSObjects) StorageInfo(ctx context.Context) (StorageInfo, []error) {
}

// NSScanner returns data usage stats of the current FS deployment
func (fs *FSObjects) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo) error {
func (fs *FSObjects) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo, wantCycle uint32) error {
	defer close(updates)
	// Load bucket totals
	var totalCache dataUsageCache

@@ -386,7 +386,7 @@ func (fs *FSObjects) scanBucket(ctx context.Context, bucket string, cache dataUs
}

oi := fsMeta.ToObjectInfo(bucket, object, fi)
sz := item.applyActions(ctx, fs, actionMeta{oi: oi}, &sizeSummary{})
sz := item.applyActions(ctx, fs, oi, &sizeSummary{})
if sz >= 0 {
	return sizeSummary{totalSize: sz, versions: 1}, nil
}
@@ -333,6 +333,10 @@ func ErrorRespToObjectError(err error, params ...string) error {
	err = PartTooSmall{}
}

switch minioErr.StatusCode {
case http.StatusMethodNotAllowed:
	err = toObjectErr(errMethodNotAllowed, bucket, object)
}
return err
}

@@ -384,7 +388,7 @@ func gatewayHandleEnvVars() {

// shouldMeterRequest checks whether incoming request should be added to prometheus gateway metrics
func shouldMeterRequest(req *http.Request) bool {
	return !(guessIsBrowserReq(req) || guessIsHealthCheckReq(req) || guessIsMetricsReq(req))
	return req.URL != nil && !strings.HasPrefix(req.URL.Path, minioReservedBucketPath+slashSeparator)
}

// MetricsTransport is a custom wrapper around Transport to track metrics
@@ -218,8 +218,6 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Set when gateway is enabled
globalIsGateway = true

enableConfigOps := false

// TODO: We need to move this code with globalConfigSys.Init()
// for now keep it here such that "s3" gateway layer initializes
// itself properly when KMS is set.

@@ -245,7 +243,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {

// Enable IAM admin APIs if etcd is enabled, if not just enable basic
// operations such as profiling, server info etc.
registerAdminRouter(router, enableConfigOps)
registerAdminRouter(router, false)

// Add healthcheck router
registerHealthCheckRouter(router)

@@ -303,9 +301,6 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
	logger.FatalIf(globalNotificationSys.Init(GlobalContext, buckets, newObject), "Unable to initialize notification system")
}

// Initialize users credentials and policies in background.
globalIAMSys.InitStore(newObject)

go globalIAMSys.Init(GlobalContext, newObject)

if globalCacheConfig.Enabled {

@@ -345,18 +340,14 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
}

if globalBrowserEnabled {
	consoleSrv, err := initConsoleServer()
	globalConsoleSrv, err = initConsoleServer()
	if err != nil {
		logger.FatalIf(err, "Unable to initialize console service")
	}

	go func() {
		<-globalOSSignalCh
		consoleSrv.Shutdown()
		logger.FatalIf(globalConsoleSrv.Serve(), "Unable to initialize console server")
	}()

	consoleSrv.Serve()
} else {
	<-globalOSSignalCh
}
<-globalOSSignalCh
}
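The gateway hunk above moves the console server into a background goroutine and keeps the main goroutine parked on the signal channel whether or not the console is enabled. A generic sketch of that pattern with the standard library (none of these types are MinIO's; this only illustrates the serve-in-goroutine, block-on-signal shape):

```go
package main

import (
	"errors"
	"log"
	"net/http"
	"os"
	"os/signal"
)

func main() {
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt)

	consoleEnabled := true
	if consoleEnabled {
		srv := &http.Server{Addr: ":9001"}
		// Serve in the background so startup continues; crash early on failure,
		// mirroring logger.FatalIf(globalConsoleSrv.Serve(), ...) in the hunk.
		go func() {
			if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
				log.Fatalf("console server: %v", err)
			}
		}()
	}

	// The main goroutine always parks on the signal channel, console or not.
	<-sigCh
}
```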
@@ -48,7 +48,7 @@ func (a GatewayUnsupported) LocalStorageInfo(ctx context.Context) (StorageInfo,
}

// NSScanner - scanner is not implemented for gateway
func (a GatewayUnsupported) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo) error {
func (a GatewayUnsupported) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo, wantCycle uint32) error {
	logger.CriticalIf(ctx, errors.New("not implemented"))
	return NotImplemented{}
}
@@ -406,7 +406,7 @@ func setRequestValidityHandler(h http.Handler) http.Handler {
	return
}
// Check for bad components in URL query values.
for _, vv := range r.URL.Query() {
for _, vv := range r.Form {
	for _, v := range vv {
		if hasBadPathComponent(v) {
			writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL)

@@ -436,55 +436,6 @@ func setBucketForwardingHandler(h http.Handler) http.Handler {
	return
}

// For browser requests, when federation is setup we need to
// specifically handle download and upload for browser requests.
if guessIsBrowserReq(r) {
	var bucket, _ string
	switch r.Method {
	case http.MethodPut:
		if getRequestAuthType(r) == authTypeJWT {
			bucket, _ = path2BucketObjectWithBasePath(minioReservedBucketPath+"/upload", r.URL.Path)
		}
	case http.MethodGet:
		if t := r.URL.Query().Get("token"); t != "" {
			bucket, _ = path2BucketObjectWithBasePath(minioReservedBucketPath+"/download", r.URL.Path)
		} else if getRequestAuthType(r) != authTypeJWT && !strings.HasPrefix(r.URL.Path, minioReservedBucketPath) {
			bucket, _ = request2BucketObjectName(r)
		}
	}
	if bucket == "" {
		h.ServeHTTP(w, r)
		return
	}
	sr, err := globalDNSConfig.Get(bucket)
	if err != nil {
		if err == dns.ErrNoEntriesFound {
			writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrNoSuchBucket),
				r.URL)
		} else {
			writeErrorResponse(r.Context(), w, toAPIError(r.Context(), err),
				r.URL)
		}
		return
	}
	if globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(sr)...)).IsEmpty() {
		r.URL.Scheme = "http"
		if globalIsTLS {
			r.URL.Scheme = "https"
		}
		r.URL.Host = getHostFromSrv(sr)
		// Make sure we remove any existing headers before
		// proxying the request to another node.
		for k := range w.Header() {
			w.Header().Del(k)
		}
		globalForwarder.ServeHTTP(w, r)
		return
	}
	h.ServeHTTP(w, r)
	return
}

bucket, object := request2BucketObjectName(r)

// Requests in federated setups for STS type calls which are
@@ -19,13 +19,13 @@ package cmd

import (
	"context"
	"errors"
	"fmt"
	"sort"
	"time"

	"github.com/minio/madmin-go"
	"github.com/minio/minio/internal/color"
	"github.com/minio/minio/internal/config/storageclass"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/console"
	"github.com/minio/pkg/wildcard"

@@ -43,12 +43,11 @@ func newBgHealSequence() *healSequence {

hs := madmin.HealOpts{
	// Remove objects that do not have read-quorum
	Remove:   true,
	ScanMode: madmin.HealNormalScan,
	Remove:   healDeleteDangling,
	ScanMode: globalHealConfig.ScanMode(),
}

return &healSequence{
	sourceCh:    make(chan healSource),
	respCh:      make(chan healResult),
	startTime:   UTCNow(),
	clientToken: bgHealingUUID,

@@ -133,6 +132,11 @@ func getBackgroundHealStatus(ctx context.Context, o ObjectLayer) (madmin.BgHealS
	return status.Sets[i].ID < status.Sets[j].ID
})

backendInfo := o.BackendInfo()
status.SCParity = make(map[string]int)
status.SCParity[storageclass.STANDARD] = backendInfo.StandardSCParity
status.SCParity[storageclass.RRS] = backendInfo.RRSCParity

return status, true
}

@@ -165,6 +169,8 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []BucketIn
	Name: pathJoin(minioMetaBucket, minioConfigPrefix),
})

scanMode := globalHealConfig.ScanMode()

// Heal all buckets with all objects
for _, bucket := range buckets {
	if tracker.isHealed(bucket.Name) {

@@ -183,10 +189,10 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []BucketIn
tracker.Object = ""
tracker.Bucket = bucket.Name
// Heal current bucket
if _, err := er.HealBucket(ctx, bucket.Name, madmin.HealOpts{}); err != nil {
	if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
		logger.LogIf(ctx, err)
	}
if _, err := er.HealBucket(ctx, bucket.Name, madmin.HealOpts{
	ScanMode: scanMode,
}); err != nil {
	logger.LogIf(ctx, err)
}

if serverDebugLog {

@@ -195,7 +201,12 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []BucketIn

disks, _ := er.getOnlineDisksWithHealing()
if len(disks) == 0 {
	return errors.New("healErasureSet: No non-healing disks found")
	// all disks are healing in this set, this is allowed
	// so we simply proceed to next bucket, marking the bucket
	// as done as there are no objects to heal.
	tracker.bucketDone(bucket.Name)
	logger.LogIf(ctx, tracker.update(ctx))
	continue
}

// Limit listing to 3 drives.

@@ -221,21 +232,28 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []BucketIn
		return
	}
}

fivs, err := entry.fileInfoVersions(bucket.Name)
if err != nil {
	err := bgSeq.queueHealTask(healSource{
		bucket:    bucket.Name,
		object:    entry.name,
		versionID: "",
	}, madmin.HealItemObject)
	logger.LogIf(ctx, err)
	return
}
waitForLowHTTPReq(globalHealConfig.IOCount, globalHealConfig.Sleep)

for _, version := range fivs.Versions {
	if _, err := er.HealObject(ctx, bucket.Name, version.Name, version.VersionID, madmin.HealOpts{
		ScanMode: madmin.HealNormalScan, Remove: healDeleteDangling}); err != nil {
		if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
			// If not deleted, assume they failed.
			tracker.ItemsFailed++
			tracker.BytesFailed += uint64(version.Size)
			logger.LogIf(ctx, err)
		}
	if _, err := er.HealObject(ctx, bucket.Name, version.Name,
		version.VersionID, madmin.HealOpts{
			ScanMode: scanMode,
			Remove:   healDeleteDangling,
		}); err != nil {
		// If not deleted, assume they failed.
		tracker.ItemsFailed++
		tracker.BytesFailed += uint64(version.Size)
		logger.LogIf(ctx, err)
	} else {
		tracker.ItemsHealed++
		tracker.BytesDone += uint64(version.Size)

@@ -246,6 +264,9 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []BucketIn
if time.Since(tracker.LastUpdate) > time.Minute {
	logger.LogIf(ctx, tracker.update(ctx))
}

// Wait and proceed if there are active requests
waitForLowHTTPReq()
}

// How to resolve partial results.

@@ -265,9 +286,12 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []BucketIn
	agreed: healEntry,
	partial: func(entries metaCacheEntries, nAgreed int, errs []error) {
		entry, ok := entries.resolve(&resolver)
		if ok {
			healEntry(*entry)
		if !ok {
			// check if we can get at least one entry,
			// proceed to heal nonetheless.
			entry, _ = entries.firstFound()
		}
		healEntry(*entry)
	},
	finished: nil,
})

@@ -298,14 +322,14 @@ func healObject(bucket, object, versionID string, scan madmin.HealScanMode) {
// Get background heal sequence to send elements to heal
bgSeq, ok := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
if ok {
	bgSeq.sourceCh <- healSource{
	bgSeq.queueHealTask(healSource{
		bucket:    bucket,
		object:    object,
		versionID: versionID,
		opts: &madmin.HealOpts{
			Remove:   true, // if found dangling purge it.
			Remove:   healDeleteDangling, // if found dangling purge it.
			ScanMode: scan,
		},
	}
	}, madmin.HealItemObject)
}
}
@@ -25,6 +25,7 @@ import (
	"sync"
	"time"

	"github.com/minio/console/restapi"
	"github.com/minio/minio-go/v7/pkg/set"
	"github.com/minio/minio/internal/bucket/bandwidth"
	"github.com/minio/minio/internal/handlers"

@@ -216,6 +217,9 @@ var (
	// The name of this local node, fetched from arguments
	globalLocalNodeName string

	// The global subnet license
	globalSubnetLicense string

	globalRemoteEndpoints map[string]Endpoint

	// Global server's network statistics

@@ -312,6 +316,8 @@ var (

	globalTierJournal *tierJournal

	globalConsoleSrv *restapi.Server

	globalDebugRemoteTiersImmediately []string
	// Add new variable global values here.
)
@@ -24,7 +24,7 @@ import (

	"github.com/minio/minio/internal/config/api"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/sys"
	mem "github.com/shirou/gopsutil/v3/mem"
)

type apiConfig struct {

@@ -39,6 +39,7 @@ type apiConfig struct {
	totalDriveCount          int
	replicationWorkers       int
	replicationFailedWorkers int
	transitionWorkers        int
}

func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {

@@ -47,29 +48,41 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {

t.clusterDeadline = cfg.ClusterDeadline
t.corsAllowOrigins = cfg.CorsAllowOrigin
maxSetDrives := 0
for _, setDriveCount := range setDriveCounts {
	t.totalDriveCount += setDriveCount
	if setDriveCount > maxSetDrives {
		maxSetDrives = setDriveCount
	}
}

var apiRequestsMaxPerNode int
if cfg.RequestsMax <= 0 {
	stats, err := sys.GetStats()
	var maxMem uint64
	memStats, err := mem.VirtualMemory()
	if err != nil {
		logger.LogIf(GlobalContext, err)
		// Default to 8 GiB, not critical.
		stats.TotalRAM = 8 << 30
		maxMem = 8 << 30
	} else {
		maxMem = memStats.Available / 2
	}

	// max requests per node is calculated as
	// total_ram / ram_per_request
	// ram_per_request is (2MiB+128KiB) * driveCount \
	//    + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2)
	apiRequestsMaxPerNode = int(stats.TotalRAM / uint64(t.totalDriveCount*(blockSizeLarge+blockSizeSmall)+int(blockSizeV1*2+blockSizeV2*2)))
	apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*(blockSizeLarge+blockSizeSmall)+int(blockSizeV1*2+blockSizeV2*2)))

	if globalIsErasure {
		logger.Info("Automatically configured API requests per node based on available memory on the system: %d", apiRequestsMaxPerNode)
	}
} else {
	apiRequestsMaxPerNode = cfg.RequestsMax
	if len(globalEndpoints.Hostnames()) > 0 {
		apiRequestsMaxPerNode /= len(globalEndpoints.Hostnames())
	}
}

if cap(t.requestsPool) < apiRequestsMaxPerNode {
	// Only replace if needed.
	// Existing requests will use the previous limit,

@@ -87,6 +100,10 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
}
t.replicationFailedWorkers = cfg.ReplicationFailedWorkers
t.replicationWorkers = cfg.ReplicationWorkers
if globalTransitionState != nil && cfg.TransitionWorkers != t.transitionWorkers {
	globalTransitionState.UpdateWorkers(cfg.TransitionWorkers)
}
t.transitionWorkers = cfg.TransitionWorkers
}

func (t *apiConfig) getListQuorum() int {

@@ -173,3 +190,10 @@ func (t *apiConfig) getReplicationWorkers() int {

	return t.replicationWorkers
}

func (t *apiConfig) getTransitionWorkers() int {
	t.mu.RLock()
	defer t.mu.RUnlock()

	return t.transitionWorkers
}
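The apiConfig hunk changes two inputs to the request-limit formula: it budgets against half of the *available* memory rather than total RAM, and sizes per-request memory by the largest erasure set's drive count instead of the node-wide drive total. A worked-numbers sketch of that arithmetic (constant values are assumptions matching the names in the diff, not verified against MinIO's source):

```go
package main

import "fmt"

const (
	blockSizeSmall = 128 << 10 // 128 KiB (assumed value for the named constant)
	blockSizeLarge = 2 << 20   // 2 MiB
	blockSizeV1    = 10 << 20  // 10 MiB legacy erasure block
	blockSizeV2    = 1 << 20   // 1 MiB current erasure block
)

// requestsPerNode follows the hunk's formula:
// ram_per_request = (2MiB+128KiB)*maxSetDrives + 2*10MiB + 2*1MiB,
// budgeted against half of the available memory.
func requestsPerNode(availMem uint64, maxSetDrives int) int {
	perReq := uint64(maxSetDrives*(blockSizeLarge+blockSizeSmall) + blockSizeV1*2 + blockSizeV2*2)
	return int(availMem / 2 / perReq)
}

func main() {
	// 32 GiB available, 16-drive erasure sets:
	// per request = 16*2.125MiB + 22MiB = 56MiB; 16GiB/56MiB ≈ 292 requests.
	fmt.Println(requestsPerNode(32<<30, 16))
}
```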
@@ -112,7 +112,7 @@ var userMetadataKeyPrefixes = []string{

// extractMetadata extracts metadata from HTTP header and HTTP queryString.
func extractMetadata(ctx context.Context, r *http.Request) (metadata map[string]string, err error) {
	query := r.URL.Query()
	query := r.Form
	header := r.Header
	metadata = make(map[string]string)
	// Extract all query values.

@@ -213,15 +213,6 @@ func getReqAccessCred(r *http.Request, region string) (cred auth.Credentials) {
	if cred.AccessKey == "" {
		cred, _, _ = getReqAccessKeyV2(r)
	}
	if cred.AccessKey == "" {
		claims, owner, _ := webRequestAuthenticate(r)
		if owner {
			return globalActiveCred
		}
		if claims != nil {
			cred, _ = globalIAMSys.GetUser(claims.AccessKey)
		}
	}
	return cred
}
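Several hunks in this commit swap `r.URL.Query()` for `r.Form`. The difference: `r.URL.Query()` re-parses the raw query string on every call, while `r.Form` is a pre-parsed map — but it is only populated after `ParseForm` has run, which MinIO presumably does once in an earlier common handler. A hedged sketch of the pattern:

```go
package main

import (
	"fmt"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// r.Form is empty until ParseForm is called; in MinIO this presumably
	// happens once in shared middleware so later code reads r.Form cheaply.
	if err := r.ParseForm(); err != nil {
		http.Error(w, "bad form", http.StatusBadRequest)
		return
	}
	// After parsing, each query (and form body) parameter is one map lookup,
	// matching uses like r.Form.Get("maintenance") in the diff.
	maintenance := r.Form.Get("maintenance") == "true"
	fmt.Fprintf(w, "maintenance=%v\n", maintenance)
}

func main() {
	http.HandleFunc("/health", handler)
	http.ListenAndServe(":8080", nil)
}
```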
@@ -42,7 +42,7 @@ func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) {
	ctx, cancel := context.WithTimeout(ctx, globalAPIConfig.getClusterDeadline())
	defer cancel()

	opts := HealthOptions{Maintenance: r.URL.Query().Get("maintenance") == "true"}
	opts := HealthOptions{Maintenance: r.Form.Get("maintenance") == "true"}
	result := objLayer.Health(ctx, opts)
	if result.WriteQuorum > 0 {
		w.Header().Set(xhttp.MinIOWriteQuorum, strconv.Itoa(result.WriteQuorum))

@@ -95,6 +95,17 @@ func ReadinessCheckHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set(xhttp.MinIOServerStatus, unavailable)
}

if globalIsGateway && globalEtcdClient != nil {
	// Borrowed from https://github.com/etcd-io/etcd/blob/main/etcdctl/ctlv3/command/ep_command.go#L118
	ctx, cancel := context.WithTimeout(r.Context(), defaultContextTimeout)
	defer cancel()
	// etcd unreachable throw an error for readiness.
	if _, err := globalEtcdClient.Get(ctx, "health"); err != nil {
		writeErrorResponse(r.Context(), w, toAPIError(r.Context(), err), r.URL)
		return
	}
}

writeResponse(w, http.StatusOK, nil, mimeNone)
}

@@ -104,5 +115,17 @@ func LivenessCheckHandler(w http.ResponseWriter, r *http.Request) {
	// Service not initialized yet
	w.Header().Set(xhttp.MinIOServerStatus, unavailable)
}

if globalIsGateway && globalEtcdClient != nil {
	// Borrowed from https://github.com/etcd-io/etcd/blob/main/etcdctl/ctlv3/command/ep_command.go#L118
	ctx, cancel := context.WithTimeout(r.Context(), defaultContextTimeout)
	defer cancel()
	// etcd unreachable throw an error for readiness.
	if _, err := globalEtcdClient.Get(ctx, "health"); err != nil {
		writeErrorResponse(r.Context(), w, toAPIError(r.Context(), err), r.URL)
		return
	}
}

writeResponse(w, http.StatusOK, nil, mimeNone)
}
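Both probes above gate gateway readiness on a cheap etcd `Get` with a deadline: any response, even for a key that does not exist, proves the cluster is reachable. A standalone sketch with the stock `go.etcd.io/etcd/client/v3` (endpoint and timeout values are illustrative):

```go
package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// etcdReady mirrors the probe in the hunks above: a successful Get, even of a
// nonexistent key, shows etcd is reachable within the deadline.
func etcdReady(cli *clientv3.Client, timeout time.Duration) bool {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	_, err := cli.Get(ctx, "health")
	return err == nil
}

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	log.Println("etcd ready:", etcdReady(cli, 2*time.Second))
}
```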
@@ -38,12 +38,12 @@ type ConnStats struct {
}

// Increase total input bytes
func (s *ConnStats) incInputBytes(n int) {
func (s *ConnStats) incInputBytes(n int64) {
	atomic.AddUint64(&s.totalInputBytes, uint64(n))
}

// Increase total output bytes
func (s *ConnStats) incOutputBytes(n int) {
func (s *ConnStats) incOutputBytes(n int64) {
	atomic.AddUint64(&s.totalOutputBytes, uint64(n))
}

@@ -58,12 +58,12 @@ func (s *ConnStats) getTotalOutputBytes() uint64 {
}

// Increase outbound input bytes
func (s *ConnStats) incS3InputBytes(n int) {
func (s *ConnStats) incS3InputBytes(n int64) {
	atomic.AddUint64(&s.s3InputBytes, uint64(n))
}

// Increase outbound output bytes
func (s *ConnStats) incS3OutputBytes(n int) {
func (s *ConnStats) incS3OutputBytes(n int64) {
	atomic.AddUint64(&s.s3OutputBytes, uint64(n))
}
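Widening these increment parameters from `int` to `int64` lets callers pass `io.Copy`-style byte counts straight through; on 32-bit platforms an `int` is 32 bits, so large transfers would otherwise truncate before reaching the counter. A minimal sketch of the same shape (types are mine, mirroring but not copying ConnStats):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type connStats struct {
	totalInputBytes uint64
}

// incInputBytes takes int64 so byte counts from io.Copy (which returns int64)
// flow through without a lossy conversion on 32-bit targets.
func (s *connStats) incInputBytes(n int64) {
	atomic.AddUint64(&s.totalInputBytes, uint64(n))
}

func (s *connStats) total() uint64 {
	return atomic.LoadUint64(&s.totalInputBytes)
}

func main() {
	var s connStats
	s.incInputBytes(5 << 30) // a 5 GiB transfer would overflow a 32-bit int
	fmt.Println(s.total())
}
```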
cmd/iam.go
@@ -608,6 +608,10 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) {
// IAM sub-system, make sure that we do not move the above codeblock elsewhere.
if err := migrateIAMConfigsEtcdToEncrypted(retryCtx, globalEtcdClient); err != nil {
	txnLk.Unlock(lkctx.Cancel)
	if errors.Is(err, errEtcdUnreachable) {
		logger.Info("Connection to etcd timed out. Retrying..")
		continue
	}
	logger.LogIf(ctx, fmt.Errorf("Unable to decrypt an encrypted ETCD backend for IAM users and policies: %w", err))
	logger.LogIf(ctx, errors.New("IAM sub-system is partially initialized, some users may not be available"))
	return

@@ -1177,6 +1181,10 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro
	return auth.Credentials{}, errServerNotInitialized
}

if parentUser == "" {
	return auth.Credentials{}, errInvalidArgument
}

var policyBuf []byte
if opts.sessionPolicy != nil {
	err := opts.sessionPolicy.Validate()

@@ -1192,9 +1200,35 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro
	}
}

// found newly requested service account, to be same as
// parentUser, reject such operations.
if parentUser == opts.accessKey {
	return auth.Credentials{}, errIAMActionNotAllowed
}

sys.store.lock()
defer sys.store.unlock()

// Handle validation of incoming service accounts.
{
	cr, found := sys.iamUsersMap[opts.accessKey]
	// found newly requested service account, to be an existing
	// user, reject such operations.
	if found && !cr.IsTemp() && !cr.IsServiceAccount() {
		return auth.Credentials{}, errIAMActionNotAllowed
	}
	// found newly requested service account, to be an existing
	// temporary user, reject such operations.
	if found && cr.IsTemp() {
		return auth.Credentials{}, errIAMActionNotAllowed
	}
	// found newly requested service account, to be an existing
	// service account for another parentUser, reject such operations.
	if found && cr.IsServiceAccount() && cr.ParentUser != parentUser {
		return auth.Credentials{}, errIAMActionNotAllowed
	}
}

cr, found := sys.iamUsersMap[parentUser]
// Disallow service accounts to further create more service accounts.
if found && cr.IsServiceAccount() {

@@ -1606,7 +1640,7 @@ func (sys *IAMSys) purgeExpiredCredentialsForLDAP(ctx context.Context) {
}
sys.store.unlock()

expiredUsers, err := globalLDAPConfig.GetNonExistentUserDistNames(parentUsers)
expiredUsers, err := globalLDAPConfig.GetNonEligibleUserDistNames(parentUsers)
if err != nil {
	// Log and return on error - perhaps it'll work the next time.
	logger.LogIf(GlobalContext, err)
cmd/jwt.go
@@ -27,6 +27,7 @@ import (
	"github.com/minio/minio/internal/auth"
	xjwt "github.com/minio/minio/internal/jwt"
	"github.com/minio/minio/internal/logger"
	iampolicy "github.com/minio/pkg/iam/policy"
)

const (

@@ -97,29 +98,6 @@ func authenticateURL(accessKey, secretKey string) (string, error) {
	return authenticateJWTUsers(accessKey, secretKey, defaultURLJWTExpiry)
}

// Callback function used for parsing
func webTokenCallback(claims *xjwt.MapClaims) ([]byte, error) {
	if claims.AccessKey == globalActiveCred.AccessKey {
		return []byte(globalActiveCred.SecretKey), nil
	}
	ok, _, err := globalIAMSys.IsTempUser(claims.AccessKey)
	if err != nil {
		if err == errNoSuchUser {
			return nil, errInvalidAccessKeyID
		}
		return nil, err
	}
	if ok {
		return []byte(globalActiveCred.SecretKey), nil
	}
	cred, ok := globalIAMSys.GetUser(claims.AccessKey)
	if !ok {
		return nil, errInvalidAccessKeyID
	}
	return []byte(cred.SecretKey), nil
}

// Check if the request is authenticated.
// Returns nil if the request is authenticated. errNoAuthToken if token missing.
// Returns errAuthentication for all other errors.

@@ -132,10 +110,44 @@ func webRequestAuthenticate(req *http.Request) (*xjwt.MapClaims, bool, error) {
	return nil, false, err
}
claims := xjwt.NewMapClaims()
if err := xjwt.ParseWithClaims(token, claims, webTokenCallback); err != nil {
if err := xjwt.ParseWithClaims(token, claims, func(claims *xjwt.MapClaims) ([]byte, error) {
	if claims.AccessKey == globalActiveCred.AccessKey {
		return []byte(globalActiveCred.SecretKey), nil
	}
	cred, ok := globalIAMSys.GetUser(claims.AccessKey)
	if !ok {
		return nil, errInvalidAccessKeyID
	}
	return []byte(cred.SecretKey), nil
}); err != nil {
	return claims, false, errAuthentication
}
owner := claims.AccessKey == globalActiveCred.AccessKey
owner := true
if globalActiveCred.AccessKey != claims.AccessKey {
	// Check if the access key is part of users credentials.
	ucred, ok := globalIAMSys.GetUser(claims.AccessKey)
	if !ok {
		return nil, false, errInvalidAccessKeyID
	}

	// get embedded claims
	eclaims, s3Err := checkClaimsFromToken(req, ucred)
	if s3Err != ErrNone {
		return nil, false, errAuthentication
	}

	for k, v := range eclaims {
		claims.MapClaims[k] = v
	}

	// Now check if we have a sessionPolicy.
	if _, ok = eclaims[iampolicy.SessionPolicyName]; ok {
		owner = false
	} else {
		owner = globalActiveCred.AccessKey == ucred.ParentUser
	}
}

return claims, owner, nil
}
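The jwt.go hunk inlines the former `webTokenCallback` as a closure: the parser decodes the claims first, then calls the key function, which resolves the signing secret from the claimed access key. The same keyfunc shape with the public `github.com/golang-jwt/jwt/v4` library, as a hedged sketch (the `accessKey` claim name and the in-memory secrets map are assumptions for illustration):

```go
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

// secrets stands in for the IAM user lookup in the diff's closure.
var secrets = map[string]string{"minioadmin": "minioadmin"}

func verify(tokenString string) (jwt.MapClaims, error) {
	claims := jwt.MapClaims{}
	// Claims are decoded before the keyfunc runs, so the closure can read the
	// claimed access key to pick the right secret - as the inlined callback
	// in the hunk does with globalIAMSys.GetUser.
	_, err := jwt.ParseWithClaims(tokenString, claims, func(t *jwt.Token) (interface{}, error) {
		accessKey, _ := claims["accessKey"].(string)
		secret, ok := secrets[accessKey]
		if !ok {
			return nil, fmt.Errorf("invalid access key %q", accessKey)
		}
		return []byte(secret), nil
	})
	return claims, err
}

func main() {
	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"accessKey": "minioadmin"})
	s, _ := tok.SignedString([]byte("minioadmin"))
	claims, err := verify(s)
	fmt.Println(claims, err)
}
```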
@@ -65,7 +65,7 @@ func (api objectAPIHandlers) ListenNotificationHandler(w http.ResponseWriter, r
	}
}

values := r.URL.Query()
values := r.Form

var prefix string
if len(values[peerRESTListenPrefix]) > 1 {
@@ -227,13 +227,24 @@ func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (rep
default:
	l.mutex.Lock()
	defer l.mutex.Unlock()
	if len(args.UID) != 0 {
		return false, fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID)
	if len(args.UID) == 0 {
		for _, resource := range args.Resources {
			delete(l.lockMap, resource) // Remove the lock (irrespective of write or read lock)
		}
		return true, nil
	}
	for _, resource := range args.Resources {
		delete(l.lockMap, resource) // Remove the lock (irrespective of write or read lock)

	lockFound := false
	for _, lris := range l.lockMap {
		for _, lri := range lris {
			if lri.UID == args.UID {
				l.removeEntry(lri.Name, dsync.LockArgs{UID: lri.UID}, &lris)
				lockFound = true
			}
		}
	}
	return true, nil
	return lockFound, nil
}
}

@@ -245,24 +256,18 @@ func (l *localLocker) Refresh(ctx context.Context, args dsync.LockArgs) (refresh
	l.mutex.Lock()
	defer l.mutex.Unlock()

	resource := args.Resources[0] // refresh check is always per resource.

	// Lock found, proceed to verify if belongs to given uid.
	lri, ok := l.lockMap[resource]
	if !ok {
		// lock doesn't exist yet, return false
		return false, nil
	}

	// Check whether uid is still active
	for i := range lri {
		if lri[i].UID == args.UID && lri[i].Owner == args.Owner {
			lri[i].TimeLastRefresh = UTCNow()
			return true, nil
	lockFound := false
	for _, lri := range l.lockMap {
		// Check whether uid is still active
		for i := range lri {
			if lri[i].UID == args.UID {
				lri[i].TimeLastRefresh = UTCNow()
				lockFound = true
			}
		}
	}

	return false, nil
	return lockFound, nil
}
}
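The locker hunks change both operations from per-resource to per-UID semantics: ForceUnlock with an empty UID drops the named resources outright, while a non-empty UID sweeps every entry owned by that UID across the whole lock map, and Refresh likewise scans all entries for the UID. A compact sketch of that map-sweep shape (types and names are mine, not MinIO's):

```go
package main

import "fmt"

type lockInfo struct {
	uid  string
	name string
}

type locker struct {
	lockMap map[string][]lockInfo
}

// forceUnlock mirrors the hunk: an empty uid removes the named resources
// outright; otherwise every entry owned by uid is dropped, wherever it lives.
func (l *locker) forceUnlock(uid string, resources ...string) bool {
	if uid == "" {
		for _, r := range resources {
			delete(l.lockMap, r)
		}
		return true
	}
	found := false
	for name, lris := range l.lockMap {
		kept := lris[:0]
		for _, lri := range lris {
			if lri.uid == uid {
				found = true
				continue
			}
			kept = append(kept, lri)
		}
		if len(kept) == 0 {
			delete(l.lockMap, name)
		} else {
			l.lockMap[name] = kept
		}
	}
	return found
}

func main() {
	l := &locker{lockMap: map[string][]lockInfo{
		"bucket/obj": {{uid: "u1", name: "bucket/obj"}},
	}}
	fmt.Println(l.forceUnlock("u1")) // true: swept by UID across the map
}
```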
@@ -63,15 +63,16 @@ func (l *lockRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
}

func getLockArgs(r *http.Request) (args dsync.LockArgs, err error) {
	quorum, err := strconv.Atoi(r.URL.Query().Get(lockRESTQuorum))
	values := r.Form
	quorum, err := strconv.Atoi(values.Get(lockRESTQuorum))
	if err != nil {
		return args, err
	}

	args = dsync.LockArgs{
		Owner:  r.URL.Query().Get(lockRESTOwner),
		UID:    r.URL.Query().Get(lockRESTUID),
		Source: r.URL.Query().Get(lockRESTSource),
		Owner:  values.Get(lockRESTOwner),
		UID:    values.Get(lockRESTUID),
		Source: values.Get(lockRESTSource),
		Quorum: quorum,
	}
@@ -24,6 +24,7 @@ import (
	"sort"
	"strings"

	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/console"
)

@@ -37,6 +38,9 @@ type metaCacheEntry struct {

	// cached contains the metadata if decoded.
	cached *FileInfo

	// Indicates the entry can be reused and only one reference to metadata is expected.
	reusable bool
}

// isDir returns if the entry is representing a prefix directory.

@@ -101,18 +105,21 @@ func resolveEntries(a, b *metaCacheEntry, bucket string) *metaCacheEntry {
	return a
}

if !aFi.ModTime.Equal(bFi.ModTime) {
if aFi.NumVersions == bFi.NumVersions {
	if aFi.ModTime.Equal(bFi.ModTime) {
		return a
	}
	if aFi.ModTime.After(bFi.ModTime) {
		return a
	}
	return b
}

if aFi.NumVersions > bFi.NumVersions {
	return a
if bFi.NumVersions > aFi.NumVersions {
	return b
}

return b
return a
}

// isInDir returns whether the entry is in the dir when considering the separator.

@@ -160,6 +167,10 @@ func (e *metaCacheEntry) fileInfo(bucket string) (*FileInfo, error) {
	}, nil
}
if e.cached == nil {
	if len(e.metadata) == 0 {
		// only happens if the entry is not found.
		return nil, errFileNotFound
	}
	fi, err := getFileInfo(e.metadata, bucket, e.name, "", false)
	if err != nil {
		return nil, err

@@ -262,6 +273,7 @@ func (m metaCacheEntries) resolve(r *metadataResolutionParams) (selected *metaCa

// Get new entry metadata
if _, err := entry.fileInfo(r.bucket); err != nil {
	logger.LogIf(context.Background(), err)
	continue
}

@@ -305,15 +317,25 @@ func (m metaCacheEntries) resolve(r *metadataResolutionParams) (selected *metaCa
sort.Slice(r.candidates, func(i, j int) bool {
	return r.candidates[i].n > r.candidates[j].n
})

// Check if we have enough.
if r.candidates[0].n < r.objQuorum {
	return nil, false
}
if r.candidates[0].n > r.candidates[1].n {
	return r.candidates[0].e, true

// if r.objQuorum == 1 then it is guaranteed that
// this resolver is for HealObjects(), so use resolveEntries()
// instead to resolve candidates, this check is only useful
// for regular cases of ListObjects()
if r.candidates[0].n > r.candidates[1].n && r.objQuorum > 1 {
	ok := r.candidates[0].e != nil && r.candidates[0].e.name != ""
	return r.candidates[0].e, ok
}

e := resolveEntries(r.candidates[0].e, r.candidates[1].e, r.bucket)
// Tie between two, resolve using modtime+versions.
return resolveEntries(r.candidates[0].e, r.candidates[1].e, r.bucket), true
ok := e != nil && e.name != ""
return e, ok
}
}

@@ -345,6 +367,8 @@ type metaCacheEntriesSorted struct {
	o metaCacheEntries
	// list id is not serialized
	listID string
	// Reuse buffers
	reuse bool
}

// shallowClone will create a shallow clone of the array objects,

@@ -490,6 +514,13 @@ func (m *metaCacheEntriesSorted) forwardTo(s string) {
	idx := sort.Search(len(m.o), func(i int) bool {
		return m.o[i].name >= s
	})
	if m.reuse {
		for i, entry := range m.o[:idx] {
			metaDataPoolPut(entry.metadata)
			m.o[i].metadata = nil
		}
	}

	m.o = m.o[idx:]
}

@@ -501,6 +532,12 @@ func (m *metaCacheEntriesSorted) forwardPast(s string) {
	idx := sort.Search(len(m.o), func(i int) bool {
		return m.o[i].name > s
	})
	if m.reuse {
		for i, entry := range m.o[:idx] {
			metaDataPoolPut(entry.metadata)
			m.o[i].metadata = nil
		}
	}
	m.o = m.o[idx:]
}

@@ -715,6 +752,12 @@ func (m *metaCacheEntriesSorted) truncate(n int) {
	return
}
if len(m.o) > n {
	if m.reuse {
		for i, entry := range m.o[n:] {
			metaDataPoolPut(entry.metadata)
			m.o[n+i].metadata = nil
		}
	}
	m.o = m.o[:n]
}
}
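The new `resolveEntries` order compares version counts first and modification times second, so an entry that knows about more versions outranks a merely newer one. A standalone sketch of that tie-breaking rule (types are simplified stand-ins for the diff's FileInfo):

```go
package main

import (
	"fmt"
	"time"
)

type fileInfo struct {
	NumVersions int
	ModTime     time.Time
}

// pick mirrors resolveEntries after the hunk: with equal version counts the
// newer modtime wins (exact ties keep a); otherwise more versions wins.
func pick(a, b fileInfo) fileInfo {
	if a.NumVersions == b.NumVersions {
		if b.ModTime.After(a.ModTime) {
			return b
		}
		return a
	}
	if b.NumVersions > a.NumVersions {
		return b
	}
	return a
}

func main() {
	now := time.Now()
	a := fileInfo{NumVersions: 2, ModTime: now}
	b := fileInfo{NumVersions: 3, ModTime: now.Add(-time.Hour)}
	fmt.Println(pick(a, b).NumVersions) // 3: version count outranks recency
}
```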
@@ -97,6 +97,7 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) (
	o.parseMarker()
	o.BaseDir = baseDirFromPrefix(o.Prefix)
	o.Transient = o.Transient || isReservedOrInvalidBucket(o.Bucket, false)
	o.SetFilter()
	if o.Transient {
		o.Create = false
	}

@@ -162,6 +163,7 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) (
	if o.pool < len(z.serverPools) && o.set < len(z.serverPools[o.pool].sets) {
		o.debugln("Resuming", o)
		entries, err = z.serverPools[o.pool].sets[o.set].streamMetadataParts(ctx, *o)
		entries.reuse = true // We read from stream and are not sharing results.
		if err == nil {
			return entries, nil
		}

@@ -185,9 +187,11 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) (
	rpc := globalNotificationSys.restClientFromHash(o.Bucket)
	ctx, cancel := context.WithTimeout(GlobalContext, 5*time.Second)
	defer cancel()
	if c, err := rpc.GetMetacacheListing(ctx, *o); err == nil {
	c, err := rpc.GetMetacacheListing(ctx, *o)
	if err == nil {
		c.error = "no longer used"
		c.status = scanStateError
		rpc.UpdateMetacacheListing(ctx, *c)
	}
}()
o.ID = ""

@@ -198,8 +202,8 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) (
// Create filter for results.
o.debugln("Raw List", o)
filterCh := make(chan metaCacheEntry, o.Limit)
filteredResults := o.gatherResults(filterCh)
listCtx, cancelList := context.WithCancel(ctx)
filteredResults := o.gatherResults(listCtx, filterCh)
var wg sync.WaitGroup
wg.Add(1)
var listErr error

@@ -217,6 +221,7 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) (
if listErr != nil && !errors.Is(listErr, context.Canceled) {
	return entries, listErr
}
entries.reuse = true
truncated := entries.len() > o.Limit || err == nil
entries.truncate(o.Limit)
if !o.Transient && truncated {

@@ -341,7 +346,7 @@ func (z *erasureServerPools) listAndSave(ctx context.Context, o *listPathOptions
inCh := make(chan metaCacheEntry, metacacheBlockSize)
outCh := make(chan metaCacheEntry, o.Limit)

filteredResults := o.gatherResults(outCh)
filteredResults := o.gatherResults(ctx, outCh)

mc := o.newMetacache()
meta := metaCacheRPC{meta: &mc, cancel: cancel, rpc: globalNotificationSys.restClientFromHash(o.Bucket), o: *o}

@@ -363,13 +368,33 @@ func (z *erasureServerPools) listAndSave(ctx context.Context, o *listPathOptions
	o.debugln("listAndSave: listing", o.ID, "finished with ", err)
}(*o)

// Keep track of when we return since we no longer have to send entries to output.
var funcReturned bool
var funcReturnedMu sync.Mutex
defer func() {
	funcReturnedMu.Lock()
	funcReturned = true
	funcReturnedMu.Unlock()
}()
// Write listing to results and saver.
go func() {
	var returned bool
	for entry := range inCh {
		outCh <- entry
		if !returned {
			funcReturnedMu.Lock()
			returned = funcReturned
			funcReturnedMu.Unlock()
			outCh <- entry
			if returned {
				close(outCh)
			}
		}
		entry.reusable = returned
		saveCh <- entry
	}
	close(outCh)
	if !returned {
		close(outCh)
	}
	close(saveCh)
}()
@@ -131,20 +131,31 @@ func (o *listPathOptions) debugln(data ...interface{}) {

// gatherResults will collect all results on the input channel and filter results according to the options.
// Caller should close the channel when done.
// The returned function will return the results once there is enough or input is closed.
func (o *listPathOptions) gatherResults(in <-chan metaCacheEntry) func() (metaCacheEntriesSorted, error) {
// The returned function will return the results once there is enough or input is closed,
// or the context is canceled.
func (o *listPathOptions) gatherResults(ctx context.Context, in <-chan metaCacheEntry) func() (metaCacheEntriesSorted, error) {
	var resultsDone = make(chan metaCacheEntriesSorted)
	// Copy so we can mutate
	resCh := resultsDone
	var done bool
	var mu sync.Mutex
	resErr := io.EOF

	go func() {
		var results metaCacheEntriesSorted
		var returned bool
		for entry := range in {
			if resCh == nil {
			if returned {
				// past limit
				continue
			}
			mu.Lock()
			returned = done
			mu.Unlock()
			if returned {
				resCh = nil
				continue
			}
			if !o.IncludeDirectories && entry.isDir() {
				continue
			}

@@ -167,6 +178,7 @@ func (o *listPathOptions) gatherResults(in <-chan metaCacheEntry) func() (metaCa
			resErr = nil
			resCh <- results
			resCh = nil
			returned = true
		}
		continue
	}

@@ -178,7 +190,15 @@ func (o *listPathOptions) gatherResults(in <-chan metaCacheEntry) func() (metaCa
		}
	}()
	return func() (metaCacheEntriesSorted, error) {
		return <-resultsDone, resErr
		select {
		case <-ctx.Done():
			mu.Lock()
			done = true
			mu.Unlock()
			return metaCacheEntriesSorted{}, ctx.Err()
		case r := <-resultsDone:
			return r, resErr
		}
	}
}

@@ -648,9 +668,8 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache
	r, err := hash.NewReader(bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data)))
	logger.LogIf(ctx, err)
	custom := b.headerKV()
	_, err = er.putObject(ctx, minioMetaBucket, o.objectPath(b.n), NewPutObjReader(r), ObjectOptions{
	_, err = er.putMetacacheObject(ctx, o.objectPath(b.n), NewPutObjReader(r), ObjectOptions{
		UserDefined: custom,
		NoLock:      true, // No need to hold namespace lock, each prefix caches uniquely.
	})
	if err != nil {
		mc.setErr(err.Error())

@@ -677,6 +696,8 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache
switch err.(type) {
case ObjectNotFound:
	return err
case StorageErr:
	return err
case InsufficientReadQuorum:
default:
	logger.LogIf(ctx, err)
|
|||
|
||||
// metacacheWriter provides a serializer of metacache objects.
|
||||
type metacacheWriter struct {
|
||||
mw *msgp.Writer
|
||||
creator func() error
|
||||
closer func() error
|
||||
blockSize int
|
||||
mw *msgp.Writer
|
||||
creator func() error
|
||||
closer func() error
|
||||
blockSize int
|
||||
reuseBlocks bool
|
||||
|
||||
streamErr error
|
||||
streamWg sync.WaitGroup
|
||||
|
@ -141,6 +142,9 @@ func (w *metacacheWriter) write(objs ...metaCacheEntry) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if w.reuseBlocks || o.reusable {
|
||||
metaDataPoolPut(o.metadata)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -354,10 +358,14 @@ func (r *metacacheReader) next() (metaCacheEntry, error) {
|
|||
r.err = err
|
||||
return m, err
|
||||
}
|
||||
m.metadata, err = r.mr.ReadBytes(nil)
|
||||
m.metadata, err = r.mr.ReadBytes(metaDataPoolGet())
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if len(m.metadata) == 0 && cap(m.metadata) >= metaDataReadDefault {
|
||||
metaDataPoolPut(m.metadata)
|
||||
m.metadata = nil
|
||||
}
|
||||
r.err = err
|
||||
return m, err
|
||||
}
|
||||
|
@ -510,13 +518,17 @@ func (r *metacacheReader) readN(n int, inclDeleted, inclDirs bool, prefix string
|
|||
r.mr.R.Skip(1)
|
||||
return metaCacheEntriesSorted{o: res}, io.EOF
|
||||
}
|
||||
if meta.metadata, err = r.mr.ReadBytes(nil); err != nil {
|
||||
if meta.metadata, err = r.mr.ReadBytes(metaDataPoolGet()); err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
r.err = err
|
||||
return metaCacheEntriesSorted{o: res}, err
|
||||
}
|
||||
if len(meta.metadata) == 0 {
|
||||
metaDataPoolPut(meta.metadata)
|
||||
meta.metadata = nil
|
||||
}
|
||||
if !inclDirs && meta.isDir() {
|
||||
continue
|
||||
}
|
||||
|
@ -565,13 +577,17 @@ func (r *metacacheReader) readAll(ctx context.Context, dst chan<- metaCacheEntry
|
|||
r.err = err
|
||||
return err
|
||||
}
|
||||
if meta.metadata, err = r.mr.ReadBytes(nil); err != nil {
|
||||
if meta.metadata, err = r.mr.ReadBytes(metaDataPoolGet()); err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
r.err = err
|
||||
return err
|
||||
}
|
||||
if len(meta.metadata) == 0 {
|
||||
metaDataPoolPut(meta.metadata)
|
||||
meta.metadata = nil
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
r.err = ctx.Err()
|
||||
|
|
|
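The `ReadBytes(metaDataPoolGet())` calls above reuse metadata buffers instead of allocating one per entry. A sketch of the pool those helpers plausibly sit on top of — a `sync.Pool` of byte slices; the size constant and the put policy here are assumptions, not the exact upstream values:

```go
package main

import (
	"fmt"
	"sync"
)

// Assumed default read-buffer size; the upstream constant may differ.
const metaDataReadDefault = 4 << 10

var metaDataPool = sync.Pool{
	New: func() interface{} { return make([]byte, 0, metaDataReadDefault) },
}

// metaDataPoolGet returns a zero-length slice with pooled capacity,
// suitable to pass to msgp's ReadBytes as the destination buffer.
func metaDataPoolGet() []byte {
	return metaDataPool.Get().([]byte)[:0]
}

// metaDataPoolPut recycles a buffer once the caller is done with it.
func metaDataPoolPut(buf []byte) {
	if cap(buf) >= metaDataReadDefault {
		metaDataPool.Put(buf[:0]) // slice-in-pool is deliberate here
	}
}

func main() {
	b := metaDataPoolGet()
	b = append(b, "xl.meta payload"...)
	fmt.Println(len(b), cap(b)) // 15 4096
	metaDataPoolPut(b)
}
```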
@@ -76,6 +76,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ

// Use a small block size to start sending quickly
w := newMetacacheWriter(wr, 16<<10)
w.reuseBlocks = true // We are not sharing results, so reuse buffers.
defer w.Close()
out, err := w.stream()
if err != nil {

@@ -120,7 +121,9 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
if contextCanceled(ctx) {
return ctx.Err()
}
s.walkMu.Lock()
entries, err := s.ListDir(ctx, opts.Bucket, current, -1)
s.walkMu.Unlock()
if err != nil {
// Folder could have gone away in-between
if err != errVolumeNotFound && err != errFileNotFound {

@@ -138,9 +141,15 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
dirObjects := make(map[string]struct{})
for i, entry := range entries {
if len(prefix) > 0 && !strings.HasPrefix(entry, prefix) {
// Do not retain the file, since it doesn't
// match the prefix.
entries[i] = ""
continue
}
if len(forward) > 0 && entry < forward {
// Do not retain the file, since it's
// lexically smaller than 'forward'
entries[i] = ""
continue
}
if strings.HasSuffix(entry, slashSeparator) {

@@ -164,7 +173,9 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
// If root was an object return it as such.
if HasSuffix(entry, xlStorageFormatFile) {
var meta metaCacheEntry
s.walkMu.Lock()
meta.metadata, err = s.readMetadata(pathJoin(volumeDir, current, entry))
s.walkMu.Unlock()
if err != nil {
logger.LogIf(ctx, err)
continue

@@ -179,7 +190,9 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
// Check legacy.
if HasSuffix(entry, xlStorageFormatFileV1) {
var meta metaCacheEntry
s.walkMu.Lock()
meta.metadata, err = xioutil.ReadFile(pathJoin(volumeDir, current, entry))
s.walkMu.Unlock()
if err != nil {
logger.LogIf(ctx, err)
continue

@@ -196,7 +209,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
// Process in sort order.
sort.Strings(entries)
dirStack := make([]string, 0, 5)
prefix = "" // Remove prefix after first level.
prefix = "" // Remove prefix after first level as we have already filtered the list.
if len(forward) > 0 {
idx := sort.SearchStrings(entries, forward)
if idx > 0 {

@@ -235,7 +248,9 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
meta.name = meta.name[:len(meta.name)-1] + globalDirSuffixWithSlash
}

s.walkMu.Lock()
meta.metadata, err = s.readMetadata(pathJoin(volumeDir, meta.name, xlStorageFormatFile))
s.walkMu.Unlock()
switch {
case err == nil:
// It was an object

@@ -243,8 +258,10 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
meta.name = strings.TrimSuffix(meta.name, globalDirSuffixWithSlash) + slashSeparator
}
out <- meta
case osIsNotExist(err):
case osIsNotExist(err), isSysErrIsDir(err):
s.walkMu.Lock()
meta.metadata, err = xioutil.ReadFile(pathJoin(volumeDir, meta.name, xlStorageFormatFileV1))
s.walkMu.Unlock()
if err == nil {
// It was an object
out <- meta

@@ -324,7 +341,7 @@ func (s *storageRESTServer) WalkDirHandler(w http.ResponseWriter, r *http.Reques
}

var reportNotFound bool
if v := vars[storageRESTReportNotFound]; v != "" {
if v := r.Form.Get(storageRESTReportNotFound); v != "" {
reportNotFound, err = strconv.ParseBool(v)
if err != nil {
s.writeErrorResponse(w, err)

@@ -332,8 +349,8 @@ func (s *storageRESTServer) WalkDirHandler(w http.ResponseWriter, r *http.Reques
}
}

prefix := r.URL.Query().Get(storageRESTPrefixFilter)
forward := r.URL.Query().Get(storageRESTForwardFilter)
prefix := r.Form.Get(storageRESTPrefixFilter)
forward := r.Form.Get(storageRESTForwardFilter)
writer := streamHTTPResponse(w)
writer.CloseWithError(s.storage.WalkDir(r.Context(), WalkDirOptions{
Bucket: volume,
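The `walkMu` lock wrapped around `ListDir` and `readMetadata` above serializes directory I/O per disk, so concurrent listings do not compete for seeks. A reduced sketch of the same pattern (type and method names are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"sync"
)

// diskWalker is an illustrative stand-in for xlStorage: one mutex per disk
// serializes directory reads so concurrent walkers don't thrash the drive.
type diskWalker struct {
	walkMu sync.Mutex
}

func (d *diskWalker) listDir(path string) ([]os.DirEntry, error) {
	d.walkMu.Lock()
	defer d.walkMu.Unlock()
	return os.ReadDir(path)
}

func main() {
	w := &diskWalker{}
	entries, err := w.listDir(".")
	if err != nil {
		fmt.Println("listDir:", err)
		return
	}
	fmt.Println("entries:", len(entries))
}
```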
@@ -149,5 +149,5 @@ func (m *metacache) delete(ctx context.Context) {
logger.LogIf(ctx, errors.New("metacache.delete: expected objAPI to be *erasureServerPools"))
return
}
ez.deleteAll(ctx, minioMetaBucket, metacachePrefixForID(m.bucket, m.id))
ez.renameAll(ctx, minioMetaBucket, metacachePrefixForID(m.bucket, m.id))
}
@@ -68,6 +68,7 @@ const (
softwareSubsystem MetricSubsystem = "software"
sysCallSubsystem MetricSubsystem = "syscall"
usageSubsystem MetricSubsystem = "usage"
ilmSubsystem MetricSubsystem = "ilm"
)

// MetricName are the individual names for the metric.

@@ -121,6 +122,10 @@ const (
upTime = "uptime_seconds"
memory = "resident_memory_bytes"
cpu = "cpu_total_seconds"

expiryPendingTasks MetricName = "expiry_pending_tasks"
transitionPendingTasks MetricName = "transition_pending_tasks"
transitionActiveTasks MetricName = "transition_active_tasks"
)

const (

@@ -249,6 +254,7 @@ func GetGeneratorsForPeer() []MetricsGenerator {
getMinioVersionMetrics,
getNetworkMetrics,
getS3TTFBMetric,
getILMNodeMetrics,
}
return g
}

@@ -1000,6 +1006,66 @@ func getS3TTFBMetric() MetricsGroup {
}
}

func getTransitionPendingTasksMD() MetricDescription {
return MetricDescription{
Namespace: nodeMetricNamespace,
Subsystem: ilmSubsystem,
Name: transitionPendingTasks,
Help: "Number of pending ILM transition tasks in the queue.",
Type: gaugeMetric,
}
}

func getTransitionActiveTasksMD() MetricDescription {
return MetricDescription{
Namespace: nodeMetricNamespace,
Subsystem: ilmSubsystem,
Name: transitionActiveTasks,
Help: "Number of active ILM transition tasks.",
Type: gaugeMetric,
}
}

func getExpiryPendingTasksMD() MetricDescription {
return MetricDescription{
Namespace: nodeMetricNamespace,
Subsystem: ilmSubsystem,
Name: expiryPendingTasks,
Help: "Number of pending ILM expiry tasks in the queue.",
Type: gaugeMetric,
}
}

func getILMNodeMetrics() MetricsGroup {
return MetricsGroup{
id: "ILMNodeMetrics",
cachedRead: cachedRead,
read: func(_ context.Context) []Metric {
expPendingTasks := Metric{
Description: getExpiryPendingTasksMD(),
}
trPendingTasks := Metric{
Description: getTransitionPendingTasksMD(),
}
trActiveTasks := Metric{
Description: getTransitionActiveTasksMD(),
}
if globalExpiryState != nil {
expPendingTasks.Value = float64(globalExpiryState.PendingTasks())
}
if globalTransitionState != nil {
trPendingTasks.Value = float64(globalTransitionState.PendingTasks())
trActiveTasks.Value = float64(globalTransitionState.ActiveTasks())
}
return []Metric{
expPendingTasks,
trPendingTasks,
trActiveTasks,
}
},
}
}

func getMinioVersionMetrics() MetricsGroup {
return MetricsGroup{
id: "MinioVersionMetrics",
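The new ILM gauges above follow MinIO's internal `MetricDescription`/`MetricsGroup` plumbing. For orientation, here is what one of them corresponds to in plain `prometheus/client_golang` terms; the registration and HTTP wiring below are illustrative, not how MinIO exposes it:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Equivalent of getTransitionPendingTasksMD as a plain Prometheus gauge;
// namespace/subsystem/name mirror the diff above.
var transitionPendingTasks = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace: "minio_node",
	Subsystem: "ilm",
	Name:      "transition_pending_tasks",
	Help:      "Number of pending ILM transition tasks in the queue.",
})

func main() {
	prometheus.MustRegister(transitionPendingTasks)
	transitionPendingTasks.Set(3) // would be fed from the transition state
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}
```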
21
cmd/mrf.go

@@ -27,8 +27,6 @@ import (
"github.com/minio/minio/internal/logger"
)

var mrfHealingOpts = madmin.HealOpts{ScanMode: madmin.HealNormalScan, Remove: healDeleteDangling}

const (
mrfInfoResetInterval = 10 * time.Second
mrfOpsQueueSize = 10000

@@ -185,6 +183,11 @@ func (m *mrfState) healRoutine() {
idler := time.NewTimer(mrfInfoResetInterval)
defer idler.Stop()

var mrfHealingOpts = madmin.HealOpts{
ScanMode: globalHealConfig.ScanMode(),
Remove: healDeleteDangling,
}

for {
idler.Reset(mrfInfoResetInterval)
select {

@@ -214,17 +217,9 @@ func (m *mrfState) healRoutine() {

// Heal objects
for _, u := range mrfOperations {
waitForLowHTTPReq(globalHealConfig.IOCount, globalHealConfig.Sleep)
if _, err := m.objectAPI.HealObject(m.ctx, u.bucket, u.object, u.versionID, mrfHealingOpts); err != nil {
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
// If not deleted, assume they failed.
logger.LogIf(m.ctx, err)
} else {
m.mu.Lock()
m.itemsHealed++
m.pendingItems--
m.mu.Unlock()
}
// If not deleted, assume they failed.
logger.LogIf(m.ctx, err)
} else {
m.mu.Lock()
m.itemsHealed++

@@ -238,6 +233,8 @@ func (m *mrfState) healRoutine() {
delete(m.pendingOps, u)
m.mu.Unlock()
}

waitForLowHTTPReq()
}
}
}
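Moving `mrfHealingOpts` from a package-level `var` into `healRoutine` means the scan mode is read from live heal config on each pass instead of being frozen at process start. A toy illustration of the difference (types are stand-ins for the `madmin` ones):

```go
package main

import "fmt"

// HealOpts is a stand-in for madmin.HealOpts.
type HealOpts struct {
	ScanMode string
	Remove   bool
}

type healConfig struct{ scanMode string }

func (c *healConfig) ScanMode() string { return c.scanMode }

func main() {
	cfg := &healConfig{scanMode: "normal"}

	// Per-run construction, as in the diff: a config reload is honored.
	opts := func() HealOpts {
		return HealOpts{ScanMode: cfg.ScanMode(), Remove: true}
	}

	fmt.Println(opts()) // {normal true}
	cfg.scanMode = "deep"
	fmt.Println(opts()) // {deep true}
}
```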
31
cmd/net.go

@@ -22,6 +22,7 @@ import (
"fmt"
"net"
"net/url"
"runtime"
"sort"
"strings"

@@ -46,20 +47,30 @@ func mustSplitHostPort(hostPort string) (host, port string) {
// mustGetLocalIP4 returns IPv4 addresses of localhost. It panics on error.
func mustGetLocalIP4() (ipList set.StringSet) {
ipList = set.NewStringSet()
addrs, err := net.InterfaceAddrs()
ifs, err := net.Interfaces()
logger.FatalIf(err, "Unable to get IP addresses of this host")

for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
for _, interf := range ifs {
addrs, err := interf.Addrs()
if err != nil {
continue
}
if runtime.GOOS == "windows" && interf.Flags&net.FlagUp == 0 {
continue
}

if ip.To4() != nil {
ipList.Add(ip.String())
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}

if ip.To4() != nil {
ipList.Add(ip.String())
}
}
}
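Switching from `net.InterfaceAddrs` to `net.Interfaces` lets the code inspect interface flags, which is what the Windows `FlagUp` check needs. A standalone version of the enumeration (this sketch applies the up-check on every OS, where the diff restricts it to Windows):

```go
package main

import (
	"fmt"
	"net"
)

func localIPv4s() ([]string, error) {
	ifs, err := net.Interfaces()
	if err != nil {
		return nil, err
	}
	var ips []string
	for _, intf := range ifs {
		if intf.Flags&net.FlagUp == 0 {
			continue // skip interfaces that are down
		}
		addrs, err := intf.Addrs()
		if err != nil {
			continue
		}
		for _, addr := range addrs {
			var ip net.IP
			switch v := addr.(type) {
			case *net.IPNet:
				ip = v.IP
			case *net.IPAddr:
				ip = v.IP
			}
			if ip.To4() != nil {
				ips = append(ips, ip.String())
			}
		}
	}
	return ips, nil
}

func main() {
	ips, err := localIPv4s()
	fmt.Println(ips, err)
}
```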
@@ -995,12 +995,7 @@ func (sys *NotificationSys) GetCPUs(ctx context.Context) []madmin.CPUs {

for index, err := range g.Wait() {
if err != nil {
addr := sys.peerClients[index].host.String()
reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
reply[index].Addr = addr
reply[index].Error = err.Error()
sys.addNodeErr(&reply[index], sys.peerClients[index], err)
}
}
return reply

@@ -1025,12 +1020,7 @@ func (sys *NotificationSys) GetPartitions(ctx context.Context) []madmin.Partitio

for index, err := range g.Wait() {
if err != nil {
addr := sys.peerClients[index].host.String()
reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
reply[index].Addr = addr
reply[index].Error = err.Error()
sys.addNodeErr(&reply[index], sys.peerClients[index], err)
}
}
return reply

@@ -1055,17 +1045,73 @@ func (sys *NotificationSys) GetOSInfo(ctx context.Context) []madmin.OSInfo {

for index, err := range g.Wait() {
if err != nil {
addr := sys.peerClients[index].host.String()
reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
reply[index].Addr = addr
reply[index].Error = err.Error()
sys.addNodeErr(&reply[index], sys.peerClients[index], err)
}
}
return reply
}

// GetSysConfig - Get information about system config
// (only the config that are of concern to minio)
func (sys *NotificationSys) GetSysConfig(ctx context.Context) []madmin.SysConfig {
reply := make([]madmin.SysConfig, len(sys.peerClients))

g := errgroup.WithNErrs(len(sys.peerClients))
for index, client := range sys.peerClients {
if client == nil {
continue
}
index := index
g.Go(func() error {
var err error
reply[index], err = sys.peerClients[index].GetSysConfig(ctx)
return err
}, index)
}

for index, err := range g.Wait() {
if err != nil {
sys.addNodeErr(&reply[index], sys.peerClients[index], err)
}
}
return reply
}

// GetSysServices - Get information about system services
// (only the services that are of concern to minio)
func (sys *NotificationSys) GetSysServices(ctx context.Context) []madmin.SysServices {
reply := make([]madmin.SysServices, len(sys.peerClients))

g := errgroup.WithNErrs(len(sys.peerClients))
for index, client := range sys.peerClients {
if client == nil {
continue
}
index := index
g.Go(func() error {
var err error
reply[index], err = sys.peerClients[index].GetSELinuxInfo(ctx)
return err
}, index)
}

for index, err := range g.Wait() {
if err != nil {
sys.addNodeErr(&reply[index], sys.peerClients[index], err)
}
}
return reply
}

func (sys *NotificationSys) addNodeErr(nodeInfo madmin.NodeInfo, peerClient *peerRESTClient, err error) {
addr := peerClient.host.String()
reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
nodeInfo.SetAddr(addr)
nodeInfo.SetError(err.Error())
}

// GetSysErrors - Memory information
func (sys *NotificationSys) GetSysErrors(ctx context.Context) []madmin.SysErrors {
reply := make([]madmin.SysErrors, len(sys.peerClients))

@@ -1085,12 +1131,7 @@ func (sys *NotificationSys) GetSysErrors(ctx context.Context) []madmin.SysErrors

for index, err := range g.Wait() {
if err != nil {
addr := sys.peerClients[index].host.String()
reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
reply[index].Addr = addr
reply[index].Error = err.Error()
sys.addNodeErr(&reply[index], sys.peerClients[index], err)
}
}
return reply

@@ -1115,12 +1156,7 @@ func (sys *NotificationSys) GetMemInfo(ctx context.Context) []madmin.MemInfo {

for index, err := range g.Wait() {
if err != nil {
addr := sys.peerClients[index].host.String()
reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
reply[index].Addr = addr
reply[index].Error = err.Error()
sys.addNodeErr(&reply[index], sys.peerClients[index], err)
}
}
return reply

@@ -1145,12 +1181,7 @@ func (sys *NotificationSys) GetProcInfo(ctx context.Context) []madmin.ProcInfo {

for index, err := range g.Wait() {
if err != nil {
addr := sys.peerClients[index].host.String()
reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
reply[index].Addr = addr
reply[index].Error = err.Error()
sys.addNodeErr(&reply[index], sys.peerClients[index], err)
}
}
return reply

@@ -1286,8 +1317,13 @@ func (args eventArgs) ToEvent(escape bool) event.Event {
uniqueID := fmt.Sprintf("%X", eventTime.UnixNano())

respElements := map[string]string{
"x-amz-request-id": args.RespElements["requestId"],
"x-minio-origin-endpoint": globalMinioEndpoint, // MinIO specific custom elements.
"x-amz-request-id": args.RespElements["requestId"],
"x-minio-origin-endpoint": func() string {
if globalMinioEndpoint != "" {
return globalMinioEndpoint
}
return getAPIEndpoints()[0]
}(), // MinIO specific custom elements.
}
// Add deployment as part of
if globalDeploymentID != "" {

@@ -1333,7 +1369,13 @@ func (args eventArgs) ToEvent(escape bool) event.Event {
newEvent.S3.Object.ETag = args.Object.ETag
newEvent.S3.Object.Size = args.Object.Size
newEvent.S3.Object.ContentType = args.Object.ContentType
newEvent.S3.Object.UserMetadata = args.Object.UserDefined
newEvent.S3.Object.UserMetadata = make(map[string]string, len(args.Object.UserDefined))
for k, v := range args.Object.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
continue
}
newEvent.S3.Object.UserMetadata[k] = v
}
}

return newEvent

@@ -1411,6 +1453,9 @@ func (sys *NotificationSys) GetBandwidthReports(ctx context.Context, buckets ...

// GetClusterMetrics - gets the cluster metrics from all nodes excluding self.
func (sys *NotificationSys) GetClusterMetrics(ctx context.Context) chan Metric {
if sys == nil {
return nil
}
g := errgroup.WithNErrs(len(sys.peerClients))
peerChannels := make([]<-chan Metric, len(sys.peerClients))
for index := range sys.peerClients {
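`ToEvent` now copies user metadata instead of aliasing the map, dropping keys under the reserved internal prefix so bookkeeping entries never reach notification targets. A reduced sketch of that filter; the prefix constant is reproduced here for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

const reservedMetadataPrefixLower = "x-minio-internal-"

// filterUserMetadata returns a copy of `in` without internal keys.
func filterUserMetadata(in map[string]string) map[string]string {
	out := make(map[string]string, len(in))
	for k, v := range in {
		if strings.HasPrefix(strings.ToLower(k), reservedMetadataPrefixLower) {
			continue // never leak internal bookkeeping keys to notifications
		}
		out[k] = v
	}
	return out
}

func main() {
	meta := map[string]string{
		"X-Minio-Internal-Tier": "GLACIER",
		"X-Amz-Meta-Owner":      "alice",
	}
	fmt.Println(filterUserMetadata(meta)) // only the owner key survives
}
```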
@@ -113,16 +113,8 @@ type ObjectInfo struct {
// to a delete marker on an object.
DeleteMarker bool

// tierFreeVersion is true if this is a free-version
tierFreeVersion bool
// TransitionStatus indicates if transition is complete/pending
TransitionStatus string
// Name of transitioned object on remote tier
transitionedObjName string
// VersionID on the remote tier
transitionVersionID string
// Name of remote tier object has transitioned to
TransitionTier string
// Transitioned object information
TransitionedObject TransitionedObject

// RestoreExpires indicates date a restored object expires
RestoreExpires time.Time

@@ -200,7 +192,7 @@ func (o ObjectInfo) Clone() (cinfo ObjectInfo) {
VersionID: o.VersionID,
IsLatest: o.IsLatest,
DeleteMarker: o.DeleteMarker,
TransitionStatus: o.TransitionStatus,
TransitionedObject: o.TransitionedObject,
RestoreExpires: o.RestoreExpires,
RestoreOngoing: o.RestoreOngoing,
ContentType: o.ContentType,

@@ -354,6 +346,15 @@ type ListMultipartsInfo struct {
EncodingType string // Not supported yet.
}

// TransitionedObject transitioned object tier and status.
type TransitionedObject struct {
Name string
VersionID string
Tier string
FreeVersion bool
Status string
}

// DeletedObjectInfo - container for list objects versions deleted objects.
type DeletedObjectInfo struct {
// Name of the bucket.
@@ -686,3 +686,9 @@ func isErrPreconditionFailed(err error) bool {
_, ok := err.(PreConditionFailed)
return ok
}

// isErrMethodNotAllowed - Check if error type is MethodNotAllowed.
func isErrMethodNotAllowed(err error) bool {
var methodNotAllowed MethodNotAllowed
return errors.As(err, &methodNotAllowed)
}
@@ -52,6 +52,7 @@ type ObjectOptions struct {
DeleteMarkerReplicationStatus string // Is only set in DELETE operations
VersionPurgeStatus VersionPurgeStatusType // Is only set in DELETE operations for delete marker version to be permanently deleted.
Transition TransitionOptions
Expiration ExpirationOptions

NoLock bool // indicates to lower layers if the caller is expecting to hold locks.
ProxyRequest bool // only set for GET/HEAD in active-active replication scenario

@@ -63,6 +64,11 @@ type ObjectOptions struct {
MaxParity bool
}

// ExpirationOptions represents object options for object expiration at objectLayer.
type ExpirationOptions struct {
Expire bool
}

// TransitionOptions represents object options for transition ObjectLayer operation
type TransitionOptions struct {
Status string

@@ -103,7 +109,7 @@ type ObjectLayer interface {

// Storage operations.
Shutdown(context.Context) error
NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo) error
NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- madmin.DataUsageInfo, wantCycle uint32) error

BackendInfo() madmin.BackendInfo
StorageInfo(ctx context.Context) (StorageInfo, []error)
@@ -84,6 +84,11 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
{testBuckets[4], "file1/guidSplunk-aaaa/file", "content", nil},
{testBuckets[5], "dir/day_id=2017-10-10/issue", "content", nil},
{testBuckets[5], "dir/day_id=2017-10-11/issue", "content", nil},
{testBuckets[5], "foo/201910/1122", "content", nil},
{testBuckets[5], "foo/201910/1112", "content", nil},
{testBuckets[5], "foo/201910/2112", "content", nil},
{testBuckets[5], "foo/201910_txt", "content", nil},
{testBuckets[5], "201910/foo/bar/xl.meta/1.txt", "content", nil},
}
for _, object := range testObjects {
md5Bytes := md5.Sum([]byte(object.content))

@@ -477,6 +482,31 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
IsTruncated: true,
Prefixes: []string{"dir/day_id=2017-10-10/"},
},
// ListObjectsResult-37 list with prefix match 2 levels deep
{
IsTruncated: false,
Objects: []ObjectInfo{
{Name: "foo/201910/1112"},
{Name: "foo/201910/1122"},
},
},
// ListObjectsResult-38 list with prefix match 1 level deep
{
IsTruncated: false,
Objects: []ObjectInfo{
{Name: "foo/201910/1112"},
{Name: "foo/201910/1122"},
{Name: "foo/201910/2112"},
{Name: "foo/201910_txt"},
},
},
// ListObjectsResult-39 list with prefix match 1 level deep
{
IsTruncated: false,
Objects: []ObjectInfo{
{Name: "201910/foo/bar/xl.meta/1.txt"},
},
},
}

testCases := []struct {

@@ -602,6 +632,11 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
{testBuckets[4], "file1/", "", "guidSplunk", 1000, resultCases[35], nil, true},
// Test listing at prefix with expected prefix markers
{testBuckets[5], "dir/", "", SlashSeparator, 1, resultCases[36], nil, true},
// Test listing with prefix match
{testBuckets[5], "foo/201910/11", "", "", 1000, resultCases[37], nil, true},
{testBuckets[5], "foo/201910", "", "", 1000, resultCases[38], nil, true},
// Test listing with prefix match with 'xl.meta'
{testBuckets[5], "201910/foo/bar", "", "", 1000, resultCases[39], nil, true},
}

for i, testCase := range testCases {
@@ -83,7 +83,7 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec

var partNumber int
var err error
if pn := r.URL.Query().Get(xhttp.PartNumber); pn != "" {
if pn := r.Form.Get(xhttp.PartNumber); pn != "" {
partNumber, err = strconv.Atoi(pn)
if err != nil {
return opts, err

@@ -93,7 +93,7 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec
}
}

vid := strings.TrimSpace(r.URL.Query().Get(xhttp.VersionID))
vid := strings.TrimSpace(r.Form.Get(xhttp.VersionID))
if vid != "" && vid != nullVersionID {
_, err := uuid.Parse(vid)
if err != nil {

@@ -219,7 +219,8 @@ func delOpts(ctx context.Context, r *http.Request, bucket, object string) (opts
// get ObjectOptions for PUT calls from encryption headers and metadata
func putOpts(ctx context.Context, r *http.Request, bucket, object string, metadata map[string]string) (opts ObjectOptions, err error) {
versioned := globalBucketVersioningSys.Enabled(bucket)
vid := strings.TrimSpace(r.URL.Query().Get(xhttp.VersionID))
versionSuspended := globalBucketVersioningSys.Suspended(bucket)
vid := strings.TrimSpace(r.Form.Get(xhttp.VersionID))
if vid != "" && vid != nullVersionID {
_, err := uuid.Parse(vid)
if err != nil {

@@ -266,6 +267,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
UserDefined: metadata,
VersionID: vid,
Versioned: versioned,
VersionSuspended: versionSuspended,
MTime: mtime,
}, nil
}

@@ -273,6 +275,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
opts, err = getOpts(ctx, r, bucket, object)
opts.VersionID = vid
opts.Versioned = versioned
opts.VersionSuspended = versionSuspended
opts.UserDefined = metadata
return
}

@@ -290,6 +293,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
UserDefined: metadata,
VersionID: vid,
Versioned: versioned,
VersionSuspended: versionSuspended,
MTime: mtime,
}, nil
}

@@ -300,6 +304,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
}
opts.VersionID = vid
opts.Versioned = versioned
opts.VersionSuspended = versionSuspended
opts.MTime = mtime
return opts, nil
}
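The recurring `r.URL.Query().Get` → `r.Form.Get` swap in these hunks works because, once `ParseForm` has run (MinIO does this earlier in the request path), `r.Form` holds the URL query values merged with any form body. A minimal demonstration:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// ParseForm populates r.Form from the URL query (and body for POST).
	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Fprintln(w, "versionId:", r.Form.Get("versionId"))
}

func main() {
	req := httptest.NewRequest(http.MethodGet, "/bucket/object?versionId=abc", nil)
	rec := httptest.NewRecorder()
	handler(rec, req)
	fmt.Print(rec.Body.String()) // versionId: abc
}
```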
@@ -884,6 +884,16 @@ func CleanMinioInternalMetadataKeys(metadata map[string]string) map[string]strin
return newMeta
}

// compressOpts are the options for writing compressed data.
var compressOpts []s2.WriterOption

func init() {
if runtime.GOARCH == "amd64" {
// On amd64 we have assembly and can use stronger compression.
compressOpts = append(compressOpts, s2.WriterBetterCompression())
}
}

// newS2CompressReader will read data from r, compress it and return the compressed data as a Reader.
// Use Close to ensure resources are released on incomplete streams.
//

@@ -894,7 +904,7 @@ func newS2CompressReader(r io.Reader, on int64) io.ReadCloser {
pr, pw := io.Pipe()
// Copy input to compressor
go func() {
comp := s2.NewWriter(pw)
comp := s2.NewWriter(pw, compressOpts...)
cn, err := io.Copy(comp, r)
if err != nil {
comp.Close()
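`s2.NewWriter` is variadic over `s2.WriterOption`, so the `compressOpts` slice built in `init` can be passed straight through; `s2.WriterBetterCompression()` trades some speed for ratio, which is worthwhile where the package has amd64 assembly. A standalone usage sketch:

```go
package main

import (
	"bytes"
	"fmt"
	"runtime"

	"github.com/klauspost/compress/s2"
)

func main() {
	var opts []s2.WriterOption
	if runtime.GOARCH == "amd64" {
		opts = append(opts, s2.WriterBetterCompression())
	}
	var buf bytes.Buffer
	w := s2.NewWriter(&buf, opts...)
	if _, err := w.Write(bytes.Repeat([]byte("minio"), 1024)); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // flushes the final block
		panic(err)
	}
	fmt.Println("compressed size:", buf.Len())
}
```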
@@ -450,8 +450,16 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
// Automatically remove the object/version if an expiry lifecycle rule can be applied
if lc, err := globalLifecycleSys.Get(bucket); err == nil {
action := evalActionFromLifecycle(ctx, *lc, objInfo, false)
if action == lifecycle.DeleteAction || action == lifecycle.DeleteVersionAction {
globalExpiryState.queueExpiryTask(objInfo, action == lifecycle.DeleteVersionAction)
var success bool
switch action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
success = applyExpiryRule(objInfo, false, action == lifecycle.DeleteVersionAction)
case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
// Restored object delete would still be allowed to proceed as success
// since transition behavior is slightly different.
applyExpiryRule(objInfo, true, action == lifecycle.DeleteRestoredVersionAction)
}
if success {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchKey))
return
}

@@ -491,7 +499,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
setPartsCountHeaders(w, objInfo)
}

setHeadGetRespHeaders(w, r.URL.Query())
setHeadGetRespHeaders(w, r.Form)

statusCodeWritten := false
httpWriter := ioutil.WriteOnClose(w)

@@ -656,8 +664,16 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
// Automatically remove the object/version if an expiry lifecycle rule can be applied
if lc, err := globalLifecycleSys.Get(bucket); err == nil {
action := evalActionFromLifecycle(ctx, *lc, objInfo, false)
if action == lifecycle.DeleteAction || action == lifecycle.DeleteVersionAction {
globalExpiryState.queueExpiryTask(objInfo, action == lifecycle.DeleteVersionAction)
var success bool
switch action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
success = applyExpiryRule(objInfo, false, action == lifecycle.DeleteVersionAction)
case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
// Restored object delete would still be allowed to proceed as success
// since transition behavior is slightly different.
applyExpiryRule(objInfo, true, action == lifecycle.DeleteRestoredVersionAction)
}
if success {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchKey))
return
}

@@ -739,7 +755,7 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
}

// Set any additional requested response headers.
setHeadGetRespHeaders(w, r.URL.Query())
setHeadGetRespHeaders(w, r.Form)

// Successful response.
if rs != nil || opts.PartNumber > 0 {

@@ -806,7 +822,7 @@ func getCpObjMetadataFromHeader(ctx context.Context, r *http.Request, userMeta m
// to the destination metadata.
sc := r.Header.Get(xhttp.AmzStorageClass)
if sc == "" {
sc = r.URL.Query().Get(xhttp.AmzStorageClass)
sc = r.Form.Get(xhttp.AmzStorageClass)
}

// if x-amz-metadata-directive says REPLACE then

@@ -900,18 +916,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
return
}

if _, ok := crypto.IsRequested(r.Header); ok {
if globalIsGateway {
if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
} else {
if !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
}
if _, ok := crypto.IsRequested(r.Header); ok && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

vars := mux.Vars(r)

@@ -1307,7 +1314,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
if rs := r.Header.Get(xhttp.AmzBucketReplicationStatus); rs != "" {
srcInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = rs
}
if ok, _ := mustReplicate(ctx, r, dstBucket, dstObject, getMustReplicateOptions(srcInfo, replication.UnsetReplicationType)); ok {
if ok, _ := mustReplicate(ctx, dstBucket, dstObject, getMustReplicateOptions(srcInfo, replication.UnsetReplicationType)); ok {
srcInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
}
// Store the preserved compression metadata.

@@ -1381,12 +1388,14 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
objInfo.ModTime = remoteObjInfo.LastModified
} else {

os := newObjSweeper(dstBucket, dstObject)
os := newObjSweeper(dstBucket, dstObject).WithVersioning(dstOpts.Versioned, dstOpts.VersionSuspended)
// Get appropriate object info to identify the remote object to delete
if !srcInfo.metadataOnly {
goiOpts := os.GetOpts()
if goi, gerr := getObjectInfo(ctx, dstBucket, dstObject, goiOpts); gerr == nil {
os.SetTransitionState(goi)
if !globalTierConfigMgr.Empty() {
if goi, gerr := getObjectInfo(ctx, dstBucket, dstObject, goiOpts); gerr == nil {
os.SetTransitionState(goi.TransitionedObject)
}
}
}

@@ -1404,13 +1413,16 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
}

// Remove the transitioned object whose object version is being overwritten.
logger.LogIf(ctx, os.Sweep())
if !globalTierConfigMgr.Empty() {
logger.LogIf(ctx, os.Sweep())
}
}

objInfo.ETag = getDecryptedETag(r.Header, objInfo, false)
response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime)
encodedSuccessResponse := encodeResponse(response)

if replicate, sync := mustReplicate(ctx, r, dstBucket, dstObject, getMustReplicateOptions(objInfo, replication.UnsetReplicationType)); replicate {
if replicate, sync := mustReplicate(ctx, dstBucket, dstObject, getMustReplicateOptions(objInfo, replication.UnsetReplicationType)); replicate {
scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
}
@@ -1455,18 +1467,9 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
return
}

if _, ok := crypto.IsRequested(r.Header); ok {
if globalIsGateway {
if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
} else {
if !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
}
if _, ok := crypto.IsRequested(r.Header); ok && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

vars := mux.Vars(r)

@@ -1654,7 +1657,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
if ok, _ := mustReplicate(ctx, r, bucket, object, getMustReplicateOptions(ObjectInfo{
if ok, _ := mustReplicate(ctx, bucket, object, getMustReplicateOptions(ObjectInfo{
UserDefined: metadata,
}, replication.ObjectReplicationType)); ok {
metadata[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()

@@ -1702,11 +1705,13 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
// Ensure that metadata does not contain sensitive information
crypto.RemoveSensitiveEntries(metadata)

oc := newObjSweeper(bucket, object)
// Get appropriate object info to identify the remote object to delete
goiOpts := oc.GetOpts()
if goi, gerr := getObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
oc.SetTransitionState(goi)
os := newObjSweeper(bucket, object).WithVersioning(opts.Versioned, opts.VersionSuspended)
if !globalTierConfigMgr.Empty() {
// Get appropriate object info to identify the remote object to delete
goiOpts := os.GetOpts()
if goi, gerr := getObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
os.SetTransitionState(goi.TransitionedObject)
}
}

// Create the object..

@@ -1747,15 +1752,12 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
}
}
}
if replicate, sync := mustReplicate(ctx, r, bucket, object, getMustReplicateOptions(ObjectInfo{
if replicate, sync := mustReplicate(ctx, bucket, object, getMustReplicateOptions(ObjectInfo{
UserDefined: metadata,
}, replication.ObjectReplicationType)); replicate {
scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
}

// Remove the transitioned object whose object version is being overwritten.
logger.LogIf(ctx, oc.Sweep())

setPutObjHeaders(w, objInfo, false)

writeSuccessResponseHeadersOnly(w)

@@ -1770,6 +1772,11 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})

// Remove the transitioned object whose object version is being overwritten.
if !globalTierConfigMgr.Empty() {
logger.LogIf(ctx, os.Sweep())
}
}

// PutObjectExtractHandler - PUT Object extract is an extended API

@@ -1791,18 +1798,9 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
return
}

if _, ok := crypto.IsRequested(r.Header); ok {
if globalIsGateway {
if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
} else {
if !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
}
if _, ok := crypto.IsRequested(r.Header); ok && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

vars := mux.Vars(r)

@@ -1985,7 +1983,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
return
}

if ok, _ := mustReplicate(ctx, r, bucket, object, getMustReplicateOptions(ObjectInfo{
if ok, _ := mustReplicate(ctx, bucket, object, getMustReplicateOptions(ObjectInfo{
UserDefined: metadata,
}, replication.ObjectReplicationType)); ok {
metadata[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()

@@ -2043,7 +2041,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
return
}

if replicate, sync := mustReplicate(ctx, r, bucket, object, getMustReplicateOptions(ObjectInfo{
if replicate, sync := mustReplicate(ctx, bucket, object, getMustReplicateOptions(ObjectInfo{
UserDefined: metadata,
}, replication.ObjectReplicationType)); replicate {
scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)

@@ -2077,18 +2075,9 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
return
}

if _, ok := crypto.IsRequested(r.Header); ok {
if globalIsGateway {
if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
} else {
if !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
}
if _, ok := crypto.IsRequested(r.Header); ok && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

vars := mux.Vars(r)

@@ -2157,7 +2146,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
if ok, _ := mustReplicate(ctx, r, bucket, object, getMustReplicateOptions(ObjectInfo{
if ok, _ := mustReplicate(ctx, bucket, object, getMustReplicateOptions(ObjectInfo{
UserDefined: metadata,
}, replication.ObjectReplicationType)); ok {
metadata[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()

@@ -2264,8 +2253,8 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
return
}

uploadID := r.URL.Query().Get(xhttp.UploadID)
partIDString := r.URL.Query().Get(xhttp.PartNumber)
uploadID := r.Form.Get(xhttp.UploadID)
partIDString := r.Form.Get(xhttp.PartNumber)

partID, err := strconv.Atoi(partIDString)
if err != nil {

@@ -2528,18 +2517,8 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return
}

if _, ok := crypto.IsRequested(r.Header); ok {
if globalIsGateway {
if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
} else {
if !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
}
if _, ok := crypto.IsRequested(r.Header); ok && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
}

vars := mux.Vars(r)

@@ -2591,8 +2570,8 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return
}

uploadID := r.URL.Query().Get(xhttp.UploadID)
partIDString := r.URL.Query().Get(xhttp.PartNumber)
uploadID := r.Form.Get(xhttp.UploadID)
partIDString := r.Form.Get(xhttp.PartNumber)

partID, err := strconv.Atoi(partIDString)
if err != nil {

@@ -2812,7 +2791,7 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
return
}

uploadID, _, _, _, s3Error := getObjectResources(r.URL.Query())
uploadID, _, _, _, s3Error := getObjectResources(r.Form)
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
@@ -2851,7 +2830,7 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht
return
}

uploadID, partNumberMarker, maxParts, encodingType, s3Error := getObjectResources(r.URL.Query())
uploadID, partNumberMarker, maxParts, encodingType, s3Error := getObjectResources(r.Form)
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return

@@ -2997,7 +2976,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
}

// Get upload id.
uploadID, _, _, _, s3Error := getObjectResources(r.URL.Query())
uploadID, _, _, _, s3Error := getObjectResources(r.Form)
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return

@@ -3106,11 +3085,15 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
w.(http.Flusher).Flush()
}

os := newObjSweeper(bucket, object)
// Get appropriate object info to identify the remote object to delete
goiOpts := os.GetOpts()
if goi, gerr := objectAPI.GetObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
os.SetTransitionState(goi)
versioned := globalBucketVersioningSys.Enabled(bucket)
suspended := globalBucketVersioningSys.Suspended(bucket)
os := newObjSweeper(bucket, object).WithVersioning(versioned, suspended)
if !globalTierConfigMgr.Empty() {
// Get appropriate object info to identify the remote object to delete
goiOpts := os.GetOpts()
if goi, gerr := objectAPI.GetObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
os.SetTransitionState(goi.TransitionedObject)
}
}

setEventStreamHeaders(w)

@@ -3160,13 +3143,10 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
}

setPutObjHeaders(w, objInfo, false)
if replicate, sync := mustReplicate(ctx, r, bucket, object, getMustReplicateOptions(objInfo, replication.ObjectReplicationType)); replicate {
if replicate, sync := mustReplicate(ctx, bucket, object, getMustReplicateOptions(objInfo, replication.ObjectReplicationType)); replicate {
scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
}

// Remove the transitioned object whose object version is being overwritten.
logger.LogIf(ctx, os.Sweep())

// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)

@@ -3180,6 +3160,11 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})

// Remove the transitioned object whose object version is being overwritten.
if !globalTierConfigMgr.Empty() {
logger.LogIf(ctx, os.Sweep())
}
}

/// Delete objectAPIHandlers

@@ -3209,11 +3194,6 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
return
}

getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}

if globalDNSConfig != nil {
_, err := globalDNSConfig.Get(bucket)
if err != nil && err != dns.ErrNotImplemented {

@@ -3232,16 +3212,20 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
gerr error
)

var goiOpts ObjectOptions
os := newObjSweeper(bucket, object).WithVersion(singleDelete(*r))
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}

os := newObjSweeper(bucket, object).WithVersion(opts.VersionID).WithVersioning(opts.Versioned, opts.VersionSuspended)
// Mutations of objects on versioning suspended buckets
// affect its null version. Through opts below we select
// the null version's remote object to delete if
// transitioned.
goiOpts = os.GetOpts()
goiOpts := os.GetOpts()
goi, gerr = getObjectInfo(ctx, bucket, object, goiOpts)
if gerr == nil {
os.SetTransitionState(goi)
os.SetTransitionState(goi.TransitionedObject)
}

replicateDel, replicateSync := checkReplicateDelete(ctx, bucket, ObjectToDelete{ObjectName: object, VersionID: opts.VersionID}, goi, gerr)

@@ -3354,8 +3338,9 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
}

// Remove the transitioned object whose object version is being overwritten.
logger.LogIf(ctx, os.Sweep())

if !globalTierConfigMgr.Empty() {
os.Sweep()
}
}

// PutObjectLegalHoldHandler - set legal hold configuration to object,

@@ -3425,7 +3410,7 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r
return
}
objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = strings.ToUpper(string(legalHold.Status))
replicate, sync := mustReplicate(ctx, r, bucket, object, getMustReplicateOptions(objInfo, replication.MetadataReplicationType))
replicate, sync := mustReplicate(ctx, bucket, object, getMustReplicateOptions(objInfo, replication.MetadataReplicationType))
if replicate {
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
}

@@ -3548,7 +3533,7 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r
return
}

cred, owner, claims, s3Err := validateSignature(getRequestAuthType(r), r)
cred, owner, s3Err := validateSignature(getRequestAuthType(r), r)
if s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return

@@ -3588,7 +3573,7 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r
getObjectInfo = api.CacheAPI().GetObjectInfo
}

objInfo, s3Err := enforceRetentionBypassForPut(ctx, r, bucket, object, getObjectInfo, objRetention, cred, owner, claims)
objInfo, s3Err := enforceRetentionBypassForPut(ctx, r, bucket, object, getObjectInfo, objRetention, cred, owner)
if s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return

@@ -3604,7 +3589,7 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r
objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = ""
objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = ""
}
replicate, sync := mustReplicate(ctx, r, bucket, object, getMustReplicateOptions(objInfo, replication.MetadataReplicationType))
replicate, sync := mustReplicate(ctx, bucket, object, getMustReplicateOptions(objInfo, replication.MetadataReplicationType))
if replicate {
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
}

@@ -3805,7 +3790,7 @@ func (api objectAPIHandlers) PutObjectTaggingHandler(w http.ResponseWriter, r *h

oi := objInfo.Clone()
oi.UserTags = tagsStr
replicate, sync := mustReplicate(ctx, r, bucket, object, getMustReplicateOptions(oi, replication.MetadataReplicationType))
replicate, sync := mustReplicate(ctx, bucket, object, getMustReplicateOptions(oi, replication.MetadataReplicationType))
if replicate {
opts.UserDefined = make(map[string]string)
opts.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()

@@ -3880,7 +3865,7 @@ func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
replicate, sync := mustReplicate(ctx, r, bucket, object, getMustReplicateOptions(oi, replication.MetadataReplicationType))
replicate, sync := mustReplicate(ctx, bucket, object, getMustReplicateOptions(oi, replication.MetadataReplicationType))
if replicate {
opts.UserDefined = make(map[string]string)
opts.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()

@@ -3952,13 +3937,14 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

objInfo, err := getObjectInfo(ctx, bucket, object, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if objInfo.TransitionStatus != lifecycle.TransitionComplete {
if objInfo.TransitionedObject.Status != lifecycle.TransitionComplete {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidObjectState), r.URL)
return
}
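Several handlers above now share the same lifecycle gate: evaluate the bucket lifecycle against the object, apply an expiry action when one matches, and answer 404 when the object was expired. A heavily simplified sketch of that control flow (all types and the `applyExpiryRule` behavior here are stand-ins, not MinIO's implementation):

```go
package main

import "fmt"

type action int

const (
	noneAction action = iota
	deleteAction
	deleteVersionAction
	deleteRestoredAction
)

// applyExpiryRule stands in for queueing the expiry task; restored-object
// deletes do not hide the current response in the real handlers.
func applyExpiryRule(obj string, restored, deleteVersion bool) bool {
	fmt.Printf("queue expiry: %s (restored=%v version=%v)\n", obj, restored, deleteVersion)
	return !restored
}

func main() {
	act := deleteVersionAction // stand-in for evalActionFromLifecycle
	var success bool
	switch act {
	case deleteAction, deleteVersionAction:
		success = applyExpiryRule("bucket/object", false, act == deleteVersionAction)
	case deleteRestoredAction:
		applyExpiryRule("bucket/object", true, false)
	}
	if success {
		fmt.Println("respond 404 NoSuchKey")
	}
}
```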
@@ -26,6 +26,7 @@ import (
"encoding/xml"
"fmt"
"io"
"path"
"runtime"
"strings"

@@ -557,12 +558,8 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}

if actualError.BucketName != testCase.bucketName {
t.Fatalf("Test %d: %s: Unexpected bucket name, expected %s, got %s", i+1, instanceType, testCase.bucketName, actualError.BucketName)
}

if actualError.Key != testCase.objectName {
t.Fatalf("Test %d: %s: Unexpected object name, expected %s, got %s", i+1, instanceType, testCase.objectName, actualError.Key)
if path.Clean(actualError.Resource) != pathJoin(SlashSeparator, testCase.bucketName, testCase.objectName) {
t.Fatalf("Test %d: %s: Unexpected resource, expected %s, got %s", i+1, instanceType, pathJoin(SlashSeparator, testCase.bucketName, testCase.objectName), actualError.Resource)
}

// Verify response of the V2 signed HTTP request.

@@ -606,12 +603,8 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}

if actualError.BucketName != testCase.bucketName {
t.Fatalf("Test %d: %s: Unexpected bucket name, expected %s, got %s", i+1, instanceType, testCase.bucketName, actualError.BucketName)
}

if actualError.Key != testCase.objectName {
t.Fatalf("Test %d: %s: Unexpected object name, expected %s, got %s", i+1, instanceType, testCase.objectName, actualError.Key)
if path.Clean(actualError.Resource) != pathJoin(SlashSeparator, testCase.bucketName, testCase.objectName) {
t.Fatalf("Test %d: %s: Unexpected resource, expected %s, got %s", i+1, instanceType, pathJoin(SlashSeparator, testCase.bucketName, testCase.objectName), actualError.Resource)
}
}

@@ -918,7 +911,6 @@ func testAPIGetObjectWithPartNumberHandler(obj ObjectLayer, instanceType, bucket

mkGetReqWithPartNumber := func(oindex int, oi ObjectInput, partNumber int) {
object := oi.objectName
rec := httptest.NewRecorder()

queries := url.Values{}
queries.Add("partNumber", strconv.Itoa(partNumber))

@@ -930,6 +922,7 @@ func testAPIGetObjectWithPartNumberHandler(obj ObjectLayer, instanceType, bucket
object, oindex, partNumber, err)
}

rec := httptest.NewRecorder()
apiRouter.ServeHTTP(rec, req)

// Check response code (we make only valid requests in this test)
@ -1,3 +1,4 @@
|
|||
//go:build freebsd || openbsd || netbsd
|
||||
// +build freebsd openbsd netbsd
|
||||
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
|
|
|
@@ -15,6 +15,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

+//go:build (linux || darwin) && !appengine
 // +build linux darwin
 // +build !appengine

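The run of `@@ -1,3 +1,4 @@` hunks above and below is mechanical: Go 1.17 introduced the `//go:build` constraint syntax, and `gofmt` inserts the new form above the legacy `// +build` line while keeping the two in sync. Both spellings express the same constraint; note how a two-line legacy constraint becomes a single boolean expression (the package name below is hypothetical):

```go
//go:build (linux || darwin) && !appengine
// +build linux darwin
// +build !appengine

// Legacy rules: multiple "// +build" lines are ANDed together, and
// space-separated terms within one line are ORed. The "//go:build" line
// above states the same condition as one boolean expression.
package osdetect
```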
@@ -1,3 +1,4 @@
+//go:build darwin || freebsd || openbsd || netbsd
 // +build darwin freebsd openbsd netbsd

 // Copyright (c) 2015-2021 MinIO, Inc.
@@ -1,3 +1,4 @@
+//go:build linux && !appengine
 // +build linux,!appengine

 // Copyright (c) 2015-2021 MinIO, Inc.
@@ -1,3 +1,4 @@
+//go:build plan9 || solaris
 // +build plan9 solaris

 // Copyright (c) 2015-2021 MinIO, Inc.

@@ -95,7 +96,7 @@ func readDirWithOpts(dirPath string, opts readDirOpts) (entries []string, err er

 	maxEntries := 1000
 	if opts.count > 0 && opts.count < maxEntries {
-		maxEntries = count
+		maxEntries = opts.count
 	}

 	done := false
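The `@@ -95,7 +96,7 @@` hunk above is a straight compile fix: `count` is not defined in that scope, while `opts.count` is. A self-contained sketch of the corrected clamp, with names simplified from the original:

```go
package main

import "fmt"

// clampEntries mirrors the fixed logic: a positive requested count caps how
// many directory entries one readDir batch returns; zero or negative means
// "use the 1000-entry default".
func clampEntries(requested int) int {
	maxEntries := 1000
	if requested > 0 && requested < maxEntries {
		maxEntries = requested
	}
	return maxEntries
}

func main() {
	fmt.Println(clampEntries(0), clampEntries(250), clampEntries(5000)) // 1000 250 1000
}
```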
@@ -1,3 +1,4 @@
+//go:build (linux && !appengine) || darwin || freebsd || netbsd || openbsd
 // +build linux,!appengine darwin freebsd netbsd openbsd

 // Copyright (c) 2015-2021 MinIO, Inc.
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows

 // Copyright (c) 2015-2021 MinIO, Inc.
@@ -400,7 +400,29 @@ func (client *peerRESTClient) GetOSInfo(ctx context.Context) (info madmin.OSInfo
 	return info, err
 }

-// GetSysErrors - fetch memory information for a remote node.
+// GetSELinuxInfo - fetch SELinux information for a remote node.
+func (client *peerRESTClient) GetSELinuxInfo(ctx context.Context) (info madmin.SysServices, err error) {
+	respBody, err := client.callWithContext(ctx, peerRESTMethodSysServices, nil, nil, -1)
+	if err != nil {
+		return
+	}
+	defer http.DrainBody(respBody)
+	err = gob.NewDecoder(respBody).Decode(&info)
+	return info, err
+}
+
+// GetSysConfig - fetch sys config for a remote node.
+func (client *peerRESTClient) GetSysConfig(ctx context.Context) (info madmin.SysConfig, err error) {
+	respBody, err := client.callWithContext(ctx, peerRESTMethodSysConfig, nil, nil, -1)
+	if err != nil {
+		return
+	}
+	defer http.DrainBody(respBody)
+	err = gob.NewDecoder(respBody).Decode(&info)
+	return info, err
+}
+
+// GetSysErrors - fetch sys errors for a remote node.
 func (client *peerRESTClient) GetSysErrors(ctx context.Context) (info madmin.SysErrors, err error) {
 	respBody, err := client.callWithContext(ctx, peerRESTMethodSysErrors, nil, nil, -1)
 	if err != nil {
@@ -992,9 +1014,18 @@ func (client *peerRESTClient) Speedtest(ctx context.Context, size, concurrent in
 		return SpeedtestResult{}, err
 	}
 	defer http.DrainBody(respBody)
+	waitReader, err := waitForHTTPResponse(respBody)
+	if err != nil {
+		return SpeedtestResult{}, err
+	}

-	dec := gob.NewDecoder(respBody)
 	var result SpeedtestResult
-	err = dec.Decode(&result)
-	return result, err
+	err = gob.NewDecoder(waitReader).Decode(&result)
+	if err != nil {
+		return result, err
+	}
+	if result.Error != "" {
+		return result, errors.New(result.Error)
+	}
+	return result, nil
 }
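This client-side change pairs with the SpeedtestHandler hunk further down: the server now calls `keepHTTPResponseAlive(w)` before running the test, and the client strips the keep-alive filler with `waitForHTTPResponse` before gob-decoding the result. Both helpers are MinIO-internal; the sketch below only illustrates the general idea under an assumed framing (0x00 filler while work is in flight, 0x01 marking the payload), not MinIO's actual wire format:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"io"
	"time"
)

// writeWithKeepAlive emits 0x00 filler while work runs, so clients and
// proxies do not time out a long-running request, then 0x01 followed by
// the gob-encoded result.
func writeWithKeepAlive(w io.Writer, work func() string) error {
	done := make(chan string, 1)
	go func() { done <- work() }()
	for {
		select {
		case res := <-done:
			if _, err := w.Write([]byte{1}); err != nil {
				return err
			}
			return gob.NewEncoder(w).Encode(res)
		case <-time.After(10 * time.Millisecond):
			if _, err := w.Write([]byte{0}); err != nil {
				return err
			}
		}
	}
}

// waitForResponse skips the filler and returns a reader positioned at the
// payload, mirroring what waitForHTTPResponse does for the peer client.
func waitForResponse(r io.Reader) (io.Reader, error) {
	b := make([]byte, 1)
	for {
		if _, err := io.ReadFull(r, b); err != nil {
			return nil, err
		}
		if b[0] == 1 {
			return r, nil
		}
	}
}

func main() {
	var buf bytes.Buffer
	if err := writeWithKeepAlive(&buf, func() string {
		time.Sleep(35 * time.Millisecond) // stand-in for the speedtest run
		return "speedtest done"
	}); err != nil {
		panic(err)
	}
	payload, err := waitForResponse(&buf)
	if err != nil {
		panic(err)
	}
	var msg string
	if err := gob.NewDecoder(payload).Decode(&msg); err != nil {
		panic(err)
	}
	fmt.Println(msg)
}
```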
@@ -35,6 +35,8 @@ const (
 	peerRESTMethodMemInfo              = "/meminfo"
 	peerRESTMethodProcInfo             = "/procinfo"
 	peerRESTMethodSysErrors            = "/syserrors"
+	peerRESTMethodSysServices          = "/sysservices"
+	peerRESTMethodSysConfig            = "/sysconfig"
 	peerRESTMethodDispatchNetInfo      = "/dispatchnetinfo"
 	peerRESTMethodDeleteBucketMetadata = "/deletebucketmetadata"
 	peerRESTMethodLoadBucketMetadata   = "/loadbucketmetadata"
@@ -136,7 +136,7 @@ func (s *peerRESTServer) LoadPolicyMappingHandler(w http.ResponseWriter, r *http
 		return
 	}

-	_, isGroup := r.URL.Query()[peerRESTIsGroup]
+	_, isGroup := r.Form[peerRESTIsGroup]
 	if err := globalIAMSys.LoadPolicyMapping(objAPI, userOrGroup, isGroup); err != nil {
 		s.writeErrorResponse(w, err)
 		return
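Many hunks in this merge swap `r.URL.Query()` for `r.Form`. Once `ParseForm` has run (presumably in shared middleware here, since these handlers never call it themselves), `r.Form` holds the URL query parameters plus, for POST/PUT/PATCH requests with a form content type, the body values, so a handler accepts either transport. A standard-library illustration (the path and parameter names are illustrative, not MinIO's):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	// One request carrying a query parameter and a form-encoded body value.
	r := httptest.NewRequest(http.MethodPost, "/peer/load-policy?is-group=true",
		strings.NewReader("user-or-group=devs"))
	r.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	if err := r.ParseForm(); err != nil {
		panic(err)
	}
	// r.Form merges both sources; r.URL.Query() would only see the first.
	fmt.Println(r.Form.Get("is-group"), r.Form.Get("user-or-group")) // true devs
}
```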
@@ -510,7 +510,43 @@ func (s *peerRESTServer) GetMemInfoHandler(w http.ResponseWriter, r *http.Reques
 	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
 }

-// GetSysErrorsHandler - returns memory information.
+// GetSysConfigHandler - returns system config information.
+// (only the config that are of concern to minio)
+func (s *peerRESTServer) GetSysConfigHandler(w http.ResponseWriter, r *http.Request) {
+	if !s.IsValid(w, r) {
+		fmt.Println("Invalid request")
+		s.writeErrorResponse(w, errors.New("Invalid request"))
+		return
+	}
+
+	ctx, cancel := context.WithCancel(r.Context())
+	defer cancel()
+
+	info := madmin.GetSysConfig(ctx, r.Host)
+
+	defer w.(http.Flusher).Flush()
+	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
+}
+
+// GetSysServicesHandler - returns system services information.
+// (only the services that are of concern to minio)
+func (s *peerRESTServer) GetSysServicesHandler(w http.ResponseWriter, r *http.Request) {
+	if !s.IsValid(w, r) {
+		fmt.Println("Invalid request")
+		s.writeErrorResponse(w, errors.New("Invalid request"))
+		return
+	}
+
+	ctx, cancel := context.WithCancel(r.Context())
+	defer cancel()
+
+	info := madmin.GetSysServices(ctx, r.Host)
+
+	defer w.(http.Flusher).Flush()
+	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
+}
+
+// GetSysErrorsHandler - returns system level errors
 func (s *peerRESTServer) GetSysErrorsHandler(w http.ResponseWriter, r *http.Request) {
 	if !s.IsValid(w, r) {
 		s.writeErrorResponse(w, errors.New("Invalid request"))
@@ -841,7 +877,7 @@ func (s *peerRESTServer) ListenHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	values := r.URL.Query()
+	values := r.Form

 	var prefix string
 	if len(values[peerRESTListenPrefix]) > 1 {
@@ -932,15 +968,13 @@ func (s *peerRESTServer) ListenHandler(w http.ResponseWriter, r *http.Request) {
 }

 func extractTraceOptsFromPeerRequest(r *http.Request) (opts madmin.ServiceTraceOpts, err error) {
-
-	q := r.URL.Query()
-	opts.OnlyErrors = q.Get(peerRESTTraceErr) == "true"
-	opts.Storage = q.Get(peerRESTTraceStorage) == "true"
-	opts.Internal = q.Get(peerRESTTraceInternal) == "true"
-	opts.S3 = q.Get(peerRESTTraceS3) == "true"
-	opts.OS = q.Get(peerRESTTraceOS) == "true"
+	opts.S3 = r.Form.Get(peerRESTTraceS3) == "true"
+	opts.OS = r.Form.Get(peerRESTTraceOS) == "true"
+	opts.Storage = r.Form.Get(peerRESTTraceStorage) == "true"
+	opts.Internal = r.Form.Get(peerRESTTraceInternal) == "true"
+	opts.OnlyErrors = r.Form.Get(peerRESTTraceErr) == "true"

-	if t := q.Get(peerRESTTraceThreshold); t != "" {
+	if t := r.Form.Get(peerRESTTraceThreshold); t != "" {
 		d, err := time.ParseDuration(t)
 		if err != nil {
 			return opts, err
@@ -1078,7 +1112,8 @@ func (s *peerRESTServer) GetBandwidth(w http.ResponseWriter, r *http.Request) {
 		s.writeErrorResponse(w, errors.New("invalid request"))
 		return
 	}
-	bucketsString := r.URL.Query().Get("buckets")
+
+	bucketsString := r.Form.Get("buckets")
 	w.WriteHeader(http.StatusOK)
 	w.(http.Flusher).Flush()

@@ -1123,6 +1158,7 @@ func (s *peerRESTServer) GetPeerMetrics(w http.ResponseWriter, r *http.Request)
 type SpeedtestResult struct {
 	Uploads   uint64
 	Downloads uint64
+	Error     string
 }

 // SpeedtestObject implements "random-read" object reader
@@ -1245,7 +1281,7 @@ func selfSpeedtest(ctx context.Context, size, concurrent int, duration time.Dura
 		}(i)
 	}
 	wg.Wait()
-	return SpeedtestResult{objUploadCount, objDownloadCount}, nil
+	return SpeedtestResult{Uploads: objUploadCount, Downloads: objDownloadCount}, nil
 }

 func (s *peerRESTServer) SpeedtestHandler(w http.ResponseWriter, r *http.Request) {
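Adding `Error string` to `SpeedtestResult` is why the return in `selfSpeedtest` switches to named fields: an untagged composite literal must supply a value for every field, so the old two-value form would stop compiling the moment a third field appears. Named fields are immune to that:

```go
package main

import "fmt"

type SpeedtestResult struct {
	Uploads   uint64
	Downloads uint64
	Error     string
}

func main() {
	// SpeedtestResult{42, 99} no longer compiles once Error exists
	// ("too few values in struct literal"); named fields keep working
	// and leave Error at its zero value.
	r := SpeedtestResult{Uploads: 42, Downloads: 99}
	fmt.Printf("%+v\n", r)
}
```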
@@ -1259,9 +1295,9 @@ func (s *peerRESTServer) SpeedtestHandler(w http.ResponseWriter, r *http.Request
 		return
 	}

-	sizeStr := r.URL.Query().Get(peerRESTSize)
-	durationStr := r.URL.Query().Get(peerRESTDuration)
-	concurrentStr := r.URL.Query().Get(peerRESTConcurrent)
+	sizeStr := r.Form.Get(peerRESTSize)
+	durationStr := r.Form.Get(peerRESTDuration)
+	concurrentStr := r.Form.Get(peerRESTConcurrent)

 	size, err := strconv.Atoi(sizeStr)
 	if err != nil {
@@ -1278,17 +1314,15 @@ func (s *peerRESTServer) SpeedtestHandler(w http.ResponseWriter, r *http.Request
 		duration = time.Second * 10
 	}

+	done := keepHTTPResponseAlive(w)
+
 	result, err := selfSpeedtest(r.Context(), size, concurrent, duration)
 	if err != nil {
-		s.writeErrorResponse(w, err)
-		return
+		result.Error = err.Error()
 	}

-	enc := gob.NewEncoder(w)
-	if err := enc.Encode(result); err != nil {
-		s.writeErrorResponse(w, errors.New("Encoding report failed: "+err.Error()))
-		return
-	}
+	done(nil)
+	logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result))
 	w.(http.Flusher).Flush()
 }

@@ -1302,6 +1336,8 @@ func registerPeerRESTHandlers(router *mux.Router) {
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodProcInfo).HandlerFunc(httpTraceHdrs(server.GetProcInfoHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMemInfo).HandlerFunc(httpTraceHdrs(server.GetMemInfoHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysErrors).HandlerFunc(httpTraceHdrs(server.GetSysErrorsHandler))
+	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysServices).HandlerFunc(httpTraceHdrs(server.GetSysServicesHandler))
+	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysConfig).HandlerFunc(httpTraceHdrs(server.GetSysConfigHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodOsInfo).HandlerFunc(httpTraceHdrs(server.GetOSInfoHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDiskHwInfo).HandlerFunc(httpTraceHdrs(server.GetPartitionsHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodCPUInfo).HandlerFunc(httpTraceHdrs(server.GetCPUsHandler))
@@ -328,7 +328,13 @@ func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCou
 			continue
 		case errErasureReadQuorum:
 			// no quorum available continue to wait for minimum number of servers.
-			logger.Info("Waiting for a minimum of %d disks to come online (elapsed %s)\n", len(endpoints)/2, getElapsedTime())
+			logger.Info("Waiting for a minimum of %d disks to come online (elapsed %s)\n",
+				len(endpoints)/2, getElapsedTime())
 			continue
+		case errErasureWriteQuorum:
+			// no quorum available continue to wait for minimum number of servers.
+			logger.Info("Waiting for a minimum of %d disks to come online (elapsed %s)\n",
+				(len(endpoints)/2)+1, getElapsedTime())
+			continue
 		case errErasureV3ThisEmpty:
 			// need to wait for this error to be healed, so continue.
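The new `errErasureWriteQuorum` case makes the wait message reflect the two different thresholds: reads need half the drives, writes a strict majority. In sketch form:

```go
package main

import "fmt"

// quorums reflects the two log messages above: read quorum is half the
// endpoints, write quorum is half plus one (a strict majority).
func quorums(endpoints int) (read, write int) {
	return endpoints / 2, endpoints/2 + 1
}

func main() {
	read, write := quorums(16)
	fmt.Printf("16 drives: wait for %d (reads) or %d (writes)\n", read, write)
}
```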
@@ -196,7 +196,7 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context,
 		return
 	}

-	setHeadGetRespHeaders(w, r.URL.Query())
+	setHeadGetRespHeaders(w, r.Form)

 	httpWriter := ioutil.WriteOnClose(w)

@@ -467,7 +467,7 @@ func (api objectAPIHandlers) headObjectInArchiveFileHandler(ctx context.Context,
 	}

 	// Set any additional requested response headers.
-	setHeadGetRespHeaders(w, r.URL.Query())
+	setHeadGetRespHeaders(w, r.Form)

 	// Successful response.
 	if rs != nil {
@@ -105,10 +105,18 @@ EXAMPLES:
 }

 func serverCmdArgs(ctx *cli.Context) []string {
-	v := env.Get(config.EnvArgs, "")
+	v, _, _, err := env.LookupEnv(config.EnvArgs)
+	if err != nil {
+		logger.FatalIf(err, "Unable to validate passed arguments in %s:%s",
+			config.EnvArgs, os.Getenv(config.EnvArgs))
+	}
 	if v == "" {
-		// Fall back to older ENV MINIO_ENDPOINTS
-		v = env.Get(config.EnvEndpoints, "")
+		// Fall back to older environment value MINIO_ENDPOINTS
+		v, _, _, err = env.LookupEnv(config.EnvEndpoints)
+		if err != nil {
+			logger.FatalIf(err, "Unable to validate passed arguments in %s:%s",
+				config.EnvEndpoints, os.Getenv(config.EnvEndpoints))
+		}
 	}
 	if v == "" {
 		if !ctx.Args().Present() || ctx.Args().First() == "help" {
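`serverCmdArgs` now uses `env.LookupEnv` from minio/pkg, whose extra return values are discarded here and whose error path, unlike plain `os.LookupEnv`, can fire, presumably when the variable resolves through an external source and fails validation. A simplified analogue of the fallback chain using only the standard library (assuming the conventional variable names; the real helper's validation branch is elided):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// endpointsFromEnv mirrors the precedence in serverCmdArgs: MINIO_ARGS first,
// then the older MINIO_ENDPOINTS, then the command line. strings.Fields is a
// simplification of however the real code splits the value.
func endpointsFromEnv(cmdline []string) []string {
	for _, key := range []string{"MINIO_ARGS", "MINIO_ENDPOINTS"} {
		if v, ok := os.LookupEnv(key); ok && v != "" {
			return strings.Fields(v)
		}
	}
	return cmdline
}

func main() {
	os.Setenv("MINIO_ENDPOINTS", "http://host{1...4}/data{1...4}")
	fmt.Println(endpointsFromEnv([]string{"/data"}))
}
```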
@@ -530,7 +538,6 @@ func serverMain(ctx *cli.Context) {
 	if globalIsErasure {
 		initAutoHeal(GlobalContext, newObject)
 		initHealMRF(GlobalContext, newObject)
-		initBackgroundTransition(GlobalContext, newObject)
 	}

 	initBackgroundExpiry(GlobalContext, newObject)
@@ -558,6 +565,7 @@ func serverMain(ctx *cli.Context) {

 	if globalIsErasure { // to be done after config init
 		initBackgroundReplication(GlobalContext, newObject)
+		initBackgroundTransition(GlobalContext, newObject)
 		globalTierJournal, err = initTierDeletionJournal(GlobalContext)
 		if err != nil {
 			logger.FatalIf(err, "Unable to initialize remote tier pending deletes journal")
@@ -582,20 +590,17 @@ func serverMain(ctx *cli.Context) {
 	}

 	if globalBrowserEnabled {
-		consoleSrv, err := initConsoleServer()
+		globalConsoleSrv, err = initConsoleServer()
 		if err != nil {
 			logger.FatalIf(err, "Unable to initialize console service")
 		}

 		go func() {
-			logger.FatalIf(consoleSrv.Serve(), "Unable to initialize console server")
+			logger.FatalIf(globalConsoleSrv.Serve(), "Unable to initialize console server")
 		}()
-
-		<-globalOSSignalCh
-		consoleSrv.Shutdown()
-	} else {
-		<-globalOSSignalCh
 	}
+
+	<-globalOSSignalCh
 }

 // Initialize object layer with the supplied disks, objectLayer is nil upon any error.
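Promoting `consoleSrv` to the package-level `globalConsoleSrv` is what lets the signal handler (last hunk in this diff) shut the console down, so `serverMain` can drop its `if/else` around the signal wait. The shape of the pattern, sketched with `net/http.Server` standing in for the console server type (MinIO's console `Shutdown` signature differs):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// Package-level handle so one central shutdown path can reach it;
// nil when the console is disabled, just like globalConsoleSrv.
var globalConsoleSrv *http.Server

func handleShutdown() {
	if globalConsoleSrv != nil {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		_ = globalConsoleSrv.Shutdown(ctx)
	}
}

func main() {
	globalConsoleSrv = &http.Server{Addr: "127.0.0.1:9001"}
	go func() { _ = globalConsoleSrv.ListenAndServe() }()
	time.Sleep(100 * time.Millisecond) // let it start; a real server blocks on signals
	handleShutdown()
	fmt.Println("console stopped")
}
```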
@@ -20,6 +20,7 @@ package cmd
 import (
 	"runtime/debug"

+	"github.com/minio/minio/internal/logger"
 	"github.com/minio/pkg/sys"
 )

@@ -41,6 +42,10 @@ func setMaxResources() (err error) {
 		return err
 	}

+	if maxLimit < 4096 {
+		logger.Info("WARNING: maximum file descriptor limit %d is too low for production servers. At least 4096 is recommended. Fix with \"ulimit -n 4096\"", maxLimit)
+	}
+
 	if err = sys.SetMaxOpenFileLimit(maxLimit, maxLimit); err != nil {
 		return err
 	}
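The new warning fires when the available file-descriptor limit is below 4096. The same check with only the standard library (the diff uses `minio/pkg/sys`; `syscall.Getrlimit` is the underlying call on Linux and macOS):

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	var rl syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rl); err != nil {
		panic(err)
	}
	if rl.Max < 4096 {
		fmt.Printf("WARNING: maximum file descriptor limit %d is too low for production servers. At least 4096 is recommended.\n", rl.Max)
	} else {
		fmt.Printf("file descriptor limit %d is sufficient\n", rl.Max)
	}
}
```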
@@ -66,6 +66,10 @@ func handleSignals() {
 			logger.LogIf(context.Background(), oerr)
 		}

+		if globalConsoleSrv != nil {
+			logger.LogIf(context.Background(), globalConsoleSrv.Shutdown())
+		}
+
 		return (err == nil && oerr == nil)
 	}
