Compare commits

57 commits

master ... RELEASE.20

Author | SHA1 | Date |
---|---|---|
Anis Elleuch | 1b0054a7cd | |
sinhaashish | c23f33faef | |
Harshavardhana | 317779ddf6 | |
Anis Elleuch | 8c9ed43a5e | |
Harshavardhana | a85fea13f6 | |
Harshavardhana | 0af84282eb | |
Anis Elleuch | 5aff310b80 | |
Anis Elleuch | 851f3c5f0c | |
Anis Elleuch | 5df7bbf9f9 | |
Harshavardhana | a6e8f4aa1c | |
Harshavardhana | 0a276a25cf | |
Harshavardhana | 83ed1f361b | |
Anis Elleuch | 83676a0bc2 | |
Harshavardhana | b1c731c448 | |
Harshavardhana | 38709c84b7 | |
Harshavardhana | 5258a68b45 | |
Harshavardhana | 963f3ee047 | |
Anis Elleuch | 89db553204 | |
Harshavardhana | 1c2f82938f | |
Harshavardhana | 51ad1d983d | |
Anis Elleuch | ed264449b1 | |
Harshavardhana | 0e1dce37ad | |
Andreas Auernhammer | 472d78604b | |
Harshavardhana | d04201e2a6 | |
Harshavardhana | d629ca0a47 | |
Harshavardhana | 65a5e2c6d3 | |
Harshavardhana | 65864bc76a | |
Harshavardhana | 4e6e8c47b5 | |
Harshavardhana | ab6f0c0831 | |
Harshavardhana | ef1ea96044 | |
Harshavardhana | 29e7058ebf | |
Harshavardhana | f864931ab4 | |
Harshavardhana | 96b1377863 | |
Harshavardhana | bff2f9c733 | |
Klaus Post | 5f41f6043d | |
Ritesh H Shukla | 21718705b8 | |
Harshavardhana | 53e0c16976 | |
Harshavardhana | fb78283c0a | |
Harshavardhana | f07c9c58e7 | |
Harshavardhana | bc89e47066 | |
Harshavardhana | 0615d85384 | |
Harshavardhana | 42157eb218 | |
Harshavardhana | fa1cd6dcce | |
Harshavardhana | 745a4b31ba | |
Harshavardhana | 5151c429e4 | |
Klaus Post | dc1a46e5d2 | |
Harshavardhana | 8724d49116 | |
Anis Elleuch | 006c69f716 | |
Harshavardhana | 28974fb5da | |
Harshavardhana | 123cfa7573 | |
Klaus Post | 2439d4fb3c | |
Harshavardhana | 6bd9057bb1 | |
Harshavardhana | 2d878b7081 | |
Harshavardhana | 0570c21671 | |
Klaus Post | 2c0a81bc91 | |
Klaus Post | b0698b4b98 | |
Harshavardhana | 7ec6214e6e |
CREDITS

```diff
@@ -2884,8 +2884,8 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
 ================================================================
 
-github.com/dgrijalva/jwt-go
-https://github.com/dgrijalva/jwt-go
+github.com/golang-jwt/jwt
+https://github.com/golang-jwt/jwt
 ----------------------------------------------------------------
 Copyright (c) 2012 Dave Grijalva
```
```diff
@@ -1,4 +1,4 @@
-FROM alpine:3.12
+FROM alpine:3.12.7
 
 LABEL maintainer="MinIO Inc <dev@min.io>"
 
```

```diff
@@ -1,4 +1,4 @@
-FROM alpine:3.12
+FROM alpine:3.12.7
 
 LABEL maintainer="MinIO Inc <dev@min.io>"
 
```
Makefile

```diff
@@ -71,6 +71,10 @@ build: checks
 	@echo "Building minio binary to './minio'"
 	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 
+hotfix: LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
+	sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#'))
+hotfix: install
+
 docker: checks
 	@echo "Building minio docker image '$(TAG)'"
 	@GOOS=linux GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
```
```diff
@@ -44,10 +44,21 @@ func releaseTag(version string) string {
 		relPrefix = prefix
 	}
 
+	relSuffix := ""
+	if hotfix := os.Getenv("MINIO_HOTFIX"); hotfix != "" {
+		relSuffix = hotfix
+	}
+
 	relTag := strings.Replace(version, " ", "-", -1)
 	relTag = strings.Replace(relTag, ":", "-", -1)
 	relTag = strings.Replace(relTag, ",", "", -1)
-	return relPrefix + "." + relTag
+	relTag = relPrefix + "." + relTag
+
+	if relSuffix != "" {
+		relTag += "." + relSuffix
+	}
+
+	return relTag
 }
 
 // commitID returns the abbreviated commit-id hash of the last commit.
@@ -68,5 +79,12 @@ func commitID() string {
 }
 
 func main() {
-	fmt.Println(genLDFlags(time.Now().UTC().Format(time.RFC3339)))
+	var version string
+	if len(os.Args) > 1 {
+		version = os.Args[1]
+	} else {
+		version = time.Now().UTC().Format(time.RFC3339)
+	}
+
+	fmt.Println(genLDFlags(version))
 }
```
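Taken together, these two hunks let the build script accept an optional version argument and append a `MINIO_HOTFIX` suffix to the release tag. A standalone sketch of the resulting tag construction; the `DEVELOPMENT` default and the sample version string are illustrative assumptions, not quotes from the build script:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// releaseTag mirrors the patched logic: sanitize the version string,
// prepend the release prefix, then append an optional hotfix suffix.
func releaseTag(version string) string {
	relPrefix := "DEVELOPMENT"
	if prefix := os.Getenv("MINIO_RELEASE"); prefix != "" {
		relPrefix = prefix
	}

	relSuffix := ""
	if hotfix := os.Getenv("MINIO_HOTFIX"); hotfix != "" {
		relSuffix = hotfix
	}

	relTag := strings.Replace(version, " ", "-", -1)
	relTag = strings.Replace(relTag, ":", "-", -1)
	relTag = strings.Replace(relTag, ",", "", -1)
	relTag = relPrefix + "." + relTag

	if relSuffix != "" {
		relTag += "." + relSuffix
	}
	return relTag
}

func main() {
	os.Setenv("MINIO_RELEASE", "RELEASE")
	os.Setenv("MINIO_HOTFIX", "hotfix")
	// Prints: RELEASE.2020-11-06T12-00-00Z.hotfix
	fmt.Println(releaseTag("2020-11-06T12:00:00Z"))
}
```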
```diff
@@ -32,6 +32,7 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	"github.com/gorilla/mux"
@@ -41,6 +42,7 @@ import (
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/cmd/logger/message/log"
 	"github.com/minio/minio/pkg/auth"
+	"github.com/minio/minio/pkg/dsync"
 	"github.com/minio/minio/pkg/handlers"
 	iampolicy "github.com/minio/minio/pkg/iam/policy"
 	"github.com/minio/minio/pkg/madmin"
@@ -59,6 +61,12 @@ const (
 	mgmtClientToken = "clientToken"
 	mgmtForceStart  = "forceStart"
 	mgmtForceStop   = "forceStop"
+
+	healSetsUUID      = "healSetsUUID"
+	healSetsList      = "healSetsList"
+	healSetsPrefix    = "healSetsPrefix"
+	healSleepDuration = "healSleepDuration"
+	healSleepMaxIO    = "healSleepMaxIO"
 )
 
 func updateServer(u *url.URL, sha256Sum []byte, lrTime time.Time, mode string) (us madmin.ServerUpdateStatus, err error) {
@@ -445,6 +453,45 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request
 	writeSuccessResponseJSON(w, jsonBytes)
 }
 
+// ForceUnlockHandler force unlocks requested resource
+func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "ForceUnlock")
+
+	defer logger.AuditLog(w, r, "ForceUnlock", mustGetClaimsFromToken(r))
+
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ForceUnlockAdminAction)
+	if objectAPI == nil {
+		return
+	}
+
+	z, ok := objectAPI.(*erasureServerSets)
+	if !ok {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
+		return
+	}
+
+	vars := mux.Vars(r)
+
+	var args dsync.LockArgs
+	lockersMap := make(map[string]dsync.NetLocker)
+	for _, path := range strings.Split(vars["paths"], ",") {
+		if path == "" {
+			continue
+		}
+		args.Resources = append(args.Resources, path)
+		lockers, _ := z.serverSets[0].getHashedSet(path).getLockers()
+		for _, locker := range lockers {
+			if locker != nil {
+				lockersMap[locker.String()] = locker
+			}
+		}
+	}
+
+	for _, locker := range lockersMap {
+		locker.ForceUnlock(ctx, args)
+	}
+}
+
 // StartProfilingResult contains the status of the starting
 // profiling action in a given server
 type StartProfilingResult struct {
@@ -635,6 +682,169 @@ func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reade
 	return
 }
 
+type healInitSetParams struct {
+	taskUUID      string
+	setNumbers    []int
+	sleepDuration time.Duration
+	sleepForIO    int
+	cancel        func()
+}
+
+// CancelHealSetsHandler - POST /minio/admin/v3/cancel-heal-sets/
+func (a adminAPIHandlers) CancelHealSetsHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "CancelHeal")
+
+	defer logger.AuditLog(w, r, "CancelHeal", mustGetClaimsFromToken(r))
+
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
+	if objectAPI == nil {
+		return
+	}
+
+	// Check if this setup has an erasure coded backend.
+	if !globalIsErasure {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
+		return
+	}
+
+	z, ok := objectAPI.(*erasureServerSets)
+	if !ok {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
+		return
+	}
+
+	if !z.SingleZone() {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
+		return
+	}
+
+	vars := mux.Vars(r)
+	taskUUID := vars[healSetsUUID]
+	if taskUUID == "" {
+		writeErrorResponseJSON(ctx, w, APIError{
+			Code:           "XMinioHealNoSuchProcess",
+			Description:    "No such heal process is running on the server",
+			HTTPStatusCode: http.StatusNotFound,
+		}, r.URL)
+		return
+	}
+
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	opts, ok := a.healSetsMap[taskUUID]
+	if !ok {
+		writeErrorResponseJSON(ctx, w, APIError{
+			Code:           "XMinioHealNoSuchProcess",
+			Description:    "No such heal process is running on the server",
+			HTTPStatusCode: http.StatusNotFound,
+		}, r.URL)
+		return
+	}
+
+	opts.cancel()
+	delete(a.healSetsMap, opts.taskUUID)
+}
+
+// HealSetsHandler - POST /minio/admin/v3/heal-sets/
+func (a adminAPIHandlers) HealSetsHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "HealSets")
+
+	defer logger.AuditLog(w, r, "HealSets", mustGetClaimsFromToken(r))
+
+	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
+	if objectAPI == nil {
+		return
+	}
+
+	// Check if this setup has an erasure coded backend.
+	if !globalIsErasure {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
+		return
+	}
+
+	z, ok := objectAPI.(*erasureServerSets)
+	if !ok {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
+		return
+	}
+
+	if !z.SingleZone() {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
+		return
+	}
+
+	vars := mux.Vars(r)
+	opts := healInitSetParams{
+		taskUUID: mustGetUUID(),
+	}
+	for _, setIdx := range strings.Split(vars[healSetsList], ",") {
+		if setIdx == "" {
+			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errors.New("empty values not allowed")), r.URL)
+			return
+		}
+		i, err := strconv.Atoi(setIdx)
+		if err != nil {
+			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+			return
+		}
+		if i == 0 {
+			i = 1
+		}
+		opts.setNumbers = append(opts.setNumbers, i-1)
+	}
+
+	opts.sleepDuration = time.Second
+	var err error
+	if v := vars[healSleepDuration]; v != "" {
+		opts.sleepDuration, err = time.ParseDuration(v)
+		if err != nil {
+			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+			return
+		}
+	}
+
+	opts.sleepForIO = 10
+	if v := vars[healSleepMaxIO]; v != "" {
+		opts.sleepForIO, err = strconv.Atoi(v)
+		if err != nil {
+			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+			return
+		}
+	}
+
+	buckets, _ := objectAPI.ListBucketsHeal(ctx)
+	ctx, opts.cancel = context.WithCancel(context.Background())
+	go func() {
+		var wg sync.WaitGroup
+		for _, setNumber := range opts.setNumbers {
+			wg.Add(1)
+			go func(setNumber int) {
+				defer wg.Done()
+				lbDisks := z.serverSets[0].sets[setNumber].getDisks()
+				if err := healErasureSet(ctx, vars[healSetsPrefix], setNumber, opts.sleepForIO, opts.sleepDuration, buckets, lbDisks); err != nil {
+					logger.LogIf(ctx, err)
+				}
+			}(setNumber)
+		}
+		wg.Wait()
+		a.mu.Lock()
+		opts.cancel()
+		delete(a.healSetsMap, opts.taskUUID)
+		a.mu.Unlock()
+		if vars[healSetsPrefix] != "" {
+			logger.Info("Healing finished for set '%v' at %s", vars[healSetsList], vars[healSetsPrefix])
+		} else {
+			logger.Info("Healing finished for set '%v'", vars[healSetsList])
+		}
+	}()
+
+	a.mu.Lock()
+	a.healSetsMap[opts.taskUUID] = opts
+	a.mu.Unlock()
+
+	writeSuccessResponseJSON(w, []byte(fmt.Sprintf(`"%s"`, opts.taskUUID)))
+}
+
 // HealHandler - POST /minio/admin/v3/heal/
 // -----------
 // Start heal processing and return heal status items.
@@ -875,7 +1085,7 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
 		return
 	}
 
-	aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context())
+	aggregateHealStateResult, err := getAggregatedBackgroundHealState(ctx)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
@@ -1284,8 +1494,8 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 	deadlinedCtx, cancel := context.WithTimeout(ctx, deadline)
 	defer cancel()
 
-	nsLock := objectAPI.NewNSLock(ctx, minioMetaBucket, "obd-in-progress")
-	if err := nsLock.GetLock(newDynamicTimeout(deadline, deadline)); err != nil { // returns a locked lock
+	nsLock := objectAPI.NewNSLock(minioMetaBucket, "obd-in-progress")
+	if err := nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline)); err != nil { // returns a locked lock
 		errResp(err)
 		return
 	}
```
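The two heal-sets handlers reduce to a common pattern: register a cancelable background task under a UUID, and let a later admin call cancel it by that UUID. A distilled, self-contained sketch of the bookkeeping; the types and names here are illustrative, not MinIO's:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// taskRegistry distills the healSetsMap bookkeeping: every started task
// stores its cancel func under a UUID so a later call can stop it.
type taskRegistry struct {
	mu    sync.Mutex
	tasks map[string]context.CancelFunc
}

func (r *taskRegistry) start(id string, run func(ctx context.Context)) {
	ctx, cancel := context.WithCancel(context.Background())
	r.mu.Lock()
	r.tasks[id] = cancel
	r.mu.Unlock()

	go func() {
		defer r.stop(id) // self-cleanup, like the wg.Wait() path above
		run(ctx)
	}()
}

func (r *taskRegistry) stop(id string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	if cancel, ok := r.tasks[id]; ok {
		cancel()
		delete(r.tasks, id)
		return true
	}
	return false
}

func main() {
	r := &taskRegistry{tasks: make(map[string]context.CancelFunc)}
	r.start("uuid-1234", func(ctx context.Context) {
		<-ctx.Done() // a real task would heal sets until canceled
	})
	time.Sleep(10 * time.Millisecond)
	fmt.Println("canceled:", r.stop("uuid-1234"))
}
```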
```diff
@@ -21,6 +21,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
+	"runtime"
 	"sync"
 	"time"
 
@@ -339,7 +340,11 @@ type healSource struct {
 	bucket    string
 	object    string
 	versionID string
-	opts      *madmin.HealOpts // optional heal option overrides default setting
+	throttle  struct {
+		maxSleep time.Duration
+		maxIO    int
+	}
+	opts *madmin.HealOpts // optional heal option overrides default setting
 }
 
 // healSequence - state for each heal sequence initiated on the
@@ -416,7 +421,7 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
 	clientToken := mustGetUUID()
 
 	return &healSequence{
-		respCh:         make(chan healResult),
+		respCh:         make(chan healResult, runtime.GOMAXPROCS(0)),
 		bucket:         bucket,
 		object:         objPrefix,
 		reportProgress: true,
@@ -656,19 +661,29 @@ func (h *healSequence) healSequenceStart() {
 	}
 }
 
-func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
+func (h *healSequence) queueHealTask(ctx context.Context, source healSource, healType madmin.HealItemType) error {
 	// Send heal request
 	task := healTask{
-		bucket:     source.bucket,
-		object:     source.object,
-		versionID:  source.versionID,
-		opts:       h.settings,
-		responseCh: h.respCh,
+		bucket:        source.bucket,
+		object:        source.object,
+		versionID:     source.versionID,
+		opts:          h.settings,
+		responseCh:    h.respCh,
+		sleepForIO:    globalEndpoints.NEndpoints(),
+		sleepDuration: time.Second,
 	}
 	if source.opts != nil {
 		task.opts = *source.opts
 	}
+
+	if source.throttle.maxIO > 0 {
+		task.sleepForIO = source.throttle.maxIO
+	}
+
+	if source.throttle.maxSleep > 0 {
+		task.sleepDuration = source.throttle.maxSleep
+	}
+
 	h.mutex.Lock()
 	h.scannedItemsMap[healType]++
 	h.lastHealActivity = UTCNow()
@@ -677,6 +692,8 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
 	globalBackgroundHealRoutine.queueHealTask(task)
 
 	select {
+	case <-ctx.Done():
+		return nil
 	case res := <-h.respCh:
 		if !h.reportProgress {
 			// Object might have been deleted, by the time heal
@@ -746,12 +763,8 @@ func (h *healSequence) healItemsFromSourceCh() error {
 			itemType = madmin.HealItemObject
 		}
 
-		if err := h.queueHealTask(source, itemType); err != nil {
-			switch err.(type) {
-			case ObjectExistsAsDirectory:
-			case ObjectNotFound:
-			case VersionNotFound:
-			default:
+		if err := h.queueHealTask(context.Background(), source, itemType); err != nil {
+			if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
 				logger.LogIf(h.ctx, fmt.Errorf("Heal attempt failed for %s: %w",
 					pathJoin(source.bucket, source.object), err))
 			}
@@ -821,7 +834,7 @@ func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
 			return errHealStopSignalled
 		}
 
-		err := h.queueHealTask(healSource{
+		err := h.queueHealTask(context.Background(), healSource{
 			bucket:    bucket,
 			object:    object,
 			versionID: versionID,
@@ -849,7 +862,7 @@ func (h *healSequence) healDiskFormat() error {
 		return errServerNotInitialized
 	}
 
-	return h.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)
+	return h.queueHealTask(context.Background(), healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)
 }
 
 // healBuckets - check for all buckets heal or just particular bucket.
@@ -891,7 +904,7 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
 		return errServerNotInitialized
 	}
 
-	if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
+	if err := h.queueHealTask(context.Background(), healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
 		if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
 			return err
 		}
@@ -941,7 +954,7 @@ func (h *healSequence) healObject(bucket, object, versionID string) error {
 		return errHealStopSignalled
 	}
 
-	err := h.queueHealTask(healSource{
+	err := h.queueHealTask(context.Background(), healSource{
 		bucket:    bucket,
 		object:    object,
 		versionID: versionID,
```
```diff
@@ -18,6 +18,7 @@ package cmd
 
 import (
 	"net/http"
+	"sync"
 
 	"github.com/gorilla/mux"
 	"github.com/minio/minio/cmd/config"
@@ -34,12 +35,19 @@ const (
 )
 
 // adminAPIHandlers provides HTTP handlers for MinIO admin API.
-type adminAPIHandlers struct{}
+type adminAPIHandlers struct {
+	mu          *sync.Mutex
+	healSetsMap map[string]healInitSetParams
+}
 
 // registerAdminRouter - Add handler functions for each service REST API routes.
 func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) {
 
-	adminAPI := adminAPIHandlers{}
+	adminAPI := adminAPIHandlers{
+		mu:          &sync.Mutex{},
+		healSetsMap: make(map[string]healInitSetParams),
+	}
 
 	// Admin router
 	adminRouter := router.PathPrefix(adminPathPrefix).Subrouter()
@@ -68,11 +76,20 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
 	/// Heal operations
 
 	// Heal processing endpoint.
-	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
-	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
-	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
+	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(httpTraceHdrs(adminAPI.HealHandler))
+	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(httpTraceHdrs(adminAPI.HealHandler))
+	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceHdrs(adminAPI.HealHandler))
 
-	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))
+	adminRouter.Methods(http.MethodPost).Path(adminVersion+"/cancel-heal-sets").
+		HandlerFunc(httpTraceHdrs(adminAPI.CancelHealSetsHandler)).
+		Queries(healSetsUUID, "{healSetsUUID:.*}")
+	adminRouter.Methods(http.MethodPost).Path(adminVersion+"/heal-sets").
+		HandlerFunc(httpTraceHdrs(adminAPI.HealSetsHandler)).
+		Queries(healSetsList, "{healSetsList:.*}",
+			healSetsPrefix, "{healSetsPrefix:.*}",
+			healSleepMaxIO, "{healSleepMaxIO:.*}",
+			healSleepDuration, "{healSleepDuration:.*}")
+	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(httpTraceHdrs(adminAPI.BackgroundHealStatusHandler))
 
 	/// Health operations
 
@@ -197,6 +214,8 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
 	// Top locks
 	if globalIsDistErasure {
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
+		adminRouter.Methods(http.MethodPost).Path(adminVersion+"/force-unlock").
+			Queries("paths", "{paths:.*}").HandlerFunc(httpTraceHdrs(adminAPI.ForceUnlockHandler))
 	}
 
 	// HTTP Trace
```
```diff
@@ -234,6 +234,7 @@ const (
 	ErrServerNotInitialized
 	ErrOperationTimedOut
 	ErrOperationMaxedOut
+	ErrClientDisconnected
 	ErrInvalidRequest
 	// MinIO storage class error codes
 	ErrInvalidStorageClass
@@ -427,7 +428,7 @@ var errorCodes = errorCodeMap{
 	},
 	ErrInvalidMaxParts: {
 		Code:           "InvalidArgument",
-		Description:    "Argument max-parts must be an integer between 0 and 2147483647",
+		Description:    "Part number must be an integer between 1 and 10000, inclusive",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
 	ErrInvalidPartNumberMarker: {
@@ -1216,6 +1217,11 @@ var errorCodes = errorCodeMap{
 		Description:    "A timeout occurred while trying to lock a resource, please reduce your request rate",
 		HTTPStatusCode: http.StatusServiceUnavailable,
 	},
+	ErrClientDisconnected: {
+		Code:           "ClientDisconnected",
+		Description:    "Client disconnected before response was ready",
+		HTTPStatusCode: 499, // No official code, use nginx value.
+	},
 	ErrOperationMaxedOut: {
 		Code:           "SlowDown",
 		Description:    "A timeout exceeded while waiting to proceed with the request, please reduce your request rate",
@@ -1742,6 +1748,16 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		return ErrNone
 	}
 
+	// Only return ErrClientDisconnected if the provided context is actually canceled.
+	// This way downstream context.Canceled will still report ErrOperationTimedOut
+	select {
+	case <-ctx.Done():
+		if ctx.Err() == context.Canceled {
+			return ErrClientDisconnected
+		}
+	default:
+	}
+
 	switch err {
 	case errInvalidArgument:
 		apiErr = ErrAdminInvalidArgument
@@ -1966,6 +1982,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrKeyTooLongError
 	case dns.ErrInvalidBucketName:
 		apiErr = ErrInvalidBucketName
+	case dns.ErrBucketConflict:
+		apiErr = ErrBucketAlreadyExists
 	default:
 		var ie, iw int
 		// This work-around is to handle the issue golang/go#30648
```
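The new check above distinguishes a client that went away (context.Canceled, reported with nginx's unofficial HTTP 499) from a deadline that expired. A runnable sketch of that classification:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// classify mirrors the toAPIErrorCode addition: report a client
// disconnect only when the request context was canceled, and keep
// treating an expired deadline as a timeout.
func classify(ctx context.Context) string {
	select {
	case <-ctx.Done():
		if ctx.Err() == context.Canceled {
			return "ClientDisconnected (HTTP 499)"
		}
		return "OperationTimedOut"
	default:
		return "NotDone"
	}
}

func main() {
	ctx1, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(classify(ctx1)) // ClientDisconnected (HTTP 499)

	ctx2, cancel2 := context.WithTimeout(context.Background(), time.Nanosecond)
	defer cancel2()
	time.Sleep(time.Millisecond)
	fmt.Println(classify(ctx2)) // OperationTimedOut
}
```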
```diff
@@ -19,9 +19,14 @@ package cmd
 import (
 	"net"
 	"net/http"
+	"strconv"
+	"strings"
+	"time"
 
 	"github.com/gorilla/mux"
+	"github.com/minio/minio/cmd/config/api"
 	xhttp "github.com/minio/minio/cmd/http"
+	"github.com/minio/minio/pkg/env"
 	"github.com/minio/minio/pkg/wildcard"
 	"github.com/rs/cors"
 )
@@ -48,6 +53,7 @@ func newCachedObjectLayerFn() CacheObjectLayer {
 type objectAPIHandlers struct {
 	ObjectAPI func() ObjectLayer
 	CacheAPI  func() CacheObjectLayer
	Throttler map[string]chan struct{}
 }
 
 // getHost tries its best to return the request host.
@@ -60,12 +66,39 @@ func getHost(r *http.Request) string {
 	return r.Host
 }
 
+// api throttler constants
+const (
+	listAPI = "LIST"
+
+	granularDeadline = 10 * time.Second
+)
+
+func parseThrottler(throttle string) map[string]chan struct{} {
+	th := make(map[string]chan struct{})
+	for _, v := range strings.Split(throttle, ";") {
+		vs := strings.SplitN(v, "=", 2)
+		if len(vs) == 2 {
+			l, err := strconv.Atoi(vs[1])
+			if err == nil {
+				if l >= len(globalEndpoints.Hostnames()) {
+					l /= len(globalEndpoints.Hostnames())
+				} else {
+					l = 1
+				}
+				th[vs[0]] = make(chan struct{}, l)
+			}
+		}
+	}
+	return th
+}
+
 // registerAPIRouter - registers S3 compatible APIs.
 func registerAPIRouter(router *mux.Router) {
 	// Initialize API.
 	api := objectAPIHandlers{
 		ObjectAPI: newObjectLayerFn,
 		CacheAPI:  newCachedObjectLayerFn,
+		Throttler: parseThrottler(env.Get(api.EnvAPIRequestsGranularMax, "")),
 	}
 
 	// API Router
```
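parseThrottler turns a `NAME=limit;NAME=limit` environment value into per-API buffered channels that act as counting semaphores, dividing each cluster-wide limit across the hosts. A standalone sketch of the same parsing, where `hosts` stands in for `len(globalEndpoints.Hostnames())`:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parse mirrors parseThrottler: "API=limit;API=limit" becomes a map of
// buffered channels whose capacity is this node's share of the limit.
func parse(throttle string, hosts int) map[string]chan struct{} {
	th := make(map[string]chan struct{})
	for _, v := range strings.Split(throttle, ";") {
		vs := strings.SplitN(v, "=", 2)
		if len(vs) != 2 {
			continue
		}
		l, err := strconv.Atoi(vs[1])
		if err != nil {
			continue
		}
		if l >= hosts {
			l /= hosts // split the cluster-wide budget across nodes
		} else {
			l = 1
		}
		th[vs[0]] = make(chan struct{}, l)
	}
	return th
}

func main() {
	// e.g. MINIO_API_REQUESTS_GRANULAR_MAX="LIST=100" on a 4-node cluster
	th := parse("LIST=100", 4)
	fmt.Println(cap(th["LIST"])) // 25 concurrent LIST calls per node
}
```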
```diff
@@ -184,12 +184,12 @@ func getSessionToken(r *http.Request) (token string) {
 // Fetch claims in the security token returned by the client, doesn't return
 // errors - upon errors the returned claims map will be empty.
 func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
-	claims, _ := getClaimsFromToken(r, getSessionToken(r))
+	claims, _ := getClaimsFromToken(getSessionToken(r))
 	return claims
 }
 
 // Fetch claims in the security token returned by the client.
-func getClaimsFromToken(r *http.Request, token string) (map[string]interface{}, error) {
+func getClaimsFromToken(token string) (map[string]interface{}, error) {
 	claims := xjwt.NewMapClaims()
 	if token == "" {
 		return claims.Map(), nil
@@ -236,7 +236,7 @@ func getClaimsFromToken(r *http.Request, token string) (map[string]interface{},
 		if err != nil {
 			// Base64 decoding fails, we should log to indicate
 			// something is malforming the request sent by client.
-			logger.LogIf(r.Context(), err, logger.Application)
+			logger.LogIf(GlobalContext, err, logger.Application)
 			return nil, errAuthentication
 		}
 		claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
@@ -257,7 +257,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
 	if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
 		return nil, ErrInvalidToken
 	}
-	claims, err := getClaimsFromToken(r, token)
+	claims, err := getClaimsFromToken(token)
 	if err != nil {
 		return nil, toAPIErrorCode(r.Context(), err)
 	}
```
```diff
@@ -19,6 +19,7 @@ package cmd
 import (
 	"context"
 	"path"
+	"runtime"
 	"time"
 
 	"github.com/minio/minio/pkg/madmin"
@@ -29,10 +30,12 @@ import (
 // path: 'bucket/' or '/bucket/' => Heal bucket
 // path: 'bucket/object' => Heal object
 type healTask struct {
-	bucket    string
-	object    string
-	versionID string
-	opts      madmin.HealOpts
+	bucket        string
+	object        string
+	versionID     string
+	sleepDuration time.Duration
+	sleepForIO    int
+	opts          madmin.HealOpts
 	// Healing response will be sent here
 	responseCh chan healResult
 }
@@ -54,20 +57,32 @@ func (h *healRoutine) queueHealTask(task healTask) {
 	h.tasks <- task
 }
 
-func waitForLowHTTPReq(tolerance int32, maxWait time.Duration) {
-	const wait = 10 * time.Millisecond
-	waitCount := maxWait / wait
+func waitForLowHTTPReq(maxIO int, maxWait time.Duration) {
+	// No need to wait run at full speed.
+	if maxIO <= 0 {
+		return
+	}
+
+	waitTick := 100 * time.Millisecond
 
 	// Bucket notification and http trace are not costly, it is okay to ignore them
 	// while counting the number of concurrent connections
-	tolerance += int32(globalHTTPListen.NumSubscribers() + globalHTTPTrace.NumSubscribers())
+	maxIOFn := func() int {
+		return maxIO + globalHTTPListen.NumSubscribers() + globalHTTPTrace.NumSubscribers()
+	}
 
 	if httpServer := newHTTPServerFn(); httpServer != nil {
 		// Any requests in progress, delay the heal.
-		for (httpServer.GetRequestCount() >= tolerance) &&
-			waitCount > 0 {
-			waitCount--
-			time.Sleep(wait)
+		for httpServer.GetRequestCount() >= int32(maxIOFn()) {
+			if maxWait < waitTick {
+				time.Sleep(maxWait)
+			} else {
+				time.Sleep(waitTick)
+			}
+			maxWait = maxWait - waitTick
+			if maxWait <= 0 {
+				return
+			}
 		}
 	}
 }
@@ -82,7 +97,7 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
 		}
 
 		// Wait and proceed if there are active requests
-		waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()), time.Second)
+		waitForLowHTTPReq(task.sleepForIO, task.sleepDuration)
 
 		var res madmin.HealResultItem
 		var err error
@@ -111,7 +126,7 @@ ...
 
 func newHealRoutine() *healRoutine {
 	return &healRoutine{
-		tasks:  make(chan healTask),
+		tasks:  make(chan healTask, runtime.GOMAXPROCS(0)),
 		doneCh: make(chan struct{}),
 	}
```
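The rewritten waitForLowHTTPReq polls in 100 ms ticks and spends at most maxWait in total, instead of a fixed count of 10 ms naps against a static tolerance. A self-contained sketch of the same loop, where an atomic counter stands in for httpServer.GetRequestCount():

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var inflight int32 // stands in for the server's live request count

// waitForLowLoad distills the new logic: poll until load drops below
// maxIO, but never sleep more than maxWait in total.
func waitForLowLoad(maxIO int, maxWait time.Duration) {
	if maxIO <= 0 {
		return // run at full speed
	}
	const tick = 100 * time.Millisecond
	for atomic.LoadInt32(&inflight) >= int32(maxIO) {
		sleep := tick
		if maxWait < tick {
			sleep = maxWait
		}
		time.Sleep(sleep)
		maxWait -= tick
		if maxWait <= 0 {
			return
		}
	}
}

func main() {
	atomic.StoreInt32(&inflight, 5)
	start := time.Now()
	waitForLowLoad(3, 300*time.Millisecond) // load never drops; spend the budget
	fmt.Println("waited ~", time.Since(start).Round(100*time.Millisecond))
}
```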
```diff
@@ -114,7 +114,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerSets, bgSeq *
 		case <-ctx.Done():
 			return
 		case <-time.After(defaultMonitorNewDiskInterval):
-			waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()), time.Second)
+			waitForLowHTTPReq(globalEndpoints.NEndpoints(), time.Second)
 
 			var erasureSetInZoneDisksToHeal []map[int][]StorageAPI
 
@@ -168,7 +168,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerSets, bgSeq *
 				logger.Info("Healing disk '%s' on %s zone", disk, humanize.Ordinal(i+1))
 
 				lbDisks := z.serverSets[i].sets[setIndex].getOnlineDisks()
-				if err := healErasureSet(ctx, setIndex, buckets, lbDisks); err != nil {
+				if err := healErasureSet(ctx, "", setIndex, 100, time.Second, buckets, lbDisks); err != nil {
 					logger.LogIf(ctx, err)
 					continue
 				}
```
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/subtle"
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
|
@ -24,7 +26,6 @@ import (
|
|||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
|
@ -309,7 +310,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
|
|||
|
||||
// err will be nil here as we already called this function
|
||||
// earlier in this request.
|
||||
claims, _ := getClaimsFromToken(r, getSessionToken(r))
|
||||
claims, _ := getClaimsFromToken(getSessionToken(r))
|
||||
n := 0
|
||||
// Use the following trick to filter in place
|
||||
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
|
||||
|
@ -675,13 +676,15 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
|||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
resource, err := getResource(r.URL.Path, r.Host, globalDomainNames)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
// Make sure that the URL does not contain object name.
|
||||
if bucket != filepath.Clean(resource[1:]) {
|
||||
|
||||
// Make sure that the URL does not contain object name.
|
||||
if bucket != path.Clean(resource[1:]) {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
@ -724,7 +727,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
|||
defer fileBody.Close()
|
||||
|
||||
formValues.Set("Bucket", bucket)
|
||||
|
||||
if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
|
||||
// S3 feature to replace ${filename} found in Key form field
|
||||
// by the filename attribute passed in multipart
|
||||
|
@ -744,12 +746,51 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
|||
}
|
||||
|
||||
// Verify policy signature.
|
||||
errCode := doesPolicySignatureMatch(formValues)
|
||||
cred, errCode := doesPolicySignatureMatch(formValues)
|
||||
if errCode != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Once signature is validated, check if the user has
|
||||
// explicit permissions for the user.
|
||||
{
|
||||
token := formValues.Get(xhttp.AmzSecurityToken)
|
||||
if token != "" && cred.AccessKey == "" {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoAccessKey), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if cred.IsServiceAccount() && token == "" {
|
||||
token = cred.SessionToken
|
||||
}
|
||||
|
||||
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidToken), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Extract claims if any.
|
||||
claims, err := getClaimsFromToken(token)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if !globalIAMSys.IsAllowed(iampolicy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Action: iampolicy.PutObjectAction,
|
||||
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
|
||||
BucketName: bucket,
|
||||
ObjectName: object,
|
||||
IsOwner: globalActiveCred.AccessKey == cred.AccessKey,
|
||||
Claims: claims,
|
||||
}) {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
|
||||
|
@ -758,10 +799,11 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
|||
|
||||
// Handle policy if it is set.
|
||||
if len(policyBytes) > 0 {
|
||||
|
||||
postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
|
||||
postPolicyForm, err := parsePostPolicyForm(bytes.NewReader(policyBytes))
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat), r.URL, guessIsBrowserReq(r))
|
||||
errAPI := errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat)
|
||||
errAPI.Description = fmt.Sprintf("%s '(%s)'", errAPI.Description, err)
|
||||
writeErrorResponse(ctx, w, errAPI, r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -986,22 +1028,6 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
|
|||
}
|
||||
}
|
||||
|
||||
deleteBucket := objectAPI.DeleteBucket
|
||||
|
||||
// Attempt to delete bucket.
|
||||
if err := deleteBucket(ctx, bucket, forceDelete); err != nil {
|
||||
if _, ok := err.(BucketNotEmpty); ok && (globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket)) {
|
||||
apiErr := toAPIError(ctx, err)
|
||||
apiErr.Description = "The bucket you tried to delete is not empty. You must delete all versions in the bucket."
|
||||
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
|
||||
} else {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
|
||||
|
||||
if globalDNSConfig != nil {
|
||||
if err := globalDNSConfig.Delete(bucket); err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually", err))
|
||||
|
@ -1010,6 +1036,27 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
|
|||
}
|
||||
}
|
||||
|
||||
deleteBucket := objectAPI.DeleteBucket
|
||||
|
||||
// Attempt to delete bucket.
|
||||
if err := deleteBucket(ctx, bucket, forceDelete); err != nil {
|
||||
apiErr := toAPIError(ctx, err)
|
||||
if _, ok := err.(BucketNotEmpty); ok {
|
||||
if globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket) {
|
||||
apiErr.Description = "The bucket you tried to delete is not empty. You must delete all versions in the bucket."
|
||||
}
|
||||
}
|
||||
if globalDNSConfig != nil {
|
||||
if err2 := globalDNSConfig.Put(bucket); err2 != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, pl1ease fix it manually", err2))
|
||||
}
|
||||
}
|
||||
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
|
||||
|
||||
// Write success response.
|
||||
writeSuccessNoContent(w)
|
||||
|
||||
|
@ -1050,6 +1097,12 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
|
|||
return
|
||||
}
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
config, err := objectlock.ParseObjectLockConfig(r.Body)
|
||||
if err != nil {
|
||||
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
|
||||
|
@ -1104,6 +1157,12 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri
|
|||
return
|
||||
}
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
config, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
|
@ -1141,6 +1200,12 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
|
|||
return
|
||||
}
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
tags, err := tags.ParseBucketXML(io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
|
||||
|
@ -1186,6 +1251,12 @@ func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *h
|
|||
return
|
||||
}
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
config, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
|
@ -1223,6 +1294,12 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
|
|||
return
|
||||
}
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if err := globalBucketMetadataSys.Update(bucket, bucketTaggingConfig, nil); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
|
@ -82,9 +83,27 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int
|
|||
// of the versions of objects in a bucket.
|
||||
func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListObjectVersions")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListObjectVersions", mustGetClaimsFromToken(r))
|
||||
|
||||
pool := api.Throttler[listAPI]
|
||||
if pool != nil {
|
||||
deadlineTimer := time.NewTimer(granularDeadline)
|
||||
defer deadlineTimer.Stop()
|
||||
|
||||
select {
|
||||
case pool <- struct{}{}:
|
||||
defer func() { <-pool }()
|
||||
case <-deadlineTimer.C:
|
||||
// Send a http timeout message
|
||||
writeErrorResponse(ctx, w,
|
||||
errorCodes.ToAPIErr(ErrOperationMaxedOut),
|
||||
r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
|
@ -119,6 +138,7 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
|
|||
if forwardStr == "" {
|
||||
forwardStr = bucket
|
||||
}
|
||||
|
||||
if proxyRequestByStringHash(ctx, w, r, forwardStr) {
|
||||
return
|
||||
}
|
||||
|
@ -152,9 +172,27 @@ func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r
|
|||
// MinIO continues to support ListObjectsV1 and V2 for supporting legacy tools.
|
||||
func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListObjectsV2M")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListObjectsV2M", mustGetClaimsFromToken(r))
|
||||
|
||||
pool := api.Throttler[listAPI]
|
||||
if pool != nil {
|
||||
deadlineTimer := time.NewTimer(granularDeadline)
|
||||
defer deadlineTimer.Stop()
|
||||
|
||||
select {
|
||||
case pool <- struct{}{}:
|
||||
defer func() { <-pool }()
|
||||
case <-deadlineTimer.C:
|
||||
// Send a http timeout message
|
||||
writeErrorResponse(ctx, w,
|
||||
errorCodes.ToAPIErr(ErrOperationMaxedOut),
|
||||
r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
|
@ -229,9 +267,27 @@ func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *htt
|
|||
// MinIO continues to support ListObjectsV1 for supporting legacy tools.
|
||||
func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListObjectsV2")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListObjectsV2", mustGetClaimsFromToken(r))
|
||||
|
||||
pool := api.Throttler[listAPI]
|
||||
if pool != nil {
|
||||
deadlineTimer := time.NewTimer(granularDeadline)
|
||||
defer deadlineTimer.Stop()
|
||||
|
||||
select {
|
||||
case pool <- struct{}{}:
|
||||
defer func() { <-pool }()
|
||||
case <-deadlineTimer.C:
|
||||
// Send a http timeout message
|
||||
writeErrorResponse(ctx, w,
|
||||
errorCodes.ToAPIErr(ErrOperationMaxedOut),
|
||||
r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
|
@ -358,9 +414,27 @@ func proxyRequestByStringHash(ctx context.Context, w http.ResponseWriter, r *htt
|
|||
//
|
||||
func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListObjectsV1")
|
||||
|
||||
defer logger.AuditLog(w, r, "ListObjectsV1", mustGetClaimsFromToken(r))
|
||||
|
||||
pool := api.Throttler[listAPI]
|
||||
if pool != nil {
|
||||
deadlineTimer := time.NewTimer(granularDeadline)
|
||||
defer deadlineTimer.Stop()
|
||||
|
||||
select {
|
||||
case pool <- struct{}{}:
|
||||
defer func() { <-pool }()
|
||||
case <-deadlineTimer.C:
|
||||
// Send a http timeout message
|
||||
writeErrorResponse(ctx, w,
|
||||
errorCodes.ToAPIErr(ErrOperationMaxedOut),
|
||||
r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
|
@ -393,6 +467,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
|
|||
if forwardStr == "" {
|
||||
forwardStr = bucket
|
||||
}
|
||||
|
||||
if proxyRequestByStringHash(ctx, w, r, forwardStr) {
|
||||
return
|
||||
}
|
||||
|
|
|
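Each LIST handler now tries to take a slot from the shared buffered channel and fails with ErrOperationMaxedOut once the 10-second granularDeadline expires. The same acquire/release logic, minus the request-context case, as a runnable sketch:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// acquire distills the per-handler throttling block: take a slot from
// the buffered channel, or fail with a SlowDown-style error once the
// deadline expires.
func acquire(pool chan struct{}, deadline time.Duration) (release func(), err error) {
	t := time.NewTimer(deadline)
	defer t.Stop()
	select {
	case pool <- struct{}{}:
		return func() { <-pool }, nil
	case <-t.C:
		return nil, errors.New("ErrOperationMaxedOut")
	}
}

func main() {
	pool := make(chan struct{}, 1)

	release, err := acquire(pool, 50*time.Millisecond)
	fmt.Println(err) // <nil>: got the only slot

	_, err = acquire(pool, 50*time.Millisecond)
	fmt.Println(err) // ErrOperationMaxedOut: pool is full

	release()
}
```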
```diff
@@ -17,6 +17,7 @@
 package cmd
 
 import (
+	"crypto/tls"
 	"fmt"
 	"strings"
 	"sync"
@@ -271,7 +272,9 @@ func validateConfig(s config.Config, setDriveCount int) error {
 		}
 	}
 	{
-		kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
+		kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), newCustomHTTPTransportWithHTTP2(&tls.Config{
+			RootCAs: globalRootCAs,
+		}, defaultDialTimeout)())
 		if err != nil {
 			return err
 		}
@@ -443,7 +446,9 @@ func lookupConfigs(s config.Config, setDriveCount int) {
 		logger.LogIf(ctx, fmt.Errorf("Unable to read heal config: %w", err))
 	}
 
-	kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
+	kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), newCustomHTTPTransportWithHTTP2(&tls.Config{
+		RootCAs: globalRootCAs,
+	}, defaultDialTimeout)())
 	if err != nil {
 		logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS config: %w", err))
 	}
```
```diff
@@ -36,6 +36,7 @@ const (
 	apiRemoteTransportDeadline = "remote_transport_deadline"
 
 	EnvAPIRequestsMax         = "MINIO_API_REQUESTS_MAX"
+	EnvAPIRequestsGranularMax = "MINIO_API_REQUESTS_GRANULAR_MAX"
 	EnvAPIRequestsDeadline    = "MINIO_API_REQUESTS_DEADLINE"
 	EnvAPIClusterDeadline     = "MINIO_API_CLUSTER_DEADLINE"
 	EnvAPICorsAllowOrigin     = "MINIO_API_CORS_ALLOW_ORIGIN"
```
```diff
@@ -35,6 +35,8 @@ const (
 	EnvArgs       = "MINIO_ARGS"
 	EnvDNSWebhook = "MINIO_DNS_WEBHOOK_ENDPOINT"
 
+	EnvRootDiskThresholdSize = "MINIO_ROOTDISK_THRESHOLD_SIZE"
+
 	EnvUpdate = "MINIO_UPDATE"
 
 	EnvEndpoints = "MINIO_ENDPOINTS" // legacy
```
```diff
@@ -30,7 +30,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/dgrijalva/jwt-go"
+	"github.com/golang-jwt/jwt"
 	"github.com/minio/minio/cmd/config"
 	xhttp "github.com/minio/minio/cmd/http"
 )
@@ -89,17 +89,24 @@ func (c *OperatorDNS) Put(bucket string) error {
 	if err = c.addAuthHeader(req); err != nil {
 		return newError(bucket, err)
 	}
+
 	resp, err := c.httpClient.Do(req)
 	if err != nil {
+		if derr := c.Delete(bucket); derr != nil {
+			return newError(bucket, derr)
+		}
 		return err
 	}
-	var errorStringBuilder strings.Builder
-	io.Copy(&errorStringBuilder, io.LimitReader(resp.Body, resp.ContentLength))
-	xhttp.DrainBody(resp.Body)
+	defer xhttp.DrainBody(resp.Body)
 
 	if resp.StatusCode != http.StatusOK {
+		var errorStringBuilder strings.Builder
+		io.Copy(&errorStringBuilder, io.LimitReader(resp.Body, resp.ContentLength))
 		errorString := errorStringBuilder.String()
+		switch resp.StatusCode {
+		case http.StatusConflict:
+			return ErrBucketConflict(Error{bucket, errors.New(errorString)})
+		}
 		return newError(bucket, fmt.Errorf("service create for bucket %s, failed with status %s, error %s", bucket, resp.Status, errorString))
 	}
 	return nil
```
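The status-dependent error handling above can be exercised against any HTTP backend. A self-contained sketch of the bounded error-body read, using net/http/httptest with a made-up error payload:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "bucket already exists", http.StatusConflict)
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// Read the error payload, bounded by Content-Length, the same
		// way the patched Put handler builds its error string.
		var sb strings.Builder
		io.Copy(&sb, io.LimitReader(resp.Body, resp.ContentLength))
		fmt.Printf("status %s: %s\n", resp.Status, strings.TrimSpace(sb.String()))
	}
}
```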
```diff
@@ -26,12 +26,20 @@ type Error struct {
 type ErrInvalidBucketName Error
 
 func (e ErrInvalidBucketName) Error() string {
-	return "invalid bucket name error: " + e.Err.Error()
+	return e.Bucket + " invalid bucket name error: " + e.Err.Error()
 }
 
 func (e Error) Error() string {
 	return "dns related error: " + e.Err.Error()
 }
 
+// ErrBucketConflict for buckets that already exist
+type ErrBucketConflict Error
+
+func (e ErrBucketConflict) Error() string {
+	return e.Bucket + " bucket conflict error: " + e.Err.Error()
+}
+
 // Store dns record store
 type Store interface {
 	Put(bucket string) error
```
```diff
@@ -19,7 +19,7 @@ package openid
 import (
 	"crypto"
 
-	"github.com/dgrijalva/jwt-go"
+	"github.com/golang-jwt/jwt"
 
 	// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
 	_ "golang.org/x/crypto/sha3"
```

```diff
@@ -28,7 +28,7 @@ import (
 	"sync"
 	"time"
 
-	jwtgo "github.com/dgrijalva/jwt-go"
+	jwtgo "github.com/golang-jwt/jwt"
 	"github.com/minio/minio/cmd/config"
 	"github.com/minio/minio/pkg/auth"
 	"github.com/minio/minio/pkg/env"
```

```diff
@@ -19,7 +19,7 @@ package openid
 import (
 	"crypto"
 
-	"github.com/dgrijalva/jwt-go"
+	"github.com/golang-jwt/jwt"
 
 	// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
 	_ "golang.org/x/crypto/sha3"
```
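github.com/golang-jwt/jwt is the community-maintained fork of the archived github.com/dgrijalva/jwt-go, and its v3 line keeps the same package name and API surface, which is why these hunks (and the earlier CREDITS and operator DNS changes) touch only import paths.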
```diff
@@ -177,11 +177,10 @@ func (kes *kesService) CreateKey(keyID string) error { return kes.client.CreateK
 // named key referenced by keyID. It also binds the generated key
 // cryptographically to the provided context.
 func (kes *kesService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
-	var context bytes.Buffer
-	ctx.WriteTo(&context)
+	context := ctx.AppendTo(make([]byte, 0, 128))
 
 	var plainKey []byte
-	plainKey, sealedKey, err = kes.client.GenerateDataKey(keyID, context.Bytes())
+	plainKey, sealedKey, err = kes.client.GenerateDataKey(keyID, context)
 	if err != nil {
 		return key, nil, err
 	}
@@ -200,11 +199,10 @@ ...
 // The context must be same context as the one provided while
 // generating the plaintext key / sealedKey.
 func (kes *kesService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
-	var context bytes.Buffer
-	ctx.WriteTo(&context)
+	context := ctx.AppendTo(make([]byte, 0, 128))
 
 	var plainKey []byte
-	plainKey, err = kes.client.DecryptDataKey(keyID, sealedKey, context.Bytes())
+	plainKey, err = kes.client.DecryptDataKey(keyID, sealedKey, context)
 	if err != nil {
 		return key, err
 	}
@@ -415,7 +413,7 @@ func (c *kesClient) postRetry(path string, body io.ReadSeeker, limit int64) (io.
 	}
 
 	// If the error is not temp. / retryable => fail the request immediately.
-	if !xnet.IsNetworkOrHostDown(err) &&
+	if !xnet.IsNetworkOrHostDown(err, false) &&
 		!errors.Is(err, io.EOF) &&
 		!errors.Is(err, io.ErrUnexpectedEOF) &&
 		!errors.Is(err, context.DeadlineExceeded) {
```
```diff
@@ -103,7 +103,6 @@ func (key ObjectKey) Seal(extKey, iv [32]byte, domain, bucket, object string) Se
 func (key *ObjectKey) Unseal(extKey [32]byte, sealedKey SealedKey, domain, bucket, object string) error {
 	var (
 		unsealConfig sio.Config
-		decryptedKey bytes.Buffer
 	)
 	switch sealedKey.Algorithm {
 	default:
@@ -122,10 +121,9 @@ func (key *ObjectKey) Unseal(extKey [32]byte, sealedKey SealedKey, domain, bucke
 		unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil)}
 	}
 
-	if n, err := sio.Decrypt(&decryptedKey, bytes.NewReader(sealedKey.Key[:]), unsealConfig); n != 32 || err != nil {
+	if out, err := sio.DecryptBuffer(key[:0], sealedKey.Key[:], unsealConfig); len(out) != 32 || err != nil {
 		return ErrSecretKeyMismatch
 	}
-	copy(key[:], decryptedKey.Bytes())
 	return nil
 }
 
@@ -165,11 +163,7 @@ func (key ObjectKey) UnsealETag(etag []byte) ([]byte, error) {
 	if !IsETagSealed(etag) {
 		return etag, nil
 	}
-	var buffer bytes.Buffer
 	mac := hmac.New(sha256.New, key[:])
 	mac.Write([]byte("SSE-etag"))
-	if _, err := sio.Decrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
-		return nil, err
-	}
-	return buffer.Bytes(), nil
+	return sio.DecryptBuffer(make([]byte, 0, len(etag)), etag, sio.Config{Key: mac.Sum(nil)})
 }
```
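These hunks replace the io.Reader/io.Writer decryption path with sio.DecryptBuffer, which decrypts into a caller-supplied slice and drops the bytes.Buffer plus copy round-trip. A minimal round-trip, assuming a github.com/minio/sio version that exports DecryptBuffer (the v0.2.x line):

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/minio/sio"
)

func main() {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	// Seal with the streaming API...
	var sealed bytes.Buffer
	if _, err := sio.Encrypt(&sealed, bytes.NewReader([]byte("object key")), sio.Config{Key: key}); err != nil {
		panic(err)
	}

	// ...but unseal with DecryptBuffer, which appends the plaintext to a
	// caller-supplied slice, the way the patched Unseal decrypts into key[:0].
	out, err := sio.DecryptBuffer(make([]byte, 0, 32), sealed.Bytes(), sio.Config{Key: key})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```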
@ -39,6 +39,8 @@ type Context map[string]string
|
|||
//
|
||||
// WriteTo sorts the context keys and writes the sorted
|
||||
// key-value pairs as canonical JSON object to w.
|
||||
//
|
||||
// Note that neither keys nor values are escaped for JSON.
|
||||
func (c Context) WriteTo(w io.Writer) (n int64, err error) {
|
||||
sortedKeys := make(sort.StringSlice, 0, len(c))
|
||||
for k := range c {
|
||||
|
@ -67,6 +69,53 @@ func (c Context) WriteTo(w io.Writer) (n int64, err error) {
|
|||
return n + int64(nn), err
|
||||
}
|
||||
|
||||
// AppendTo appends the context in a canonical from to dst.
|
||||
//
|
||||
// AppendTo sorts the context keys and writes the sorted
|
||||
// key-value pairs as canonical JSON object to w.
|
||||
//
|
||||
// Note that neither keys nor values are escaped for JSON.
|
||||
func (c Context) AppendTo(dst []byte) (output []byte) {
|
||||
if len(c) == 0 {
|
||||
return append(dst, '{', '}')
|
||||
}
|
||||
|
||||
// out should not escape.
|
||||
out := bytes.NewBuffer(dst)
|
||||
|
||||
// No need to copy+sort
|
||||
if len(c) == 1 {
|
||||
for k, v := range c {
|
||||
out.WriteString(`{"`)
|
||||
out.WriteString(k)
|
||||
out.WriteString(`":"`)
|
||||
out.WriteString(v)
|
||||
out.WriteString(`"}`)
|
||||
}
|
||||
return out.Bytes()
|
||||
}
|
||||
|
||||
sortedKeys := make([]string, 0, len(c))
|
||||
for k := range c {
|
||||
sortedKeys = append(sortedKeys, k)
|
||||
}
|
||||
sort.Strings(sortedKeys)
|
||||
|
||||
out.WriteByte('{')
|
||||
for i, k := range sortedKeys {
|
||||
out.WriteByte('"')
|
||||
out.WriteString(k)
|
||||
out.WriteString(`":"`)
|
||||
out.WriteString(c[k])
|
||||
out.WriteByte('"')
|
||||
if i < len(sortedKeys)-1 {
|
||||
out.WriteByte(',')
|
||||
}
|
||||
}
|
||||
out.WriteByte('}')
|
||||
return out.Bytes()
|
||||
}
|
||||
|
||||
// KMS represents an active and authenticted connection
|
||||
// to a Key-Management-Service. It supports generating
|
||||
// data key generation and unsealing of KMS-generated
|
||||
|
@ -155,13 +204,12 @@ func (kms *masterKeyKMS) Info() (info KMSInfo) {
|
|||
|
||||
func (kms *masterKeyKMS) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
|
||||
var (
|
||||
buffer bytes.Buffer
|
||||
derivedKey = kms.deriveKey(keyID, ctx)
|
||||
)
|
||||
if n, err := sio.Decrypt(&buffer, bytes.NewReader(sealedKey), sio.Config{Key: derivedKey[:]}); err != nil || n != 32 {
|
||||
out, err := sio.DecryptBuffer(key[:0], sealedKey, sio.Config{Key: derivedKey[:]})
|
||||
if err != nil || len(out) != 32 {
|
||||
return key, err // TODO(aead): upgrade sio to use sio.Error
|
||||
}
|
||||
copy(key[:], buffer.Bytes())
|
||||
return key, nil
|
||||
}
|
||||
|
||||
|
@ -171,7 +219,7 @@ func (kms *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte)
|
|||
}
|
||||
mac := hmac.New(sha256.New, kms.masterKey[:])
|
||||
mac.Write([]byte(keyID))
|
||||
context.WriteTo(mac)
|
||||
mac.Write(context.AppendTo(make([]byte, 0, 128)))
|
||||
mac.Sum(key[:0])
|
||||
return key
|
||||
}
|
||||

@@ -16,6 +16,7 @@ package crypto

import (
    "bytes"
    "fmt"
    "path"
    "strings"
    "testing"

@@ -83,3 +84,32 @@ func TestContextWriteTo(t *testing.T) {
        }
    }
}

func TestContextAppendTo(t *testing.T) {
    for i, test := range contextWriteToTests {
        dst := make([]byte, 0, 1024)
        dst = test.Context.AppendTo(dst)
        if s := string(dst); s != test.ExpectedJSON {
            t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON)
        }
        // Append one more
        dst = test.Context.AppendTo(dst)
        if s := string(dst); s != test.ExpectedJSON+test.ExpectedJSON {
            t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON+test.ExpectedJSON)
        }
    }
}

func BenchmarkContext_AppendTo(b *testing.B) {
    tests := []Context{{}, {"bucket": "warp-benchmark-bucket"}, {"0": "1", "-": "2", ".": "#"}, {"34trg": "dfioutr89", "ikjfdghkjf": "jkedfhgfjkhg", "sdfhsdjkh": "if88889", "asddsirfh804": "kjfdshgdfuhgfg78-45604586#$%"}}
    for _, test := range tests {
        b.Run(fmt.Sprintf("%d-elems", len(test)), func(b *testing.B) {
            dst := make([]byte, 0, 1024)
            b.ReportAllocs()
            b.ResetTimer()
            for i := 0; i < b.N; i++ {
                dst = test.AppendTo(dst[:0])
            }
        })
    }
}

@@ -204,15 +204,17 @@ func (s3) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []byte
}

// Check whether all extracted values are well-formed
iv, err := base64.StdEncoding.DecodeString(b64IV)
if err != nil || len(iv) != 32 {
var iv [32]byte
n, err := base64.StdEncoding.Decode(iv[:], []byte(b64IV))
if err != nil || n != 32 {
    return keyID, kmsKey, sealedKey, errInvalidInternalIV
}
if algorithm != SealAlgorithm {
    return keyID, kmsKey, sealedKey, errInvalidInternalSealAlgorithm
}
encryptedKey, err := base64.StdEncoding.DecodeString(b64SealedKey)
if err != nil || len(encryptedKey) != 64 {
var encryptedKey [64]byte
n, err = base64.StdEncoding.Decode(encryptedKey[:], []byte(b64SealedKey))
if err != nil || n != 64 {
    return keyID, kmsKey, sealedKey, Errorf("The internal sealed key for SSE-S3 is invalid")
}
if idPresent && kmsKeyPresent { // We are using a KMS -> parse the sealed KMS data key.

@@ -223,8 +225,8 @@ func (s3) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []byte
}

sealedKey.Algorithm = algorithm
copy(sealedKey.IV[:], iv)
copy(sealedKey.Key[:], encryptedKey)
sealedKey.IV = iv
sealedKey.Key = encryptedKey
return keyID, kmsKey, sealedKey, nil
}

@@ -15,7 +15,6 @@
package crypto

import (
    "bytes"
    "encoding/base64"
    "errors"
    "fmt"

@@ -224,11 +223,10 @@ func (v *vaultService) CreateKey(keyID string) error {
// named key referenced by keyID. It also binds the generated key
// cryptographically to the provided context.
func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
    var contextStream bytes.Buffer
    ctx.WriteTo(&contextStream)
    context := ctx.AppendTo(make([]byte, 0, 128))

    payload := map[string]interface{}{
        "context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
        "context": base64.StdEncoding.EncodeToString(context),
    }
    s, err := v.client.Logical().Write(fmt.Sprintf("/transit/datakey/plaintext/%s", keyID), payload)
    if err != nil {

@@ -260,12 +258,11 @@ func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sea
// The context must be the same context as the one provided while
// generating the plaintext key / sealedKey.
func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
    var contextStream bytes.Buffer
    ctx.WriteTo(&contextStream)
    context := ctx.AppendTo(make([]byte, 0, 128))

    payload := map[string]interface{}{
        "ciphertext": string(sealedKey),
        "context":    base64.StdEncoding.EncodeToString(contextStream.Bytes()),
        "context":    base64.StdEncoding.EncodeToString(context),
    }

    s, err := v.client.Logical().Write(fmt.Sprintf("/transit/decrypt/%s", keyID), payload)

@@ -294,12 +291,11 @@ func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (k
// The context must be the same context as the one provided while
// generating the plaintext key / sealedKey.
func (v *vaultService) UpdateKey(keyID string, sealedKey []byte, ctx Context) (rotatedKey []byte, err error) {
    var contextStream bytes.Buffer
    ctx.WriteTo(&contextStream)
    context := ctx.AppendTo(make([]byte, 0, 128))

    payload := map[string]interface{}{
        "ciphertext": string(sealedKey),
        "context":    base64.StdEncoding.EncodeToString(contextStream.Bytes()),
        "context":    base64.StdEncoding.EncodeToString(context),
    }
    s, err := v.client.Logical().Write(fmt.Sprintf("/transit/rewrap/%s", keyID), payload)
    if err != nil {

@@ -21,11 +21,13 @@ import (
    "context"
    "encoding/binary"
    "errors"
    "math"
    "math/rand"
    "os"
    "path"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/minio/minio/cmd/config"

@@ -57,6 +59,129 @@ var (
    dataCrawlerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
)

type dynamicSleeper struct {
    mu sync.RWMutex

    // Sleep factor
    factor float64

    // maximum sleep cap,
    // set to <= 0 to disable.
    maxSleep time.Duration

    // Don't sleep at all, if time taken is below this value.
    // This is to avoid too small costly sleeps.
    minSleep time.Duration

    // cycle will be closed
    cycle chan struct{}
}

// newDynamicSleeper returns a dynamicSleeper with the given factor and sleep cap.
func newDynamicSleeper(factor float64, maxWait time.Duration) *dynamicSleeper {
    return &dynamicSleeper{
        factor:   factor,
        cycle:    make(chan struct{}),
        maxSleep: maxWait,
        minSleep: 100 * time.Microsecond,
    }
}

// Timer returns a timer that has started.
// When the returned function is called it will wait.
func (d *dynamicSleeper) Timer(ctx context.Context) func() {
    t := time.Now()
    return func() {
        doneAt := time.Now()
        for {
            // Grab current values
            d.mu.RLock()
            minWait, maxWait := d.minSleep, d.maxSleep
            factor := d.factor
            cycle := d.cycle
            d.mu.RUnlock()
            elapsed := doneAt.Sub(t)
            // Don't sleep for really small amounts of time.
            wantSleep := time.Duration(float64(elapsed) * factor)
            if wantSleep <= minWait {
                return
            }
            if maxWait > 0 && wantSleep > maxWait {
                wantSleep = maxWait
            }
            timer := time.NewTimer(wantSleep)
            select {
            case <-ctx.Done():
                if !timer.Stop() {
                    <-timer.C
                }
                return
            case <-timer.C:
                return
            case <-cycle:
                if !timer.Stop() {
                    // We expired.
                    <-timer.C
                    return
                }
            }
        }
    }
}

// Sleep sleeps the specified time multiplied by the sleep factor.
// If the factor is updated the sleep will be done again with the new factor.
func (d *dynamicSleeper) Sleep(ctx context.Context, base time.Duration) {
    for {
        // Grab current values
        d.mu.RLock()
        minWait, maxWait := d.minSleep, d.maxSleep
        factor := d.factor
        cycle := d.cycle
        d.mu.RUnlock()
        // Don't sleep for really small amounts of time.
        wantSleep := time.Duration(float64(base) * factor)
        if wantSleep <= minWait {
            return
        }
        if maxWait > 0 && wantSleep > maxWait {
            wantSleep = maxWait
        }
        timer := time.NewTimer(wantSleep)
        select {
        case <-ctx.Done():
            if !timer.Stop() {
                <-timer.C
            }
            return
        case <-timer.C:
            return
        case <-cycle:
            if !timer.Stop() {
                // We expired.
                <-timer.C
                return
            }
        }
    }
}

// Update the current settings and cycle all waiting.
// Parameters are the same as in the constructor.
func (d *dynamicSleeper) Update(factor float64, maxWait time.Duration) error {
    d.mu.Lock()
    defer d.mu.Unlock()
    if math.Abs(d.factor-factor) < 1e-10 && d.maxSleep == maxWait {
        return nil
    }
    // Update values and cycle waiting.
    close(d.cycle)
    d.factor = factor
    d.maxSleep = maxWait
    d.cycle = make(chan struct{})
    return nil
}
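
A usage sketch of the sleeper (every name other than dynamicSleeper's own API is a placeholder): each unit of work is followed by a proportional back-off, so a factor of 10 keeps the caller to roughly 1/11th of wall-clock time.

    sleeper := newDynamicSleeper(10, 10*time.Second)
    for _, entry := range workItems { // workItems/process are hypothetical
        wait := sleeper.Timer(ctx) // start measuring
        process(entry)             // do the real work
        wait()                     // sleep ~10x the elapsed time, capped at 10s
    }
    // Settings can be retuned live; all current waiters are cycled onto the new factor:
    sleeper.Update(1, time.Second)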

// initDataCrawler will start the crawler unless disabled.
func initDataCrawler(ctx context.Context, objAPI ObjectLayer) {
    if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {

@@ -69,10 +194,10 @@ func initDataCrawler(ctx context.Context, objAPI ObjectLayer) {
// There should only ever be one crawler running per cluster.
func runDataCrawler(ctx context.Context, objAPI ObjectLayer) {
    // Make sure only 1 crawler is running on the cluster.
    locker := objAPI.NewNSLock(ctx, minioMetaBucket, "runDataCrawler.lock")
    locker := objAPI.NewNSLock(minioMetaBucket, "runDataCrawler.lock")
    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    for {
        err := locker.GetLock(dataCrawlerLeaderLockTimeout)
        err := locker.GetLock(ctx, dataCrawlerLeaderLockTimeout)
        if err != nil {
            time.Sleep(time.Duration(r.Float64() * float64(dataCrawlStartDelay)))
            continue

@@ -489,7 +614,10 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
// Dynamic time delay.
t := UTCNow()

err = objAPI.HealObjects(ctx, bucket, prefix, madmin.HealOpts{Recursive: true, Remove: healDeleteDangling},
err = objAPI.HealObjects(ctx, bucket, prefix, madmin.HealOpts{
    Recursive: true,
    Remove:    healDeleteDangling,
},
    func(bucket, object, versionID string) error {
        // Wait for each heal as per crawler frequency.
        sleepDuration(time.Since(t), f.dataUsageCrawlMult)

@@ -497,11 +625,12 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
        defer func() {
            t = UTCNow()
        }()
        return bgSeq.queueHealTask(healSource{
            bucket:    bucket,
            object:    object,
            versionID: versionID,
        }, madmin.HealItemObject)
        return bgSeq.queueHealTask(ctx,
            healSource{
                bucket:    bucket,
                object:    object,
                versionID: versionID,
            }, madmin.HealItemObject)
    })

sleepDuration(time.Since(t), f.dataUsageCrawlMult)

@@ -456,9 +456,12 @@ func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string)

// save the content of the cache to minioMetaBackgroundOpsBucket with the provided name.
func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string) error {
    b := d.serialize()
    size := int64(len(b))
    r, err := hash.NewReader(bytes.NewReader(b), size, "", "", size, false)
    pr, pw := io.Pipe()
    go func() {
        pw.CloseWithError(d.serializeTo(pw))
    }()
    defer pr.Close()
    r, err := hash.NewReader(pr, -1, "", "", -1, false)
    if err != nil {
        return err
    }

@@ -480,32 +483,33 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
const dataUsageCacheVer = 2

// serialize the contents of the cache.
func (d *dataUsageCache) serialize() []byte {
    // Prepend version and compress.
    dst := make([]byte, 0, d.Msgsize()+1)
    dst = append(dst, dataUsageCacheVer)
    buf := bytes.NewBuffer(dst)
    enc, err := zstd.NewWriter(buf,
func (d *dataUsageCache) serializeTo(dst io.Writer) error {
    // Add version and compress.
    _, err := dst.Write([]byte{dataUsageCacheVer})
    if err != nil {
        return err
    }
    enc, err := zstd.NewWriter(dst,
        zstd.WithEncoderLevel(zstd.SpeedFastest),
        zstd.WithWindowSize(1<<20),
        zstd.WithEncoderConcurrency(2))
    if err != nil {
        logger.LogIf(GlobalContext, err)
        return nil
        return err
    }
    mEnc := msgp.NewWriter(enc)
    err = d.EncodeMsg(mEnc)
    if err != nil {
        logger.LogIf(GlobalContext, err)
        return nil
        return err
    }
    err = mEnc.Flush()
    if err != nil {
        return err
    }
    mEnc.Flush()
    err = enc.Close()
    if err != nil {
        logger.LogIf(GlobalContext, err)
        return nil
        return err
    }
    return buf.Bytes()
    return nil
}
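
The save/serializeTo split above leans on the standard io.Pipe idiom: the encoder runs in a goroutine and any error it returns surfaces on the reader side. A sketch of the pattern (encodeTo stands in for serializeTo):

    pr, pw := io.Pipe()
    go func() {
        // CloseWithError(nil) behaves like Close; a non-nil error is
        // returned by the next Read on pr.
        pw.CloseWithError(encodeTo(pw))
    }()
    defer pr.Close()
    // pr can now be handed to any reader-oriented consumer, e.g. hash.NewReader
    // with unknown size (-1), without materializing the whole payload in memory.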

// deserialize the supplied byte slice into the cache.

@@ -656,14 +656,17 @@ func TestDataUsageCacheSerialize(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }

    b := want.serialize()
    var got dataUsageCache
    err = got.deserialize(bytes.NewBuffer(b))
    var buf bytes.Buffer
    err = want.serializeTo(&buf)
    if err != nil {
        t.Fatal(err)
    }
    t.Log("serialized size:", buf.Len(), "bytes")
    var got dataUsageCache
    err = got.deserialize(&buf)
    if err != nil {
        t.Fatal(err)
    }
    t.Log("serialized size:", len(b), "bytes")
    if got.Info.LastUpdate.IsZero() {
        t.Error("lastupdate not set")
    }

@@ -142,7 +142,7 @@ type diskCache struct {
    // nsMutex namespace lock
    nsMutex *nsLockMap
    // Object functions pointing to the corresponding functions of backend implementation.
    NewNSLockFn func(ctx context.Context, cachePath string) RWLocker
    NewNSLockFn func(cachePath string) RWLocker
}

// Inits the disk cache dir if it is not initialized already.

@@ -175,8 +175,8 @@ func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCa
    }
    go cache.purgeWait(ctx)
    cache.diskSpaceAvailable(0) // update if cache usage is already high.
    cache.NewNSLockFn = func(ctx context.Context, cachePath string) RWLocker {
        return cache.nsMutex.NewNSLock(ctx, nil, cachePath, "")
    cache.NewNSLockFn = func(cachePath string) RWLocker {
        return cache.nsMutex.NewNSLock(nil, cachePath, "")
    }
    return &cache, nil
}

@@ -419,8 +419,8 @@ func (c *diskCache) Stat(ctx context.Context, bucket, object string) (oi ObjectI
// if partial object is cached.
func (c *diskCache) statCachedMeta(ctx context.Context, cacheObjPath string) (meta *cacheMeta, partial bool, numHits int, err error) {

    cLock := c.NewNSLockFn(ctx, cacheObjPath)
    if err = cLock.GetRLock(globalOperationTimeout); err != nil {
    cLock := c.NewNSLockFn(cacheObjPath)
    if err = cLock.GetRLock(ctx, globalOperationTimeout); err != nil {
        return
    }

@@ -501,8 +501,8 @@ func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *c
// incHitsOnly is true if metadata update is incrementing only the hit counter
func (c *diskCache) SaveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64, rs *HTTPRangeSpec, rsFileName string, incHitsOnly bool) error {
    cachedPath := getCacheSHADir(c.dir, bucket, object)
    cLock := c.NewNSLockFn(ctx, cachedPath)
    if err := cLock.GetLock(globalOperationTimeout); err != nil {
    cLock := c.NewNSLockFn(cachedPath)
    if err := cLock.GetLock(ctx, globalOperationTimeout); err != nil {
        return err
    }
    defer cLock.Unlock()

@@ -666,8 +666,8 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read
        return errDiskFull
    }
    cachePath := getCacheSHADir(c.dir, bucket, object)
    cLock := c.NewNSLockFn(ctx, cachePath)
    if err := cLock.GetLock(globalOperationTimeout); err != nil {
    cLock := c.NewNSLockFn(cachePath)
    if err := cLock.GetLock(ctx, globalOperationTimeout); err != nil {
        return err
    }
    defer cLock.Unlock()

@@ -866,8 +866,8 @@ func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, of
// Get returns ObjectInfo and reader for object from disk cache
func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, numHits int, err error) {
    cacheObjPath := getCacheSHADir(c.dir, bucket, object)
    cLock := c.NewNSLockFn(ctx, cacheObjPath)
    if err := cLock.GetRLock(globalOperationTimeout); err != nil {
    cLock := c.NewNSLockFn(cacheObjPath)
    if err := cLock.GetRLock(ctx, globalOperationTimeout); err != nil {
        return nil, numHits, err
    }

@@ -930,8 +930,8 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang

// Deletes the cached object
func (c *diskCache) delete(ctx context.Context, cacheObjPath string) (err error) {
    cLock := c.NewNSLockFn(ctx, cacheObjPath)
    if err := cLock.GetLock(globalOperationTimeout); err != nil {
    cLock := c.NewNSLockFn(cacheObjPath)
    if err := cLock.GetLock(ctx, globalOperationTimeout); err != nil {
        return err
    }
    defer cLock.Unlock()

@@ -19,10 +19,14 @@ package cmd
import (
    "bytes"
    "encoding/base64"
    "encoding/json"
    "fmt"
    "net/http"
    "os"
    "testing"

    humanize "github.com/dustin/go-humanize"
    "github.com/klauspost/compress/zstd"
    "github.com/minio/minio-go/v7/pkg/encrypt"
    "github.com/minio/minio/cmd/crypto"
    "github.com/minio/sio"

@@ -622,3 +626,89 @@ func TestGetDefaultOpts(t *testing.T) {
        }
    }
}

func Test_decryptObjectInfo(t *testing.T) {
    var testSet []struct {
        Bucket  string
        Name    string
        UserDef map[string]string
    }
    file, err := os.Open("testdata/decryptObjectInfo.json.zst")
    if err != nil {
        t.Fatal(err)
    }
    defer file.Close()
    dec, err := zstd.NewReader(file)
    if err != nil {
        t.Fatal(err)
    }
    defer dec.Close()
    js := json.NewDecoder(dec)
    err = js.Decode(&testSet)
    if err != nil {
        t.Fatal(err)
    }

    os.Setenv("MINIO_KMS_MASTER_KEY", "my-minio-key:6368616e676520746869732070617373776f726420746f206120736563726574")
    defer os.Setenv("MINIO_KMS_MASTER_KEY", "")
    GlobalKMS, err = crypto.NewKMS(crypto.KMSConfig{})
    if err != nil {
        t.Fatal(err)
    }

    var dst [32]byte
    for i := range testSet {
        t.Run(fmt.Sprint("case-", i), func(t *testing.T) {
            test := &testSet[i]
            _, err := decryptObjectInfo(dst[:], test.Bucket, test.Name, test.UserDef)
            if err != nil {
                t.Fatal(err)
            }
        })
    }
}

func Benchmark_decryptObjectInfo(b *testing.B) {
    var testSet []struct {
        Bucket  string
        Name    string
        UserDef map[string]string
    }
    file, err := os.Open("testdata/decryptObjectInfo.json.zst")
    if err != nil {
        b.Fatal(err)
    }
    defer file.Close()
    dec, err := zstd.NewReader(file)
    if err != nil {
        b.Fatal(err)
    }
    defer dec.Close()
    js := json.NewDecoder(dec)
    err = js.Decode(&testSet)
    if err != nil {
        b.Fatal(err)
    }

    os.Setenv("MINIO_KMS_MASTER_KEY", "my-minio-key:6368616e676520746869732070617373776f726420746f206120736563726574")
    defer os.Setenv("MINIO_KMS_MASTER_KEY", "")
    GlobalKMS, err = crypto.NewKMS(crypto.KMSConfig{})
    if err != nil {
        b.Fatal(err)
    }

    b.ReportAllocs()
    b.ResetTimer()
    b.SetBytes(int64(len(testSet)))
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            var dst [32]byte
            for i := range testSet {
                test := &testSet[i]
                _, err := decryptObjectInfo(dst[:], test.Bucket, test.Name, test.UserDef)
                if err != nil {
                    b.Fatal(err)
                }
            }
        }
    })
}

@@ -17,7 +17,9 @@
package cmd

import (
    "context"
    "crypto/tls"
    "errors"
    "fmt"
    "net"
    "net/http"

@@ -33,6 +35,7 @@ import (
    humanize "github.com/dustin/go-humanize"
    "github.com/minio/minio-go/v7/pkg/set"
    "github.com/minio/minio/cmd/config"
    xhttp "github.com/minio/minio/cmd/http"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/cmd/rest"
    "github.com/minio/minio/pkg/env"

@@ -744,6 +747,72 @@ func GetProxyEndpointLocalIndex(proxyEps []ProxyEndpoint) int {
    return -1
}

func httpDo(clnt *http.Client, req *http.Request, f func(*http.Response, error) error) error {
    ctx, cancel := context.WithTimeout(GlobalContext, 200*time.Millisecond)
    defer cancel()

    // Run the HTTP request in a goroutine and pass the response to f.
    c := make(chan error, 1)
    req = req.WithContext(ctx)
    go func() { c <- f(clnt.Do(req)) }()
    select {
    case <-ctx.Done():
        <-c // Wait for f to return.
        return ctx.Err()
    case err := <-c:
        return err
    }
}
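
A usage sketch for httpDo (the peer URL is illustrative): the callback owns the response, and the 200ms context budget bounds the whole round trip.

    req, err := http.NewRequest(http.MethodGet, "http://peer:9000/minio/health/live", nil)
    if err == nil {
        err = httpDo(&http.Client{}, req, func(resp *http.Response, err error) error {
            if err != nil {
                return err
            }
            defer resp.Body.Close()
            if resp.StatusCode != http.StatusOK {
                return errors.New(resp.Status)
            }
            return nil
        })
    }
    // err != nil means the peer did not answer healthily within 200ms.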

func getOnlineProxyEndpointIdx() int {
    type reqIndex struct {
        Request *http.Request
        Idx     int
    }

    proxyRequests := make(map[*http.Client]reqIndex, len(globalProxyEndpoints))
    for i, proxyEp := range globalProxyEndpoints {
        proxyEp := proxyEp
        serverURL := &url.URL{
            Scheme: proxyEp.Scheme,
            Host:   proxyEp.Host,
            Path:   pathJoin(healthCheckPathPrefix, healthCheckLivenessPath),
        }

        req, err := http.NewRequest(http.MethodGet, serverURL.String(), nil)
        if err != nil {
            continue
        }

        proxyRequests[&http.Client{
            Transport: proxyEp.Transport,
        }] = reqIndex{
            Request: req,
            Idx:     i,
        }
    }

    for c, r := range proxyRequests {
        if err := httpDo(c, r.Request, func(resp *http.Response, err error) error {
            if err != nil {
                return err
            }
            xhttp.DrainBody(resp.Body)
            if resp.StatusCode != http.StatusOK {
                return errors.New(resp.Status)
            }
            if v := resp.Header.Get(xhttp.MinIOServerStatus); v == unavailable {
                return errors.New(v)
            }
            return nil
        }); err != nil {
            continue
        }
        return r.Idx
    }
    return -1
}

// GetProxyEndpoints - get all endpoints that can be used to proxy list request.
func GetProxyEndpoints(endpointServerSets EndpointServerSets) ([]ProxyEndpoint, error) {
    var proxyEps []ProxyEndpoint

@@ -167,7 +167,14 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
    // consider the offline disks as consistent.
    continue
}
if len(meta.Erasure.Distribution) != len(onlineDisks) {
    // Erasure distribution has fewer items than the
    // number of online disks.
    inconsistent++
    continue
}
if meta.Erasure.Distribution[i] != meta.Erasure.Index {
    // Mismatched indexes with distribution order
    inconsistent++
}
}

@@ -193,6 +200,16 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
if !meta.IsValid() {
    continue
}

if len(meta.Erasure.Distribution) != len(onlineDisks) {
    // Erasure distribution is not the same as onlineDisks;
    // attempt a fix if possible, assuming other entries
    // might have the right erasure distribution.
    partsMetadata[i] = FileInfo{}
    dataErrs[i] = errFileCorrupt
    continue
}

// Since erasure.Distribution is trusted we can fix the mismatching erasure.Index
if meta.Erasure.Distribution[i] != meta.Erasure.Index {
    partsMetadata[i] = FileInfo{}
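
To make the two checks above concrete, an illustration with invented values: with 4 online disks, a well-formed entry read from disk i=2 must carry a 4-element distribution whose i-th element equals that entry's own Erasure.Index.

    // meta.Erasure.Distribution = []int{3, 4, 1, 2} // len == len(onlineDisks) == 4
    // meta.Erasure.Index        = 1                 // Distribution[2] == 1: consistent
    // A distribution of any other length, or Distribution[2] != Index,
    // marks this disk's copy inconsistent (or corrupt, in the healing path).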

@@ -98,16 +98,13 @@ func healBucket(ctx context.Context, storageDisks []StorageAPI, storageEndpoints

errs := g.Wait()

reducedErr := reduceWriteQuorumErrs(ctx, errs, bucketOpIgnoredErrs, writeQuorum-1)
if reducedErr == errVolumeNotFound {
    return res, nil
}

// Initialize heal result info
res = madmin.HealResultItem{
    Type:      madmin.HealItemBucket,
    Bucket:    bucket,
    DiskCount: len(storageDisks),
    Type:         madmin.HealItemBucket,
    Bucket:       bucket,
    DiskCount:    len(storageDisks),
    ParityBlocks: len(storageDisks) / 2,
    DataBlocks:   len(storageDisks) / 2,
}

for i := range beforeState {

@@ -118,6 +115,18 @@ func healBucket(ctx context.Context, storageDisks []StorageAPI, storageEndpoints
    })
}

reducedErr := reduceWriteQuorumErrs(ctx, errs, bucketOpIgnoredErrs, writeQuorum-1)
if reducedErr == errVolumeNotFound {
    for i := range beforeState {
        res.After.Drives = append(res.After.Drives, madmin.HealDriveInfo{
            UUID:     "",
            Endpoint: storageEndpoints[i],
            State:    madmin.DriveStateOk,
        })
    }
    return res, nil
}

// Initialize sync waitgroup.
g = errgroup.WithNErrs(len(storageDisks))

@@ -221,8 +230,6 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
    partsMetadata []FileInfo, errs []error, latestFileInfo FileInfo,
    dryRun bool, remove bool, scanMode madmin.HealScanMode) (result madmin.HealResultItem, err error) {

    dataBlocks := latestFileInfo.Erasure.DataBlocks

    storageDisks := er.getDisks()
    storageEndpoints := er.getEndpoints()

@@ -306,7 +313,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s

// If less than read quorum number of disks have all the parts
// of the data, we can't reconstruct the erasure-coded data.
if numAvailableDisks < dataBlocks {
if numAvailableDisks < result.DataBlocks {
    // Check if er.meta, and corresponding parts are also missing.
    if m, ok := isObjectDangling(partsMetadata, errs, dataErrs); ok {
        writeQuorum := m.Erasure.DataBlocks + 1

@@ -338,9 +345,9 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s

// Latest FileInfo for reference. If a valid metadata is not
// present, it is as good as object not found.
latestMeta, pErr := pickValidFileInfo(ctx, partsMetadata, modTime, dataBlocks)
if pErr != nil {
    return result, toObjectErr(pErr, bucket, object)
latestMeta, err := pickValidFileInfo(ctx, partsMetadata, modTime, result.DataBlocks)
if err != nil {
    return result, toObjectErr(err, bucket, object)
}

cleanFileInfo := func(fi FileInfo) FileInfo {

@@ -140,7 +140,12 @@ func readVersionFromDisks(ctx context.Context, disks []StorageAPI, bucket, objec
}
metadataArray[index], err = disks[index].ReadVersion(ctx, bucket, object, versionID, checkDataDir)
if err != nil {
    if err != errFileNotFound && err != errVolumeNotFound && err != errFileVersionNotFound {
    if !IsErr(err, []error{
        errFileNotFound,
        errVolumeNotFound,
        errFileVersionNotFound,
        errDiskNotFound,
    }...) {
        logger.GetReqInfo(ctx).AppendTags("disk", disks[index].String())
        logger.LogIf(ctx, err)
    }

@@ -90,9 +90,12 @@ func (fi FileInfo) IsValid() bool {
}
dataBlocks := fi.Erasure.DataBlocks
parityBlocks := fi.Erasure.ParityBlocks
correctIndexes := (fi.Erasure.Index > 0 &&
    fi.Erasure.Index <= dataBlocks+parityBlocks &&
    len(fi.Erasure.Distribution) == (dataBlocks+parityBlocks))
return ((dataBlocks >= parityBlocks) &&
    (dataBlocks != 0) && (parityBlocks != 0) &&
    (fi.Erasure.Index > 0 && fi.Erasure.Distribution != nil))
    correctIndexes)
}

// ToObjectInfo - Converts metadata to object info.

@@ -142,6 +142,9 @@ func (er erasureObjects) cleanupStaleUploadsOnDisk(ctx context.Context, disk Sto
    return
}
for _, tmpDir := range tmpDirs {
    if tmpDir == ".trash/" { // do not remove .trash/ here, it has its own routines
        continue
    }
    fi, err := disk.ReadVersion(ctx, minioMetaTmpBucket, tmpDir, "", false)
    if err != nil {
        continue

@@ -355,14 +358,19 @@ func (er erasureObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObjec
//
// Implements S3 compatible Upload Part API.
func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, err error) {
    uploadIDLock := er.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
    if err = uploadIDLock.GetRLock(globalOperationTimeout); err != nil {
    partIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID, strconv.Itoa(partID)))
    if err = partIDLock.GetLock(ctx, globalOperationTimeout); err != nil {
        return PartInfo{}, err
    }
    defer partIDLock.Unlock()

    uploadIDRLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
    if err = uploadIDRLock.GetRLock(ctx, globalOperationTimeout); err != nil {
        return PartInfo{}, err
    }
    readLocked := true
    defer func() {
        if readLocked {
            uploadIDLock.RUnlock()
        if uploadIDRLock != nil {
            uploadIDRLock.RUnlock()
        }
    }()

@@ -386,6 +394,10 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
    partsMetadata, errs = readAllFileInfo(ctx, er.getDisks(), minioMetaMultipartBucket,
        uploadIDPath, "")

    // Unlock the upload ID read lock early, so others can acquire it.
    uploadIDRLock.RUnlock()
    uploadIDRLock = nil

    // get Quorum for this object
    _, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs)
    if err != nil {

@@ -465,14 +477,12 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
    }
}

// Unlock here before acquiring write locks all concurrent
// PutObjectParts would serialize here updating `xl.meta`
uploadIDLock.RUnlock()
readLocked = false
if err = uploadIDLock.GetLock(globalOperationTimeout); err != nil {
// Acquire write lock to update metadata.
uploadIDWLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
if err = uploadIDWLock.GetLock(ctx, globalOperationTimeout); err != nil {
    return PartInfo{}, err
}
defer uploadIDLock.Unlock()
defer uploadIDWLock.Unlock()

// Validates if upload ID exists.
if err = er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
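
The net effect of this PutObjectPart rework, sketched below with simplified flow (error handling elided, partNumber illustrative): writers of the same part serialize on a per-part lock, data streaming holds only an upload-wide read lock that is released as soon as metadata is read, and the brief xl.meta update is the sole point that takes the upload-wide write lock.

    partLock := er.NewNSLock(bucket, pathJoin(object, uploadID, partNumber))
    partLock.GetLock(ctx, timeout) // serialize same-part writers
    defer partLock.Unlock()

    rlock := er.NewNSLock(bucket, pathJoin(object, uploadID))
    rlock.GetRLock(ctx, timeout) // upload must exist while its metadata is read
    // ... read parts metadata, then release early so other parts can proceed ...
    rlock.RUnlock()

    // stream the part data to disk with no upload-wide lock held, then:
    wlock := er.NewNSLock(bucket, pathJoin(object, uploadID))
    wlock.GetLock(ctx, timeout) // exclusive only for the xl.meta update
    defer wlock.Unlock()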
@@ -550,8 +560,8 @@ func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, u
    UploadID: uploadID,
}

uploadIDLock := er.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
if err := uploadIDLock.GetRLock(globalOperationTimeout); err != nil {
uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
if err := uploadIDLock.GetRLock(ctx, globalOperationTimeout); err != nil {
    return MultipartInfo{}, err
}
defer uploadIDLock.RUnlock()

@@ -598,8 +608,8 @@ func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, u
// ListPartsInfo structure is marshaled directly into XML and
// replied back to the client.
func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) {
    uploadIDLock := er.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
    if err := uploadIDLock.GetRLock(globalOperationTimeout); err != nil {
    uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
    if err := uploadIDLock.GetRLock(ctx, globalOperationTimeout); err != nil {
        return ListPartsInfo{}, err
    }
    defer uploadIDLock.RUnlock()

@@ -691,8 +701,8 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up
func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, err error) {
    // Hold read-locks to verify uploaded parts; this also
    // disallows parallel part uploads.
    uploadIDLock := er.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
    if err = uploadIDLock.GetRLock(globalOperationTimeout); err != nil {
    uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
    if err = uploadIDLock.GetRLock(ctx, globalOperationTimeout); err != nil {
        return oi, err
    }
    defer uploadIDLock.RUnlock()

@@ -703,7 +713,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str

// Check if an object is present as one of the parent directories.
// -- FIXME. (needs a new kind of lock).
if er.parentDirIsObject(ctx, bucket, path.Dir(object)) {
if opts.ParentIsObject != nil && opts.ParentIsObject(ctx, bucket, path.Dir(object)) {
    return oi, toObjectErr(errFileParentIsFile, bucket, object)
}

@@ -844,8 +854,8 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
}

// Hold namespace to complete the transaction
lk := er.NewNSLock(ctx, bucket, object)
if err = lk.GetLock(globalOperationTimeout); err != nil {
lk := er.NewNSLock(bucket, object)
if err = lk.GetLock(ctx, globalOperationTimeout); err != nil {
    return oi, err
}
defer lk.Unlock()

@@ -886,8 +896,8 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
// would be removed from the system, rollback is not possible on this
// operation.
func (er erasureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
    lk := er.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
    if err := lk.GetLock(globalOperationTimeout); err != nil {
    lk := er.NewNSLock(bucket, pathJoin(object, uploadID))
    if err := lk.GetLock(ctx, globalOperationTimeout); err != nil {
        return err
    }
    defer lk.Unlock()

@@ -48,8 +48,8 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
}

defer ObjectPathUpdated(path.Join(dstBucket, dstObject))
lk := er.NewNSLock(ctx, dstBucket, dstObject)
if err := lk.GetLock(globalOperationTimeout); err != nil {
lk := er.NewNSLock(dstBucket, dstObject)
if err := lk.GetLock(ctx, globalOperationTimeout); err != nil {
    return oi, err
}
defer lk.Unlock()

@@ -135,15 +135,15 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri

// Acquire lock
if lockType != noLock {
    lock := er.NewNSLock(ctx, bucket, object)
    lock := er.NewNSLock(bucket, object)
    switch lockType {
    case writeLock:
        if err = lock.GetLock(globalOperationTimeout); err != nil {
        if err = lock.GetLock(ctx, globalOperationTimeout); err != nil {
            return nil, err
        }
        nsUnlocker = lock.Unlock
    case readLock:
        if err = lock.GetRLock(globalOperationTimeout); err != nil {
        if err = lock.GetRLock(ctx, globalOperationTimeout); err != nil {
            return nil, err
        }
        nsUnlocker = lock.RUnlock

@@ -196,8 +196,8 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
// length indicates the total length of the object.
func (er erasureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
    // Lock the object before reading.
    lk := er.NewNSLock(ctx, bucket, object)
    if err := lk.GetRLock(globalOperationTimeout); err != nil {
    lk := er.NewNSLock(bucket, object)
    if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
        return err
    }
    defer lk.RUnlock()

@@ -344,8 +344,8 @@ func (er erasureObjects) getObject(ctx context.Context, bucket, object string, s
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (info ObjectInfo, err error) {
    // Lock the object before reading.
    lk := er.NewNSLock(ctx, bucket, object)
    if err := lk.GetRLock(globalOperationTimeout); err != nil {
    lk := er.NewNSLock(bucket, object)
    if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
        return ObjectInfo{}, err
    }
    defer lk.RUnlock()

@@ -365,6 +365,24 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
}

if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
    if reducedErr == errErasureReadQuorum && bucket != minioMetaBucket {
        if _, ok := isObjectDangling(metaArr, errs, nil); ok {
            reducedErr = errFileNotFound
            if opts.VersionID != "" {
                reducedErr = errFileVersionNotFound
            }
            // Remove the dangling object only when:
            //  - This is a non-versioned bucket
            //  - This is a versioned bucket and the version ID is passed, the reason
            //    is that we cannot fetch the ID of the latest version when we don't trust xl.meta
            if !opts.Versioned || opts.VersionID != "" {
                er.deleteObjectVersion(ctx, bucket, object, 1, FileInfo{
                    Name:      object,
                    VersionID: opts.VersionID,
                })
            }
        }
    }
    return fi, nil, nil, toObjectErr(reducedErr, bucket, object)
}

@@ -561,7 +579,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
// Check if an object is present as one of the parent directories.
// -- FIXME. (needs a new kind of lock).
// -- FIXME (this also causes performance issue when disks are down).
if er.parentDirIsObject(ctx, bucket, path.Dir(object)) {
if opts.ParentIsObject != nil && opts.ParentIsObject(ctx, bucket, path.Dir(object)) {
    return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
}

@@ -632,8 +650,8 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
    return ObjectInfo{}, IncompleteBody{Bucket: bucket, Object: object}
}

lk := er.NewNSLock(ctx, bucket, object)
if err := lk.GetLock(globalOperationTimeout); err != nil {
lk := er.NewNSLock(bucket, object)
if err := lk.GetLock(ctx, globalOperationTimeout); err != nil {
    return ObjectInfo{}, err
}
defer lk.Unlock()

@@ -902,8 +920,8 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
}

// Acquire a write lock before deleting the object.
lk := er.NewNSLock(ctx, bucket, object)
if err = lk.GetLock(globalDeleteOperationTimeout); err != nil {
lk := er.NewNSLock(bucket, object)
if err = lk.GetLock(ctx, globalDeleteOperationTimeout); err != nil {
    return ObjectInfo{}, err
}
defer lk.Unlock()

@@ -89,8 +89,8 @@ func newErasureServerSets(ctx context.Context, endpointServerSets EndpointServer
    return z, nil
}

func (z *erasureServerSets) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker {
    return z.serverSets[0].NewNSLock(ctx, bucket, objects...)
func (z *erasureServerSets) NewNSLock(bucket string, objects ...string) RWLocker {
    return z.serverSets[0].NewNSLock(bucket, objects...)
}

func (z *erasureServerSets) GetAllLockers() []dsync.NetLocker {

@@ -570,8 +570,8 @@ func (z *erasureServerSets) DeleteObjects(ctx context.Context, bucket string, ob
}

// Acquire a bulk write lock across 'objects'
multiDeleteLock := z.NewNSLock(ctx, bucket, objSets.ToSlice()...)
if err := multiDeleteLock.GetLock(globalOperationTimeout); err != nil {
multiDeleteLock := z.NewNSLock(bucket, objSets.ToSlice()...)
if err := multiDeleteLock.GetLock(ctx, globalOperationTimeout); err != nil {
    for i := range derrs {
        derrs[i] = err
    }

@@ -911,6 +911,12 @@ func (z *erasureServerSets) listObjects(ctx context.Context, bucket, prefix, mar

for _, entry := range entries.Files {
    objInfo := entry.ToObjectInfo(entry.Volume, entry.Name)
    // Always avoid including the marker in the result, this is
    // needed to avoid including dir__XLDIR__ and dir/ twice in
    // different listing pages
    if objInfo.Name == marker {
        continue
    }
    if HasSuffix(objInfo.Name, SlashSeparator) && !recursive {
        loi.Prefixes = append(loi.Prefixes, objInfo.Name)
        continue

@@ -957,8 +963,7 @@ func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileI
var lentry FileInfo
var found bool
var zoneIndex = -1
// TODO: following loop can be merged with above
// loop, explore this possibility.
var setIndex = -1
for i, entriesValid := range zoneEntriesValid {
    for j, valid := range entriesValid {
        if !valid {

@@ -968,6 +973,7 @@ func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileI
            lentry = zoneEntries[i][j]
            found = true
            zoneIndex = i
            setIndex = zoneEntryChs[i][j].SetIndex
            continue
        }
        str1 := zoneEntries[i][j].Name

@@ -982,6 +988,7 @@ func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileI
        if str1 < str2 {
            lentry = zoneEntries[i][j]
            zoneIndex = i
            setIndex = zoneEntryChs[i][j].SetIndex
        }
    }
}

@@ -999,9 +1006,15 @@ func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileI
        continue
    }

    // Always deduplicate prefixes
    if lentry.Name == zoneEntries[i][j].Name && strings.HasSuffix(lentry.Name, slashSeparator) {
        lexicallySortedEntryCount++
        continue
    }

    // Entries are duplicated across disks,
    // we should simply skip such entries.
    if lentry.Name == zoneEntries[i][j].Name && lentry.ModTime.Equal(zoneEntries[i][j].ModTime) {
    if lentry.Name == zoneEntries[i][j].Name && lentry.ModTime.Equal(zoneEntries[i][j].ModTime) && setIndex == zoneEntryChs[i][j].SetIndex {
        lexicallySortedEntryCount++
        continue
    }

@@ -1050,6 +1063,7 @@ func lexicallySortedEntryZoneVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneE
var lentry FileInfoVersions
var found bool
var zoneIndex = -1
var setIndex = -1
for i, entriesValid := range zoneEntriesValid {
    for j, valid := range entriesValid {
        if !valid {

@@ -1059,6 +1073,7 @@ func lexicallySortedEntryZoneVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneE
            lentry = zoneEntries[i][j]
            found = true
            zoneIndex = i
            setIndex = zoneEntryChs[i][j].SetIndex
            continue
        }
        str1 := zoneEntries[i][j].Name

@@ -1073,6 +1088,7 @@ func lexicallySortedEntryZoneVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneE
        if str1 < str2 {
            lentry = zoneEntries[i][j]
            zoneIndex = i
            setIndex = zoneEntryChs[i][j].SetIndex
        }
    }
}

@@ -1090,9 +1106,15 @@ func lexicallySortedEntryZoneVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneE
        continue
    }

    // Always deduplicate prefixes
    if lentry.Name == zoneEntries[i][j].Name && strings.HasSuffix(lentry.Name, slashSeparator) {
        lexicallySortedEntryCount++
        continue
    }

    // Entries are duplicated across disks,
    // we should simply skip such entries.
    if lentry.Name == zoneEntries[i][j].Name && lentry.LatestModTime.Equal(zoneEntries[i][j].LatestModTime) {
    if lentry.Name == zoneEntries[i][j].Name && lentry.LatestModTime.Equal(zoneEntries[i][j].LatestModTime) && setIndex == zoneEntryChs[i][j].SetIndex {
        lexicallySortedEntryCount++
        continue
    }

@@ -1294,7 +1316,7 @@ func (z *erasureServerSets) listObjectVersions(ctx context.Context, bucket, pref
entryChs, endWalkCh := zone.poolVersions.Release(listParams{bucket, recursive, marker, prefix})
if entryChs == nil {
    endWalkCh = make(chan struct{})
    entryChs = zone.startMergeWalksVersionsN(ctx, bucket, prefix, marker, recursive, endWalkCh, zone.listTolerancePerSet)
    entryChs = zone.startMergeWalksVersionsN(ctx, bucket, prefix, marker, recursive, false, endWalkCh, zone.listTolerancePerSet)
}
serverSetsEntryChs = append(serverSetsEntryChs, entryChs)
serverSetsEndWalkCh = append(serverSetsEndWalkCh, endWalkCh)

@@ -1318,6 +1340,12 @@ func (z *erasureServerSets) listObjectVersions(ctx context.Context, bucket, pref
for _, entry := range entries.FilesVersions {
    for _, version := range entry.Versions {
        objInfo := version.ToObjectInfo(bucket, entry.Name)
        // Always avoid including the marker in the result, this is
        // needed to avoid including dir__XLDIR__ and dir/ twice in
        // different listing pages
        if objInfo.Name == marker && objInfo.VersionID == versionMarker {
            continue
        }
        if HasSuffix(objInfo.Name, SlashSeparator) && !recursive {
            loi.Prefixes = append(loi.Prefixes, objInfo.Name)
            continue

@@ -1680,21 +1708,10 @@ func (z *erasureServerSets) ListBuckets(ctx context.Context) (buckets []BucketIn
    return buckets, nil
}

func (z *erasureServerSets) ReloadFormat(ctx context.Context, dryRun bool) error {
    // No locks needed since reload happens in HealFormat under
    // write lock across all nodes.
    for _, zone := range z.serverSets {
        if err := zone.ReloadFormat(ctx, dryRun); err != nil {
            return err
        }
    }
    return nil
}

func (z *erasureServerSets) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
    // Acquire lock on format.json
    formatLock := z.NewNSLock(ctx, minioMetaBucket, formatConfigFile)
    if err := formatLock.GetLock(globalOperationTimeout); err != nil {
    formatLock := z.NewNSLock(minioMetaBucket, formatConfigFile)
    if err := formatLock.GetLock(ctx, globalOperationTimeout); err != nil {
        return madmin.HealResultItem{}, err
    }
    defer formatLock.Unlock()

@@ -1722,15 +1739,6 @@ func (z *erasureServerSets) HealFormat(ctx context.Context, dryRun bool) (madmin
        r.After.Drives = append(r.After.Drives, result.After.Drives...)
    }

    // Healing succeeded notify the peers to reload format and re-initialize disks.
    // We will not notify peers if healing is not required.
    for _, nerr := range globalNotificationSys.ReloadFormat(dryRun) {
        if nerr.Err != nil {
            logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
            logger.LogIf(ctx, nerr.Err)
        }
    }

    // No heal returned by all serverSets, return errNoHealRequired
    if countNoHeal == len(z.serverSets) {
        return r, errNoHealRequired

@@ -1786,7 +1794,7 @@ func (z *erasureServerSets) Walk(ctx context.Context, bucket, prefix string, res
if opts.WalkVersions {
    var serverSetsEntryChs [][]FileInfoVersionsCh
    for _, zone := range z.serverSets {
        serverSetsEntryChs = append(serverSetsEntryChs, zone.startMergeWalksVersions(ctx, bucket, prefix, "", true, ctx.Done()))
        serverSetsEntryChs = append(serverSetsEntryChs, zone.startMergeWalksVersions(ctx, bucket, prefix, "", true, false, ctx.Done()))
    }

    var serverSetsEntriesInfos [][]FileInfoVersions

@@ -1852,72 +1860,60 @@ func (z *erasureServerSets) Walk(ctx context.Context, bucket, prefix string, res
type HealObjectFn func(bucket, object, versionID string) error

func (z *erasureServerSets) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject HealObjectFn) error {
    endWalkCh := make(chan struct{})
    defer close(endWalkCh)

    serverSetsEntryChs := make([][]FileInfoVersionsCh, 0, len(z.serverSets))
    zoneDrivesPerSet := make([]int, 0, len(z.serverSets))

    var skipped int
    for _, zone := range z.serverSets {
        serverSetsEntryChs = append(serverSetsEntryChs,
            zone.startMergeWalksVersions(ctx, bucket, prefix, "", true, endWalkCh))
        zoneDrivesPerSet = append(zoneDrivesPerSet, zone.setDriveCount)
    }
        entryChs := zone.startMergeWalksVersions(ctx, bucket, prefix, "", true, true, ctx.Done())
        entriesInfos := make([]FileInfoVersions, len(entryChs))
        entriesValid := make([]bool, len(entryChs))

    serverSetsEntriesInfos := make([][]FileInfoVersions, 0, len(serverSetsEntryChs))
    serverSetsEntriesValid := make([][]bool, 0, len(serverSetsEntryChs))
    for _, entryChs := range serverSetsEntryChs {
        serverSetsEntriesInfos = append(serverSetsEntriesInfos, make([]FileInfoVersions, len(entryChs)))
        serverSetsEntriesValid = append(serverSetsEntriesValid, make([]bool, len(entryChs)))
    }
        for {
            entry, quorumCount, ok := lexicallySortedEntryVersions(entryChs, entriesInfos, entriesValid)
            if !ok {
                skipped++
                // calculate number of skips to return
                // "NotFound" error at the end.
                break
            }

    // If listing did not return any entries upon first attempt, we
    // return `ObjectNotFound`, to indicate the caller for any
    // actions they may want to take as if `prefix` is missing.
    err := toObjectErr(errFileNotFound, bucket, prefix)
    for {
        entry, quorumCount, zoneIndex, ok := lexicallySortedEntryZoneVersions(serverSetsEntryChs, serverSetsEntriesInfos, serverSetsEntriesValid)
        if !ok {
            break
        }
            drivesPerSet := zone.setDriveCount
            if quorumCount == drivesPerSet && opts.ScanMode == madmin.HealNormalScan {
                // Skip good entries.
                continue
            }

        // Indicate that first attempt was a success and subsequent loop
        // knows that its not our first attempt at 'prefix'
        err = nil

        if zoneIndex >= len(zoneDrivesPerSet) || zoneIndex < 0 {
            return fmt.Errorf("invalid zone index returned: %d", zoneIndex)
        }

        if quorumCount == zoneDrivesPerSet[zoneIndex] && opts.ScanMode == madmin.HealNormalScan {
            // Skip good entries.
            continue
        }

        for _, version := range entry.Versions {
            if err := healObject(bucket, version.Name, version.VersionID); err != nil {
                return toObjectErr(err, bucket, version.Name)
            for _, version := range entry.Versions {
                if err := healObject(bucket, version.Name, version.VersionID); err != nil {
                    return toObjectErr(err, bucket, version.Name)
                }
            }
        }
    }

    return err
    if skipped == len(z.serverSets) {
        // If listing did not return any entries upon first attempt, we
        // return `ObjectNotFound`, to indicate the caller for any
        // actions they may want to take as if `prefix` is missing.
        return toObjectErr(errFileNotFound, bucket, prefix)
    }

    return nil
}

func (z *erasureServerSets) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
    object = encodeDirObject(object)

    lk := z.NewNSLock(ctx, bucket, object)
    lk := z.NewNSLock(bucket, object)
    if bucket == minioMetaBucket {
        // For .minio.sys bucket heals we should hold write locks.
        if err := lk.GetLock(globalOperationTimeout); err != nil {
        if err := lk.GetLock(ctx, globalOperationTimeout); err != nil {
            return madmin.HealResultItem{}, err
        }
        defer lk.Unlock()
    } else {
        // Lock the object before healing. Use read lock since healing
        // will only regenerate parts & xl.meta of outdated disks.
        if err := lk.GetRLock(globalOperationTimeout); err != nil {
        if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
            return madmin.HealResultItem{}, err
        }
        defer lk.RUnlock()

@@ -24,11 +24,13 @@ import (
    "io"
    "math/rand"
    "net/http"
    "path"
    "sort"
    "sync"
    "time"

    "github.com/dchest/siphash"
    "github.com/dustin/go-humanize"
    "github.com/google/uuid"
    "github.com/minio/minio-go/v7/pkg/tags"
    "github.com/minio/minio/cmd/config"

@@ -375,9 +377,12 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto

mutex := newNSLock(globalIsDistErasure)

// Number of buffers, max 2GB
n := (2 * humanize.GiByte) / (blockSizeV2 * 2)

// Initialize byte pool once for all sets, bpool size is set to
// setCount * setDriveCount with each memory upto blockSizeV1.
bp := bpool.NewBytePoolCap(setCount*setDriveCount, blockSizeV1, blockSizeV1*2)
bp := bpool.NewBytePoolCap(n, blockSizeV2, blockSizeV2*2)

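Worked out under the assumption (from elsewhere in this codebase, not this hunk) that blockSizeV2 is 1 MiB: n = 2 GiB / (2 × 1 MiB) = 1024 buffers, each allowed to grow to blockSizeV2*2 = 2 MiB, so the pool's worst case stays at the intended 2 GB regardless of setCount.
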
for i := 0; i < setCount; i++ {
    s.erasureDisks[i] = make([]StorageAPI, setDriveCount)

@@ -407,21 +412,31 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto

    // Initialize erasure objects for a given set.
    s.sets[i] = &erasureObjects{
        getDisks:     s.GetDisks(i),
        getLockers:   s.GetLockers(i),
        getEndpoints: s.GetEndpoints(i),
        nsMutex:      mutex,
        bp:           bp,
        mrfOpCh:      make(chan partialOperation, 10000),
        getDisks:              s.GetDisks(i),
        getLockers:            s.GetLockers(i),
        getEndpoints:          s.GetEndpoints(i),
        deletedCleanupSleeper: newDynamicSleeper(10, 10*time.Second),
        nsMutex:               mutex,
        bp:                    bp,
        mrfOpCh:               make(chan partialOperation, 10000),
    }

    go s.sets[i].cleanupStaleUploads(ctx,
        GlobalStaleUploadsCleanupInterval, GlobalStaleUploadsExpiry)
}

// cleanup ".trash/" folder every 10 minutes with sufficient sleep cycles.
deletedObjectsCleanupInterval, err := time.ParseDuration(env.Get("MINIO_DELETE_CLEANUP_INTERVAL", "5m"))
if err != nil {
    return nil, err
}

mctx, mctxCancel := context.WithCancel(ctx)
s.monitorContextCancel = mctxCancel

// start cleanup of deleted objects.
go s.cleanupDeletedObjects(ctx, deletedObjectsCleanupInterval)

// Start the disk monitoring and connect routine.
go s.monitorAndConnectEndpoints(mctx, defaultMonitorConnectEndpointInterval)
go s.maintainMRFList()

@@ -430,12 +445,31 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
    return s, nil
}

// NewNSLock - initialize a new namespace RWLocker instance.
func (s *erasureSets) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker {
    if len(objects) == 1 {
        return s.getHashedSet(objects[0]).NewNSLock(ctx, bucket, objects...)
func (s *erasureSets) cleanupDeletedObjects(ctx context.Context, cleanupInterval time.Duration) {
    timer := time.NewTimer(cleanupInterval)
    defer timer.Stop()

    for {
        select {
        case <-ctx.Done():
            return
        case <-timer.C:
            // Reset for the next interval
            timer.Reset(cleanupInterval)

            for _, set := range s.sets {
                set.cleanupDeletedObjects(ctx)
            }
        }
    }
    return s.getHashedSet("").NewNSLock(ctx, bucket, objects...)
}

// NewNSLock - initialize a new namespace RWLocker instance.
func (s *erasureSets) NewNSLock(bucket string, objects ...string) RWLocker {
    if len(objects) == 1 {
        return s.getHashedSet(objects[0]).NewNSLock(bucket, objects...)
    }
    return s.getHashedSet("").NewNSLock(bucket, objects...)
}
|
||||
|
||||
// SetDriveCount returns the current drives per set.
|
||||
|
@ -721,8 +755,25 @@ func (s *erasureSets) GetObject(ctx context.Context, bucket, object string, star
|
|||
return s.getHashedSet(object).GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
|
||||
}
|
||||
|
||||
func (s *erasureSets) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
|
||||
var isParentDirObject func(string) bool
|
||||
isParentDirObject = func(p string) bool {
|
||||
if p == "." || p == SlashSeparator {
|
||||
return false
|
||||
}
|
||||
if s.getHashedSet(p).isObject(ctx, bucket, p) {
|
||||
// If there is already a file at prefix "p", return true.
|
||||
return true
|
||||
}
|
||||
// Check if there is a file as one of the parent paths.
|
||||
return isParentDirObject(path.Dir(p))
|
||||
}
|
||||
return isParentDirObject(parent)
|
||||
}
|
||||
|
||||
// PutObject - writes an object to hashedSet based on the object name.
|
||||
func (s *erasureSets) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
|
||||
opts.ParentIsObject = s.parentDirIsObject
|
||||
return s.getHashedSet(object).PutObject(ctx, bucket, object, data, opts)
|
||||
}
|
||||
|
||||
|
@ -832,9 +883,10 @@ func (s *erasureSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstB
|
|||
|
||||
// FileInfoVersionsCh - file info versions channel
|
||||
type FileInfoVersionsCh struct {
|
||||
Ch chan FileInfoVersions
|
||||
Prev FileInfoVersions
|
||||
Valid bool
|
||||
Ch chan FileInfoVersions
|
||||
Prev FileInfoVersions
|
||||
Valid bool
|
||||
SetIndex int
|
||||
}
|
||||
|
||||
// Pop - pops a cached entry if any, or from the cached channel.
|
||||
|
@ -855,9 +907,10 @@ func (f *FileInfoVersionsCh) Push(fi FileInfoVersions) {
|
|||
|
||||
// FileInfoCh - file info channel
|
||||
type FileInfoCh struct {
|
||||
Ch chan FileInfo
|
||||
Prev FileInfo
|
||||
Valid bool
|
||||
Ch chan FileInfo
|
||||
Prev FileInfo
|
||||
Valid bool
|
||||
SetIndex int
|
||||
}
|
||||
|
||||
// Pop - pops a cached entry if any, or from the cached channel.
|
||||
|
@ -884,8 +937,8 @@ func (f *FileInfoCh) Push(fi FileInfo) {
|
|||
// if the caller wishes to list N entries to call lexicallySortedEntry
|
||||
// N times until this boolean is 'false'.
|
||||
func lexicallySortedEntryVersions(entryChs []FileInfoVersionsCh, entries []FileInfoVersions, entriesValid []bool) (FileInfoVersions, int, bool) {
|
||||
for j := range entryChs {
|
||||
entries[j], entriesValid[j] = entryChs[j].Pop()
|
||||
for i := range entryChs {
|
||||
entries[i], entriesValid[i] = entryChs[i].Pop()
|
||||
}
|
||||
|
||||
var isTruncated = false
|
||||
|
@ -899,6 +952,7 @@ func lexicallySortedEntryVersions(entryChs []FileInfoVersionsCh, entries []FileI
|
|||
|
||||
var lentry FileInfoVersions
|
||||
var found bool
|
||||
var setIndex = -1
|
||||
for i, valid := range entriesValid {
|
||||
if !valid {
|
||||
continue
|
||||
|
@ -906,10 +960,12 @@ func lexicallySortedEntryVersions(entryChs []FileInfoVersionsCh, entries []FileI
|
|||
if !found {
|
||||
lentry = entries[i]
|
||||
found = true
|
||||
setIndex = entryChs[i].SetIndex
|
||||
continue
|
||||
}
|
||||
if entries[i].Name < lentry.Name {
|
||||
lentry = entries[i]
|
||||
setIndex = entryChs[i].SetIndex
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -927,7 +983,7 @@ func lexicallySortedEntryVersions(entryChs []FileInfoVersionsCh, entries []FileI
|
|||
|
||||
// Entries are duplicated across disks,
|
||||
// we should simply skip such entries.
|
||||
if lentry.Name == entries[i].Name && lentry.LatestModTime.Equal(entries[i].LatestModTime) {
|
||||
if lentry.Name == entries[i].Name && lentry.LatestModTime.Equal(entries[i].LatestModTime) && setIndex == entryChs[i].SetIndex {
|
||||
lexicallySortedEntryCount++
|
||||
continue
|
||||
}
|
||||
|
@ -944,34 +1000,35 @@ func (s *erasureSets) startMergeWalks(ctx context.Context, bucket, prefix, marke
|
|||
return s.startMergeWalksN(ctx, bucket, prefix, marker, recursive, endWalkCh, -1, false)
|
||||
}
|
||||
|
||||
func (s *erasureSets) startMergeWalksVersions(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}) []FileInfoVersionsCh {
|
||||
return s.startMergeWalksVersionsN(ctx, bucket, prefix, marker, recursive, endWalkCh, -1)
|
||||
func (s *erasureSets) startMergeWalksVersions(ctx context.Context, bucket, prefix, marker string, recursive, healing bool, endWalkCh <-chan struct{}) []FileInfoVersionsCh {
|
||||
return s.startMergeWalksVersionsN(ctx, bucket, prefix, marker, recursive, healing, endWalkCh, -1)
|
||||
}
|
||||
|
||||
// Starts a walk versions channel across N number of disks and returns a slice.
|
||||
// FileInfoCh which can be read from.
|
||||
func (s *erasureSets) startMergeWalksVersionsN(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}, ndisks int) []FileInfoVersionsCh {
|
||||
func (s *erasureSets) startMergeWalksVersionsN(ctx context.Context, bucket, prefix, marker string, recursive bool, healing bool, endWalkCh <-chan struct{}, ndisks int) []FileInfoVersionsCh {
|
||||
var entryChs []FileInfoVersionsCh
|
||||
var wg sync.WaitGroup
|
||||
var mutex sync.Mutex
|
||||
for _, set := range s.sets {
|
||||
for i, set := range s.sets {
|
||||
// Reset for the next erasure set.
|
||||
for _, disk := range set.getLoadBalancedNDisks(ndisks) {
|
||||
wg.Add(1)
|
||||
go func(disk StorageAPI) {
|
||||
go func(i int, disk StorageAPI) {
|
||||
defer wg.Done()
|
||||
|
||||
entryCh, err := disk.WalkVersions(GlobalContext, bucket, prefix, marker, recursive, endWalkCh)
|
||||
entryCh, err := disk.WalkVersions(GlobalContext, bucket, prefix, marker, recursive, healing, endWalkCh)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
mutex.Lock()
|
||||
entryChs = append(entryChs, FileInfoVersionsCh{
|
||||
Ch: entryCh,
|
||||
Ch: entryCh,
|
||||
SetIndex: i,
|
||||
})
|
||||
mutex.Unlock()
|
||||
}(disk)
|
||||
}(i, disk)
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
|
@ -984,11 +1041,11 @@ func (s *erasureSets) startMergeWalksN(ctx context.Context, bucket, prefix, mark
|
|||
var entryChs []FileInfoCh
|
||||
var wg sync.WaitGroup
|
||||
var mutex sync.Mutex
|
||||
for _, set := range s.sets {
|
||||
for i, set := range s.sets {
|
||||
// Reset for the next erasure set.
|
||||
for _, disk := range set.getLoadBalancedNDisks(ndisks) {
|
||||
wg.Add(1)
|
||||
go func(disk StorageAPI) {
|
||||
go func(i int, disk StorageAPI) {
|
||||
defer wg.Done()
|
||||
|
||||
var entryCh chan FileInfo
|
||||
|
@ -1004,10 +1061,11 @@ func (s *erasureSets) startMergeWalksN(ctx context.Context, bucket, prefix, mark
|
|||
}
|
||||
mutex.Lock()
|
||||
entryChs = append(entryChs, FileInfoCh{
|
||||
Ch: entryCh,
|
||||
Ch: entryCh,
|
||||
SetIndex: i,
|
||||
})
|
||||
mutex.Unlock()
|
||||
}(disk)
|
||||
}(i, disk)
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
|
@ -1055,6 +1113,7 @@ func (s *erasureSets) AbortMultipartUpload(ctx context.Context, bucket, object,
|
|||
|
||||
// CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
|
||||
func (s *erasureSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
|
||||
opts.ParentIsObject = s.parentDirIsObject
|
||||
return s.getHashedSet(object).CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
|
||||
}
|
||||
|
||||
|
@ -1141,81 +1200,6 @@ func formatsToDrivesInfo(endpoints Endpoints, formats []*formatErasureV3, sErrs
|
|||
return beforeDrives
|
||||
}
|
||||
|
||||
// Reloads the format from the disk, usually called by a remote peer notifier while
|
||||
// healing in a distributed setup.
|
||||
func (s *erasureSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) {
|
||||
storageDisks, errs := initStorageDisksWithErrorsWithoutHealthCheck(s.endpoints)
|
||||
for i, err := range errs {
|
||||
if err != nil && err != errDiskNotFound {
|
||||
return fmt.Errorf("Disk %s: %w", s.endpoints[i], err)
|
||||
}
|
||||
}
|
||||
defer func(storageDisks []StorageAPI) {
|
||||
if err != nil {
|
||||
closeStorageDisks(storageDisks)
|
||||
}
|
||||
}(storageDisks)
|
||||
|
||||
formats, _ := loadFormatErasureAll(storageDisks, false)
|
||||
if err = checkFormatErasureValues(formats, s.setDriveCount); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
refFormat, err := getFormatErasureInQuorum(formats)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.monitorContextCancel() // turn-off disk monitoring and replace format.
|
||||
|
||||
s.erasureDisksMu.Lock()
|
||||
|
||||
// Replace with new reference format.
|
||||
s.format = refFormat
|
||||
|
||||
// Close all existing disks and reconnect all the disks.
|
||||
for _, disk := range storageDisks {
|
||||
if disk == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
diskID, err := disk.GetDiskID()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
m, n, err := findDiskIndexByDiskID(refFormat, diskID)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if s.erasureDisks[m][n] != nil {
|
||||
s.erasureDisks[m][n].Close()
|
||||
}
|
||||
|
||||
s.endpointStrings[m*s.setDriveCount+n] = disk.String()
|
||||
if !disk.IsLocal() {
|
||||
// Enable healthcheck disk for remote endpoint.
|
||||
disk, err = newStorageAPI(disk.Endpoint())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
disk.SetDiskID(diskID)
|
||||
}
|
||||
|
||||
s.erasureDisks[m][n] = disk
|
||||
}
|
||||
|
||||
s.erasureDisksMu.Unlock()
|
||||
|
||||
mctx, mctxCancel := context.WithCancel(GlobalContext)
|
||||
s.monitorContextCancel = mctxCancel
|
||||
|
||||
go s.monitorAndConnectEndpoints(mctx, defaultMonitorConnectEndpointInterval)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// If it is a single node Erasure and all disks are root disks, it is most likely a test setup, else it is a production setup.
|
||||
// On a test setup we allow creation of format.json on root disks to help with dev/testing.
|
||||
func isTestSetup(infos []DiskInfo, errs []error) bool {
|
||||
|
@ -1335,13 +1319,8 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
|
|||
}
|
||||
}
|
||||
|
||||
// Save formats `format.json` across all disks.
|
||||
if err = saveFormatErasureAllWithErrs(ctx, storageDisks, sErrs, tmpNewFormats); err != nil {
|
||||
return madmin.HealResultItem{}, err
|
||||
}
|
||||
|
||||
refFormat, err = getFormatErasureInQuorum(tmpNewFormats)
|
||||
if err != nil {
|
||||
// Save new formats `format.json` on unformatted disks.
|
||||
if err = saveUnformattedFormat(ctx, storageDisks, tmpNewFormats); err != nil {
|
||||
return madmin.HealResultItem{}, err
|
||||
}
|
||||
|
||||
|
@ -1349,21 +1328,12 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
|
|||
|
||||
s.erasureDisksMu.Lock()
|
||||
|
||||
// Replace with new reference format.
|
||||
s.format = refFormat
|
||||
|
||||
// Disconnect/relinquish all existing disks, lockers and reconnect the disks, lockers.
|
||||
for _, disk := range storageDisks {
|
||||
if disk == nil {
|
||||
for index, format := range tmpNewFormats {
|
||||
if format == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
diskID, err := disk.GetDiskID()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
m, n, err := findDiskIndexByDiskID(refFormat, diskID)
|
||||
m, n, err := findDiskIndexByDiskID(refFormat, format.Erasure.This)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
@ -1372,19 +1342,13 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
|
|||
s.erasureDisks[m][n].Close()
|
||||
}
|
||||
|
||||
s.endpointStrings[m*s.setDriveCount+n] = disk.String()
|
||||
if !disk.IsLocal() {
|
||||
// Enable healthcheck disk for remote endpoint.
|
||||
disk, err = newStorageAPI(disk.Endpoint())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
disk.SetDiskID(diskID)
|
||||
}
|
||||
s.erasureDisks[m][n] = disk
|
||||
|
||||
s.erasureDisks[m][n] = storageDisks[index]
|
||||
s.endpointStrings[m*s.setDriveCount+n] = storageDisks[index].String()
|
||||
}
|
||||
|
||||
// Replace with new reference format.
|
||||
s.format = refFormat
|
||||
|
||||
s.erasureDisksMu.Unlock()
|
||||
|
||||
mctx, mctxCancel := context.WithCancel(GlobalContext)
|
||||
|
|
|
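The cleanupDeletedObjects scheduler added above uses a stop-and-reset timer rather than a free-running ticker, so a slow sweep can never queue overlapping runs. A minimal standalone sketch of that pattern follows; runEvery and the printed sweep are illustrative stand-ins, not code from this change:

package main

import (
	"context"
	"fmt"
	"time"
)

// runEvery invokes sweep once per interval, resetting the timer only
// after the previous sweep finishes so runs never overlap, and stops
// when ctx is cancelled.
func runEvery(ctx context.Context, interval time.Duration, sweep func(context.Context)) {
	timer := time.NewTimer(interval)
	defer timer.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-timer.C:
			sweep(ctx)
			timer.Reset(interval) // reset after the work, not before
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	runEvery(ctx, time.Second, func(context.Context) { fmt.Println("sweep") })
}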
@@ -20,6 +20,8 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"math/rand"
+	"os"
 	"sort"
 	"sync"
 	"time"

@@ -64,12 +66,14 @@ type erasureObjects struct {
 	// Byte pools used for temporary i/o buffers.
 	bp *bpool.BytePoolCap

+	deletedCleanupSleeper *dynamicSleeper
+
 	mrfOpCh chan partialOperation
 }

 // NewNSLock - initialize a new namespace RWLocker instance.
-func (er erasureObjects) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker {
-	return er.nsMutex.NewNSLock(ctx, er.getLockers, bucket, objects...)
+func (er erasureObjects) NewNSLock(bucket string, objects ...string) RWLocker {
+	return er.nsMutex.NewNSLock(er.getLockers, bucket, objects...)
 }

 // SetDriveCount returns the current drives per set.

@@ -237,6 +241,31 @@ func (er erasureObjects) StorageInfo(ctx context.Context, local bool) (StorageIn
 	return getStorageInfo(disks, endpoints)
 }

+// Clean-up previously deleted objects. from .minio.sys/tmp/.trash/
+func (er erasureObjects) cleanupDeletedObjects(ctx context.Context) {
+	// run multiple cleanup's local to this server.
+	var wg sync.WaitGroup
+	for _, disk := range er.getLoadBalancedLocalDisks() {
+		if disk != nil {
+			wg.Add(1)
+			go func(disk StorageAPI) {
+				defer wg.Done()
+				diskPath := disk.Endpoint().Path
+				readDirFn(pathJoin(diskPath, minioMetaTmpDeletedBucket), func(ddir string, typ os.FileMode) error {
+					wait := er.deletedCleanupSleeper.Timer(ctx)
+					if intDataUpdateTracker != nil && intDataUpdateTracker.debug {
+						logger.Info("cleanupDeletedObjects: %s/%s", minioMetaTmpDeletedBucket, ddir)
+					}
+					removeAll(pathJoin(diskPath, minioMetaTmpDeletedBucket, ddir))
+					wait()
+					return nil
+				})
+			}(disk)
+		}
+	}
+	wg.Wait()
+}
+
 // CrawlAndGetDataUsage will start crawling buckets and send updated totals as they are traversed.
 // Updates are sent on a regular basis and the caller *must* consume them.
 func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []BucketInfo, bf *bloomFilter, updates chan<- dataUsageCache) error {

@@ -294,7 +323,8 @@ func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []Buc
 	var saverWg sync.WaitGroup
 	saverWg.Add(1)
 	go func() {
-		const updateTime = 30 * time.Second
+		// Add jitter to the update time so multiple sets don't sync up.
+		var updateTime = 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
 		t := time.NewTicker(updateTime)
 		defer t.Stop()
 		defer saverWg.Done()

@@ -377,11 +407,15 @@ func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []Buc
 		if r := cache.root(); r != nil {
 			root = cache.flatten(*r)
 		}
+		t := time.Now()
 		bucketResults <- dataUsageEntryInfo{
 			Name:   cache.Info.Name,
 			Parent: dataUsageRoot,
 			Entry:  root,
 		}
+		// We want to avoid synchronizing up all writes in case
+		// the results are piled up.
+		time.Sleep(time.Duration(float64(time.Since(t)) * rand.Float64()))
 		// Save cache
 		logger.LogIf(ctx, cache.save(ctx, er, cacheName))
 	}
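The crawler changes above stagger work in two places: the cache-save ticker gains up to ten seconds of random jitter, and each bucket result is followed by a sleep proportional to how long the send took. Both exist so that many erasure sets do not flush in lockstep. A small self-contained sketch of the jitter calculation, under the same 30s-plus-0-to-10s assumption:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Base interval of 30s plus 0-10s of jitter, as in the diff above.
	updateTime := 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
	t := time.NewTicker(updateTime)
	defer t.Stop()
	fmt.Println("saving caches every", updateTime)
}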
@@ -704,28 +704,18 @@ func initErasureMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*fo
 	return nil
 }

-// saveFormatErasureAllWithErrs - populates `format.json` on disks in its order.
-func saveFormatErasureAllWithErrs(ctx context.Context, storageDisks []StorageAPI, fErrs []error, formats []*formatErasureV3) error {
-	g := errgroup.WithNErrs(len(storageDisks))
-
-	// Write `format.json` to all disks.
-	for index := range storageDisks {
-		index := index
-		g.Go(func() error {
-			if formats[index] == nil {
-				return errDiskNotFound
-			}
-			if errors.Is(fErrs[index], errUnformattedDisk) {
-				return saveFormatErasure(storageDisks[index], formats[index], true)
-			}
-			return nil
-		}, index)
+// saveUnformattedFormat - populates `format.json` on unformatted disks.
+// also adds `.healing.bin` on the disks which are being actively healed.
+func saveUnformattedFormat(ctx context.Context, storageDisks []StorageAPI, formats []*formatErasureV3) error {
+	for index, format := range formats {
+		if format == nil {
+			continue
+		}
+		if err := saveFormatErasure(storageDisks[index], format, true); err != nil {
+			return err
+		}
 	}
-
-	writeQuorum := getWriteQuorum(len(storageDisks))
-	// Wait for the routines to finish.
-	return reduceWriteQuorumErrs(ctx, g.Wait(), nil, writeQuorum)
+	return nil
 }

 // saveFormatErasureAll - populates `format.json` on disks in its order.
@@ -22,6 +22,7 @@ import (
 	"os"
 	pathutil "path"
 	"runtime"
+	"strings"

 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/lock"

@@ -403,6 +404,55 @@ func fsRenameFile(ctx context.Context, sourcePath, destPath string) error {
 	return nil
 }

+func deleteFile(basePath, deletePath string, recursive bool) error {
+	if basePath == "" || deletePath == "" {
+		return nil
+	}
+	isObjectDir := HasSuffix(deletePath, SlashSeparator)
+	basePath = pathutil.Clean(basePath)
+	deletePath = pathutil.Clean(deletePath)
+	if !strings.HasPrefix(deletePath, basePath) || deletePath == basePath {
+		return nil
+	}
+
+	var err error
+	if recursive {
+		os.RemoveAll(deletePath)
+	} else {
+		err = os.Remove(deletePath)
+	}
+	if err != nil {
+		switch {
+		case isSysErrNotEmpty(err):
+			// if object is a directory, but if its not empty
+			// return FileNotFound to indicate its an empty prefix.
+			if isObjectDir {
+				return errFileNotFound
+			}
+			// Ignore errors if the directory is not empty. The server relies on
+			// this functionality, and sometimes uses recursion that should not
+			// error on parent directories.
+			return nil
+		case osIsNotExist(err):
+			return errFileNotFound
+		case osIsPermission(err):
+			return errFileAccessDenied
+		case isSysErrIO(err):
+			return errFaultyDisk
+		default:
+			return err
+		}
+	}
+
+	deletePath = pathutil.Dir(deletePath)
+
+	// Delete parent directory obviously not recursively. Errors for
+	// parent directories shouldn't trickle down.
+	deleteFile(basePath, deletePath, false)
+
+	return nil
+}
+
 // fsDeleteFile is a wrapper for deleteFile(), after checking the path length.
 func fsDeleteFile(ctx context.Context, basePath, deletePath string) error {
 	if err := checkPathLength(basePath); err != nil {
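Worth noting about the deleteFile added above: the tail call prunes now-empty parent directories upward but stops before basePath itself, since the prefix check returns nil once deletePath equals basePath. A hypothetical call, assuming deleteFile is in scope and with purely illustrative paths:

// Remove one object file, then let deleteFile prune empty parents
// upward, stopping before the disk root (paths illustrative only).
if err := deleteFile("/data/disk1", "/data/disk1/bucket/obj/xl.meta", false); err != nil {
	// possible sentinel errors: errFileNotFound, errFileAccessDenied, errFaultyDisk
	log.Println(err)
}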
@@ -714,8 +714,8 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
 	}

 	// Hold write lock on the object.
-	destLock := fs.NewNSLock(ctx, bucket, object)
-	if err = destLock.GetLock(globalOperationTimeout); err != nil {
+	destLock := fs.NewNSLock(bucket, object)
+	if err = destLock.GetLock(ctx, globalOperationTimeout); err != nil {
 		return oi, err
 	}
 	defer destLock.Unlock()

cmd/fs-v1.go
@@ -186,9 +186,9 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
 }

 // NewNSLock - initialize a new namespace RWLocker instance.
-func (fs *FSObjects) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker {
+func (fs *FSObjects) NewNSLock(bucket string, objects ...string) RWLocker {
 	// lockers are explicitly 'nil' for FS mode since there are only local lockers
-	return fs.nsMutex.NewNSLock(ctx, nil, bucket, objects...)
+	return fs.nsMutex.NewNSLock(nil, bucket, objects...)
 }

 // SetDriveCount no-op

@@ -601,8 +601,8 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
 	defer ObjectPathUpdated(path.Join(dstBucket, dstObject))

 	if !cpSrcDstSame {
-		objectDWLock := fs.NewNSLock(ctx, dstBucket, dstObject)
-		if err := objectDWLock.GetLock(globalOperationTimeout); err != nil {
+		objectDWLock := fs.NewNSLock(dstBucket, dstObject)
+		if err := objectDWLock.GetLock(ctx, globalOperationTimeout); err != nil {
 			return oi, err
 		}
 		defer objectDWLock.Unlock()

@@ -692,15 +692,15 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
 	if lockType != noLock {
 		// Lock the object before reading.
-		lock := fs.NewNSLock(ctx, bucket, object)
+		lock := fs.NewNSLock(bucket, object)
 		switch lockType {
 		case writeLock:
-			if err = lock.GetLock(globalOperationTimeout); err != nil {
+			if err = lock.GetLock(ctx, globalOperationTimeout); err != nil {
 				return nil, err
 			}
 			nsUnlocker = lock.Unlock
 		case readLock:
-			if err = lock.GetRLock(globalOperationTimeout); err != nil {
+			if err = lock.GetRLock(ctx, globalOperationTimeout); err != nil {
 				return nil, err
 			}
 			nsUnlocker = lock.RUnlock

@@ -785,8 +785,8 @@ func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offse
 	}

 	// Lock the object before reading.
-	lk := fs.NewNSLock(ctx, bucket, object)
-	if err := lk.GetRLock(globalOperationTimeout); err != nil {
+	lk := fs.NewNSLock(bucket, object)
+	if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
 		logger.LogIf(ctx, err)
 		return err
 	}

@@ -1013,8 +1013,8 @@ func (fs *FSObjects) getObjectInfo(ctx context.Context, bucket, object string) (
 // getObjectInfoWithLock - reads object metadata and replies back ObjectInfo.
 func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) {
 	// Lock the object before reading.
-	lk := fs.NewNSLock(ctx, bucket, object)
-	if err := lk.GetRLock(globalOperationTimeout); err != nil {
+	lk := fs.NewNSLock(bucket, object)
+	if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
 		return oi, err
 	}
 	defer lk.RUnlock()

@@ -1051,8 +1051,8 @@ func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, o
 	oi, err := fs.getObjectInfoWithLock(ctx, bucket, object)
 	if err == errCorruptedFormat || err == io.EOF {
-		lk := fs.NewNSLock(ctx, bucket, object)
-		if err = lk.GetLock(globalOperationTimeout); err != nil {
+		lk := fs.NewNSLock(bucket, object)
+		if err = lk.GetLock(ctx, globalOperationTimeout); err != nil {
 			return oi, toObjectErr(err, bucket, object)
 		}

@@ -1102,8 +1102,8 @@ func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string
 	}

 	// Lock the object.
-	lk := fs.NewNSLock(ctx, bucket, object)
-	if err := lk.GetLock(globalOperationTimeout); err != nil {
+	lk := fs.NewNSLock(bucket, object)
+	if err := lk.GetLock(ctx, globalOperationTimeout); err != nil {
 		logger.LogIf(ctx, err)
 		return objInfo, err
 	}

@@ -1284,8 +1284,8 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string, op
 	}

 	// Acquire a write lock before deleting the object.
-	lk := fs.NewNSLock(ctx, bucket, object)
-	if err = lk.GetLock(globalOperationTimeout); err != nil {
+	lk := fs.NewNSLock(bucket, object)
+	if err = lk.GetLock(ctx, globalOperationTimeout); err != nil {
 		return objInfo, err
 	}
 	defer lk.Unlock()

@@ -1535,12 +1535,6 @@ func (fs *FSObjects) DeleteObjectTags(ctx context.Context, bucket, object string
 	return fs.PutObjectTags(ctx, bucket, object, "", opts)
 }

-// ReloadFormat - no-op for fs, Valid only for Erasure.
-func (fs *FSObjects) ReloadFormat(ctx context.Context, dryRun bool) error {
-	logger.LogIf(ctx, NotImplemented{})
-	return NotImplemented{}
-}
-
 // HealFormat - no-op for fs, Valid only for Erasure.
 func (fs *FSObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
 	logger.LogIf(ctx, NotImplemented{})
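Every fs-v1.go hunk above is the same mechanical rewrite: the context moves from lock construction, NewNSLock(ctx, bucket, object), to lock acquisition, GetLock(ctx, timeout), so acquisition rather than construction is what can be cancelled or time out. A self-contained sketch of the call pattern the new API expects; RWLocker here is a local stand-in mirroring the server's interface, not the server's own type:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// RWLocker mirrors the reworked namespace locker shape: the context
// is supplied at acquisition time, not at construction.
type RWLocker interface {
	GetLock(ctx context.Context, timeout time.Duration) error
	Unlock()
}

type fakeLock struct{}

func (fakeLock) GetLock(ctx context.Context, timeout time.Duration) error {
	select {
	case <-ctx.Done():
		return errors.New("lock acquisition cancelled")
	default:
		return nil
	}
}
func (fakeLock) Unlock() {}

// withLock shows the sequence used throughout the diff above:
// construct without a context, acquire with one, defer the unlock.
func withLock(ctx context.Context, lk RWLocker, fn func() error) error {
	if err := lk.GetLock(ctx, 5*time.Second); err != nil {
		return err
	}
	defer lk.Unlock()
	return fn()
}

func main() {
	err := withLock(context.Background(), fakeLock{}, func() error {
		fmt.Println("critical section")
		return nil
	})
	fmt.Println("err:", err)
}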
@@ -270,7 +270,7 @@ func IsBackendOnline(ctx context.Context, clnt *http.Client, urlStr string) bool
 	resp, err := clnt.Do(req)
 	if err != nil {
 		clnt.CloseIdleConnections()
-		return !xnet.IsNetworkOrHostDown(err)
+		return !xnet.IsNetworkOrHostDown(err, false)
 	}
 	xhttp.DrainBody(resp.Body)
 	return true

@@ -291,7 +291,7 @@ func ErrorRespToObjectError(err error, params ...string) error {
 		object = params[1]
 	}

-	if xnet.IsNetworkOrHostDown(err) {
+	if xnet.IsNetworkOrHostDown(err, false) {
 		return BackendDown{}
 	}
@@ -51,8 +51,8 @@ type GatewayLocker struct {
 }

 // NewNSLock - implements gateway level locker
-func (l *GatewayLocker) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker {
-	return l.nsMutex.NewNSLock(ctx, nil, bucket, objects...)
+func (l *GatewayLocker) NewNSLock(bucket string, objects ...string) RWLocker {
+	return l.nsMutex.NewNSLock(nil, bucket, objects...)
 }

 // Walk - implements common gateway level Walker, to walk on all objects recursively at a prefix
@@ -41,8 +41,8 @@ func (a GatewayUnsupported) CrawlAndGetDataUsage(ctx context.Context, bf *bloomF
 }

 // NewNSLock is a dummy stub for gateway.
-func (a GatewayUnsupported) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker {
-	logger.CriticalIf(ctx, errors.New("not implemented"))
+func (a GatewayUnsupported) NewNSLock(bucket string, objects ...string) RWLocker {
+	logger.CriticalIf(context.Background(), errors.New("not implemented"))
 	return nil
 }

@@ -160,11 +160,6 @@ func (a GatewayUnsupported) DeleteBucketSSEConfig(ctx context.Context, bucket st
 	return NotImplemented{}
 }

-// ReloadFormat - Not implemented stub.
-func (a GatewayUnsupported) ReloadFormat(ctx context.Context, dryRun bool) error {
-	return NotImplemented{}
-}
-
 // HealFormat - Not implemented stub
 func (a GatewayUnsupported) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
 	return madmin.HealResultItem{}, NotImplemented{}
@@ -17,6 +17,7 @@
 package cmd

 import (
+	"context"
 	"net/http"
 	"strings"
 	"time"

@@ -157,15 +158,48 @@ const (
 	loginPathPrefix = SlashSeparator + "login"
 )

 // Adds redirect rules for incoming requests.
 type redirectHandler struct {
 	handler http.Handler
 }

-func setBrowserRedirectHandler(h http.Handler) http.Handler {
+func setRedirectHandler(h http.Handler) http.Handler {
 	return redirectHandler{handler: h}
 }

+// Adds redirect rules for incoming requests.
+type browserRedirectHandler struct {
+	handler http.Handler
+}
+
+func setBrowserRedirectHandler(h http.Handler) http.Handler {
+	return browserRedirectHandler{handler: h}
+}
+
+func shouldProxy() bool {
+	if newObjectLayerFn() == nil {
+		return true
+	}
+	return !globalIAMSys.Initialized()
+}
+
+func (h redirectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if guessIsRPCReq(r) || guessIsBrowserReq(r) ||
+		guessIsHealthCheckReq(r) || guessIsMetricsReq(r) || isAdminReq(r) {
+		h.handler.ServeHTTP(w, r)
+		return
+	}
+	if shouldProxy() {
+		// if this server is still initializing, proxy the request
+		// to any other online servers to avoid 503 for any incoming
+		// API calls.
+		if idx := getOnlineProxyEndpointIdx(); idx >= 0 {
+			proxyRequest(context.TODO(), w, r, globalProxyEndpoints[idx])
+			return
+		}
+	}
+	h.handler.ServeHTTP(w, r)
+}
+
 // Fetch redirect location if urlPath satisfies certain
 // criteria. Some special names are considered to be
 // redirectable, this is purely internal function and

@@ -236,7 +270,7 @@ func guessIsRPCReq(req *http.Request) bool {
 		strings.HasPrefix(req.URL.Path, minioReservedBucketPath+SlashSeparator)
 }

-func (h redirectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (h browserRedirectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	// Re-direction is handled specifically for browser requests.
 	if guessIsBrowserReq(r) {
 		// Fetch the redirect location if any.
@@ -18,6 +18,8 @@ package cmd

 import (
 	"context"
+	"fmt"
+	"runtime"
 	"sync"
 	"time"

@@ -42,8 +44,8 @@ func newBgHealSequence() *healSequence {
 	}

 	return &healSequence{
-		sourceCh: make(chan healSource),
-		respCh:   make(chan healResult),
+		sourceCh:    make(chan healSource, runtime.GOMAXPROCS(0)),
+		respCh:      make(chan healResult, runtime.GOMAXPROCS(0)),
 		startTime:   UTCNow(),
 		clientToken: bgHealingUUID,
 		// run-background heal with reserved bucket

@@ -97,7 +99,7 @@ func getLocalBackgroundHealStatus() (madmin.BgHealState, bool) {
 }

 // healErasureSet lists and heals all objects in a specific erasure set
-func healErasureSet(ctx context.Context, setIndex int, buckets []BucketInfo, disks []StorageAPI) error {
+func healErasureSet(ctx context.Context, prefix string, setIndex int, maxIO int, maxSleep time.Duration, buckets []BucketInfo, disks []StorageAPI) error {
 	// Get background heal sequence to send elements to heal
 	var bgSeq *healSequence
 	var ok bool

@@ -114,11 +116,12 @@ func healErasureSet(ctx context.Context, setIndex int, buckets []BucketInfo, dis
 		}
 	}

-	buckets = append(buckets, BucketInfo{
-		Name: pathJoin(minioMetaBucket, minioConfigPrefix),
-	}, BucketInfo{
-		Name: pathJoin(minioMetaBucket, bucketConfigPrefix),
-	}) // add metadata .minio.sys/ bucket prefixes to heal
+	obj := newObjectLayerFn()
+	if obj == nil {
+		return errServerNotInitialized
+	}
+
+	setDriveCount := obj.SetDriveCount()

 	// Try to pro-actively heal backend-encrypted file.
 	bgSeq.sourceCh <- healSource{

@@ -126,53 +129,87 @@ func healErasureSet(ctx context.Context, setIndex int, buckets []BucketInfo, dis
 		object: backendEncryptedFile,
 	}

-	// Heal all buckets with all objects
-	for _, bucket := range buckets {
-		// Heal current bucket
-		bgSeq.sourceCh <- healSource{
-			bucket: bucket.Name,
-		}
+	// Heal config prefix.
+	bgSeq.sourceCh <- healSource{
+		bucket: pathJoin(minioMetaBucket, minioConfigPrefix),
+	}

-		var entryChs []FileInfoVersionsCh
-		var mu sync.Mutex
-		var wg sync.WaitGroup
-		for _, disk := range disks {
-			disk := disk
-			wg.Add(1)
-			go func() {
-				defer wg.Done()
-				entryCh, err := disk.WalkVersions(ctx, bucket.Name, "", "", true, ctx.Done())
-				if err != nil {
-					// Disk walk returned error, ignore it.
-					return
-				}
-				mu.Lock()
-				entryChs = append(entryChs, FileInfoVersionsCh{
-					Ch: entryCh,
-				})
-				mu.Unlock()
-			}()
-		}
-		wg.Wait()
+	bgSeq.sourceCh <- healSource{
+		bucket: pathJoin(minioMetaBucket, bucketConfigPrefix),
+	}

-		entriesValid := make([]bool, len(entryChs))
-		entries := make([]FileInfoVersions, len(entryChs))
+	// Heal all buckets with all objects
+	var wg sync.WaitGroup
+	for _, bucket := range buckets {
+		wg.Add(1)
+		go func(bucket BucketInfo, disks []StorageAPI) {
+			defer wg.Done()

-		for {
-			entry, _, ok := lexicallySortedEntryVersions(entryChs, entries, entriesValid)
-			if !ok {
-				break
-			}
+			// Heal current bucket
+			bgSeq.sourceCh <- healSource{
+				bucket: bucket.Name,
+			}

-			for _, version := range entry.Versions {
-				bgSeq.sourceCh <- healSource{
-					bucket:    bucket.Name,
-					object:    version.Name,
-					versionID: version.VersionID,
-				}
-			}
-		}
-	}
+			var entryChs []FileInfoVersionsCh
+			var mu sync.Mutex
+			var wwg sync.WaitGroup
+			for _, disk := range disks {
+				wwg.Add(1)
+				go func(disk StorageAPI) {
+					defer wwg.Done()
+					if disk == nil {
+						// disk is nil and not available.
+						return
+					}
+					entryCh, err := disk.WalkVersions(ctx, bucket.Name, prefix, "", true, false, ctx.Done())
+					if err != nil {
+						logger.LogIf(ctx, fmt.Errorf("%s returned %w - disk will be ignored and continued further", disk, err))
+						// Disk walk returned error, ignore it.
+						return
+					}
+					mu.Lock()
+					entryChs = append(entryChs, FileInfoVersionsCh{
+						Ch:       entryCh,
+						SetIndex: setIndex,
+					})
+					mu.Unlock()
+				}(disk)
+			}
+			wwg.Wait()
+
+			entriesValid := make([]bool, len(entryChs))
+			entries := make([]FileInfoVersions, len(entryChs))
+
+			for {
+				entry, quorumCount, ok := lexicallySortedEntryVersions(entryChs, entries, entriesValid)
+				if !ok {
+					logger.Info("Healing finished for bucket '%s' on erasure set %d", bucket.Name, setIndex+1)
+					// We are finished with this bucket return.
+					return
+				}
+
+				if quorumCount == setDriveCount {
+					continue
+				}
+
+				for _, version := range entry.Versions {
+					hsrc := healSource{
+						bucket:    bucket.Name,
+						object:    version.Name,
+						versionID: version.VersionID,
+					}
+					hsrc.throttle.maxIO = maxIO
+					hsrc.throttle.maxSleep = maxSleep
+					if err := bgSeq.queueHealTask(ctx, hsrc, madmin.HealItemObject); err != nil {
+						if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
+							logger.LogIf(ctx, err)
+						}
+					}
+				}
+			}
+		}(bucket, disks)
+	}
+	wg.Wait()

 	return nil
 }
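Structurally, the healErasureSet rewrite above is a two-level fan-out: one goroutine per bucket, and inside it one walker goroutine per disk, joined by nested WaitGroups. A stripped-down, self-contained sketch of that shape, where healBuckets and walk are hypothetical placeholders rather than the server's code:

package main

import (
	"fmt"
	"sync"
)

// healBuckets mirrors the nested fan-out: an outer group joins the
// per-bucket goroutines, an inner group joins the per-disk walkers.
func healBuckets(buckets, disks []string, walk func(bucket, disk string)) {
	var wg sync.WaitGroup
	for _, bucket := range buckets {
		wg.Add(1)
		go func(bucket string) {
			defer wg.Done()
			var wwg sync.WaitGroup // inner group, one walker per disk
			for _, disk := range disks {
				wwg.Add(1)
				go func(disk string) {
					defer wwg.Done()
					walk(bucket, disk)
				}(disk)
			}
			wwg.Wait()
		}(bucket)
	}
	wg.Wait()
}

func main() {
	healBuckets([]string{"a", "b"}, []string{"d1", "d2"}, func(b, d string) {
		fmt.Println("walking", b, "on", d)
	})
}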
@@ -47,13 +47,13 @@ func (t *apiConfig) init(cfg api.Config, setDriveCount int) {
 		stats, err := sys.GetStats()
 		if err != nil {
 			logger.LogIf(GlobalContext, err)
-			// Default to 16 GiB, not critical.
-			stats.TotalRAM = 16 << 30
+			// Default to 8 GiB, not critical.
+			stats.TotalRAM = 8 << 30
 		}
 		// max requests per node is calculated as
 		// total_ram / ram_per_request
-		// ram_per_request is 4MiB * setDriveCount + 2 * 10MiB (default erasure block size)
-		apiRequestsMaxPerNode = int(stats.TotalRAM / uint64(setDriveCount*readBlockSize+blockSizeV1*2))
+		// ram_per_request is 4MiB * setDriveCount + (2 * 10MiB (v1 erasure block size) + 2 * 1MiB (v2 erasure block size)
+		apiRequestsMaxPerNode = int(stats.TotalRAM / uint64(setDriveCount*readBlockSize+int(blockSizeV1*2+blockSizeV2*2)))
 	} else {
 		apiRequestsMaxPerNode = cfg.RequestsMax
 		if len(globalEndpoints.Hostnames()) > 0 {

@@ -85,31 +85,34 @@ func (t *apiConfig) getClusterDeadline() time.Duration {
 	return t.clusterDeadline
 }

-func (t *apiConfig) getRequestsPool() (chan struct{}, <-chan time.Time) {
+func (t *apiConfig) getRequestsPool() (chan struct{}, time.Duration) {
 	t.mu.RLock()
 	defer t.mu.RUnlock()

 	if t.requestsPool == nil {
-		return nil, nil
+		return nil, time.Duration(0)
 	}

-	return t.requestsPool, time.NewTimer(t.requestsDeadline).C
+	return t.requestsPool, t.requestsDeadline
 }

 // maxClients throttles the S3 API calls
 func maxClients(f http.HandlerFunc) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
-		pool, deadlineTimer := globalAPIConfig.getRequestsPool()
+		pool, deadline := globalAPIConfig.getRequestsPool()
 		if pool == nil {
 			f.ServeHTTP(w, r)
 			return
 		}

+		deadlineTimer := time.NewTimer(deadline)
+		defer deadlineTimer.Stop()
+
 		select {
 		case pool <- struct{}{}:
 			defer func() { <-pool }()
 			f.ServeHTTP(w, r)
-		case <-deadlineTimer:
+		case <-deadlineTimer.C:
 			// Send a http timeout message
 			writeErrorResponse(r.Context(), w,
 				errorCodes.ToAPIErr(ErrOperationMaxedOut),
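The maxClients change above is a resource-leak fix: getRequestsPool used to allocate a timer via time.NewTimer(...).C and return only the channel, so the timer could never be stopped; now it returns a plain time.Duration and the caller creates and defers-stops the timer per request. A self-contained sketch of the resulting semaphore-with-deadline throttle, with hypothetical names:

package main

import (
	"net/http"
	"time"
)

// throttle admits at most cap(pool) concurrent requests and rejects
// callers that cannot enter the pool before the deadline fires.
func throttle(pool chan struct{}, deadline time.Duration, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		timer := time.NewTimer(deadline)
		defer timer.Stop() // stop the per-request timer, mirroring the fix above

		select {
		case pool <- struct{}{}:
			defer func() { <-pool }() // release the slot when done
			next(w, r)
		case <-timer.C:
			http.Error(w, "server busy", http.StatusServiceUnavailable)
		}
	}
}

func main() {
	pool := make(chan struct{}, 100) // at most 100 in-flight requests
	http.Handle("/", throttle(pool, 10*time.Second, func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}))
	http.ListenAndServe(":8080", nil)
}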
@@ -20,31 +20,36 @@ import (
 	"context"
 	"net/http"
 	"strconv"

 	xhttp "github.com/minio/minio/cmd/http"
 )

+const unavailable = "offline"
+
 // ClusterCheckHandler returns if the server is ready for requests.
 func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "ClusterCheckHandler")

-	objLayer := newObjectLayerFn()
-	// Service not initialized yet
-	if objLayer == nil {
+	if shouldProxy() {
+		w.Header().Set(xhttp.MinIOServerStatus, unavailable)
 		writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
 		return
 	}

+	objLayer := newObjectLayerFn()
+
 	ctx, cancel := context.WithTimeout(ctx, globalAPIConfig.getClusterDeadline())
 	defer cancel()

 	opts := HealthOptions{Maintenance: r.URL.Query().Get("maintenance") == "true"}
 	result := objLayer.Health(ctx, opts)
 	if result.WriteQuorum > 0 {
-		w.Header().Set("X-Minio-Write-Quorum", strconv.Itoa(result.WriteQuorum))
+		w.Header().Set(xhttp.MinIOWriteQuorum, strconv.Itoa(result.WriteQuorum))
 	}
 	if !result.Healthy {
 		// return how many drives are being healed if any
 		if result.HealingDrives > 0 {
-			w.Header().Set("X-Minio-Healing-Drives", strconv.Itoa(result.HealingDrives))
+			w.Header().Set(xhttp.MinIOHealingDrives, strconv.Itoa(result.HealingDrives))
 		}
 		// As a maintenance call we are purposefully asked to be taken
 		// down, this is for orchestrators to know if we can safely

@@ -61,12 +66,19 @@ func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) {
 // ReadinessCheckHandler Checks if the process is up. Always returns success.
 func ReadinessCheckHandler(w http.ResponseWriter, r *http.Request) {
-	// TODO: only implement this function to notify that this pod is
-	// busy, at a local scope in future, for now '200 OK'.
+	if shouldProxy() {
+		// Service not initialized yet
+		w.Header().Set(xhttp.MinIOServerStatus, unavailable)
+	}
+
 	writeResponse(w, http.StatusOK, nil, mimeNone)
 }

 // LivenessCheckHandler - Checks if the process is up. Always returns success.
 func LivenessCheckHandler(w http.ResponseWriter, r *http.Request) {
+	if shouldProxy() {
+		// Service not initialized yet
+		w.Header().Set(xhttp.MinIOServerStatus, unavailable)
+	}
 	writeResponse(w, http.StatusOK, nil, mimeNone)
 }
@@ -126,6 +126,12 @@ const (
 	// Header indicates if the etag should be preserved by client
 	MinIOSourceETag = "x-minio-source-etag"

+	// Writes expected write quorum
+	MinIOWriteQuorum = "x-minio-write-quorum"
+
+	// Reports number of drives currently healing
+	MinIOHealingDrives = "x-minio-healing-drives"
 )

 // Common http query params S3 API
@@ -194,7 +194,7 @@ func NewServer(addrs []string, handler http.Handler, getCert certs.GetCertificat
 			// TLS hardening
 			PreferServerCipherSuites: true,
 			MinVersion:               tls.VersionTLS12,
-			NextProtos:               []string{"h2", "http/1.1"},
+			NextProtos:               []string{"http/1.1", "h2"},
 		}
 		tlsConfig.GetCertificate = getCert
 	}
@@ -27,7 +27,7 @@ import (
 	"time"
 	"unicode/utf8"

-	jwtgo "github.com/dgrijalva/jwt-go"
+	jwtgo "github.com/golang-jwt/jwt"
 	"github.com/minio/minio-go/v7/pkg/set"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/auth"
@@ -26,7 +26,7 @@ import (
 	"time"
 	"unicode/utf8"

-	jwtgo "github.com/dgrijalva/jwt-go"
+	jwtgo "github.com/golang-jwt/jwt"

 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/auth"

cmd/iam.go
@ -24,6 +24,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
|
@ -201,6 +202,8 @@ func newMappedPolicy(policy string) MappedPolicy {
|
|||
|
||||
// IAMSys - config system.
|
||||
type IAMSys struct {
|
||||
sync.Mutex
|
||||
|
||||
usersSysType UsersSysType
|
||||
|
||||
// map of policy names to policy definitions
|
||||
|
@ -277,7 +280,7 @@ type IAMStorageAPI interface {
|
|||
// simplifies the implementation for group removal. This is called
|
||||
// only via IAM notifications.
|
||||
func (sys *IAMSys) LoadGroup(objAPI ObjectLayer, group string) error {
|
||||
if objAPI == nil || sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -318,7 +321,7 @@ func (sys *IAMSys) LoadGroup(objAPI ObjectLayer, group string) error {
|
|||
|
||||
// LoadPolicy - reloads a specific canned policy from backend disks or etcd.
|
||||
func (sys *IAMSys) LoadPolicy(objAPI ObjectLayer, policyName string) error {
|
||||
if objAPI == nil || sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -336,7 +339,7 @@ func (sys *IAMSys) LoadPolicy(objAPI ObjectLayer, policyName string) error {
|
|||
// LoadPolicyMapping - loads the mapped policy for a user or group
|
||||
// from storage into server memory.
|
||||
func (sys *IAMSys) LoadPolicyMapping(objAPI ObjectLayer, userOrGroup string, isGroup bool) error {
|
||||
if objAPI == nil || sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -362,7 +365,7 @@ func (sys *IAMSys) LoadPolicyMapping(objAPI ObjectLayer, userOrGroup string, isG
|
|||
|
||||
// LoadUser - reloads a specific user from backend disks or etcd.
|
||||
func (sys *IAMSys) LoadUser(objAPI ObjectLayer, accessKey string, userType IAMUserType) error {
|
||||
if objAPI == nil || sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -386,7 +389,7 @@ func (sys *IAMSys) LoadUser(objAPI ObjectLayer, accessKey string, userType IAMUs
|
|||
|
||||
// LoadServiceAccount - reloads a specific service account from backend disks or etcd.
|
||||
func (sys *IAMSys) LoadServiceAccount(accessKey string) error {
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -410,6 +413,9 @@ func (sys *IAMSys) doIAMConfigMigration(ctx context.Context) error {
|
|||
|
||||
// InitStore initializes IAM stores
|
||||
func (sys *IAMSys) InitStore(objAPI ObjectLayer) {
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
if globalEtcdClient == nil {
|
||||
sys.store = newIAMObjectStore(objAPI)
|
||||
} else {
|
||||
|
@ -421,6 +427,16 @@ func (sys *IAMSys) InitStore(objAPI ObjectLayer) {
|
|||
}
|
||||
}
|
||||
|
||||
// Initialized check if IAM is initialized
|
||||
func (sys *IAMSys) Initialized() bool {
|
||||
if sys == nil {
|
||||
return false
|
||||
}
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
return sys.store != nil
|
||||
}
|
||||
|
||||
// Init - initializes config system by reading entries from config/iam
|
||||
func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) {
|
||||
retryCtx, cancel := context.WithCancel(ctx)
|
||||
|
@ -429,7 +445,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) {
|
|||
defer cancel()
|
||||
|
||||
// Hold the lock for migration only.
|
||||
txnLk := objAPI.NewNSLock(retryCtx, minioMetaBucket, minioConfigPrefix+"/iam.lock")
|
||||
txnLk := objAPI.NewNSLock(minioMetaBucket, minioConfigPrefix+"/iam.lock")
|
||||
|
||||
// Initializing IAM sub-system needs a retry mechanism for
|
||||
// the following reasons:
|
||||
|
@ -446,7 +462,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) {
|
|||
for range retry.NewTimerWithJitter(retryCtx, time.Second, 5*time.Second, retry.MaxJitter) {
|
||||
// let one of the server acquire the lock, if not let them timeout.
|
||||
// which shall be retried again by this loop.
|
||||
if err := txnLk.GetLock(iamLockTimeout); err != nil {
|
||||
if err := txnLk.GetLock(retryCtx, iamLockTimeout); err != nil {
|
||||
logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. trying to acquire lock")
|
||||
continue
|
||||
}
|
||||
|
@ -505,7 +521,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) {
|
|||
|
||||
// DeletePolicy - deletes a canned policy from backend or etcd.
|
||||
func (sys *IAMSys) DeletePolicy(policyName string) error {
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -557,7 +573,7 @@ func (sys *IAMSys) DeletePolicy(policyName string) error {
|
|||
|
||||
// InfoPolicy - expands the canned policy into its JSON structure.
|
||||
func (sys *IAMSys) InfoPolicy(policyName string) (iampolicy.Policy, error) {
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return iampolicy.Policy{}, errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -574,7 +590,7 @@ func (sys *IAMSys) InfoPolicy(policyName string) (iampolicy.Policy, error) {
|
|||
|
||||
// ListPolicies - lists all canned policies.
|
||||
func (sys *IAMSys) ListPolicies() (map[string]iampolicy.Policy, error) {
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -595,7 +611,7 @@ func (sys *IAMSys) ListPolicies() (map[string]iampolicy.Policy, error) {
|
|||
|
||||
// SetPolicy - sets a new name policy.
|
||||
func (sys *IAMSys) SetPolicy(policyName string, p iampolicy.Policy) error {
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -616,7 +632,7 @@ func (sys *IAMSys) SetPolicy(policyName string, p iampolicy.Policy) error {
|
|||
|
||||
// DeleteUser - delete user (only for long-term users not STS users).
|
||||
func (sys *IAMSys) DeleteUser(accessKey string) error {
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -665,13 +681,14 @@ func (sys *IAMSys) DeleteUser(accessKey string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// returns comma separated policy string, from an input policy
|
||||
// after validating if there are any current policies which exist
|
||||
// on MinIO corresponding to the input.
|
||||
func (sys *IAMSys) currentPolicies(policyName string) string {
|
||||
if sys.store == nil {
|
||||
// CurrentPolicies - returns comma separated policy string, from
|
||||
// an input policy after validating if there are any current
|
||||
// policies which exist on MinIO corresponding to the input.
|
||||
func (sys *IAMSys) CurrentPolicies(policyName string) string {
|
||||
if !sys.Initialized() {
|
||||
return ""
|
||||
}
|
||||
|
||||
sys.store.rlock()
|
||||
defer sys.store.runlock()
|
||||
|
||||
|
@ -688,7 +705,7 @@ func (sys *IAMSys) currentPolicies(policyName string) string {
|
|||
|
||||
// SetTempUser - set temporary user credentials, these credentials have an expiry.
|
||||
func (sys *IAMSys) SetTempUser(accessKey string, cred auth.Credentials, policyName string) error {
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -737,7 +754,7 @@ func (sys *IAMSys) SetTempUser(accessKey string, cred auth.Credentials, policyNa
|
|||
|
||||
// ListUsers - list all users.
|
||||
func (sys *IAMSys) ListUsers() (map[string]madmin.UserInfo, error) {
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -773,7 +790,7 @@ func (sys *IAMSys) ListUsers() (map[string]madmin.UserInfo, error) {
|
|||
|
||||
// IsTempUser - returns if given key is a temporary user.
|
||||
func (sys *IAMSys) IsTempUser(name string) (bool, error) {
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return false, errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -790,7 +807,7 @@ func (sys *IAMSys) IsTempUser(name string) (bool, error) {
|
|||
|
||||
// IsServiceAccount - returns if given key is a service account
|
||||
func (sys *IAMSys) IsServiceAccount(name string) (bool, string, error) {
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return false, "", errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -811,7 +828,7 @@ func (sys *IAMSys) IsServiceAccount(name string) (bool, string, error) {
|
|||
|
||||
// GetUserInfo - get info on a user.
|
||||
func (sys *IAMSys) GetUserInfo(name string) (u madmin.UserInfo, err error) {
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return u, errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -856,8 +873,7 @@ func (sys *IAMSys) GetUserInfo(name string) (u madmin.UserInfo, err error) {
|
|||
|
||||
// SetUserStatus - sets current user status, supports disabled or enabled.
|
||||
func (sys *IAMSys) SetUserStatus(accessKey string, status madmin.AccountStatus) error {
|
||||
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -902,8 +918,7 @@ func (sys *IAMSys) SetUserStatus(accessKey string, status madmin.AccountStatus)
|
|||
|
||||
// NewServiceAccount - create a new service account
|
||||
func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, sessionPolicy *iampolicy.Policy) (auth.Credentials, error) {
|
||||
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return auth.Credentials{}, errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -969,8 +984,7 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, ses
|
|||
|
||||
// ListServiceAccounts - lists all services accounts associated to a specific user
|
||||
func (sys *IAMSys) ListServiceAccounts(ctx context.Context, accessKey string) ([]string, error) {
|
||||
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -994,8 +1008,7 @@ func (sys *IAMSys) ListServiceAccounts(ctx context.Context, accessKey string) ([
|
|||
|
||||
// GetServiceAccountParent - gets information about a service account
|
||||
func (sys *IAMSys) GetServiceAccountParent(ctx context.Context, accessKey string) (string, error) {
|
||||
|
||||
if sys == nil || sys.store == nil {
|
||||
if !sys.Initialized() {
|
||||
return "", errServerNotInitialized
|
||||
}
|
||||
|
||||
|
@ -1011,8 +1024,7 @@ func (sys *IAMSys) GetServiceAccountParent(ctx context.Context, accessKey string
|
|||
|
||||
// DeleteServiceAccount - delete a service account
|
||||
func (sys *IAMSys) DeleteServiceAccount(ctx context.Context, accessKey string) error {
|
||||
|
||||
-	if sys == nil || sys.store == nil {
+	if !sys.Initialized() {
		return errServerNotInitialized
	}

@@ -1040,8 +1052,7 @@ func (sys *IAMSys) DeleteServiceAccount(ctx context.Context, accessKey string) e

// SetUser - set user credentials and policy.
func (sys *IAMSys) SetUser(accessKey string, uinfo madmin.UserInfo) error {
-	if sys == nil || sys.store == nil {
+	if !sys.Initialized() {
		return errServerNotInitialized
	}

@@ -1078,8 +1089,7 @@ func (sys *IAMSys) SetUser(accessKey string, uinfo madmin.UserInfo) error {

// SetUserSecretKey - sets user secret key
func (sys *IAMSys) SetUserSecretKey(accessKey string, secretKey string) error {
-	if sys == nil || sys.store == nil {
+	if !sys.Initialized() {
		return errServerNotInitialized
	}

@@ -1107,7 +1117,7 @@ func (sys *IAMSys) SetUserSecretKey(accessKey string, secretKey string) error {

// GetUser - get user credentials
func (sys *IAMSys) GetUser(accessKey string) (cred auth.Credentials, ok bool) {
-	if sys == nil || sys.store == nil {
+	if !sys.Initialized() {
		return cred, false
	}

@@ -1173,7 +1183,7 @@ func (sys *IAMSys) GetUser(accessKey string) (cred auth.Credentials, ok bool) {

// AddUsersToGroup - adds users to a group, creating the group if
// needed. No error if user(s) already are in the group.
func (sys *IAMSys) AddUsersToGroup(group string, members []string) error {
-	if sys == nil || sys.store == nil {
+	if !sys.Initialized() {
		return errServerNotInitialized
	}

@@ -1233,7 +1243,7 @@ func (sys *IAMSys) AddUsersToGroup(group string, members []string) error {

// RemoveUsersFromGroup - remove users from group. If no users are
// given, and the group is empty, deletes the group as well.
func (sys *IAMSys) RemoveUsersFromGroup(group string, members []string) error {
-	if sys == nil || sys.store == nil {
+	if !sys.Initialized() {
		return errServerNotInitialized
	}

@@ -1313,7 +1323,7 @@ func (sys *IAMSys) RemoveUsersFromGroup(group string, members []string) error {

// SetGroupStatus - enable/disabled a group
func (sys *IAMSys) SetGroupStatus(group string, enabled bool) error {
-	if sys == nil || sys.store == nil {
+	if !sys.Initialized() {
		return errServerNotInitialized
	}

@@ -1348,7 +1358,7 @@ func (sys *IAMSys) SetGroupStatus(group string, enabled bool) error {

// GetGroupDescription - builds up group description
func (sys *IAMSys) GetGroupDescription(group string) (gd madmin.GroupDesc, err error) {
-	if sys == nil || sys.store == nil {
+	if !sys.Initialized() {
		return gd, errServerNotInitialized
	}

@@ -1388,7 +1398,7 @@ func (sys *IAMSys) GetGroupDescription(group string) (gd madmin.GroupDesc, err e

// ListGroups - lists groups.
func (sys *IAMSys) ListGroups() (r []string, err error) {
-	if sys == nil || sys.store == nil {
+	if !sys.Initialized() {
		return r, errServerNotInitialized
	}

@@ -1411,7 +1421,7 @@ func (sys *IAMSys) ListGroups() (r []string, err error) {

// PolicyDBSet - sets a policy for a user or group in the PolicyDB.
func (sys *IAMSys) PolicyDBSet(name, policy string, isGroup bool) error {
-	if sys == nil || sys.store == nil {
+	if !sys.Initialized() {
		return errServerNotInitialized
	}

@@ -1477,7 +1487,7 @@ func (sys *IAMSys) policyDBSet(name, policyName string, userType IAMUserType, is

// be a member of multiple groups, this function returns an array of
// applicable policies (each group is mapped to at most one policy).
func (sys *IAMSys) PolicyDBGet(name string, isGroup bool) ([]string, error) {
-	if sys == nil || sys.store == nil {
+	if !sys.Initialized() {
		return nil, errServerNotInitialized
	}

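Every IAM entry point above makes the same substitution: the ad-hoc `sys == nil || sys.store == nil` guard becomes a single Initialized() predicate. A minimal sketch of the pattern, with simplified hypothetical types (the real IAMSys carries locks, caches, and an identity store):

package main

import (
	"errors"
	"fmt"
)

var errServerNotInitialized = errors.New("server not initialized")

type store interface{ load() error }

// IAMSys is a simplified stand-in for MinIO's IAM subsystem.
type IAMSys struct {
	store store
}

// Initialized reports whether the IAM subsystem is usable, folding the
// repeated nil checks into one place.
func (sys *IAMSys) Initialized() bool {
	return sys != nil && sys.store != nil
}

func (sys *IAMSys) ListGroups() ([]string, error) {
	if !sys.Initialized() {
		return nil, errServerNotInitialized
	}
	return []string{}, nil
}

func main() {
	var sys *IAMSys // nil receiver is handled by the predicate
	_, err := sys.ListGroups()
	fmt.Println(err) // server not initialized
}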
@@ -21,8 +21,8 @@ import (
	"net/http"
	"time"

-	jwtgo "github.com/dgrijalva/jwt-go"
-	jwtreq "github.com/dgrijalva/jwt-go/request"
+	jwtgo "github.com/golang-jwt/jwt"
+	jwtreq "github.com/golang-jwt/jwt/request"
	xjwt "github.com/minio/minio/cmd/jwt"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"

@@ -18,8 +18,8 @@ package jwt

// This file is a re-implementation of the original code here with some
// additional allocation tweaks reproduced using GODEBUG=allocfreetrace=1
-// original file https://github.com/dgrijalva/jwt-go/blob/master/parser.go
-// borrowed under MIT License https://github.com/dgrijalva/jwt-go/blob/master/LICENSE
+// original file https://github.com/golang-jwt/jwt/blob/master/parser.go
+// borrowed under MIT License https://github.com/golang-jwt/jwt/blob/master/LICENSE

import (
	"crypto"

@@ -31,7 +31,7 @@ import (
	"sync"
	"time"

-	jwtgo "github.com/dgrijalva/jwt-go"
+	jwtgo "github.com/golang-jwt/jwt"
	jsoniter "github.com/json-iterator/go"
)

@@ -108,7 +108,7 @@ func (c *StandardClaims) SetAccessKey(accessKey string) {
	c.AccessKey = accessKey
}

-// Valid - implements https://godoc.org/github.com/dgrijalva/jwt-go#Claims compatible
+// Valid - implements https://godoc.org/github.com/golang-jwt/jwt#Claims compatible
// claims interface, additionally validates "accessKey" fields.
func (c *StandardClaims) Valid() error {
	if err := c.StandardClaims.Valid(); err != nil {

@@ -153,7 +153,7 @@ func (c *MapClaims) SetAccessKey(accessKey string) {
	c.MapClaims["accessKey"] = accessKey
}

-// Valid - implements https://godoc.org/github.com/dgrijalva/jwt-go#Claims compatible
+// Valid - implements https://godoc.org/github.com/golang-jwt/jwt#Claims compatible
// claims interface, additionally validates "accessKey" fields.
func (c *MapClaims) Valid() error {
	if err := c.MapClaims.Valid(); err != nil {

@@ -18,15 +18,15 @@ package jwt

// This file is a re-implementation of the original code here with some
// additional allocation tweaks reproduced using GODEBUG=allocfreetrace=1
-// original file https://github.com/dgrijalva/jwt-go/blob/master/parser.go
-// borrowed under MIT License https://github.com/dgrijalva/jwt-go/blob/master/LICENSE
+// original file https://github.com/golang-jwt/jwt/blob/master/parser.go
+// borrowed under MIT License https://github.com/golang-jwt/jwt/blob/master/LICENSE

import (
	"fmt"
	"testing"
	"time"

-	"github.com/dgrijalva/jwt-go"
+	"github.com/golang-jwt/jwt"
)

var (
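The hunks above migrate from the unmaintained github.com/dgrijalva/jwt-go to its community fork github.com/golang-jwt/jwt, which keeps the same import-compatible API. A small self-contained example of signing and parsing a token with the fork (the secret is illustrative only):

package main

import (
	"fmt"

	"github.com/golang-jwt/jwt"
)

func main() {
	secret := []byte("illustrative-secret")

	// Sign a token with HMAC-SHA256, carrying an accessKey claim as the
	// MinIO claims types above do.
	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"accessKey": "minio"})
	s, err := tok.SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Parse it back; the keyfunc returns the verification key.
	parsed, err := jwt.Parse(s, func(t *jwt.Token) (interface{}, error) {
		return secret, nil
	})
	fmt.Println(parsed.Valid, err) // true <nil>
}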
@@ -215,6 +215,23 @@ func (l *localLocker) IsLocal() bool {
	return true
}

+func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (reply bool, err error) {
+	select {
+	case <-ctx.Done():
+		return false, ctx.Err()
+	default:
+		l.mutex.Lock()
+		defer l.mutex.Unlock()
+		if len(args.UID) != 0 {
+			return false, fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID)
+		}
+		for _, resource := range args.Resources {
+			delete(l.lockMap, resource) // Remove the lock (irrespective of write or read lock)
+		}
+		return true, nil
+	}
+}
+
func (l *localLocker) Expired(ctx context.Context, args dsync.LockArgs) (expired bool, err error) {
	select {
	case <-ctx.Done():

@@ -137,6 +137,11 @@ func (client *lockRESTClient) Expired(ctx context.Context, args dsync.LockArgs)
	return client.restCall(ctx, lockRESTMethodExpired, args)
}

+// ForceUnlock calls force unlock handler to forcibly unlock an active lock.
+func (client *lockRESTClient) ForceUnlock(ctx context.Context, args dsync.LockArgs) (reply bool, err error) {
+	return client.restCall(ctx, lockRESTMethodForceUnlock, args)
+}
+
func newLockAPI(endpoint Endpoint) dsync.NetLocker {
	if endpoint.IsLocal {
		return globalLockServers[endpoint]

@@ -162,6 +167,7 @@ func newlockRESTClient(endpoint Endpoint) *lockRESTClient {

	trFn := newInternodeHTTPTransport(tlsConfig, rest.DefaultTimeout)
	restClient := rest.NewClient(serverURL, trFn, newAuthToken)
+	restClient.ExpectTimeouts = true
	restClient.HealthCheckFn = func() bool {
		ctx, cancel := context.WithTimeout(GlobalContext, restClient.HealthCheckTimeout)
		// Instantiate a new rest client for healthcheck

@@ -27,12 +27,13 @@ const (
)

const (
-	lockRESTMethodHealth  = "/health"
-	lockRESTMethodLock    = "/lock"
-	lockRESTMethodRLock   = "/rlock"
-	lockRESTMethodUnlock  = "/unlock"
-	lockRESTMethodRUnlock = "/runlock"
-	lockRESTMethodExpired = "/expired"
+	lockRESTMethodHealth      = "/health"
+	lockRESTMethodLock        = "/lock"
+	lockRESTMethodRLock       = "/rlock"
+	lockRESTMethodUnlock      = "/unlock"
+	lockRESTMethodRUnlock     = "/runlock"
+	lockRESTMethodExpired     = "/expired"
+	lockRESTMethodForceUnlock = "/force-unlock"

	// lockRESTOwner represents owner UUID
	lockRESTOwner = "owner"

@@ -209,6 +209,25 @@ func (l *lockRESTServer) ExpiredHandler(w http.ResponseWriter, r *http.Request)
	}
}

+// ForceUnlockHandler - forcibly releases locks held on the requested resources.
+func (l *lockRESTServer) ForceUnlockHandler(w http.ResponseWriter, r *http.Request) {
+	if !l.IsValid(w, r) {
+		l.writeErrorResponse(w, errors.New("invalid request"))
+		return
+	}
+
+	args, err := getLockArgs(r)
+	if err != nil {
+		l.writeErrorResponse(w, err)
+		return
+	}
+
+	if _, err = l.ll.ForceUnlock(r.Context(), args); err != nil {
+		l.writeErrorResponse(w, err)
+		return
+	}
+}
+
// nameLockRequesterInfoPair is a helper type for lock maintenance
type nameLockRequesterInfoPair struct {
	name string

@@ -378,6 +397,7 @@ func registerLockRESTHandlers(router *mux.Router, endpointServerSets EndpointSer
	subrouter.Methods(http.MethodPost).Path(lockRESTVersionPrefix + lockRESTMethodRLock).HandlerFunc(httpTraceHdrs(lockServer.RLockHandler))
	subrouter.Methods(http.MethodPost).Path(lockRESTVersionPrefix + lockRESTMethodUnlock).HandlerFunc(httpTraceHdrs(lockServer.UnlockHandler))
	subrouter.Methods(http.MethodPost).Path(lockRESTVersionPrefix + lockRESTMethodRUnlock).HandlerFunc(httpTraceHdrs(lockServer.RUnlockHandler))
+	subrouter.Methods(http.MethodPost).Path(lockRESTVersionPrefix + lockRESTMethodForceUnlock).HandlerFunc(httpTraceHdrs(lockServer.ForceUnlockHandler))
	subrouter.Methods(http.MethodPost).Path(lockRESTVersionPrefix + lockRESTMethodExpired).HandlerFunc(httpTraceAll(lockServer.ExpiredHandler))

	globalLockServers[endpoint] = lockServer.ll

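The new /force-unlock endpoint wires the localLocker.ForceUnlock above through a REST client and server handler. A hedged, self-contained sketch of the semantics, using simplified stand-ins for MinIO's dsync types: an empty UID means "drop every lock on these resources", read or write alike.

package main

import (
	"context"
	"fmt"
)

// LockArgs and NetLocker are simplified stand-ins for MinIO's dsync types.
type LockArgs struct {
	UID       string   // must be empty for a force-unlock
	Resources []string // lock names to release
}

type NetLocker interface {
	ForceUnlock(ctx context.Context, args LockArgs) (bool, error)
}

// memLocker mimics localLocker's behaviour from the hunk above.
type memLocker struct{ lockMap map[string]bool }

func (m *memLocker) ForceUnlock(ctx context.Context, args LockArgs) (bool, error) {
	if args.UID != "" {
		return false, fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID)
	}
	for _, r := range args.Resources {
		delete(m.lockMap, r) // drop read or write locks alike
	}
	return true, nil
}

func main() {
	l := &memLocker{lockMap: map[string]bool{"bucket/object": true}}
	ok, err := l.ForceUnlock(context.Background(), LockArgs{Resources: []string{"bucket/object"}})
	fmt.Println(ok, err, l.lockMap) // true <nil> map[]
}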
@@ -38,9 +38,9 @@ var globalLockServers = make(map[Endpoint]*localLocker)

// RWLocker - locker interface to introduce GetRLock, RUnlock.
type RWLocker interface {
-	GetLock(timeout *dynamicTimeout) (timedOutErr error)
+	GetLock(ctx context.Context, timeout *dynamicTimeout) (timedOutErr error)
	Unlock()
-	GetRLock(timeout *dynamicTimeout) (timedOutErr error)
+	GetRLock(ctx context.Context, timeout *dynamicTimeout) (timedOutErr error)
	RUnlock()
}

@@ -139,15 +139,14 @@ func (n *nsLockMap) unlock(volume string, path string, readLock bool) {
type distLockInstance struct {
	rwMutex *dsync.DRWMutex
	opsID   string
-	ctx     context.Context
}

// Lock - block until write lock is taken or timeout has occurred.
-func (di *distLockInstance) GetLock(timeout *dynamicTimeout) (timedOutErr error) {
+func (di *distLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeout) (timedOutErr error) {
	lockSource := getSource(2)
	start := UTCNow()

-	if !di.rwMutex.GetLock(di.ctx, di.opsID, lockSource, dsync.Options{
+	if !di.rwMutex.GetLock(ctx, di.opsID, lockSource, dsync.Options{
		Timeout: timeout.Timeout(),
	}) {
		timeout.LogFailure()

@@ -163,11 +162,11 @@ func (di *distLockInstance) Unlock() {
}

// RLock - block until read lock is taken or timeout has occurred.
-func (di *distLockInstance) GetRLock(timeout *dynamicTimeout) (timedOutErr error) {
+func (di *distLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeout) (timedOutErr error) {
	lockSource := getSource(2)
	start := UTCNow()

-	if !di.rwMutex.GetRLock(di.ctx, di.opsID, lockSource, dsync.Options{
+	if !di.rwMutex.GetRLock(ctx, di.opsID, lockSource, dsync.Options{
		Timeout: timeout.Timeout(),
	}) {
		timeout.LogFailure()

@@ -184,7 +183,6 @@ func (di *distLockInstance) RUnlock() {

// localLockInstance - frontend/top-level interface for namespace locks.
type localLockInstance struct {
-	ctx    context.Context
	ns     *nsLockMap
	volume string
	paths  []string

@@ -194,26 +192,26 @@ type localLockInstance struct {
// NewNSLock - returns a lock instance for a given volume and
// path. The returned lockInstance object encapsulates the nsLockMap,
// volume, path and operation ID.
-func (n *nsLockMap) NewNSLock(ctx context.Context, lockers func() ([]dsync.NetLocker, string), volume string, paths ...string) RWLocker {
+func (n *nsLockMap) NewNSLock(lockers func() ([]dsync.NetLocker, string), volume string, paths ...string) RWLocker {
	opsID := mustGetUUID()
	if n.isDistErasure {
		drwmutex := dsync.NewDRWMutex(&dsync.Dsync{
			GetLockers: lockers,
		}, pathsJoinPrefix(volume, paths...)...)
-		return &distLockInstance{drwmutex, opsID, ctx}
+		return &distLockInstance{drwmutex, opsID}
	}
	sort.Strings(paths)
-	return &localLockInstance{ctx, n, volume, paths, opsID}
+	return &localLockInstance{n, volume, paths, opsID}
}

// Lock - block until write lock is taken or timeout has occurred.
-func (li *localLockInstance) GetLock(timeout *dynamicTimeout) (timedOutErr error) {
+func (li *localLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeout) (timedOutErr error) {
	lockSource := getSource(2)
	start := UTCNow()
-	readLock := false
+	const readLock = false
	var success []int
	for i, path := range li.paths {
-		if !li.ns.lock(li.ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
+		if !li.ns.lock(ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
			timeout.LogFailure()
			for _, sint := range success {
				li.ns.unlock(li.volume, li.paths[sint], readLock)

@@ -228,20 +226,20 @@ func (li *localLockInstance) GetLock(timeout *dynamicTimeout) (timedOutErr error

// Unlock - block until write lock is released.
func (li *localLockInstance) Unlock() {
-	readLock := false
+	const readLock = false
	for _, path := range li.paths {
		li.ns.unlock(li.volume, path, readLock)
	}
}

// RLock - block until read lock is taken or timeout has occurred.
-func (li *localLockInstance) GetRLock(timeout *dynamicTimeout) (timedOutErr error) {
+func (li *localLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeout) (timedOutErr error) {
	lockSource := getSource(2)
	start := UTCNow()
-	readLock := true
+	const readLock = true
	var success []int
	for i, path := range li.paths {
-		if !li.ns.lock(li.ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
+		if !li.ns.lock(ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
			timeout.LogFailure()
			for _, sint := range success {
				li.ns.unlock(li.volume, li.paths[sint], readLock)

@@ -256,7 +254,7 @@ func (li *localLockInstance) GetRLock(timeout *dynamicTimeout) (timedOutErr erro

// RUnlock - block until read lock is released.
func (li *localLockInstance) RUnlock() {
-	readLock := true
+	const readLock = true
	for _, path := range li.paths {
		li.ns.unlock(li.volume, path, readLock)
	}

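The namespace-lock change above removes the context captured at lock construction; each GetLock/GetRLock now takes the caller's context, so cancellation follows the request rather than the lock's lifetime. A hedged, self-contained sketch of that API shape (names and the timeout mechanics are illustrative, not MinIO's actual implementation):

package main

import (
	"context"
	"errors"
	"sync"
	"time"
)

type RWLocker interface {
	GetLock(ctx context.Context, timeout time.Duration) error
	Unlock()
}

type nsLock struct{ mu sync.Mutex }

func (l *nsLock) GetLock(ctx context.Context, timeout time.Duration) error {
	acquired := make(chan struct{})
	go func() {
		l.mu.Lock()
		close(acquired)
	}()
	select {
	case <-acquired:
		return nil
	case <-ctx.Done():
		go func() { <-acquired; l.mu.Unlock() }() // release if it lands later
		return ctx.Err()
	case <-time.After(timeout):
		go func() { <-acquired; l.mu.Unlock() }()
		return errors.New("lock acquisition timed out")
	}
}

func (l *nsLock) Unlock() { l.mu.Unlock() }

func main() {
	var lk RWLocker = &nsLock{}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := lk.GetLock(ctx, 500*time.Millisecond); err == nil {
		defer lk.Unlock()
		// ... critical section ...
	}
}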
@@ -138,21 +138,6 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a
	}()
}

-// ReloadFormat - calls ReloadFormat REST call on all peers.
-func (sys *NotificationSys) ReloadFormat(dryRun bool) []NotificationPeerErr {
-	ng := WithNPeers(len(sys.peerClients))
-	for idx, client := range sys.peerClients {
-		if client == nil {
-			continue
-		}
-		client := client
-		ng.Go(GlobalContext, func() error {
-			return client.ReloadFormat(dryRun)
-		}, idx, *client.host)
-	}
-	return ng.Wait()
-}
-
// DeletePolicy - deletes policy across all peers.
func (sys *NotificationSys) DeletePolicy(policyName string) []NotificationPeerErr {
	ng := WithNPeers(len(sys.peerClients))

@@ -31,6 +31,9 @@ const (
	// Block size used for all internal operations version 1.
	blockSizeV1 = 10 * humanize.MiByte

+	// Block size used for all internal operations version 2
+	blockSizeV2 = 1 * humanize.MiByte
+
	// Staging buffer read size for all internal operations version 1.
	readSizeV1 = 1 * humanize.MiByte

@@ -45,6 +45,8 @@ type ObjectOptions struct {
	UserDefined    map[string]string   // only set in case of POST/PUT operations
	PartNumber     int                 // only useful in case of GetObject/HeadObject
	CheckPrecondFn CheckPreconditionFn // only set during GetObject/HeadObject/CopyObjectPart preconditional valuation
+	// Used to verify if parent is an object.
+	ParentIsObject func(ctx context.Context, bucket, parent string) bool
}

// BucketOptions represents bucket options for ObjectLayer bucket operations

@@ -68,7 +70,7 @@ type ObjectLayer interface {
	SetDriveCount() int // Only implemented by erasure layer

	// Locking operations on object.
-	NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker
+	NewNSLock(bucket string, objects ...string) RWLocker

	// Storage operations.
	Shutdown(context.Context) error

@@ -114,7 +116,6 @@ type ObjectLayer interface {
	CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)

	// Healing operations.
-	ReloadFormat(ctx context.Context, dryRun bool) error
	HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error)
	HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, error)
	HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error)

@@ -58,6 +58,8 @@ const (
	minioMetaMultipartBucket = minioMetaBucket + SlashSeparator + mpartMetaPrefix
	// MinIO Tmp meta prefix.
	minioMetaTmpBucket = minioMetaBucket + "/tmp"
+	// MinIO tmp meta prefix for deleted objects.
+	minioMetaTmpDeletedBucket = minioMetaTmpBucket + "/.trash"
	// DNS separator (period), used for bucket name validation.
	dnsDelimiter = "."
	// On compressed files bigger than this;

@@ -454,19 +454,6 @@ func (client *peerRESTClient) DeleteBucketMetadata(bucket string) error {
	return nil
}

-// ReloadFormat - reload format on the peer node.
-func (client *peerRESTClient) ReloadFormat(dryRun bool) error {
-	values := make(url.Values)
-	values.Set(peerRESTDryRun, strconv.FormatBool(dryRun))
-
-	respBody, err := client.call(peerRESTMethodReloadFormat, values, nil, -1)
-	if err != nil {
-		return err
-	}
-	defer http.DrainBody(respBody)
-	return nil
-}
-
// cycleServerBloomFilter will cycle the bloom filter to start recording to index y if not already.
// The response will contain a bloom filter starting at index x up to, but not including index y.
// If y is 0, the response will not update y, but return the currently recorded information

@@ -50,7 +50,6 @@ const (
	peerRESTMethodLoadGroup             = "/loadgroup"
	peerRESTMethodStartProfiling        = "/startprofiling"
	peerRESTMethodDownloadProfilingData = "/downloadprofilingdata"
-	peerRESTMethodReloadFormat          = "/reloadformat"
	peerRESTMethodCycleBloom            = "/cyclebloom"
	peerRESTMethodTrace                 = "/trace"
	peerRESTMethodListen                = "/listen"

@@ -70,7 +69,6 @@ const (
	peerRESTIsGroup  = "is-group"
	peerRESTSignal   = "signal"
	peerRESTProfiler = "profiler"
-	peerRESTDryRun   = "dry-run"
	peerRESTTraceAll = "all"
	peerRESTTraceErr = "err"

@@ -577,46 +577,7 @@ func (s *peerRESTServer) LoadBucketMetadataHandler(w http.ResponseWriter, r *htt
	}
}

-// ReloadFormatHandler - Reload Format.
-func (s *peerRESTServer) ReloadFormatHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-
-	vars := mux.Vars(r)
-	dryRunString := vars[peerRESTDryRun]
-	if dryRunString == "" {
-		s.writeErrorResponse(w, errors.New("dry-run parameter is missing"))
-		return
-	}
-
-	var dryRun bool
-	switch strings.ToLower(dryRunString) {
-	case "true":
-		dryRun = true
-	case "false":
-		dryRun = false
-	default:
-		s.writeErrorResponse(w, errInvalidArgument)
-		return
-	}
-
-	objAPI := newObjectLayerFn()
-	if objAPI == nil {
-		s.writeErrorResponse(w, errServerNotInitialized)
-		return
-	}
-
-	err := objAPI.ReloadFormat(GlobalContext, dryRun)
-	if err != nil {
-		s.writeErrorResponse(w, err)
-		return
-	}
-	w.(http.Flusher).Flush()
-}
-
-// CycleServerBloomFilterHandler cycles bllom filter on server.
+// CycleServerBloomFilterHandler cycles bloom filter on server.
func (s *peerRESTServer) CycleServerBloomFilterHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		s.writeErrorResponse(w, errors.New("Invalid request"))

@@ -1047,7 +1008,6 @@ func registerPeerRESTHandlers(router *mux.Router) {

	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodStartProfiling).HandlerFunc(httpTraceAll(server.StartProfilingHandler)).Queries(restQueries(peerRESTProfiler)...)
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDownloadProfilingData).HandlerFunc(httpTraceHdrs(server.DownloadProfilingDataHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodReloadFormat).HandlerFunc(httpTraceHdrs(server.ReloadFormatHandler)).Queries(restQueries(peerRESTDryRun)...)
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodTrace).HandlerFunc(server.TraceHandler)
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodListen).HandlerFunc(httpTraceHdrs(server.ListenHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBackgroundHealStatus).HandlerFunc(server.BackgroundHealStatusHandler)

@@ -17,14 +17,19 @@
package cmd

import (
+	"bytes"
+	"encoding/json"
	"errors"
	"fmt"
+	"io"
	"net/http"
	"reflect"
	"strconv"
	"strings"
	"time"
+
+	"github.com/bcicen/jstream"
+	"github.com/minio/minio-go/v7/pkg/set"
)

// startWithConds - map which indicates if a given condition supports starts-with policy operator

@@ -110,8 +115,45 @@ type PostPolicyForm struct {
	}
}

+// implemented to ensure that duplicate keys in JSON
+// are merged together into a single JSON key, also
+// to remove any extraneous JSON bodies.
+//
+// Go stdlib doesn't support parsing JSON with duplicate
+// keys, so we need to use this technique to merge the
+// keys.
+func sanitizePolicy(r io.Reader) (io.Reader, error) {
+	var buf bytes.Buffer
+	e := json.NewEncoder(&buf)
+	d := jstream.NewDecoder(r, 0).ObjectAsKVS()
+	sset := set.NewStringSet()
+	for mv := range d.Stream() {
+		var kvs jstream.KVS
+		if mv.ValueType == jstream.Object {
+			// This is a JSON object type (that preserves key order)
+			kvs = mv.Value.(jstream.KVS)
+			for _, kv := range kvs {
+				if sset.Contains(kv.Key) {
+					// Reject duplicate conditions or expiration.
+					return nil, fmt.Errorf("input policy has multiple %s, please fix your client code", kv.Key)
+				}
+				sset.Add(kv.Key)
+			}
+			e.Encode(kvs)
+		}
+	}
+	return &buf, d.Err()
+}
+
// parsePostPolicyForm - Parse JSON policy string into typed PostPolicyForm structure.
-func parsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) {
+func parsePostPolicyForm(r io.Reader) (PostPolicyForm, error) {
+	reader, err := sanitizePolicy(r)
+	if err != nil {
+		return PostPolicyForm{}, err
+	}
+
+	d := json.NewDecoder(reader)
+
	// Convert po into interfaces and
	// perform strict type conversion using reflection.
	var rawPolicy struct {

@@ -119,9 +161,9 @@ func parsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) {
		Conditions []interface{} `json:"conditions"`
	}

-	err := json.Unmarshal([]byte(policy), &rawPolicy)
-	if err != nil {
-		return ppf, err
+	d.DisallowUnknownFields()
+	if err := d.Decode(&rawPolicy); err != nil {
+		return PostPolicyForm{}, err
	}

	parsedPolicy := PostPolicyForm{}

@@ -129,7 +171,7 @@ func parsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) {
	// Parse expiry time.
	parsedPolicy.Expiration, err = time.Parse(time.RFC3339Nano, rawPolicy.Expiration)
	if err != nil {
-		return ppf, err
+		return PostPolicyForm{}, err
	}

	// Parse conditions.

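The sanitizer above leans on the jstream decoder because encoding/json silently keeps the last value when a key repeats, which is exactly what a malicious POST policy exploits. For illustration, here is a stdlib-only sketch of the same duplicate-top-level-key rejection using json.Decoder's token stream (this is not the code MinIO ships, just the underlying idea):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// rejectDuplicateTopLevelKeys walks the token stream and errors out when a
// top-level object key repeats, e.g. two "conditions" members.
func rejectDuplicateTopLevelKeys(r string) error {
	dec := json.NewDecoder(strings.NewReader(r))
	tok, err := dec.Token() // must be '{'
	if err != nil {
		return err
	}
	if d, ok := tok.(json.Delim); !ok || d != '{' {
		return fmt.Errorf("expected JSON object, got %v", tok)
	}
	seen := map[string]bool{}
	for dec.More() {
		keyTok, err := dec.Token()
		if err != nil {
			return err
		}
		key := keyTok.(string) // object keys are always strings
		if seen[key] {
			return fmt.Errorf("input policy has multiple %s, please fix your client code", key)
		}
		seen[key] = true
		// Skip the value without descending into it.
		var skip json.RawMessage
		if err := dec.Decode(&skip); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	bad := `{"expiration":"2021-03-22T09:16:21.310Z","expiration":"2021-03-22T09:16:21.310Z","conditions":[]}`
	fmt.Println(rejectDuplicateTopLevelKeys(bad)) // duplicate expiration -> error
}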
@@ -17,14 +17,66 @@
package cmd

import (
+	"bytes"
	"encoding/base64"
	"fmt"
	"net/http"
+	"strings"
	"testing"

	minio "github.com/minio/minio-go/v7"
)

+func TestParsePostPolicyForm(t *testing.T) {
+	testCases := []struct {
+		policy  string
+		success bool
+	}{
+		// missing expiration, will fail.
+		{
+			policy:  `{"conditions":[["eq","$bucket","asdf"],["eq","$key","hello.txt"]],"conditions":[["eq","$success_action_status","201"],["eq","$Content-Type","plain/text"],["eq","$success_action_status","201"],["eq","$x-amz-algorithm","AWS4-HMAC-SHA256"],["eq","$x-amz-credential","Q3AM3UQ867SPQQA43P2F/20210315/us-east-1/s3/aws4_request"],["eq","$x-amz-date","20210315T091621Z"]]}`,
+			success: false,
+		},
+		// invalid json.
+		{
+			policy:  `{"conditions":[["eq","$bucket","asdf"],["eq","$key","hello.txt"]],"conditions":[["eq","$success_action_status","201"],["eq","$Content-Type","plain/text"],["eq","$success_action_status","201"],["eq","$x-amz-algorithm","AWS4-HMAC-SHA256"],["eq","$x-amz-credential","Q3AM3UQ867SPQQA43P2F/20210315/us-east-1/s3/aws4_request"],["eq","$x-amz-date","20210315T091621Z"]]`,
+			success: false,
+		},
+		// duplicate 'expiration' reject
+		{
+			policy: `{"expiration":"2021-03-22T09:16:21.310Z","expiration":"2021-03-22T09:16:21.310Z","conditions":[["eq","$bucket","evil"],["eq","$key","hello.txt"],["eq","$success_action_status","201"],["eq","$Content-Type","plain/text"],["eq","$success_action_status","201"],["eq","$x-amz-algorithm","AWS4-HMAC-SHA256"],["eq","$x-amz-credential","Q3AM3UQ867SPQQA43P2F/20210315/us-east-1/s3/aws4_request"],["eq","$x-amz-date","20210315T091621Z"]]}`,
+		},
+		// duplicate '$bucket' reject
+		{
+			policy:  `{"expiration":"2021-03-22T09:16:21.310Z","conditions":[["eq","$bucket","good"],["eq","$key","hello.txt"]],"conditions":[["eq","$bucket","evil"],["eq","$key","hello.txt"],["eq","$success_action_status","201"],["eq","$Content-Type","plain/text"],["eq","$success_action_status","201"],["eq","$x-amz-algorithm","AWS4-HMAC-SHA256"],["eq","$x-amz-credential","Q3AM3UQ867SPQQA43P2F/20210315/us-east-1/s3/aws4_request"],["eq","$x-amz-date","20210315T091621Z"]]}`,
+			success: false,
+		},
+		// duplicate conditions, reject
+		{
+			policy:  `{"expiration":"2021-03-22T09:16:21.310Z","conditions":[["eq","$bucket","asdf"],["eq","$key","hello.txt"]],"conditions":[["eq","$success_action_status","201"],["eq","$Content-Type","plain/text"],["eq","$success_action_status","201"],["eq","$x-amz-algorithm","AWS4-HMAC-SHA256"],["eq","$x-amz-credential","Q3AM3UQ867SPQQA43P2F/20210315/us-east-1/s3/aws4_request"],["eq","$x-amz-date","20210315T091621Z"]]}`,
+			success: false,
+		},
+		// no duplicates, shall be parsed properly.
+		{
+			policy:  `{"expiration":"2021-03-27T20:35:28.458Z","conditions":[["eq","$bucket","testbucket"],["eq","$key","wtf.txt"],["eq","$x-amz-date","20210320T203528Z"],["eq","$x-amz-algorithm","AWS4-HMAC-SHA256"],["eq","$x-amz-credential","Q3AM3UQ867SPQQA43P2F/20210320/us-east-1/s3/aws4_request"]]}`,
+			success: true,
+		},
+	}
+
+	for _, testCase := range testCases {
+		testCase := testCase
+		t.Run("", func(t *testing.T) {
+			_, err := parsePostPolicyForm(strings.NewReader(testCase.policy))
+			if testCase.success && err != nil {
+				t.Errorf("Expected success but failed with %s", err)
+			}
+			if !testCase.success && err == nil {
+				t.Errorf("Expected failed but succeeded")
+			}
+		})
+	}
+}
+
// Test Post Policy parsing and checking conditions
func TestPostPolicyForm(t *testing.T) {
	pp := minio.NewPostPolicy()

@@ -94,7 +146,7 @@ func TestPostPolicyForm(t *testing.T) {
		t.Fatal(err)
	}

-	postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
+	postPolicyForm, err := parsePostPolicyForm(bytes.NewReader(policyBytes))
	if err != nil {
		t.Fatal(err)
	}

@@ -23,7 +23,6 @@ import (
	"net/http"
	"net/url"
	"os"
-	"path"
	"sync"
	"time"

@@ -177,7 +176,7 @@ func IsServerResolvable(endpoint Endpoint) error {
	serverURL := &url.URL{
		Scheme: endpoint.Scheme,
		Host:   endpoint.Host,
-		Path:   path.Join(healthCheckPathPrefix, healthCheckLivenessPath),
+		Path:   pathJoin(healthCheckPathPrefix, healthCheckLivenessPath),
	}

	var tlsConfig *tls.Config

@@ -195,9 +194,9 @@ func IsServerResolvable(endpoint Endpoint) error {
		&http.Transport{
			Proxy:                 http.ProxyFromEnvironment,
			DialContext:           xhttp.NewCustomDialContext(3 * time.Second),
-			ResponseHeaderTimeout: 5 * time.Second,
-			TLSHandshakeTimeout:   5 * time.Second,
-			ExpectContinueTimeout: 5 * time.Second,
+			ResponseHeaderTimeout: 3 * time.Second,
+			TLSHandshakeTimeout:   3 * time.Second,
+			ExpectContinueTimeout: 3 * time.Second,
			TLSClientConfig:       tlsConfig,
			// Go net/http automatically unzip if content-type is
			// gzip disable this feature, as we are always interested

@@ -207,23 +206,29 @@ func IsServerResolvable(endpoint Endpoint) error {
	}
	defer httpClient.CloseIdleConnections()

-	ctx, cancel := context.WithTimeout(GlobalContext, 5*time.Second)
-	defer cancel()
+	ctx, cancel := context.WithTimeout(GlobalContext, 3*time.Second)

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, serverURL.String(), nil)
	if err != nil {
+		cancel()
		return err
	}

	resp, err := httpClient.Do(req)
+	cancel()
	if err != nil {
		return err
	}
-	defer xhttp.DrainBody(resp.Body)
+	xhttp.DrainBody(resp.Body)

	if resp.StatusCode != http.StatusOK {
		return StorageErr(resp.Status)
	}

+	if resp.Header.Get(xhttp.MinIOServerStatus) == unavailable {
+		return StorageErr(unavailable)
+	}
+
	return nil
}

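The hunk above tightens the peer-resolvability probe: every transport timeout drops from 5 s to 3 s and the context is cancelled explicitly once the response arrives instead of lingering behind a defer. A small, self-contained sketch of that probe pattern; the URL is illustrative and the header check from the diff is omitted:

package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"
)

func probe(url string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		cancel()
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		cancel()
		return err
	}
	// Drain so the connection can be reused by the pool, then release
	// the timer promptly instead of waiting on a defer.
	io.Copy(io.Discard, resp.Body)
	resp.Body.Close()
	cancel()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("endpoint not ready: %s", resp.Status)
	}
	return nil
}

func main() {
	fmt.Println(probe("http://localhost:9000/minio/health/live"))
}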
@@ -28,6 +28,7 @@ import (
	"time"

	xhttp "github.com/minio/minio/cmd/http"
+	"github.com/minio/minio/cmd/logger"
	xnet "github.com/minio/minio/pkg/net"
)

@@ -75,6 +76,10 @@ type Client struct {
	// Should only be modified before any calls are made.
	MaxErrResponseSize int64

+	// ExpectTimeouts indicates if context timeouts are expected.
+	// This will not mark the client offline in these cases.
+	ExpectTimeouts bool
+
	httpClient   *http.Client
	url          *url.URL
	newAuthToken func(audience string) string

@@ -112,7 +117,8 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
-		if xnet.IsNetworkOrHostDown(err) {
+		if xnet.IsNetworkOrHostDown(err, c.ExpectTimeouts) {
+			logger.LogIf(ctx, err, "marking disk offline")
			c.MarkOffline()
		}
		return nil, &NetworkError{err}

@@ -141,7 +147,8 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod
		// Limit the ReadAll(), just in case, because of a bug, the server responds with large data.
		b, err := ioutil.ReadAll(io.LimitReader(resp.Body, c.MaxErrResponseSize))
		if err != nil {
-			if xnet.IsNetworkOrHostDown(err) {
+			if xnet.IsNetworkOrHostDown(err, c.ExpectTimeouts) {
+				logger.LogIf(ctx, err, "marking disk offline")
				c.MarkOffline()
			}
			return nil, err

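The two-argument IsNetworkOrHostDown call is the crux here: lock RPCs are long-polling, so a context deadline on them is routine and must not mark the peer offline. A hedged, illustrative re-implementation of that classification (not pkg/net's actual code):

package main

import (
	"context"
	"errors"
	"fmt"
	"net"
)

func isNetworkOrHostDown(err error, expectTimeouts bool) bool {
	if err == nil {
		return false
	}
	// When the caller expects timeouts (e.g. long-polling lock calls),
	// a deadline alone is not evidence the host is down.
	if expectTimeouts && errors.Is(err, context.DeadlineExceeded) {
		return false
	}
	var ne net.Error
	if errors.As(err, &ne) && ne.Timeout() {
		return true
	}
	return errors.Is(err, context.DeadlineExceeded)
}

func main() {
	err := context.DeadlineExceeded
	fmt.Println(isNetworkOrHostDown(err, true))  // false: timeout was expected
	fmt.Println(isNetworkOrHostDown(err, false)) // true: unexpected timeout
}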
@@ -39,6 +39,10 @@ func registerDistErasureRouters(router *mux.Router, endpointServerSets EndpointS

// List of some generic handlers which are applied for all incoming requests.
var globalHandlers = []MiddlewareFunc{
+	// add redirect handler to redirect
+	// requests when object layer is not
+	// initialized.
+	setRedirectHandler,
	// set x-amz-request-id header.
	addCustomHeaders,
	// set HTTP security headers such as Content-Security-Policy.

@@ -199,9 +199,6 @@ func initServer(ctx context.Context, newObject ObjectLayer) error {
	globalObjectAPI = newObject
	globalObjLayerMutex.Unlock()

-	// Initialize IAM store
-	globalIAMSys.InitStore(newObject)
-
	// Create cancel context to control 'newRetryTimer' go routine.
	retryCtx, cancel := context.WithCancel(ctx)

@@ -213,7 +210,7 @@ func initServer(ctx context.Context, newObject ObjectLayer) error {
	// at a given time, this big transaction lock ensures this
	// appropriately. This is also true for rotation of encrypted
	// content.
-	txnLk := newObject.NewNSLock(retryCtx, minioMetaBucket, minioConfigPrefix+"/transaction.lock")
+	txnLk := newObject.NewNSLock(minioMetaBucket, minioConfigPrefix+"/transaction.lock")

	// allocate dynamic timeout once before the loop
	configLockTimeout := newDynamicTimeout(5*time.Second, 3*time.Second)

@@ -235,7 +232,7 @@ func initServer(ctx context.Context, newObject ObjectLayer) error {
	for range retry.NewTimerWithJitter(retryCtx, 500*time.Millisecond, time.Second, retry.MaxJitter) {
		// let one of the server acquire the lock, if not let them timeout.
		// which shall be retried again by this loop.
-		if err = txnLk.GetLock(configLockTimeout); err != nil {
+		if err = txnLk.GetLock(retryCtx, configLockTimeout); err != nil {
			logger.Info("Waiting for all MinIO sub-systems to be initialized.. trying to acquire lock")
			continue
		}

@@ -338,6 +335,9 @@ func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) {
		logger.LogIf(ctx, fmt.Errorf("Unable to initialize config, some features may be missing %w", err))
	}

+	// Initialize IAM store
+	globalIAMSys.InitStore(newObject)
+
	// Populate existing buckets to the etcd backend
	if globalDNSConfig != nil {
		// Background this operation.

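The startup loop above retries the shared transaction lock with jitter until one server wins, under the retry context so shutdown cancels the wait. An illustrative sketch of that loop shape; the helper names are stand-ins, not MinIO's retry package:

package main

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"time"
)

func acquireWithRetry(ctx context.Context, tryLock func(context.Context) error) error {
	for {
		if err := tryLock(ctx); err == nil {
			return nil
		}
		// Sleep 500ms..1s with jitter; bail out if ctx is cancelled.
		d := 500*time.Millisecond + time.Duration(rand.Int63n(int64(500*time.Millisecond)))
		select {
		case <-time.After(d):
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	attempts := 0
	tryLock := func(context.Context) error {
		if attempts++; attempts < 3 {
			return errors.New("lock busy")
		}
		return nil
	}
	fmt.Println(acquireWithRetry(context.Background(), tryLock), "after", attempts, "attempts")
}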
@@ -2440,7 +2440,7 @@ func (s *TestSuiteCommon) TestObjectMultipartListError(c *check) {
	c.Assert(err, nil)
	// Since max-keys parameter in the ListMultipart request set to invalid value of -2,
	// its expected to fail with error message "InvalidArgument".
-	verifyError(c, response4, "InvalidArgument", "Argument max-parts must be an integer between 0 and 2147483647", http.StatusBadRequest)
+	verifyError(c, response4, "InvalidArgument", "Part number must be an integer between 1 and 10000, inclusive", http.StatusBadRequest)
}

// TestObjectValidMD5 - First uploads an object with a valid Content-Md5 header and verifies the status,

@@ -75,20 +75,18 @@ const (

// AWS S3 Signature V2 calculation rule is give here:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign
-func doesPolicySignatureV2Match(formValues http.Header) APIErrorCode {
-	cred := globalActiveCred
+func doesPolicySignatureV2Match(formValues http.Header) (auth.Credentials, APIErrorCode) {
	accessKey := formValues.Get(xhttp.AmzAccessKeyID)
	cred, _, s3Err := checkKeyValid(accessKey)
	if s3Err != ErrNone {
-		return s3Err
+		return cred, s3Err
	}
	policy := formValues.Get("Policy")
	signature := formValues.Get(xhttp.AmzSignatureV2)
	if !compareSignatureV2(signature, calculateSignatureV2(policy, cred.SecretKey)) {
-		return ErrSignatureDoesNotMatch
+		return cred, ErrSignatureDoesNotMatch
	}
-	return ErrNone
+	return cred, ErrNone
}

// Escape encodedQuery string into unescaped list of query params, returns error

@@ -265,7 +265,7 @@ func TestDoesPolicySignatureV2Match(t *testing.T) {
	formValues.Set("Awsaccesskeyid", test.accessKey)
	formValues.Set("Signature", test.signature)
	formValues.Set("Policy", test.policy)
-	errCode := doesPolicySignatureV2Match(formValues)
+	_, errCode := doesPolicySignatureV2Match(formValues)
	if errCode != test.errCode {
		t.Fatalf("(%d) expected to get %s, instead got %s", i+1, niceError(test.errCode), niceError(errCode))
	}

@@ -37,6 +37,7 @@ import (

	"github.com/minio/minio-go/v7/pkg/s3utils"
	xhttp "github.com/minio/minio/cmd/http"
+	"github.com/minio/minio/pkg/auth"
	sha256 "github.com/minio/sha256-simd"
)

@@ -148,7 +149,7 @@ func getSignature(signingKey []byte, stringToSign string) string {
}

// Check to see if Policy is signed correctly.
-func doesPolicySignatureMatch(formValues http.Header) APIErrorCode {
+func doesPolicySignatureMatch(formValues http.Header) (auth.Credentials, APIErrorCode) {
	// For SignV2 - Signature field will be valid
	if _, ok := formValues["Signature"]; ok {
		return doesPolicySignatureV2Match(formValues)

@@ -168,19 +169,19 @@ func compareSignatureV4(sig1, sig2 string) bool {
// doesPolicySignatureMatch - Verify query headers with post policy
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
// returns ErrNone if the signature matches.
-func doesPolicySignatureV4Match(formValues http.Header) APIErrorCode {
+func doesPolicySignatureV4Match(formValues http.Header) (auth.Credentials, APIErrorCode) {
	// Server region.
	region := globalServerRegion

	// Parse credential tag.
	credHeader, s3Err := parseCredentialHeader("Credential="+formValues.Get(xhttp.AmzCredential), region, serviceS3)
	if s3Err != ErrNone {
-		return s3Err
+		return auth.Credentials{}, s3Err
	}

	cred, _, s3Err := checkKeyValid(credHeader.accessKey)
	if s3Err != ErrNone {
-		return s3Err
+		return cred, s3Err
	}

	// Get signing key.

@@ -191,11 +192,11 @@ func doesPolicySignatureV4Match(formValues http.Header) APIErrorCode {

	// Verify signature.
	if !compareSignatureV4(newSignature, formValues.Get(xhttp.AmzSignature)) {
-		return ErrSignatureDoesNotMatch
+		return cred, ErrSignatureDoesNotMatch
	}

	// Success.
-	return ErrNone
+	return cred, ErrNone
}

// doesPresignedSignatureMatch - Verify query headers with presigned signature

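Both signature verifiers now hand back the credentials they actually validated, so the POST-policy handler can act as that user instead of implicitly assuming a global credential. A minimal sketch of the new contract, with simplified stand-in types:

package main

import "fmt"

type Credentials struct{ AccessKey, SecretKey string }

type APIErrorCode int

const (
	ErrNone APIErrorCode = iota
	ErrSignatureDoesNotMatch
)

// doesPolicySignatureMatch returns the verified credentials alongside the
// error code, mirroring the signature change in the hunks above.
func doesPolicySignatureMatch(sig, expected string, cred Credentials) (Credentials, APIErrorCode) {
	if sig != expected {
		return cred, ErrSignatureDoesNotMatch
	}
	return cred, ErrNone
}

func main() {
	cred := Credentials{AccessKey: "user1", SecretKey: "secret"}
	got, code := doesPolicySignatureMatch("abc", "abc", cred)
	if code == ErrNone {
		fmt.Println("authorize POST upload as", got.AccessKey) // not the root user
	}
}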
@@ -84,7 +84,7 @@ func TestDoesPolicySignatureMatch(t *testing.T) {

	// Run each test case individually.
	for i, testCase := range testCases {
-		code := doesPolicySignatureMatch(testCase.form)
+		_, code := doesPolicySignatureMatch(testCase.form)
		if code != testCase.expected {
			t.Errorf("(%d) expected to get %s, instead got %s", i, niceError(testCase.expected), niceError(code))
		}

@@ -109,7 +109,7 @@ func newFileInfo(object string, dataBlocks, parityBlocks int) (fi FileInfo) {
		Algorithm:    erasureAlgorithm,
		DataBlocks:   dataBlocks,
		ParityBlocks: parityBlocks,
-		BlockSize:    blockSizeV1,
+		BlockSize:    blockSizeV2,
		Distribution: hashOrder(object, dataBlocks+parityBlocks),
	}
	return fi

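New objects are now erasure-coded with blockSizeV2 (1 MiB, added in the constants hunk earlier) instead of blockSizeV1 (10 MiB), shrinking the per-block shard each drive stores. A quick worked sketch of the shard-size arithmetic using ceil division; the exact helper names in the repo may differ:

package main

import "fmt"

const (
	MiByte      = 1 << 20
	blockSizeV1 = 10 * MiByte
	blockSizeV2 = 1 * MiByte
)

// ceilFrac computes ceil(num/den).
func ceilFrac(num, den int64) int64 { return (num + den - 1) / den }

func main() {
	dataBlocks := int64(12) // e.g. 12 data + 4 parity on a 16-drive set
	fmt.Println("v1 shard per block:", ceilFrac(blockSizeV1, dataBlocks), "bytes")
	fmt.Println("v2 shard per block:", ceilFrac(blockSizeV2, dataBlocks), "bytes")
	// v1: 873814 bytes (~853 KiB); v2: 87382 bytes (~85 KiB) per drive per block.
}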
@@ -49,7 +49,7 @@ type StorageAPI interface {
	DeleteVol(ctx context.Context, volume string, forceDelete bool) (err error)

	// WalkVersions in sorted order directly on disk.
-	WalkVersions(ctx context.Context, volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfoVersions, error)
+	WalkVersions(ctx context.Context, volume, dirPath, marker string, recursive bool, healing bool, endWalkCh <-chan struct{}) (chan FileInfoVersions, error)
	// Walk in sorted order directly on disk.
	Walk(ctx context.Context, volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfo, error)
	// Walk in sorted order directly on disk.

@@ -29,6 +29,7 @@ import (
	"path"
	"strconv"
	"strings"
+	"sync"

	"github.com/minio/minio/cmd/http"
	xhttp "github.com/minio/minio/cmd/http"

@@ -42,7 +43,7 @@ func isNetworkError(err error) bool {
		return false
	}
	if nerr, ok := err.(*rest.NetworkError); ok {
-		return xnet.IsNetworkOrHostDown(nerr.Err)
+		return xnet.IsNetworkOrHostDown(nerr.Err, false)
	}
	return false
}

@@ -167,18 +168,34 @@ func (client *storageRESTClient) Healing() bool {
}

func (client *storageRESTClient) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) (dataUsageCache, error) {
-	b := cache.serialize()
-	respBody, err := client.call(ctx, storageRESTMethodCrawlAndGetDataUsage, url.Values{}, bytes.NewBuffer(b), int64(len(b)))
+	pr, pw := io.Pipe()
+	go func() {
+		pw.CloseWithError(cache.serializeTo(pw))
+	}()
+	defer pr.Close()
+	respBody, err := client.call(ctx, storageRESTMethodCrawlAndGetDataUsage, url.Values{}, pr, -1)
	defer http.DrainBody(respBody)
	if err != nil {
		return cache, err
	}
-	reader, err := waitForHTTPResponse(respBody)
-	if err != nil {
-		return cache, err
-	}
-	var newCache dataUsageCache
-	return newCache, newCache.deserialize(reader)
+
+	var wg sync.WaitGroup
+	var newCache dataUsageCache
+	var decErr error
+	pr, pw = io.Pipe()
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		decErr = newCache.deserialize(pr)
+		pr.CloseWithError(err)
+	}()
+	err = waitForHTTPStream(respBody, pw)
+	pw.CloseWithError(err)
+	if err != nil {
+		return cache, err
+	}
+	wg.Wait()
+	return newCache, decErr
}

func (client *storageRESTClient) GetDiskID() (string, error) {

@@ -467,12 +484,13 @@ func (client *storageRESTClient) WalkSplunk(ctx context.Context, volume, dirPath
	return ch, nil
}

-func (client *storageRESTClient) WalkVersions(ctx context.Context, volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfoVersions, error) {
+func (client *storageRESTClient) WalkVersions(ctx context.Context, volume, dirPath, marker string, recursive bool, healing bool, endWalkCh <-chan struct{}) (chan FileInfoVersions, error) {
	values := make(url.Values)
	values.Set(storageRESTVolume, volume)
	values.Set(storageRESTDirPath, dirPath)
	values.Set(storageRESTMarkerPath, marker)
	values.Set(storageRESTRecursive, strconv.FormatBool(recursive))
+	values.Set(storageRESTHealing, strconv.FormatBool(healing))
	respBody, err := client.call(ctx, storageRESTMethodWalkVersions, values, nil, -1)
	if err != nil {
		return nil, err

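The crawl RPC now streams in both directions with io.Pipe instead of materializing the serialized cache in memory first. A compact, self-contained sketch of the pipe pattern on the request side; gob stands in for the cache's real serializer:

package main

import (
	"encoding/gob"
	"fmt"
	"io"
)

type cache struct{ Entries map[string]int64 }

// serializeTo streams the cache without buffering it whole.
func (c *cache) serializeTo(w io.Writer) error {
	return gob.NewEncoder(w).Encode(c)
}

func main() {
	c := cache{Entries: map[string]int64{"bucket/obj": 42}}

	pr, pw := io.Pipe()
	go func() {
		// Propagate the serializer's error (or nil) to the reader side.
		pw.CloseWithError(c.serializeTo(pw))
	}()

	// Here an HTTP client would read pr as the request body; we just drain it.
	n, err := io.Copy(io.Discard, pr)
	fmt.Println(n, "bytes streamed, err =", err)
}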
@@ -72,6 +72,7 @@ const (
	storageRESTCount      = "count"
	storageRESTMarkerPath = "marker"
	storageRESTRecursive  = "recursive"
+	storageRESTHealing    = "healing"
	storageRESTBitrotAlgo = "bitrot-algo"
	storageRESTBitrotHash = "bitrot-hash"
	storageRESTDiskID     = "disk-id"

|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"encoding/gob"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
|
@ -31,7 +32,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
jwtreq "github.com/dgrijalva/jwt-go/request"
|
||||
jwtreq "github.com/golang-jwt/jwt/request"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/config"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
|
@ -168,15 +169,14 @@ func (s *storageRESTServer) CrawlAndGetDataUsageHandler(w http.ResponseWriter, r
|
|||
return
|
||||
}
|
||||
|
||||
done := keepHTTPResponseAlive(w)
|
||||
resp := streamHTTPResponse(w)
|
||||
usageInfo, err := s.storage.CrawlAndGetDataUsage(r.Context(), cache)
|
||||
|
||||
done(err)
|
||||
if err != nil {
|
||||
resp.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
w.Write(usageInfo.serialize())
|
||||
w.(http.Flusher).Flush()
|
||||
resp.CloseWithError(usageInfo.serializeTo(resp))
|
||||
}
|
||||
|
||||
// MakeVolHandler - make a volume.
|
||||
|
@@ -561,10 +561,19 @@ func (s *storageRESTServer) WalkVersionsHandler(w http.ResponseWriter, r *http.R
		return
	}

+	healing := false
+	if healingParam := r.URL.Query().Get(storageRESTHealing); healingParam != "" {
+		healing, err = strconv.ParseBool(healingParam)
+		if err != nil {
+			s.writeErrorResponse(w, err)
+			return
+		}
+	}
+
	setEventStreamHeaders(w)
	encoder := gob.NewEncoder(w)

-	fch, err := s.storage.WalkVersions(r.Context(), volume, dirPath, markerPath, recursive, r.Context().Done())
+	fch, err := s.storage.WalkVersions(r.Context(), volume, dirPath, markerPath, recursive, healing, r.Context().Done())
	if err != nil {
		s.writeErrorResponse(w, err)
		return

@@ -792,6 +801,158 @@ func waitForHTTPResponse(respBody io.Reader) (io.Reader, error) {
	}
}

+// drainCloser can be used for wrapping an http response.
+// It will drain the body before closing.
+type drainCloser struct {
+	rc io.ReadCloser
+}
+
+// Read forwards the read operation.
+func (f drainCloser) Read(p []byte) (n int, err error) {
+	return f.rc.Read(p)
+}
+
+// Close drains the body and closes the upstream.
+func (f drainCloser) Close() error {
+	xhttp.DrainBody(f.rc)
+	return nil
+}
+
+// httpStreamResponse allows streaming a response, but still sending an error.
+type httpStreamResponse struct {
+	done  chan error
+	block chan []byte
+	err   error
+}
+
+// Write part of the streaming response.
+// Note that upstream errors are currently not forwarded, but may be in the future.
+func (h *httpStreamResponse) Write(b []byte) (int, error) {
+	if len(b) == 0 || h.err != nil {
+		// Ignore 0 length blocks
+		return 0, h.err
+	}
+	tmp := make([]byte, len(b))
+	copy(tmp, b)
+	h.block <- tmp
+	return len(b), h.err
+}
+
+// CloseWithError will close the stream and return the specified error.
+// This can be done several times, but only the first error will be sent.
+// After calling this the stream should not be written to.
+func (h *httpStreamResponse) CloseWithError(err error) {
+	if h.done == nil {
+		return
+	}
+	h.done <- err
+	h.err = err
+	// Indicates that the response is done.
+	<-h.done
+	h.done = nil
+}
+
+// streamHTTPResponse can be used to avoid timeouts with long storage
+// operations, such as bitrot verification or data usage crawling.
+// Every 10 seconds a space character is sent.
+// CloseWithError should always be called to release resources.
+// An optional error can be sent which will be picked as text only error,
+// without its original type by the receiver.
+// waitForHTTPStream should be used on the receiving side.
+func streamHTTPResponse(w http.ResponseWriter) *httpStreamResponse {
+	doneCh := make(chan error)
+	blockCh := make(chan []byte)
+	h := httpStreamResponse{done: doneCh, block: blockCh}
+	go func() {
+		ticker := time.NewTicker(time.Second * 10)
+		for {
+			select {
+			case <-ticker.C:
+				// Response not ready, write a filler byte.
+				w.Write([]byte{32})
+				w.(http.Flusher).Flush()
+			case err := <-doneCh:
+				ticker.Stop()
+				defer close(doneCh)
+				if err != nil {
+					w.Write([]byte{1})
+					w.Write([]byte(err.Error()))
+				} else {
+					w.Write([]byte{0})
+				}
+				return
+			case block := <-blockCh:
+				var tmp [5]byte
+				tmp[0] = 2
+				binary.LittleEndian.PutUint32(tmp[1:], uint32(len(block)))
+				w.Write(tmp[:])
+				w.Write(block)
+				w.(http.Flusher).Flush()
+			}
+		}
+	}()
+	return &h
+}
+
+// waitForHTTPStream will wait for responses where
+// streamHTTPResponse has been used.
+// The returned reader contains the payload and must be closed if no error is returned.
+func waitForHTTPStream(respBody io.ReadCloser, w io.Writer) error {
+	var tmp [1]byte
+	for {
+		_, err := io.ReadFull(respBody, tmp[:])
+		if err != nil {
+			return err
+		}
+		// Check if we have a response ready or a filler byte.
+		switch tmp[0] {
+		case 0:
+			// 0 is unbuffered, copy the rest.
+			_, err := io.Copy(w, respBody)
+			respBody.Close()
+			if err == io.EOF {
+				return nil
+			}
+			return err
+		case 1:
+			errorText, err := ioutil.ReadAll(respBody)
+			if err != nil {
+				return err
+			}
+			respBody.Close()
+			return errors.New(string(errorText))
+		case 3:
+			// gob style is already deprecated, we can remove this when
+			// the storage API version is greater than or equal to 23.
+			defer respBody.Close()
+			dec := gob.NewDecoder(respBody)
+			var err error
+			if de := dec.Decode(&err); de == nil {
+				return err
+			}
+			return errors.New("rpc error")
+		case 2:
+			// Block of data
+			var tmp [4]byte
+			_, err := io.ReadFull(respBody, tmp[:])
+			if err != nil {
+				return err
+			}
+			length := binary.LittleEndian.Uint32(tmp[:])
+			_, err = io.CopyN(w, respBody, int64(length))
+			if err != nil {
+				return err
+			}
+			continue
+		case 32:
+			continue
+		default:
+			go xhttp.DrainBody(respBody)
+			return fmt.Errorf("unexpected filler byte: %d", tmp[0])
+		}
+	}
+}
+
// VerifyFileResp - VerifyFile()'s response.
type VerifyFileResp struct {
	Err error

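The streaming response above defines a tiny framing protocol: a space byte (32) is a keep-alive filler, 2 prefixes a 4-byte little-endian length plus a data block, 0 terminates with success (any remaining bytes are payload), and 1 terminates with an error text. A minimal round-trip of that framing over an in-memory buffer instead of HTTP, for illustration only:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

func writeFrames(w io.Writer, blocks [][]byte) {
	w.Write([]byte{32}) // keep-alive filler
	for _, b := range blocks {
		var hdr [5]byte
		hdr[0] = 2
		binary.LittleEndian.PutUint32(hdr[1:], uint32(len(b)))
		w.Write(hdr[:])
		w.Write(b)
	}
	w.Write([]byte{0}) // success terminator
}

func readFrames(r io.Reader, w io.Writer) error {
	var tag [1]byte
	for {
		if _, err := io.ReadFull(r, tag[:]); err != nil {
			return err
		}
		switch tag[0] {
		case 32: // filler, keep waiting
			continue
		case 2: // length-prefixed block
			var ln [4]byte
			if _, err := io.ReadFull(r, ln[:]); err != nil {
				return err
			}
			if _, err := io.CopyN(w, r, int64(binary.LittleEndian.Uint32(ln[:]))); err != nil {
				return err
			}
		case 0: // success: copy any trailing payload
			_, err := io.Copy(w, r)
			return err
		case 1: // error text follows
			msg, _ := io.ReadAll(r)
			return fmt.Errorf("remote error: %s", msg)
		default:
			return fmt.Errorf("unexpected frame tag %d", tag[0])
		}
	}
}

func main() {
	var wire, payload bytes.Buffer
	writeFrames(&wire, [][]byte{[]byte("hello "), []byte("world")})
	fmt.Println(readFrames(&wire, &payload), payload.String()) // <nil> hello world
}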
@ -166,74 +166,25 @@ func newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, APIErrorCode) {
|
|||
seedDate: seedDate,
|
||||
region: region,
|
||||
chunkSHA256Writer: sha256.New(),
|
||||
state: readChunkHeader,
|
||||
buffer: make([]byte, 64*1024),
|
||||
}, ErrNone
|
||||
}
|
||||
|
||||
// Represents the overall state that is required for decoding a
|
||||
// AWS Signature V4 chunked reader.
|
||||
type s3ChunkedReader struct {
|
||||
reader *bufio.Reader
|
||||
cred auth.Credentials
|
||||
seedSignature string
|
||||
seedDate time.Time
|
||||
region string
|
||||
state chunkState
|
||||
lastChunk bool
|
||||
chunkSignature string
|
||||
reader *bufio.Reader
|
||||
cred auth.Credentials
|
||||
seedSignature string
|
||||
seedDate time.Time
|
||||
region string
|
||||
|
||||
chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data.
|
||||
n uint64 // Unread bytes in chunk
|
||||
buffer []byte
|
||||
offset int
|
||||
err error
|
||||
}
|
||||
|
||||
// Read chunk reads the chunk token signature portion.
|
||||
func (cr *s3ChunkedReader) readS3ChunkHeader() {
|
||||
// Read the first chunk line until CRLF.
|
||||
var hexChunkSize, hexChunkSignature []byte
|
||||
hexChunkSize, hexChunkSignature, cr.err = readChunkLine(cr.reader)
|
||||
if cr.err != nil {
|
||||
return
|
||||
}
|
||||
// <hex>;token=value - converts the hex into its uint64 form.
|
||||
cr.n, cr.err = parseHexUint(hexChunkSize)
|
||||
if cr.err != nil {
|
||||
return
|
||||
}
|
||||
if cr.n == 0 {
|
||||
cr.err = io.EOF
|
||||
}
|
||||
// Save the incoming chunk signature.
|
||||
cr.chunkSignature = string(hexChunkSignature)
|
||||
}
|
||||
|
||||
type chunkState int
|
||||
|
||||
const (
|
||||
readChunkHeader chunkState = iota
|
||||
readChunkTrailer
|
||||
readChunk
|
||||
verifyChunk
|
||||
eofChunk
|
||||
)
|
||||
|
||||
func (cs chunkState) String() string {
|
||||
stateString := ""
|
||||
switch cs {
|
||||
case readChunkHeader:
|
||||
stateString = "readChunkHeader"
|
||||
case readChunkTrailer:
|
||||
stateString = "readChunkTrailer"
|
||||
case readChunk:
|
||||
stateString = "readChunk"
|
||||
case verifyChunk:
|
||||
stateString = "verifyChunk"
|
||||
case eofChunk:
|
||||
stateString = "eofChunk"
|
||||
|
||||
}
|
||||
return stateString
|
||||
}
|
||||
|
||||
func (cr *s3ChunkedReader) Close() (err error) {
|
||||
return nil
|
||||
}
|
||||
|
@ -241,83 +192,165 @@ func (cr *s3ChunkedReader) Close() (err error) {
|
|||
// Read - implements `io.Reader`, which transparently decodes
|
||||
// the incoming AWS Signature V4 streaming signature.
|
||||
func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
|
||||
// First, if there is any unread data, copy it to the client
|
||||
// provided buffer.
|
||||
if cr.offset > 0 {
|
||||
n = copy(buf, cr.buffer[cr.offset:])
|
||||
if n == len(buf) {
|
||||
cr.offset += n
|
||||
return n, nil
|
||||
}
|
||||
cr.offset = 0
|
||||
buf = buf[n:]
|
||||
}
|
||||
|
||||
// Now, we read one chunk from the underlying reader.
|
||||
// A chunk has the following format:
|
||||
// <chunk-size-as-hex> + ";chunk-signature=" + <signature-as-hex> + "\r\n" + <payload> + "\r\n"
|
||||
//
|
||||
// Frist, we read the chunk size but fail if it is larger
|
||||
// than 1 MB. We must not accept arbitrary large chunks.
|
||||
// One 1 MB is a reasonable max limit.
|
||||
//
|
||||
// Then we read the signature and payload data. We compute the SHA256 checksum
|
||||
	// of the payload and verify that it matches the expected signature value.
	//
	// The last chunk is *always* 0-sized. So, we must only return io.EOF if we have encountered
	// a chunk with a chunk size = 0. However, this chunk still has a signature and we must
	// verify it.
	const MaxSize = 1 << 20 // 1 MB
	var size int
	for {
		switch cr.state {
		case readChunkHeader:
			cr.readS3ChunkHeader()
			// If we're at the end of a chunk.
			if cr.n == 0 && cr.err == io.EOF {
				cr.state = readChunkTrailer
				cr.lastChunk = true
				continue
			}
			if cr.err != nil {
				return 0, cr.err
			}
			cr.state = readChunk
		case readChunkTrailer:
			cr.err = readCRLF(cr.reader)
			if cr.err != nil {
				return 0, errMalformedEncoding
			}
			cr.state = verifyChunk
		case readChunk:
			// There is no more space left in the request buffer.
			if len(buf) == 0 {
				return n, nil
			}
			rbuf := buf
			// The request buffer is larger than the current chunk size.
			// Read only the current chunk from the underlying reader.
			if uint64(len(rbuf)) > cr.n {
				rbuf = rbuf[:cr.n]
			}
			var n0 int
			n0, cr.err = cr.reader.Read(rbuf)
			if cr.err != nil {
				// We have read less than the chunk size advertised in the chunk header; this is unexpected.
				if cr.err == io.EOF {
					cr.err = io.ErrUnexpectedEOF
				}
				return 0, cr.err
			}
			b, err := cr.reader.ReadByte()
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			if err != nil {
				cr.err = err
				return n, cr.err
			}
			if b == ';' { // separating character
				break
			}

			// Calculate sha256.
			cr.chunkSHA256Writer.Write(rbuf[:n0])
			// Update the bytes read into request buffer so far.
			n += n0
			buf = buf[n0:]
			// Update bytes to be read of the current chunk before verifying the chunk's signature.
			cr.n -= uint64(n0)

			// If we're at the end of a chunk.
			if cr.n == 0 {
				cr.state = readChunkTrailer
				continue
			}
		case verifyChunk:
			// Calculate the hashed chunk.
			hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))
			// Calculate the chunk signature.
			newSignature := getChunkSignature(cr.cred, cr.seedSignature, cr.region, cr.seedDate, hashedChunk)
			if !compareSignatureV4(cr.chunkSignature, newSignature) {
				// The chunk signature does not match; return a signature mismatch error.
				cr.err = errSignatureMismatch
				return 0, cr.err
			}
			// The newly calculated signature becomes the seed for the next chunk;
			// this follows the chaining.
			cr.seedSignature = newSignature
			cr.chunkSHA256Writer.Reset()
			if cr.lastChunk {
				cr.state = eofChunk
			} else {
				cr.state = readChunkHeader
			}
		case eofChunk:
			return n, io.EOF
		// Manually deserialize the size since AWS specified
		// the chunk size to be of variable width. In particular,
		// a size of 16 is encoded as `10` while a size of 64 KB
		// is `10000`.
		switch {
		case b >= '0' && b <= '9':
			size = size<<4 | int(b-'0')
		case b >= 'a' && b <= 'f':
			size = size<<4 | int(b-('a'-10))
		case b >= 'A' && b <= 'F':
			size = size<<4 | int(b-('A'-10))
		default:
			cr.err = errMalformedEncoding
			return n, cr.err
		}
		if size > MaxSize {
			cr.err = errMalformedEncoding
			return n, cr.err
		}
	}

	// Now, we read the signature of the following payload and expect:
	//   chunk-signature=" + <signature-as-hex> + "\r\n"
	//
	// The signature is 64 bytes long (hex-encoded SHA256 hash) and
	// starts with a 16 byte header: len("chunk-signature=") + 64 == 80.
	var signature [80]byte
	_, err = io.ReadFull(cr.reader, signature[:])
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	if err != nil {
		cr.err = err
		return n, cr.err
	}
	if !bytes.HasPrefix(signature[:], []byte("chunk-signature=")) {
		cr.err = errMalformedEncoding
		return n, cr.err
	}
	b, err := cr.reader.ReadByte()
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	if err != nil {
		cr.err = err
		return n, cr.err
	}
	if b != '\r' {
		cr.err = errMalformedEncoding
		return n, cr.err
	}
	b, err = cr.reader.ReadByte()
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	if err != nil {
		cr.err = err
		return n, cr.err
	}
	if b != '\n' {
		cr.err = errMalformedEncoding
		return n, cr.err
	}

	if cap(cr.buffer) < size {
		cr.buffer = make([]byte, size)
	} else {
		cr.buffer = cr.buffer[:size]
	}

	// Now, we read the payload and compute its SHA-256 hash.
	_, err = io.ReadFull(cr.reader, cr.buffer)
	if err == io.EOF && size != 0 {
		err = io.ErrUnexpectedEOF
	}
	if err != nil && err != io.EOF {
		cr.err = err
		return n, cr.err
	}
	b, err = cr.reader.ReadByte()
	if b != '\r' {
		cr.err = errMalformedEncoding
		return n, cr.err
	}
	b, err = cr.reader.ReadByte()
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	if err != nil {
		cr.err = err
		return n, cr.err
	}
	if b != '\n' {
		cr.err = errMalformedEncoding
		return n, cr.err
	}

	// Once we have read the entire chunk successfully, we verify
	// that the received signature matches our computed signature.
	cr.chunkSHA256Writer.Write(cr.buffer)
	newSignature := getChunkSignature(cr.cred, cr.seedSignature, cr.region, cr.seedDate, hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil)))
	if !compareSignatureV4(string(signature[16:]), newSignature) {
		cr.err = errSignatureMismatch
		return n, cr.err
	}
	cr.seedSignature = newSignature
	cr.chunkSHA256Writer.Reset()

	// If the chunk size is zero we return io.EOF. As specified by AWS,
	// only the last chunk is zero-sized.
	if size == 0 {
		cr.err = io.EOF
		return n, cr.err
	}

	cr.offset = copy(buf, cr.buffer)
	n += cr.offset
	return n, err
}

// readCRLF - check if reader only has '\r\n' CRLF character.
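The chaining described above is the AWS Signature V4 streaming scheme: every chunk's signature is an HMAC-SHA256 over a string-to-sign that embeds the previous chunk's signature, so chunks cannot be dropped, reordered, or replayed. A minimal stand-alone sketch of one link in that chain, using only the Go standard library; the signing-key derivation is omitted, and the date/scope values are illustrative placeholders for what getChunkSignature derives from the request credentials:

	package main

	import (
		"crypto/hmac"
		"crypto/sha256"
		"encoding/hex"
		"fmt"
		"strings"
	)

	// chunkSignature computes one link of the SigV4 chunk-signature chain:
	// an HMAC-SHA256 over a string-to-sign that embeds the previous signature.
	func chunkSignature(signingKey []byte, date, scope, prevSignature string, payload []byte) string {
		emptyHash := sha256.Sum256(nil)
		payloadHash := sha256.Sum256(payload)
		stringToSign := strings.Join([]string{
			"AWS4-HMAC-SHA256-PAYLOAD",
			date,
			scope,
			prevSignature,
			hex.EncodeToString(emptyHash[:]),
			hex.EncodeToString(payloadHash[:]),
		}, "\n")
		mac := hmac.New(sha256.New, signingKey)
		mac.Write([]byte(stringToSign))
		return hex.EncodeToString(mac.Sum(nil))
	}

	func main() {
		key := []byte("derived-signing-key") // placeholder for the full SigV4 key derivation
		seed := strings.Repeat("0", 64)      // in practice: the seed signature from the request header
		s1 := chunkSignature(key, "20210101T000000Z", "20210101/us-east-1/s3/aws4_request", seed, []byte("chunk-1"))
		s2 := chunkSignature(key, "20210101T000000Z", "20210101/us-east-1/s3/aws4_request", s1, []byte("chunk-2"))
		fmt.Println(s1)
		fmt.Println(s2) // each signature seeds the next, exactly the chaining cr.seedSignature implements
	}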
@@ -328,11 +328,12 @@ func (sts *stsAPIHandlers) AssumeRoleWithSSO(w http.ResponseWriter, r *http.Requ
	var policyName string
	policySet, ok := iampolicy.GetPoliciesFromClaims(m, iamPolicyClaimNameOpenID())
	if ok {
		policyName = globalIAMSys.currentPolicies(strings.Join(policySet.ToSlice(), ","))
		policyName = globalIAMSys.CurrentPolicies(strings.Join(policySet.ToSlice(), ","))
	}

	if policyName == "" && globalPolicyOPA == nil {
		writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, fmt.Errorf("%s claim missing from the JWT token, credentials will not be generated", iamPolicyClaimNameOpenID()))
		writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue,
			fmt.Errorf("%s claim missing from the JWT token, credentials will not be generated", iamPolicyClaimNameOpenID()))
		return
	}
	m[iamPolicyClaimNameOpenID()] = policyName
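For context, GetPoliciesFromClaims maps the policy claim in the decoded JWT onto a set of policy names. A rough stand-alone sketch of that claim-to-set mapping; the function name and the "policy" claim key here are hypothetical stand-ins, not MinIO's API:

	package main

	import (
		"fmt"
		"strings"
	)

	// policiesFromClaims extracts a set of policy names from a decoded
	// claims map, accepting either a comma-separated string or a list.
	func policiesFromClaims(claims map[string]interface{}, claimName string) (map[string]struct{}, bool) {
		set := map[string]struct{}{}
		switch p := claims[claimName].(type) {
		case string:
			for _, name := range strings.Split(p, ",") {
				if name = strings.TrimSpace(name); name != "" {
					set[name] = struct{}{}
				}
			}
		case []interface{}:
			for _, e := range p {
				if name, ok := e.(string); ok {
					set[name] = struct{}{}
				}
			}
		}
		return set, len(set) > 0
	}

	func main() {
		claims := map[string]interface{}{"policy": "readwrite, diagnostics"}
		set, ok := policiesFromClaims(claims, "policy")
		fmt.Println(set, ok) // map[diagnostics:{} readwrite:{}] true
	}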
Binary file not shown.
@@ -309,7 +309,7 @@ func downloadReleaseURL(u *url.URL, timeout time.Duration, mode string) (content
	client := &http.Client{Transport: getUpdateTransport(timeout)}
	resp, err := client.Do(req)
	if err != nil {
		if xnet.IsNetworkOrHostDown(err) {
		if xnet.IsNetworkOrHostDown(err, false) {
			return content, AdminError{
				Code:    AdminUpdateURLNotReachable,
				Message: err.Error(),
@@ -501,7 +501,7 @@ func getUpdateReaderFromURL(u *url.URL, transport http.RoundTripper, mode string

	resp, err := clnt.Do(req)
	if err != nil {
		if xnet.IsNetworkOrHostDown(err) {
		if xnet.IsNetworkOrHostDown(err, false) {
			return nil, AdminError{
				Code:    AdminUpdateURLNotReachable,
				Message: err.Error(),
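Both hunks now pass an explicit second argument to xnet.IsNetworkOrHostDown; false here means timeouts should count as failures. A sketch of the kind of classification such a helper performs, using only the standard library; the body is an illustrative assumption, not MinIO's implementation:

	package main

	import (
		"context"
		"errors"
		"fmt"
		"net"
		"time"
	)

	// isNetworkOrHostDown reports whether err looks like a network or host
	// failure. When expectTimeouts is true, timeouts are considered normal
	// (e.g. long-polling requests) and are not classified as failures.
	func isNetworkOrHostDown(err error, expectTimeouts bool) bool {
		if err == nil {
			return false
		}
		if errors.Is(err, context.DeadlineExceeded) {
			return !expectTimeouts
		}
		var nerr net.Error
		if errors.As(err, &nerr) && nerr.Timeout() {
			return !expectTimeouts
		}
		var opErr *net.OpError
		return errors.As(err, &opErr) // connection refused, no route to host, ...
	}

	func main() {
		// TEST-NET-3 address: this dial is expected to time out.
		_, err := net.DialTimeout("tcp", "203.0.113.1:9000", time.Millisecond)
		fmt.Println(isNetworkOrHostDown(err, false)) // true: timeout treated as host down
		fmt.Println(isNetworkOrHostDown(err, true))  // false: timeout expected, tolerated
	}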
84
cmd/utils.go
@@ -45,7 +45,7 @@ import (
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/handlers"
	"github.com/minio/minio/pkg/madmin"
	"golang.org/x/net/http2"
	http2 "golang.org/x/net/http2"
)

const (

@@ -470,6 +470,8 @@ func newInternodeHTTPTransport(tlsConfig *tls.Config, dialTimeout time.Duration)
	Proxy:                 http.ProxyFromEnvironment,
	DialContext:           xhttp.DialContextWithDNSCache(globalDNSCache, xhttp.NewInternodeDialContext(dialTimeout)),
	MaxIdleConnsPerHost:   1024,
	WriteBufferSize:       32 << 10, // 32KiB moving up from 4KiB default
	ReadBufferSize:        32 << 10, // 32KiB moving up from 4KiB default
	IdleConnTimeout:       15 * time.Second,
	ResponseHeaderTimeout: 3 * time.Minute, // Set conservative timeouts for MinIO internode.
	TLSHandshakeTimeout:   15 * time.Second,
@@ -481,9 +483,26 @@ func newInternodeHTTPTransport(tlsConfig *tls.Config, dialTimeout time.Duration)
		DisableCompression: true,
	}

	if tlsConfig != nil {
		http2.ConfigureTransport(tr)
	}
	// https://github.com/golang/go/issues/23559
	// https://github.com/golang/go/issues/42534
	// https://github.com/golang/go/issues/43989
	// https://github.com/golang/go/issues/33425
	// https://github.com/golang/go/issues/29246
	// if tlsConfig != nil {
	// 	trhttp2, _ := http2.ConfigureTransports(tr)
	// 	if trhttp2 != nil {
	// 		// ReadIdleTimeout is the timeout after which a health check using ping
	// 		// frame will be carried out if no frame is received on the
	// 		// connection. 5 minutes is sufficient time for any idle connection.
	// 		trhttp2.ReadIdleTimeout = 5 * time.Minute
	// 		// PingTimeout is the timeout after which the connection will be closed
	// 		// if a response to Ping is not received.
	// 		trhttp2.PingTimeout = dialTimeout
	// 		// DisableCompression, if true, prevents the Transport from
	// 		// requesting compression with an "Accept-Encoding: gzip"
	// 		trhttp2.DisableCompression = true
	// 	}
	// }

	return func() *http.Transport {
		return tr
@@ -498,6 +517,8 @@ func newCustomHTTPProxyTransport(tlsConfig *tls.Config, dialTimeout time.Duratio
	Proxy:                 http.ProxyFromEnvironment,
	DialContext:           xhttp.DialContextWithDNSCache(globalDNSCache, xhttp.NewInternodeDialContext(dialTimeout)),
	MaxIdleConnsPerHost:   1024,
	WriteBufferSize:       16 << 10, // 16KiB moving up from 4KiB default
	ReadBufferSize:        16 << 10, // 16KiB moving up from 4KiB default
	IdleConnTimeout:       15 * time.Second,
	ResponseHeaderTimeout: 30 * time.Minute, // Set larger timeouts for proxied requests.
	TLSHandshakeTimeout:   10 * time.Second,
@@ -514,13 +535,15 @@ func newCustomHTTPProxyTransport(tlsConfig *tls.Config, dialTimeout time.Duratio
	}
}

func newCustomHTTPTransport(tlsConfig *tls.Config, dialTimeout time.Duration) func() *http.Transport {
func newCustomHTTPTransportWithHTTP2(tlsConfig *tls.Config, dialTimeout time.Duration) func() *http.Transport {
	// For more details about various values used here refer
	// https://golang.org/pkg/net/http/#Transport documentation
	tr := &http.Transport{
		Proxy:                 http.ProxyFromEnvironment,
		DialContext:           xhttp.DialContextWithDNSCache(globalDNSCache, xhttp.NewInternodeDialContext(dialTimeout)),
		MaxIdleConnsPerHost:   1024,
		WriteBufferSize:       16 << 10, // 16KiB moving up from 4KiB default
		ReadBufferSize:        16 << 10, // 16KiB moving up from 4KiB default
		IdleConnTimeout:       15 * time.Second,
		ResponseHeaderTimeout: 3 * time.Minute, // Set conservative timeouts for MinIO internode.
		TLSHandshakeTimeout:   10 * time.Second,
@@ -533,7 +556,10 @@ func newCustomHTTPTransport(tlsConfig *tls.Config, dialTimeout time.Duration) fu
	}

	if tlsConfig != nil {
		http2.ConfigureTransport(tr)
		trhttp2, _ := http2.ConfigureTransports(tr)
		if trhttp2 != nil {
			trhttp2.DisableCompression = true
		}
	}

	return func() *http.Transport {
@@ -541,6 +567,52 @@ func newCustomHTTPTransport(tlsConfig *tls.Config, dialTimeout time.Duration) fu
	}
}

func newCustomHTTPTransport(tlsConfig *tls.Config, dialTimeout time.Duration) func() *http.Transport {
	// For more details about various values used here refer
	// https://golang.org/pkg/net/http/#Transport documentation
	tr := &http.Transport{
		Proxy:                 http.ProxyFromEnvironment,
		DialContext:           xhttp.DialContextWithDNSCache(globalDNSCache, xhttp.NewInternodeDialContext(dialTimeout)),
		MaxIdleConnsPerHost:   1024,
		WriteBufferSize:       16 << 10, // 16KiB moving up from 4KiB default
		ReadBufferSize:        16 << 10, // 16KiB moving up from 4KiB default
		IdleConnTimeout:       15 * time.Second,
		ResponseHeaderTimeout: 3 * time.Minute, // Set conservative timeouts for MinIO internode.
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 10 * time.Second,
		TLSClientConfig:       tlsConfig,
		// Go net/http automatically unzips if content-type is
		// gzip; disable this feature, as we are always interested
		// in the raw stream.
		DisableCompression: true,
	}

	// https://github.com/golang/go/issues/23559
	// https://github.com/golang/go/issues/42534
	// https://github.com/golang/go/issues/43989
	// https://github.com/golang/go/issues/33425
	// https://github.com/golang/go/issues/29246
	// if tlsConfig != nil {
	// 	trhttp2, _ := http2.ConfigureTransports(tr)
	// 	if trhttp2 != nil {
	// 		// ReadIdleTimeout is the timeout after which a health check using ping
	// 		// frame will be carried out if no frame is received on the
	// 		// connection. 5 minutes is sufficient time for any idle connection.
	// 		trhttp2.ReadIdleTimeout = 5 * time.Minute
	// 		// PingTimeout is the timeout after which the connection will be closed
	// 		// if a response to Ping is not received.
	// 		trhttp2.PingTimeout = dialTimeout
	// 		// DisableCompression, if true, prevents the Transport from
	// 		// requesting compression with an "Accept-Encoding: gzip"
	// 		trhttp2.DisableCompression = true
	// 	}
	// }

	return func() *http.Transport {
		return tr
	}
}

// NewGatewayHTTPTransport returns a new http configuration
// used while communicating with the cloud backends.
// This sets the value for MaxIdleConnsPerHost from 2 (go default)
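The pattern across these hunks: HTTP/2 becomes opt-in. newCustomHTTPTransportWithHTTP2 enables it explicitly via http2.ConfigureTransports, while the plain transports leave it off because of the linked Go issues. A minimal sketch of both choices; the field values are trimmed for brevity and min.io is just a demo URL:

	package main

	import (
		"crypto/tls"
		"log"
		"net/http"
		"time"

		"golang.org/x/net/http2"
	)

	// newH2Transport opts in to HTTP/2 explicitly, the way the diff's
	// newCustomHTTPTransportWithHTTP2 does; error handling is simplified.
	func newH2Transport(tlsConfig *tls.Config) *http.Transport {
		tr := &http.Transport{
			TLSClientConfig:    tlsConfig,
			IdleConnTimeout:    15 * time.Second,
			DisableCompression: true,
		}
		if tlsConfig != nil {
			// ConfigureTransports registers the h2 ALPN handler and returns
			// the derived *http2.Transport for further tuning.
			if h2, err := http2.ConfigureTransports(tr); err == nil && h2 != nil {
				h2.DisableCompression = true
			}
		}
		return tr
	}

	// newH1Transport makes the opposite choice: pin the client to HTTP/1.1,
	// sidestepping the Go HTTP/2 issues referenced in the comments above.
	func newH1Transport(tlsConfig *tls.Config) *http.Transport {
		return &http.Transport{
			TLSClientConfig:   tlsConfig,
			ForceAttemptHTTP2: false,
			// A non-nil, empty TLSNextProto map disables the automatic HTTP/2 upgrade.
			TLSNextProto: map[string]func(string, *tls.Conn) http.RoundTripper{},
		}
	}

	func main() {
		client := &http.Client{Transport: newH1Transport(nil)}
		resp, err := client.Get("https://min.io")
		if err != nil {
			log.Fatal(err)
		}
		defer resp.Body.Close()
		log.Println(resp.Proto) // HTTP/1.1, since HTTP/2 negotiation is disabled
	}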
@@ -33,8 +33,8 @@ import (
	"strings"
	"testing"

	jwtgo "github.com/dgrijalva/jwt-go"
	humanize "github.com/dustin/go-humanize"
	jwtgo "github.com/golang-jwt/jwt"
	xjwt "github.com/minio/minio/cmd/jwt"
	"github.com/minio/minio/pkg/hash"
)
@@ -142,11 +142,11 @@ func (p *xlStorageDiskIDCheck) DeleteVol(ctx context.Context, volume string, for
	return p.storage.DeleteVol(ctx, volume, forceDelete)
}

func (p *xlStorageDiskIDCheck) WalkVersions(ctx context.Context, volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfoVersions, error) {
func (p *xlStorageDiskIDCheck) WalkVersions(ctx context.Context, volume, dirPath, marker string, recursive bool, healing bool, endWalkCh <-chan struct{}) (chan FileInfoVersions, error) {
	if err := p.checkDiskStale(); err != nil {
		return nil, err
	}
	return p.storage.WalkVersions(ctx, volume, dirPath, marker, recursive, endWalkCh)
	return p.storage.WalkVersions(ctx, volume, dirPath, marker, recursive, healing, endWalkCh)
}

func (p *xlStorageDiskIDCheck) Walk(ctx context.Context, volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfo, error) {
@@ -68,6 +68,14 @@ func isSysErrTooManySymlinks(err error) bool {
	return errors.Is(err, syscall.ELOOP)
}

func osIsNotExist(err error) bool {
	return errors.Is(err, os.ErrNotExist)
}

func osIsPermission(err error) bool {
	return errors.Is(err, os.ErrPermission)
}

// Check if the given error corresponds to ENOTEMPTY for unix,
// EEXIST for solaris variants,
// and ERROR_DIR_NOT_EMPTY for windows (directory not empty).
@@ -29,9 +29,11 @@ import (
	"net/url"
	"os"
	"path"
	pathutil "path"
	slashpath "path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
@@ -52,7 +54,7 @@ import (
const (
	nullVersionID     = "null"
	diskMinTotalSpace = 900 * humanize.MiByte // Min 900MiB total space.
	readBlockSize     = 4 * humanize.MiByte   // Default read block size 4MiB.
	readBlockSize     = 2 * humanize.MiByte   // Default read block size 2MiB.

	// On regular files bigger than this;
	readAheadSize = 16 << 20
@@ -259,6 +261,25 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
	if err != nil {
		return nil, err
	}
	if !rootDisk {
		// If for some reason we couldn't detect the
		// root disk, use MINIO_ROOTDISK_THRESHOLD_SIZE
		// to figure out whether the disk is a root disk.
		if rootDiskSize := env.Get(config.EnvRootDiskThresholdSize, ""); rootDiskSize != "" {
			info, err := disk.GetInfo(path)
			if err != nil {
				return nil, err
			}
			size, err := humanize.ParseBytes(rootDiskSize)
			if err != nil {
				return nil, err
			}
			// If the total size of the disk at path is less than or
			// equal to the threshold, treat it as a root disk and
			// reject it.
			rootDisk = info.Total <= size
		}
	}

	p := &xlStorage{
		diskPath: path,
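The threshold is parsed with go-humanize, so values like "32GiB" work. A small runnable sketch of the same parse-and-compare; the 32GiB value and the 20GiB disk size are assumed example numbers, not defaults:

	package main

	import (
		"fmt"
		"os"

		"github.com/dustin/go-humanize"
	)

	func main() {
		// Assumed example value; the diff reads this from the environment.
		os.Setenv("MINIO_ROOTDISK_THRESHOLD_SIZE", "32GiB")

		threshold, err := humanize.ParseBytes(os.Getenv("MINIO_ROOTDISK_THRESHOLD_SIZE"))
		if err != nil {
			panic(err)
		}

		diskTotal := uint64(20 * humanize.GiByte) // pretend disk.GetInfo reported 20GiB
		rootDisk := diskTotal <= threshold        // same comparison as the diff
		fmt.Println(threshold, rootDisk)          // 34359738368 true
	}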
@@ -927,7 +948,13 @@ func (s *xlStorage) WalkSplunk(ctx context.Context, volume, dirPath, marker stri

// WalkVersions - is a sorted walker which returns file entries in lexically sorted order,
// additionally along with metadata version info about each of those entries.
func (s *xlStorage) WalkVersions(ctx context.Context, volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (ch chan FileInfoVersions, err error) {
func (s *xlStorage) WalkVersions(ctx context.Context, volume, dirPath, marker string, recursive bool, healing bool, endWalkCh <-chan struct{}) (ch chan FileInfoVersions, err error) {
	delayMult, err := strconv.ParseFloat(env.Get(envDataUsageCrawlDelay, "10.0"), 64)
	if err != nil {
		logger.LogIf(ctx, err)
		delayMult = dataCrawlSleepDefMult
	}

	atomic.AddInt32(&s.activeIOCount, 1)
	defer func() {
		atomic.AddInt32(&s.activeIOCount, -1)
@@ -962,10 +989,14 @@ func (s *xlStorage) WalkVersions(ctx context.Context, volume, dirPath, marker st
	go func() {
		defer close(ch)
		listDir := func(volume, dirPath, dirEntry string) (emptyDir bool, entries []string, delayIsLeaf bool) {
			t := time.Now()
			entries, err := s.ListDir(ctx, volume, dirPath, -1)
			if err != nil {
				return false, nil, false
			}
			if healing {
				sleepDuration(time.Since(t), delayMult)
			}
			if len(entries) == 0 {
				return true, nil, false
			}
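When healing, each directory listing is now followed by a pause proportional to how long the listing took, scaled by the crawl-delay multiplier read from the environment; slow disks therefore get throttled harder. The diff only shows the call site; the helper body below is an assumption about what a sleepDuration with this signature would do:

	package main

	import (
		"fmt"
		"time"
	)

	// sleepDuration sleeps for d scaled by x, so an operation that took d
	// is followed by a pause of x*d. The body is an illustrative guess at
	// a helper with this call shape, not MinIO's implementation.
	func sleepDuration(d time.Duration, x float64) {
		if d = time.Duration(float64(d) * x); d > 0 {
			time.Sleep(d)
		}
	}

	func main() {
		t := time.Now()
		time.Sleep(5 * time.Millisecond) // stand-in for a ListDir call
		sleepDuration(time.Since(t), 10.0)
		fmt.Println("total elapsed:", time.Since(t)) // roughly 11x the listing time
	}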
@@ -1171,7 +1202,7 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
	if !isXL2V1Format(buf) {
		// Delete the meta file; if there are no more versions the
		// top-level parent is automatically removed.
		return deleteFile(volumeDir, pathJoin(volumeDir, path), true)
		return s.deleteFile(volumeDir, pathJoin(volumeDir, path), true)
	}

	var xlMeta xlMetaV2
@@ -1196,7 +1227,8 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
		return err
	}

	if err = removeAll(filePath); err != nil {
	tmpuuid := mustGetUUID()
	if err = renameAll(filePath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, tmpuuid)); err != nil {
		return err
	}
}
@@ -1212,7 +1244,7 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
		return err
	}

	return deleteFile(volumeDir, filePath, false)
	return s.deleteFile(volumeDir, filePath, false)
}

// WriteMetadata - writes FileInfo metadata for path at `xl.meta`
@@ -1930,7 +1962,7 @@ func (s *xlStorage) CheckFile(ctx context.Context, volume string, path string) e
// move up the tree, deleting empty parent directories until it finds one
// with files in it. Returns nil for a non-empty directory even when
// recursive is set to false.
func deleteFile(basePath, deletePath string, recursive bool) error {
func (s *xlStorage) deleteFile(basePath, deletePath string, recursive bool) error {
	if basePath == "" || deletePath == "" {
		return nil
	}
@@ -1943,7 +1975,8 @@ func deleteFile(basePath, deletePath string, recursive bool) error {

	var err error
	if recursive {
		err = os.RemoveAll(deletePath)
		tmpuuid := mustGetUUID()
		err = renameAll(deletePath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, tmpuuid))
	} else {
		err = os.Remove(deletePath)
	}
@@ -1974,7 +2007,7 @@ func deleteFile(basePath, deletePath string, recursive bool) error {

	// Delete the parent directory (not recursively). Errors for
	// parent directories shouldn't trickle down.
	deleteFile(basePath, deletePath, false)
	s.deleteFile(basePath, deletePath, false)

	return nil
}
@@ -2012,7 +2045,7 @@ func (s *xlStorage) DeleteFile(ctx context.Context, volume string, path string)
	}

	// Delete the file, and also delete the parent directory if it's empty.
	return deleteFile(volumeDir, filePath, false)
	return s.deleteFile(volumeDir, filePath, false)
}

func (s *xlStorage) DeleteFileBulk(volume string, paths []string) (errs []error, err error) {
@@ -2049,7 +2082,7 @@ func (s *xlStorage) DeleteFileBulk(volume string, paths []string) (errs []error,
		continue
	}
	// Delete the file, and also delete the parent directory if it's empty.
	errs[idx] = deleteFile(volumeDir, filePath, false)
	errs[idx] = s.deleteFile(volumeDir, filePath, false)
}
return
}
@@ -2243,8 +2276,10 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath, dataDir,

	// Commit data
	if srcDataPath != "" {
		removeAll(oldDstDataPath)
		removeAll(dstDataPath)
		tmpuuid := mustGetUUID()
		renameAll(oldDstDataPath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, tmpuuid))
		tmpuuid = mustGetUUID()
		renameAll(dstDataPath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, tmpuuid))
		if err = renameAll(srcDataPath, dstDataPath); err != nil {
			return osErrToFileErr(err)
		}
@@ -2257,12 +2292,12 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath, dataDir,

	// Remove parent dir of the source file if empty
	if parentDir := slashpath.Dir(srcFilePath); isDirEmpty(parentDir) {
		deleteFile(srcVolumeDir, parentDir, false)
		s.deleteFile(srcVolumeDir, parentDir, false)
	}

	if srcDataPath != "" {
		if parentDir := slashpath.Dir(srcDataPath); isDirEmpty(parentDir) {
			deleteFile(srcVolumeDir, parentDir, false)
			s.deleteFile(srcVolumeDir, parentDir, false)
		}
	}
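The recurring change in this file replaces inline removeAll/os.RemoveAll calls with renameAll into minioMetaTmpDeletedBucket under a fresh UUID: the delete becomes a single cheap rename on the hot path, and the expensive recursive reclamation can happen later. A stdlib-only sketch of that park-then-delete pattern; the ".trash" directory and the sequence-numbered names stand in for MinIO's trash bucket and mustGetUUID:

	package main

	import (
		"fmt"
		"os"
		"path/filepath"
	)

	// moveToTrash renames target into a trash directory on the same disk,
	// so the recursive delete can run later, off the hot path.
	func moveToTrash(diskPath, target string, seq int) (string, error) {
		trashDir := filepath.Join(diskPath, ".trash")
		if err := os.MkdirAll(trashDir, 0o755); err != nil {
			return "", err
		}
		dst := filepath.Join(trashDir, fmt.Sprintf("deleted-%d", seq))
		// Rename is a single metadata operation on the same filesystem,
		// unlike os.RemoveAll which walks and unlinks every entry.
		return dst, os.Rename(target, dst)
	}

	func main() {
		disk := os.TempDir()
		victim := filepath.Join(disk, "old-data-dir")
		if err := os.MkdirAll(filepath.Join(victim, "part.1"), 0o755); err != nil {
			panic(err)
		}

		dst, err := moveToTrash(disk, victim, 1)
		if err != nil {
			panic(err)
		}
		fmt.Println("parked at:", dst)
		os.RemoveAll(dst) // actual reclamation can run in the background
	}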
Some files were not shown because too many files have changed in this diff.