Compare commits

78 commits — RELEASE.20... → master

Commits:
68c5ad83fb, 5acc8c0134, c897b6a82d, d008e90d50, ea820b30bf, 03725dc015, 0a6f9bc1eb,
1946922de3, edf1f4233b, f4b55ea7a7, 8dfd1f03e9, acf26c5ab7, d9800c8135, 02bef7560f,
07dd0692b6, 4f3317effe, 9afdbe3648, fe0df01448, 12e6907512, 5aef492b4c, 5d7ed8ff7d,
b1754fc5ff, 19bbf3e142, e1755275a0, 520037e721, cbb0828ab8, 8774d10bdf, df9f479d58,
8bb52c9c2a, 947c423824, c3d24fb26d, 112f9ae087, 01b9ff54d9, 64a1904136, bce6864785,
ecd54b4cba, ca2b288a4b, 1016fbb8f9, be3f81c7ec, 9f3c151c3c, 9735f3d8f2, 34680c5ccf,
58934e5881, ad3f98b8e7, 7c33a33ef3, 3dfcca68e6, 73b74c94a1, 18338d60d5, e106070640,
091a7ae359, 70160aeab3, 1aa08f594d, 14d8a931fe, 30ba85bc67, caadcc3ed8, 26f55472c6,
79a58e275c, 900e584514, bb639d9f29, 15dcacc1fc, 6d53e3c2d7, 8ed7346273, 3c1220adca,
4ed0eb7012, 2af5445309, abb1916bda, 9424dca9e4, db84bb9bd3, c603f85488, 2f1ee25f50,
7bdf9005e5, d9c1d79e30, bd88b86919, 8e29ae8c44, d158607f8e, 939fbb3c38, 3b9dfa9d29,
0c76fb57f2
.github/workflows/go-cross.yml (vendored) — 10 changes

@@ -1,13 +1,19 @@
-name: Go
+name: Crosscompile
 
 on:
   pull_request:
     branches:
     - master
 
+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
 jobs:
   build:
-    name: MinIO crosscompile tests on ${{ matrix.go-version }} and ${{ matrix.os }}
+    name: Build Tests with Go ${{ matrix.go-version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
.github/workflows/go-lint.yml (vendored) — 11 changes

@@ -1,13 +1,19 @@
-name: Go
+name: Linters and Tests
 
 on:
   pull_request:
     branches:
     - master
 
+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
 jobs:
   build:
-    name: MinIO tests on ${{ matrix.go-version }} and ${{ matrix.os }}
+    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:

@@ -39,4 +45,5 @@ jobs:
           curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-${nancy_version}-linux-amd64 && chmod +x nancy
           go list -deps -json ./... | jq -s 'unique_by(.Module.Path)|.[]|select(has("Module"))|.Module' | ./nancy sleuth
           make
           make test
+          make test-race
.github/workflows/go.yml (vendored) — 10 changes

@@ -1,13 +1,19 @@
-name: Go
+name: Functional Tests
 
 on:
   pull_request:
     branches:
     - master
 
+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
 jobs:
   build:
-    name: MinIO Setup on ${{ matrix.go-version }} and ${{ matrix.os }}
+    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
.github/workflows/iam-integrations.yaml (vendored) — new file, 125 lines

name: IAM integration with external systems

on:
  pull_request:
    branches:
    - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  ldap-test:
    name: LDAP Tests with Go ${{ matrix.go-version }}
    runs-on: ubuntu-latest

    services:
      openldap:
        image: quay.io/minio/openldap
        ports:
          - "389:389"
          - "636:636"
        env:
          LDAP_ORGANIZATION: "MinIO Inc"
          LDAP_DOMAIN: "min.io"
          LDAP_ADMIN_PASSWORD: "admin"

    strategy:
      matrix:
        go-version: [1.16.x, 1.17.x]

    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Test LDAP
        env:
          LDAP_TEST_SERVER: "localhost:389"
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make test-iam

  etcd-test:
    name: Etcd Backend Tests with Go ${{ matrix.go-version }}
    runs-on: ubuntu-latest

    services:
      etcd:
        image: "quay.io/coreos/etcd:v3.5.1"
        env:
          ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379"
          ETCD_ADVERTISE_CLIENT_URLS: "http://0.0.0.0:2379"
        ports:
          - "2379:2379"
        options: >-
          --health-cmd "etcdctl endpoint health"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    strategy:
      matrix:
        go-version: [1.16.x, 1.17.x]

    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Test Etcd IAM backend
        env:
          ETCD_SERVER: "http://localhost:2379"
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make test-iam

  iam-etcd-test:
    name: Etcd Backend + LDAP Tests with Go ${{ matrix.go-version }}
    runs-on: ubuntu-latest

    services:
      openldap:
        image: quay.io/minio/openldap
        ports:
          - "389:389"
          - "636:636"
        env:
          LDAP_ORGANIZATION: "MinIO Inc"
          LDAP_DOMAIN: "min.io"
          LDAP_ADMIN_PASSWORD: "admin"
      etcd:
        image: "quay.io/coreos/etcd:v3.5.1"
        env:
          ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379"
          ETCD_ADVERTISE_CLIENT_URLS: "http://0.0.0.0:2379"
        ports:
          - "2379:2379"
        options: >-
          --health-cmd "etcdctl endpoint health"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    strategy:
      matrix:
        go-version: [1.16.x, 1.17.x]

    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Test Etcd IAM backend with LDAP IDP
        env:
          ETCD_SERVER: "http://localhost:2379"
          LDAP_TEST_SERVER: "localhost:389"
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          make test-iam
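All three jobs above export LDAP_TEST_SERVER and/or ETCD_SERVER and then run `make test-iam`; the test suite checks those variables and skips itself when they are absent. A minimal sketch of that env-gating, using only the standard library (the test name is illustrative):

package cmd

import (
    "os"
    "testing"
)

func TestIAMWithEtcdSketch(t *testing.T) {
    // The workflow's job env sets ETCD_SERVER; on a laptop without the
    // service container the variable is empty and the test is skipped.
    etcdServer := os.Getenv("ETCD_SERVER")
    if etcdServer == "" {
        t.Skip("no etcd endpoint configured; skipping etcd-backend IAM test")
    }
    _ = etcdServer // a real test would point the IAM store at this endpoint
}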
.github/workflows/replication.yaml (vendored) — new file, 33 lines

name: Multi-site replication tests

on:
  pull_request:
    branches:
    - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  replication-test:
    name: Replication Tests with Go ${{ matrix.go-version }}
    runs-on: ubuntu-latest

    strategy:
      matrix:
        go-version: [1.16.x, 1.17.x]

    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Test Replication
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
          go install -v github.com/minio/mc@latest
          make test-replication
Dockerfile — the same two hunks apply to two Dockerfiles in this compare (file names not captured):

@@ -27,8 +27,9 @@ COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE
 
 RUN \
+    microdnf clean all && \
     microdnf update --nodocs && \
-    microdnf install curl ca-certificates shadow-utils util-linux iproute iputils --nodocs && \
+    microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
     rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
     microdnf install minisign --nodocs && \
     mkdir -p /opt/bin && chmod -R 777 /opt/bin && \

@@ -39,7 +40,8 @@ RUN \
     chmod +x /opt/bin/minio && \
     chmod +x /usr/bin/docker-entrypoint.sh && \
     chmod +x /usr/bin/verify-minio.sh && \
-    /usr/bin/verify-minio.sh
+    /usr/bin/verify-minio.sh && \
+    microdnf clean all
 
 EXPOSE 9000
Makefile — 18 changes

@@ -20,8 +20,8 @@ help: ## print this help
 getdeps: ## fetch necessary dependencies
 	@mkdir -p ${GOPATH}/bin
 	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.40.1
-	@which msgp 1>/dev/null || (echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.3)
-	@which stringer 1>/dev/null || (echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer)
+	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@latest
+	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest
 
 crosscompile: ## cross compile minio
 	@(env bash $(PWD)/buildscripts/cross-compile.sh)

@@ -40,12 +40,22 @@ lint: ## runs golangci-lint suite of linters
 check: test
 test: verifiers build ## builds minio, runs linters, tests
 	@echo "Running unit tests"
-	@GOGC=25 GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
+	@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
 
-test-race: verifiers build
+test-race: verifiers build ## builds minio, runs linters, tests (race)
 	@echo "Running unit tests under -race"
 	@(env bash $(PWD)/buildscripts/race.sh)
 
+test-iam: build ## verify IAM (external IDP, etcd backends)
+	@echo "Running tests for IAM (external IDP, etcd backends)"
+	@CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
+	@echo "Running tests for IAM (external IDP, etcd backends) with -race"
+	@CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
+
+test-replication: install ## verify multi site replication
+	@echo "Running tests for Replication three sites"
+	@(env bash $(PWD)/docs/bucket/replication/setup_3site_replication.sh)
+
 verify: ## verify minio various setups
 	@echo "Verifying build with race"
 	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@@ -114,8 +114,6 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
 		return
 	}
-
-	w.(http.Flusher).Flush()
 }
 
 // GetBucketACLHandler - GET Bucket ACL

@@ -164,8 +162,6 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
-
-	w.(http.Flusher).Flush()
 }
 
 // PutObjectACLHandler - PUT Object ACL

@@ -229,8 +225,6 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL)
 		return
 	}
-
-	w.(http.Flusher).Flush()
 }
 
 // GetObjectACLHandler - GET Object ACL

@@ -283,6 +277,4 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
-
-	w.(http.Flusher).Flush()
 }
@@ -103,6 +103,12 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
 			Description:    err.Error(),
 			HTTPStatusCode: http.StatusServiceUnavailable,
 		}
+	case errors.Is(err, errPolicyInUse):
+		apiErr = APIError{
+			Code:           "XMinioAdminPolicyInUse",
+			Description:    "The policy cannot be removed, as it is in use",
+			HTTPStatusCode: http.StatusBadRequest,
+		}
 	case errors.Is(err, kes.ErrKeyExists):
 		apiErr = APIError{
 			Code: "XMinioKMSKeyExists",
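The new errPolicyInUse branch uses Go's errors.Is sentinel matching, so the mapping still fires when the sentinel is wrapped by intermediate layers. A self-contained sketch of the pattern (only the pattern is taken from the diff; surrounding names are simplified):

package main

import (
    "errors"
    "fmt"
    "net/http"
)

// Sentinel error, compared with errors.Is so wrapped errors still match.
var errPolicyInUse = errors.New("policy is in use")

type apiError struct {
    Code           string
    Description    string
    HTTPStatusCode int
}

func toAPIError(err error) apiError {
    switch {
    case errors.Is(err, errPolicyInUse):
        return apiError{
            Code:           "XMinioAdminPolicyInUse",
            Description:    "The policy cannot be removed, as it is in use",
            HTTPStatusCode: http.StatusBadRequest,
        }
    default:
        return apiError{Code: "InternalError", Description: err.Error(), HTTPStatusCode: http.StatusInternalServerError}
    }
}

func main() {
    wrapped := fmt.Errorf("delete failed: %w", errPolicyInUse)
    fmt.Println(toAPIError(wrapped).Code) // XMinioAdminPolicyInUse
}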
@@ -19,6 +19,7 @@ package cmd
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"io"
 	"net/http"

@@ -78,6 +79,22 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
+
+	dynamic := config.SubSystemsDynamic.Contains(string(kvBytes))
+	if dynamic {
+		applyDynamic(ctx, objectAPI, cfg, r, w)
+	}
 }
+
+func applyDynamic(ctx context.Context, objectAPI ObjectLayer, cfg config.Config, r *http.Request, w http.ResponseWriter) {
+	// Apply dynamic values.
+	if err := applyDynamicConfig(GlobalContext, objectAPI, cfg); err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+	globalNotificationSys.SignalService(serviceReloadDynamic)
+	// Tell the client that dynamic config was applied.
+	w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
+}
 
 // SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv

@@ -135,14 +152,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	if dynamic {
-		// Apply dynamic values.
-		if err := applyDynamicConfig(GlobalContext, objectAPI, cfg); err != nil {
-			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
-			return
-		}
-		globalNotificationSys.SignalService(serviceReloadDynamic)
-		// If all values were dynamic, tell the client.
-		w.Header().Set(madmin.ConfigAppliedHeader, madmin.ConfigAppliedTrue)
+		applyDynamic(ctx, objectAPI, cfg, r, w)
 	}
 	writeSuccessResponseHeadersOnly(w)
 }

@@ -326,7 +336,6 @@ func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	json.NewEncoder(w).Encode(rd)
-	w.(http.Flusher).Flush()
 }
 
 // SetConfigHandler - PUT /minio/admin/v3/config
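Both config handlers now funnel through the single applyDynamic helper: apply the new dynamic values, signal peers to reload, and stamp a response header so the client knows no restart is needed. A reduced sketch of that refactor's shape, with every name hypothetical:

package main

import (
    "fmt"
    "net/http"
)

// applyAndSignal centralizes what both handlers previously inlined.
func applyAndSignal(w http.ResponseWriter, apply func() error, notify func()) {
    if err := apply(); err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    notify()
    w.Header().Set("X-Config-Applied", "true") // header name illustrative
}

func main() {
    h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        applyAndSignal(w,
            func() error { return nil },             // pretend config apply
            func() { fmt.Println("reload signal") }, // pretend peer notify
        )
    })
    _ = h // wire into a mux in real code
}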
@@ -269,8 +269,6 @@ func (a adminAPIHandlers) SiteReplicationInfo(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
-
-	w.(http.Flusher).Flush()
 }
 
 func (a adminAPIHandlers) SRInternalGetIDPSettings(w http.ResponseWriter, r *http.Request) {

@@ -288,8 +286,6 @@ func (a adminAPIHandlers) SRInternalGetIDPSettings(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
-
-	w.(http.Flusher).Flush()
 }
 
 func readJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) APIErrorCode {
cmd/admin-handlers-users-race_test.go — new file, 141 lines

// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

//go:build !race
// +build !race

// Tests in this file are not run under the `-race` flag as they are too slow
// and cause context deadline errors.

package cmd

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/minio/madmin-go"
	minio "github.com/minio/minio-go/v7"
)

func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {
	suite.SetUpSuite(c)
	suite.TestDeleteUserRace(c)
	suite.TearDownSuite(c)
}

func TestIAMInternalIDPConcurrencyServerSuite(t *testing.T) {
	baseTestCases := []TestSuiteCommon{
		// Init and run test on FS backend with signature v4.
		{serverType: "FS", signer: signerV4},
		// Init and run test on FS backend, with tls enabled.
		{serverType: "FS", signer: signerV4, secure: true},
		// Init and run test on Erasure backend.
		{serverType: "Erasure", signer: signerV4},
		// Init and run test on ErasureSet backend.
		{serverType: "ErasureSet", signer: signerV4},
	}
	testCases := []*TestSuiteIAM{}
	for _, bt := range baseTestCases {
		testCases = append(testCases,
			newTestSuiteIAM(bt, false),
			newTestSuiteIAM(bt, true),
		)
	}
	for i, testCase := range testCases {
		etcdStr := ""
		if testCase.withEtcdBackend {
			etcdStr = " (with etcd backend)"
		}
		t.Run(
			fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
			func(t *testing.T) {
				runAllIAMConcurrencyTests(testCase, &check{t, testCase.serverType})
			},
		)
	}
}

func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	bucket := getRandomBucketName()
	err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
	if err != nil {
		c.Fatalf("bucket create error: %v", err)
	}

	// Create a policy.
	policy := "mypolicy"
	policyBytes := []byte(fmt.Sprintf(`{
 "Version": "2012-10-17",
 "Statement": [
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject",
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
	if err != nil {
		c.Fatalf("policy add error: %v", err)
	}

	userCount := 50
	accessKeys := make([]string, userCount)
	secretKeys := make([]string, userCount)
	for i := 0; i < userCount; i++ {
		accessKey, secretKey := mustGenerateCredentials(c)
		err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
		if err != nil {
			c.Fatalf("Unable to set user: %v", err)
		}

		err = s.adm.SetPolicy(ctx, policy, accessKey, false)
		if err != nil {
			c.Fatalf("Unable to set policy: %v", err)
		}

		accessKeys[i] = accessKey
		secretKeys[i] = secretKey
	}

	wg := sync.WaitGroup{}
	for i := 0; i < userCount; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")
			err := s.adm.RemoveUser(ctx, accessKeys[i])
			if err != nil {
				c.Fatalf("unable to remove user: %v", err)
			}
			c.mustNotListObjects(ctx, uClient, bucket)
		}(i)
	}
	wg.Wait()
}
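The build constraint at the top of the file is what keeps it out of race-instrumented runs: `go test -race` defines the `race` build tag, so a `!race` constraint excludes the whole file from compilation. A minimal illustration:

//go:build !race
// +build !race

// Excluded whenever tests are compiled with -race, because the race
// detector defines the `race` build tag.
package cmd

import "testing"

func TestSlowOnlyWithoutRace(t *testing.T) {
    // Heavy, long-running assertions would live here; under
    // `go test -race ./cmd` this file is not compiled at all.
}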
@@ -1307,9 +1307,6 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
-
-	w.(http.Flusher).Flush()
-
 }
 
 // ListCannedPolicies - GET /minio/admin/v3/list-canned-policies

@@ -1342,8 +1339,6 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
-
-	w.(http.Flusher).Flush()
 }
 
 // RemoveCannedPolicy - DELETE /minio/admin/v3/remove-canned-policy?name=<policy_name>
@@ -20,6 +20,7 @@ package cmd
 import (
 	"context"
 	"fmt"
+	"os"
 	"strings"
 	"testing"
 	"time"
@@ -32,25 +33,26 @@
 )
 
 const (
-	testDefaultTimeout = 10 * time.Second
+	testDefaultTimeout = 30 * time.Second
 )
 
 // API suite container for IAM
 type TestSuiteIAM struct {
 	TestSuiteCommon
 
+	// Flag to turn on tests for etcd backend IAM
+	withEtcdBackend bool
+
 	endpoint string
 	adm      *madmin.AdminClient
 	client   *minio.Client
 }
 
-func newTestSuiteIAM(c TestSuiteCommon) *TestSuiteIAM {
-	return &TestSuiteIAM{TestSuiteCommon: c}
+func newTestSuiteIAM(c TestSuiteCommon, withEtcdBackend bool) *TestSuiteIAM {
+	return &TestSuiteIAM{TestSuiteCommon: c, withEtcdBackend: withEtcdBackend}
 }
 
-func (s *TestSuiteIAM) SetUpSuite(c *check) {
-	s.TestSuiteCommon.SetUpSuite(c)
-
+func (s *TestSuiteIAM) iamSetup(c *check) {
 	var err error
 	// strip url scheme from endpoint
 	s.endpoint = strings.TrimPrefix(s.endPoint, "http://")
@@ -75,6 +77,50 @@ func (s *TestSuiteIAM) SetUpSuite(c *check) {
 	}
 }
 
+const (
+	EnvTestEtcdBackend = "ETCD_SERVER"
+)
+
+func (s *TestSuiteIAM) setUpEtcd(c *check, etcdServer string) {
+	ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
+	defer cancel()
+
+	configCmds := []string{
+		"etcd",
+		"endpoints=" + etcdServer,
+		"path_prefix=" + mustGetUUID(),
+	}
+	_, err := s.adm.SetConfigKV(ctx, strings.Join(configCmds, " "))
+	if err != nil {
+		c.Fatalf("unable to setup Etcd for tests: %v", err)
+	}
+
+	s.RestartIAMSuite(c)
+}
+
+func (s *TestSuiteIAM) SetUpSuite(c *check) {
+	// If etcd backend is specified and etcd server is not present, the test
+	// is skipped.
+	etcdServer := os.Getenv(EnvTestEtcdBackend)
+	if s.withEtcdBackend && etcdServer == "" {
+		c.Skip("Skipping etcd backend IAM test as no etcd server is configured.")
+	}
+
+	s.TestSuiteCommon.SetUpSuite(c)
+
+	s.iamSetup(c)
+
+	if s.withEtcdBackend {
+		s.setUpEtcd(c, etcdServer)
+	}
+}
+
+func (s *TestSuiteIAM) RestartIAMSuite(c *check) {
+	s.TestSuiteCommon.RestartTestServer(c)
+
+	s.iamSetup(c)
+}
+
 func (s *TestSuiteIAM) getUserClient(c *check, accessKey, secretKey, sessionToken string) *minio.Client {
 	client, err := minio.New(s.endpoint, &minio.Options{
 		Creds: credentials.NewStaticV4(accessKey, secretKey, sessionToken),
@@ -91,25 +137,41 @@ func runAllIAMTests(suite *TestSuiteIAM, c *check) {
 	suite.SetUpSuite(c)
 	suite.TestUserCreate(c)
 	suite.TestPolicyCreate(c)
+	suite.TestCannedPolicies(c)
 	suite.TestGroupAddRemove(c)
+	suite.TestServiceAccountOps(c)
 	suite.TearDownSuite(c)
 }
 
 func TestIAMInternalIDPServerSuite(t *testing.T) {
-	testCases := []*TestSuiteIAM{
+	baseTestCases := []TestSuiteCommon{
 		// Init and run test on FS backend with signature v4.
-		newTestSuiteIAM(TestSuiteCommon{serverType: "FS", signer: signerV4}),
+		{serverType: "FS", signer: signerV4},
 		// Init and run test on FS backend, with tls enabled.
-		newTestSuiteIAM(TestSuiteCommon{serverType: "FS", signer: signerV4, secure: true}),
+		{serverType: "FS", signer: signerV4, secure: true},
 		// Init and run test on Erasure backend.
-		newTestSuiteIAM(TestSuiteCommon{serverType: "Erasure", signer: signerV4}),
+		{serverType: "Erasure", signer: signerV4},
 		// Init and run test on ErasureSet backend.
-		newTestSuiteIAM(TestSuiteCommon{serverType: "ErasureSet", signer: signerV4}),
+		{serverType: "ErasureSet", signer: signerV4},
 	}
+	testCases := []*TestSuiteIAM{}
+	for _, bt := range baseTestCases {
+		testCases = append(testCases,
+			newTestSuiteIAM(bt, false),
+			newTestSuiteIAM(bt, true),
+		)
+	}
 	for i, testCase := range testCases {
-		t.Run(fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.serverType), func(t *testing.T) {
-			runAllIAMTests(testCase, &check{t, testCase.serverType})
-		})
+		etcdStr := ""
+		if testCase.withEtcdBackend {
+			etcdStr = " (with etcd backend)"
+		}
+		t.Run(
+			fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
+			func(t *testing.T) {
+				runAllIAMTests(testCase, &check{t, testCase.serverType})
+			},
+		)
 	}
 }
@@ -232,25 +294,15 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
 	}
 	// 3.1 check that user does not have any access to the bucket
 	uClient := s.getUserClient(c, accessKey, secretKey, "")
-	res := uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
-	v, ok := <-res
-	if !ok {
-		c.Fatalf("list channel was closed!")
-	}
-	if v.Err == nil {
-		c.Fatalf("User appears to be able to list!")
-	}
+	c.mustNotListObjects(ctx, uClient, bucket)
 
 	// 3.2 associate policy to user
 	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
 	if err != nil {
 		c.Fatalf("Unable to set policy: %v", err)
 	}
 	// 3.3 check user has access to bucket
-	res = uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
-	v, ok = <-res
-	if ok {
-		c.Fatalf("list channel was not closed - unexpected error or objects in listing: %v", v)
-	}
+	c.mustListObjects(ctx, uClient, bucket)
 	// 3.4 Check that user cannot exceed their permissions
 	err = uClient.RemoveBucket(ctx, bucket)
 	if err == nil {
@@ -262,18 +314,92 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
 	if err != nil {
 		c.Fatalf("policy list err: %v", err)
 	}
-	_, ok = ps[policy]
+	_, ok := ps[policy]
 	if !ok {
 		c.Fatalf("policy was missing!")
 	}
 
-	// 5. Check that policy can be deleted.
+	// 5. Check that policy cannot be deleted when attached to a user.
+	err = s.adm.RemoveCannedPolicy(ctx, policy)
+	if err == nil {
+		c.Fatalf("policy could be unexpectedly deleted!")
+	}
+
+	// 6. Delete the user and then delete the policy.
+	err = s.adm.RemoveUser(ctx, accessKey)
+	if err != nil {
+		c.Fatalf("user could not be deleted: %v", err)
+	}
 	err = s.adm.RemoveCannedPolicy(ctx, policy)
 	if err != nil {
-		c.Fatalf("policy delete err: %v", err)
+		c.Fatalf("policy del err: %v", err)
 	}
 }
 
+func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
+	ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
+	defer cancel()
+
+	policies, err := s.adm.ListCannedPolicies(ctx)
+	if err != nil {
+		c.Fatalf("unable to list policies: %v", err)
+	}
+
+	defaultPolicies := []string{
+		"readwrite",
+		"readonly",
+		"writeonly",
+		"diagnostics",
+		"consoleAdmin",
+	}
+
+	for _, v := range defaultPolicies {
+		if _, ok := policies[v]; !ok {
+			c.Fatalf("Failed to find %s in policies list", v)
+		}
+	}
+
+	bucket := getRandomBucketName()
+	err = s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
+	if err != nil {
+		c.Fatalf("bucket create error: %v", err)
+	}
+
+	policyBytes := []byte(fmt.Sprintf(`{
+ "Version": "2012-10-17",
+ "Statement": [
+  {
+   "Effect": "Allow",
+   "Action": [
+    "s3:PutObject",
+    "s3:GetObject",
+    "s3:ListBucket"
+   ],
+   "Resource": [
+    "arn:aws:s3:::%s/*"
+   ]
+  }
+ ]
+}`, bucket))
+
+	// Check that default policies can be overwritten.
+	err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes)
+	if err != nil {
+		c.Fatalf("policy add error: %v", err)
+	}
+
+	info, err := s.adm.InfoCannedPolicy(ctx, "readwrite")
+	if err != nil {
+		c.Fatalf("policy info err: %v", err)
+	}
+
+	infoStr := string(info)
+	if !strings.Contains(infoStr, `"s3:PutObject"`) || !strings.Contains(infoStr, ":"+bucket+"/") {
+		c.Fatalf("policy contains unexpected content!")
+	}
+}
+
 func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
 	ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
 	defer cancel()
@@ -324,14 +450,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
 
 	// 2. Check that user has no access
 	uClient := s.getUserClient(c, accessKey, secretKey, "")
-	res := uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
-	v, ok := <-res
-	if !ok {
-		c.Fatalf("list channel was closed!")
-	}
-	if v.Err == nil {
-		c.Fatalf("User appears to be able to list!")
-	}
+	c.mustNotListObjects(ctx, uClient, bucket)
 
 	// 3. Associate policy to group and check user got access.
 	err = s.adm.SetPolicy(ctx, policy, group, true)

@@ -339,11 +458,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
 		c.Fatalf("Unable to set policy: %v", err)
 	}
 	// 3.1 check user has access to bucket
-	res = uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
-	v, ok = <-res
-	if ok {
-		c.Fatalf("list channel was not closed - unexpected error or objects in listing: %v", v)
-	}
+	c.mustListObjects(ctx, uClient, bucket)
 	// 3.2 Check that user cannot exceed their permissions
 	err = uClient.RemoveBucket(ctx, bucket)
 	if err == nil {

@@ -377,14 +492,8 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
 		c.Fatalf("group desc err: %v", err)
 	}
 	c.Assert(groupInfo.Status, string(madmin.GroupDisabled))
-	res = uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
-	v, ok = <-res
-	if !ok {
-		c.Fatalf("list channel was closed!")
-	}
-	if v.Err == nil {
-		c.Fatalf("User appears to be able to list!")
-	}
+	c.mustNotListObjects(ctx, uClient, bucket)
 
 	err = s.adm.SetGroupStatus(ctx, group, madmin.GroupEnabled)
 	if err != nil {
 		c.Fatalf("group set status err: %v", err)

@@ -394,11 +503,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
 		c.Fatalf("group desc err: %v", err)
 	}
 	c.Assert(groupInfo.Status, string(madmin.GroupEnabled))
-	res = uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
-	v, ok = <-res
-	if ok {
-		c.Fatalf("list channel was not closed - unexpected error or objects in listing: %v", v)
-	}
+	c.mustListObjects(ctx, uClient, bucket)
 
 	// 6. Verify that group cannot be deleted with users.
 	err = s.adm.UpdateGroupMembers(ctx, madmin.GroupAddRemove{

@@ -423,14 +528,8 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
 	if err != nil {
 		c.Fatalf("group update err: %v", err)
 	}
-	res = uClient.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
-	v, ok = <-res
-	if !ok {
-		c.Fatalf("list channel was closed!")
-	}
-	if v.Err == nil {
-		c.Fatalf("User appears to be able to list!")
-	}
+	c.mustNotListObjects(ctx, uClient, bucket)
 
 	// 7.1 verify group still exists
 	groupInfo, err = s.adm.GetGroupDescription(ctx, group)
 	if err != nil {
@@ -460,6 +559,214 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
 	}
 }
 
+func (s *TestSuiteIAM) TestServiceAccountOps(c *check) {
+	ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
+	defer cancel()
+
+	bucket := getRandomBucketName()
+	err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
+	if err != nil {
+		c.Fatalf("bucket create error: %v", err)
+	}
+
+	// Create policy, user and associate policy
+	policy := "mypolicy"
+	policyBytes := []byte(fmt.Sprintf(`{
+ "Version": "2012-10-17",
+ "Statement": [
+  {
+   "Effect": "Allow",
+   "Action": [
+    "s3:PutObject",
+    "s3:GetObject",
+    "s3:ListBucket"
+   ],
+   "Resource": [
+    "arn:aws:s3:::%s/*"
+   ]
+  }
+ ]
+}`, bucket))
+	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
+	if err != nil {
+		c.Fatalf("policy add error: %v", err)
+	}
+
+	accessKey, secretKey := mustGenerateCredentials(c)
+	err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
+	if err != nil {
+		c.Fatalf("Unable to set user: %v", err)
+	}
+
+	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
+	if err != nil {
+		c.Fatalf("Unable to set policy: %v", err)
+	}
+
+	// 1. Create a service account for the user
+	svcAK, svcSK := mustGenerateCredentials(c)
+	cr, err := s.adm.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
+		TargetUser: accessKey,
+		AccessKey:  svcAK,
+		SecretKey:  svcSK,
+	})
+	if err != nil {
+		c.Fatalf("Unable to create svc acc: %v", err)
+	}
+	// 1.2 Check that svc account appears in listing
+	listResp, err := s.adm.ListServiceAccounts(ctx, accessKey)
+	if err != nil {
+		c.Fatalf("unable to list svc accounts: %v", err)
+	}
+	if !set.CreateStringSet(listResp.Accounts...).Contains(svcAK) {
+		c.Fatalf("created service account did not appear in listing!")
+	}
+	// 1.3 Check that svc account info can be queried
+	infoResp, err := s.adm.InfoServiceAccount(ctx, svcAK)
+	if err != nil {
+		c.Fatalf("unable to get svc acc info: %v", err)
+	}
+	c.Assert(infoResp.ParentUser, accessKey)
+	c.Assert(infoResp.AccountStatus, "on")
+	c.Assert(infoResp.ImpliedPolicy, true)
+
+	// 2. Check that svc account can access the bucket
+	{
+		svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
+		c.mustListObjects(ctx, svcClient, bucket)
+	}
+
+	// 3. Check that svc account can restrict the policy, and that the
+	// session policy can be updated.
+	{
+		svcAK, svcSK := mustGenerateCredentials(c)
+		policyBytes := []byte(fmt.Sprintf(`{
+ "Version": "2012-10-17",
+ "Statement": [
+  {
+   "Effect": "Allow",
+   "Action": [
+    "s3:PutObject",
+    "s3:GetObject"
+   ],
+   "Resource": [
+    "arn:aws:s3:::%s/*"
+   ]
+  }
+ ]
+}`, bucket))
+		cr, err := s.adm.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
+			Policy:     policyBytes,
+			TargetUser: accessKey,
+			AccessKey:  svcAK,
+			SecretKey:  svcSK,
+		})
+		if err != nil {
+			c.Fatalf("Unable to create svc acc: %v", err)
+		}
+		svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
+		c.mustNotListObjects(ctx, svcClient, bucket)
+
+		newPolicyBytes := []byte(fmt.Sprintf(`{
+ "Version": "2012-10-17",
+ "Statement": [
+  {
+   "Effect": "Allow",
+   "Action": [
+    "s3:ListBucket"
+   ],
+   "Resource": [
+    "arn:aws:s3:::%s/*"
+   ]
+  }
+ ]
+}`, bucket))
+		err = s.adm.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
+			NewPolicy: newPolicyBytes,
+		})
+		if err != nil {
+			c.Fatalf("unable to update session policy for svc acc: %v", err)
+		}
+		c.mustListObjects(ctx, svcClient, bucket)
+	}
+
+	// 4. Check that service account's secret key and account status can be
+	// updated.
+	{
+		svcAK, svcSK := mustGenerateCredentials(c)
+		cr, err := s.adm.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
+			TargetUser: accessKey,
+			AccessKey:  svcAK,
+			SecretKey:  svcSK,
+		})
+		if err != nil {
+			c.Fatalf("Unable to create svc acc: %v", err)
+		}
+		svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
+		c.mustListObjects(ctx, svcClient, bucket)
+
+		_, svcSK2 := mustGenerateCredentials(c)
+		err = s.adm.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
+			NewSecretKey: svcSK2,
+		})
+		if err != nil {
+			c.Fatalf("unable to update secret key for svc acc: %v", err)
+		}
+		// old creds should not work:
+		c.mustNotListObjects(ctx, svcClient, bucket)
+		// new creds work:
+		svcClient2 := s.getUserClient(c, cr.AccessKey, svcSK2, "")
+		c.mustListObjects(ctx, svcClient2, bucket)
+
+		// update status to disabled
+		err = s.adm.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
+			NewStatus: "off",
+		})
+		if err != nil {
+			c.Fatalf("unable to update secret key for svc acc: %v", err)
+		}
+		c.mustNotListObjects(ctx, svcClient2, bucket)
+	}
+
+	// 5. Check that service account can be deleted.
+	{
+		svcAK, svcSK := mustGenerateCredentials(c)
+		cr, err := s.adm.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
+			TargetUser: accessKey,
+			AccessKey:  svcAK,
+			SecretKey:  svcSK,
+		})
+		if err != nil {
+			c.Fatalf("Unable to create svc acc: %v", err)
+		}
+		svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
+		c.mustListObjects(ctx, svcClient, bucket)
+
+		err = s.adm.DeleteServiceAccount(ctx, svcAK)
+		if err != nil {
+			c.Fatalf("unable to delete svc acc: %v", err)
+		}
+		c.mustNotListObjects(ctx, svcClient, bucket)
+	}
+}
+
+func (c *check) mustNotListObjects(ctx context.Context, client *minio.Client, bucket string) {
+	res := client.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
+	v, ok := <-res
+	if !ok || v.Err == nil {
+		c.Fatalf("user was able to list unexpectedly!")
+	}
+}
+
+func (c *check) mustListObjects(ctx context.Context, client *minio.Client, bucket string) {
+	res := client.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
+	v, ok := <-res
+	if ok && v.Err != nil {
+		msg := fmt.Sprintf("user was unable to list: %v", v.Err)
+		c.Fatalf(msg)
+	}
+}
+
 func mustGenerateCredentials(c *check) (string, string) {
 	ak, sk, err := auth.GenerateCredentials()
 	if err != nil {
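The mustListObjects/mustNotListObjects helpers rely on how minio-go's ListObjects reports access: results arrive on a channel, a denied listing yields a single entry with Err set, and an allowed-but-empty listing closes the channel without an entry. A standalone sketch of that probe (endpoint and keys are placeholders):

package main

import (
    "context"
    "fmt"
    "log"

    minio "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/credentials"
)

// canList reports whether the client may list the bucket: the first receive
// either carries an error entry (denied), an object (allowed), or the channel
// closes immediately (allowed, bucket empty). Cancel ctx afterwards in real
// code so the background lister goroutine is released.
func canList(ctx context.Context, cl *minio.Client, bucket string) bool {
    res := cl.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
    v, ok := <-res
    if !ok {
        return true // closed without error: empty listing succeeded
    }
    return v.Err == nil
}

func main() {
    cl, err := minio.New("localhost:9000", &minio.Options{ // placeholder endpoint
        Creds: credentials.NewStaticV4("ACCESS", "SECRET", ""), // placeholder keys
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(canList(context.Background(), cl, "testbucket"))
}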
@@ -951,54 +951,40 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Request) {
 		duration = time.Second * 10
 	}
 
-	throughputSize := size
-	iopsSize := size
-
-	if autotune {
-		iopsSize = 4 * humanize.KiByte
+	deleteBucket := func() {
+		loc := pathJoin(minioMetaSpeedTestBucket, minioMetaSpeedTestBucketPrefix)
+		objectAPI.DeleteBucket(context.Background(), loc, DeleteBucketOptions{
+			Force:      true,
+			NoRecreate: true,
+		})
 	}
 
 	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
 	defer keepAliveTicker.Stop()
 
-	endBlankRepliesCh := make(chan error)
-
-	go func() {
-		for {
-			select {
-			case <-ctx.Done():
-				endBlankRepliesCh <- nil
-				return
-			case <-keepAliveTicker.C:
-				// Write a blank entry to prevent client from disconnecting
-				if err := json.NewEncoder(w).Encode(madmin.SpeedTestResult{}); err != nil {
-					endBlankRepliesCh <- err
-					return
-				}
-				w.(http.Flusher).Flush()
-			case endBlankRepliesCh <- nil:
-				return
-			}
-		}
-	}()
-
-	result, err := speedTest(ctx, throughputSize, iopsSize, concurrent, duration, autotune)
-	if <-endBlankRepliesCh != nil {
-		return
-	}
-	if err != nil {
-		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
-		return
-	}
-
-	if err := json.NewEncoder(w).Encode(result); err != nil {
-		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
-		return
-	}
-
-	objectAPI.DeleteBucket(ctx, pathJoin(minioMetaSpeedTestBucket, minioMetaSpeedTestBucketPrefix), DeleteBucketOptions{Force: true, NoRecreate: true})
-
-	w.(http.Flusher).Flush()
+	enc := json.NewEncoder(w)
+	ch := speedTest(ctx, size, concurrent, duration, autotune)
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-keepAliveTicker.C:
+			// Write a blank entry to prevent client from disconnecting
+			if err := enc.Encode(madmin.SpeedTestResult{}); err != nil {
+				return
+			}
+			w.(http.Flusher).Flush()
+		case result, ok := <-ch:
+			if !ok {
+				deleteBucket()
+				return
+			}
+			if err := enc.Encode(result); err != nil {
+				return
+			}
+			w.(http.Flusher).Flush()
+		}
+	}
 }
 
 // Admin API errors
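The rewritten handler streams results as they are produced and emits blank JSON entries on a ticker so the connection is not timed out mid-benchmark. A minimal, self-contained sketch of the same pattern, using only the standard library (payload type and intervals illustrative):

package main

import (
    "encoding/json"
    "net/http"
    "time"
)

type result struct { // illustrative payload
    Ops int `json:"ops,omitempty"`
}

func streamHandler(results <-chan result) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        enc := json.NewEncoder(w)
        flusher := w.(http.Flusher)
        keepAlive := time.NewTicker(500 * time.Millisecond)
        defer keepAlive.Stop()
        for {
            select {
            case <-r.Context().Done():
                return
            case <-keepAlive.C:
                // Blank entry so proxies/clients don't time out the response.
                if enc.Encode(result{}) != nil {
                    return
                }
                flusher.Flush()
            case res, ok := <-results:
                if !ok {
                    return // producer finished
                }
                if enc.Encode(res) != nil {
                    return
                }
                flusher.Flush()
            }
        }
    }
}

func main() {
    ch := make(chan result)
    go func() { // pretend long-running benchmark
        defer close(ch)
        time.Sleep(2 * time.Second)
        ch <- result{Ops: 42}
    }()
    http.ListenAndServe(":8080", streamHandler(ch))
}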
@@ -1943,13 +1929,15 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http.Request) {
 			}
 		}
 	}()
 
+	enc := json.NewEncoder(w)
 	for {
 		select {
 		case report, ok := <-reportCh:
 			if !ok {
 				return
 			}
-			if err := json.NewEncoder(w).Encode(report); err != nil {
+			if err := enc.Encode(report); err != nil {
 				writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
 				return
 			}

@@ -2096,7 +2084,7 @@ func fetchKMSStatus() madmin.KMS {
 func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
 	var loggerInfo []madmin.Logger
 	var auditloggerInfo []madmin.Audit
-	for _, target := range logger.Targets {
+	for _, target := range logger.Targets() {
 		if target.Endpoint() != "" {
 			tgt := target.String()
 			err := checkConnection(target.Endpoint(), 15*time.Second)

@@ -2112,7 +2100,7 @@ func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
 		}
 	}
 
-	for _, target := range logger.AuditTargets {
+	for _, target := range logger.AuditTargets() {
 		if target.Endpoint() != "" {
 			tgt := target.String()
 			err := checkConnection(target.Endpoint(), 15*time.Second)
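logger.Targets and logger.AuditTargets change from exported package variables to function calls here, the usual Go refactor when a package-level slice needs lock-protected access. A sketch of the idea; the lock layout is assumed, not taken from the diff:

package logger

import "sync"

type Target interface{ Endpoint() string }

var (
    mu      sync.RWMutex
    targets []Target
)

// Targets returns a point-in-time copy, so callers can iterate without
// holding the package lock or observing concurrent mutation.
func Targets() []Target {
    mu.RLock()
    defer mu.RUnlock()
    out := make([]Target, len(targets))
    copy(out, targets)
    return out
}

// AddTarget registers a new log target.
func AddTarget(t Target) {
    mu.Lock()
    defer mu.Unlock()
    targets = append(targets, t)
}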
@@ -192,6 +192,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 	adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.EditTierHandler)))
 	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListTierHandler)))
 
+	// Tier stats
+	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier-stats").HandlerFunc(gz(httpTraceHdrs(adminAPI.TierStatsHandler)))
+
 	// Cluster Replication APIs
 	adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/add").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationAdd)))
 	adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/disable").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationDisable)))
@@ -1362,8 +1362,8 @@ var errorCodes = errorCodeMap{
 	},
 	ErrBackendDown: {
 		Code:           "XMinioBackendDown",
-		Description:    "Object storage backend is unreachable",
-		HTTPStatusCode: http.StatusServiceUnavailable,
+		Description:    "Remote backend is unreachable",
+		HTTPStatusCode: http.StatusBadRequest,
 	},
 	ErrIncorrectContinuationToken: {
 		Code: "InvalidArgument",

@@ -1956,6 +1956,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrNoSuchKey
 	case MethodNotAllowed:
 		apiErr = ErrMethodNotAllowed
+	case ObjectLocked:
+		apiErr = ErrObjectLocked
 	case InvalidVersionID:
 		apiErr = ErrInvalidVersionID
 	case VersionNotFound:

@@ -2127,6 +2129,11 @@ func toAPIError(ctx context.Context, err error) APIError {
 		}
 	}
 
+	if apiErr.Code == "XMinioBackendDown" {
+		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
+		return apiErr
+	}
+
 	if apiErr.Code == "InternalError" {
 		// If we see an internal error try to interpret
 		// any underlying errors if possible depending on
@@ -740,7 +740,6 @@ func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
 	w.WriteHeader(statusCode)
 	if response != nil {
 		w.Write(response)
-		w.(http.Flusher).Flush()
 	}
 }
@@ -487,6 +487,26 @@ func setAuthHandler(h http.Handler) http.Handler {
 	// handler for validating incoming authorization headers.
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		aType := getRequestAuthType(r)
+		if aType == authTypeSigned || aType == authTypeSignedV2 || aType == authTypeStreamingSigned {
+			// Verify if date headers are set, if not reject the request
+			amzDate, errCode := parseAmzDateHeader(r)
+			if errCode != ErrNone {
+				// All our internal APIs are sensitive towards Date
+				// header, for all requests where Date header is not
+				// present we will reject such clients.
+				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(errCode), r.URL)
+				atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
+				return
+			}
+			// Verify if the request date header is shifted by less than globalMaxSkewTime parameter in the past
+			// or in the future, reject request otherwise.
+			curTime := UTCNow()
+			if curTime.Sub(amzDate) > globalMaxSkewTime || amzDate.Sub(curTime) > globalMaxSkewTime {
+				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrRequestTimeTooSkewed), r.URL)
+				atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
+				return
+			}
+		}
 		if isSupportedS3AuthType(aType) || aType == authTypeJWT || aType == authTypeSTS {
 			h.ServeHTTP(w, r)
 			return
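The new checks reject signed requests whose Date header is missing or drifts more than globalMaxSkewTime from server time, in either direction. A tiny sketch of the two-sided skew test; the 15-minute constant mirrors S3's documented limit and is illustrative here:

package main

import (
    "fmt"
    "time"
)

// withinSkew reports whether a request timestamp is acceptably close to
// server time, in either direction (client clocks may run fast or slow).
func withinSkew(reqTime, now time.Time, maxSkew time.Duration) bool {
    return now.Sub(reqTime) <= maxSkew && reqTime.Sub(now) <= maxSkew
}

func main() {
    const maxSkew = 15 * time.Minute
    now := time.Now().UTC()
    fmt.Println(withinSkew(now.Add(-5*time.Minute), now, maxSkew))  // true
    fmt.Println(withinSkew(now.Add(20*time.Minute), now, maxSkew))  // false
}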
@@ -123,7 +123,6 @@ func (b *bootstrapRESTServer) VerifyHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "VerifyHandler")
 	cfg := getServerSystemCfg()
 	logger.LogIf(ctx, json.NewEncoder(w).Encode(&cfg))
-	w.(http.Flusher).Flush()
 }
 
 // registerBootstrapRESTHandlers - register bootstrap rest router.
@@ -175,68 +175,76 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket
 // For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR
 // governance bypass headers are set and user has governance bypass permissions.
 // Objects in compliance mode can be overwritten only if retention date is being extended. No mode change is permitted.
-func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, objRetention *objectlock.ObjectRetention, cred auth.Credentials, owner bool) (ObjectInfo, APIErrorCode) {
+func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, oi ObjectInfo, objRetention *objectlock.ObjectRetention, cred auth.Credentials, owner bool) error {
 	byPassSet := objectlock.IsObjectLockGovernanceBypassSet(r.Header)
-	opts, err := getOpts(ctx, r, bucket, object)
-	if err != nil {
-		return ObjectInfo{}, toAPIErrorCode(ctx, err)
-	}
-
-	oi, err := getObjectInfoFn(ctx, bucket, object, opts)
-	if err != nil {
-		return oi, toAPIErrorCode(ctx, err)
-	}
-
 	t, err := objectlock.UTCNowNTP()
 	if err != nil {
 		logger.LogIf(ctx, err)
-		return oi, ErrObjectLocked
+		return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
 	}
 
-	// Pass in relative days from current time, to additionally to verify "object-lock-remaining-retention-days" policy if any.
+	// Pass in relative days from current time, to additionally
+	// to verify "object-lock-remaining-retention-days" policy if any.
 	days := int(math.Ceil(math.Abs(objRetention.RetainUntilDate.Sub(t).Hours()) / 24))
 
 	ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
 	if ret.Mode.Valid() {
 		// Retention has expired you may change whatever you like.
 		if ret.RetainUntilDate.Before(t) {
-			perm := isPutRetentionAllowed(bucket, object,
+			apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
 				days, objRetention.RetainUntilDate.Time,
 				objRetention.Mode, byPassSet, r, cred,
 				owner)
-			return oi, perm
+			switch apiErr {
+			case ErrAccessDenied:
+				return errAuthentication
+			}
+			return nil
 		}
 
 		switch ret.Mode {
 		case objectlock.RetGovernance:
-			govPerm := isPutRetentionAllowed(bucket, object, days,
+			govPerm := isPutRetentionAllowed(oi.Bucket, oi.Name, days,
 				objRetention.RetainUntilDate.Time, objRetention.Mode,
 				byPassSet, r, cred, owner)
 			// Governance mode retention period cannot be shortened, if x-amz-bypass-governance is not set.
 			if !byPassSet {
 				if objRetention.Mode != objectlock.RetGovernance || objRetention.RetainUntilDate.Before((ret.RetainUntilDate.Time)) {
-					return oi, ErrObjectLocked
+					return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
 				}
 			}
-			return oi, govPerm
+			switch govPerm {
+			case ErrAccessDenied:
+				return errAuthentication
+			}
+			return nil
 		case objectlock.RetCompliance:
 			// Compliance retention mode cannot be changed or shortened.
 			// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes
 			if objRetention.Mode != objectlock.RetCompliance || objRetention.RetainUntilDate.Before((ret.RetainUntilDate.Time)) {
-				return oi, ErrObjectLocked
+				return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
 			}
-			compliancePerm := isPutRetentionAllowed(bucket, object,
+			apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
 				days, objRetention.RetainUntilDate.Time, objRetention.Mode,
 				false, r, cred, owner)
-			return oi, compliancePerm
+			switch apiErr {
+			case ErrAccessDenied:
+				return errAuthentication
+			}
+			return nil
 		}
-		return oi, ErrNone
+		return nil
 	} // No pre-existing retention metadata present.
 
-	perm := isPutRetentionAllowed(bucket, object,
+	apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
 		days, objRetention.RetainUntilDate.Time,
 		objRetention.Mode, byPassSet, r, cred, owner)
-	return oi, perm
+	switch apiErr {
+	case ErrAccessDenied:
+		return errAuthentication
+	}
+	return nil
 }
 
 // checkPutObjectLockAllowed enforces object retention policy and legal hold policy
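The signature change replaces an (ObjectInfo, APIErrorCode) pair with a plain error carrying a typed ObjectLocked value, which callers can map back to an API response. A reduced sketch of that style, with types trimmed down:

package main

import (
    "errors"
    "fmt"
)

// ObjectLocked is a typed error: it carries enough context to render a
// precise API response, instead of a bare numeric error code.
type ObjectLocked struct {
    Bucket, Object, VersionID string
}

func (e ObjectLocked) Error() string {
    return fmt.Sprintf("object is WORM protected: %s/%s (%s)", e.Bucket, e.Object, e.VersionID)
}

func enforce(locked bool) error {
    if locked {
        return ObjectLocked{Bucket: "b", Object: "o", VersionID: "v1"}
    }
    return nil
}

func main() {
    err := enforce(true)
    var ol ObjectLocked
    if errors.As(err, &ol) { // callers branch on the concrete type
        fmt.Println("would return an ObjectLocked API error for", ol.Object)
    }
}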
@@ -910,24 +910,24 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI ObjectLayer) {
 	// metadata should be updated with last resync timestamp.
 	if objInfo.ReplicationStatusInternal != newReplStatusInternal || rinfos.ReplicationResynced() {
 		popts := ObjectOptions{
-			MTime:       objInfo.ModTime,
-			VersionID:   objInfo.VersionID,
-			UserDefined: make(map[string]string, len(objInfo.UserDefined)),
-		}
-		for k, v := range objInfo.UserDefined {
-			popts.UserDefined[k] = v
-		}
-		popts.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = newReplStatusInternal
-		popts.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
-		popts.UserDefined[xhttp.AmzBucketReplicationStatus] = string(rinfos.ReplicationStatus())
-		for _, rinfo := range rinfos.Targets {
-			if rinfo.ResyncTimestamp != "" {
-				popts.UserDefined[targetResetHeader(rinfo.Arn)] = rinfo.ResyncTimestamp
-			}
-		}
-		if objInfo.UserTags != "" {
-			popts.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
+			MTime:     objInfo.ModTime,
+			VersionID: objInfo.VersionID,
+			EvalMetadataFn: func(oi ObjectInfo) error {
+				oi.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = newReplStatusInternal
+				oi.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
+				oi.UserDefined[xhttp.AmzBucketReplicationStatus] = string(rinfos.ReplicationStatus())
+				for _, rinfo := range rinfos.Targets {
+					if rinfo.ResyncTimestamp != "" {
+						oi.UserDefined[targetResetHeader(rinfo.Arn)] = rinfo.ResyncTimestamp
+					}
+				}
+				if objInfo.UserTags != "" {
+					oi.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
+				}
+				return nil
+			},
 		}
 
 		if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil {
 			logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w",
 				bucket, objInfo.Name, objInfo.VersionID, err))
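Rather than copying UserDefined and writing it back, the handler now passes an EvalMetadataFn callback that the object layer invokes on the current metadata, closing the read-modify-write window. A simplified sketch of that option shape, with types cut down to essentials:

package main

import "fmt"

type ObjectInfo struct {
    UserDefined map[string]string
}

// ObjectOptions carries an optional metadata-evaluation hook; the store
// invokes it on the current ObjectInfo before persisting, so the mutation
// happens against the authoritative copy rather than a stale snapshot.
type ObjectOptions struct {
    EvalMetadataFn func(oi ObjectInfo) error
}

func putObjectMetadata(current ObjectInfo, opts ObjectOptions) (ObjectInfo, error) {
    if opts.EvalMetadataFn != nil {
        if err := opts.EvalMetadataFn(current); err != nil {
            return current, err
        }
    }
    return current, nil // persist `current` in a real store
}

func main() {
    obj := ObjectInfo{UserDefined: map[string]string{}}
    // Map values are references in Go, so the callback's writes are visible
    // through the caller's ObjectInfo as well.
    updated, _ := putObjectMetadata(obj, ObjectOptions{
        EvalMetadataFn: func(oi ObjectInfo) error {
            oi.UserDefined["x-replication-status"] = "COMPLETED" // key illustrative
            return nil
        },
    })
    fmt.Println(updated.UserDefined)
}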
@@ -38,6 +38,7 @@ import (
 
 	fcolor "github.com/fatih/color"
 	"github.com/go-openapi/loads"
+	"github.com/inconshreveable/mousetrap"
 	dns2 "github.com/miekg/dns"
 	"github.com/minio/cli"
 	consoleCerts "github.com/minio/console/pkg/certs"

@@ -66,6 +67,17 @@ var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
 var defaultAWSCredProvider []credentials.Provider
 
 func init() {
+	if runtime.GOOS == "windows" {
+		if mousetrap.StartedByExplorer() {
+			fmt.Printf("Don't double-click %s\n", os.Args[0])
+			fmt.Println("You need to open cmd.exe/PowerShell and run it from the command line")
+			fmt.Println("Refer to the docs here on how to run it as a Windows Service https://github.com/minio/minio-service/tree/master/windows")
+			fmt.Println("Press the Enter Key to Exit")
+			fmt.Scanln()
+			os.Exit(1)
+		}
+	}
+
 	rand.Seed(time.Now().UTC().UnixNano())
 
 	logger.Init(GOPATH, GOROOT)
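The init hook guards against launching the server by double-click on Windows, where the console window would vanish before the user can read anything. mousetrap.StartedByExplorer is the library's real entry point (it returns false on non-Windows builds); the rest of this sketch is illustrative:

package main

import (
    "fmt"
    "os"
    "runtime"

    "github.com/inconshreveable/mousetrap"
)

func guardExplorerLaunch() {
    if runtime.GOOS != "windows" {
        return // mousetrap only reports meaningfully on Windows
    }
    if mousetrap.StartedByExplorer() {
        fmt.Printf("Don't double-click %s; run it from a terminal.\n", os.Args[0])
        fmt.Println("Press the Enter key to exit.")
        fmt.Scanln() // keep the console window open until acknowledged
        os.Exit(1)
    }
}

func main() {
    guardExplorerLaunch()
    fmt.Println("server starting...")
}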
@@ -25,29 +25,21 @@ const crossDomainXML = `<?xml version="1.0"?><!DOCTYPE cross-domain-policy SYSTE
 // Standard path where an app would find cross domain policy information.
 const crossDomainXMLEntity = "/crossdomain.xml"

-// Cross domain policy implements http.Handler interface, implementing a custom ServeHTTP.
-type crossDomainPolicy struct {
-	handler http.Handler
-}
-
 // A cross-domain policy file is an XML document that grants a web client, such as Adobe Flash Player
 // or Adobe Acrobat (though not necessarily limited to these), permission to handle data across domains.
 // When clients request content hosted on a particular source domain and that content makes requests
 // directed towards a domain other than its own, the remote domain needs to host a cross-domain
 // policy file that grants access to the source domain, allowing the client to continue the transaction.
 func setCrossDomainPolicy(h http.Handler) http.Handler {
-	return crossDomainPolicy{handler: h}
-}
-
-func (c crossDomainPolicy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	// Look for 'crossdomain.xml' in the incoming request.
-	switch r.URL.Path {
-	case crossDomainXMLEntity:
-		// Write the standard cross domain policy xml.
-		w.Write([]byte(crossDomainXML))
-		// Request completed, no need to serve to other handlers.
-		return
-	}
-	// Continue to serve the request further.
-	c.handler.ServeHTTP(w, r)
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Look for 'crossdomain.xml' in the incoming request.
+		switch r.URL.Path {
+		case crossDomainXMLEntity:
+			// Write the standard cross domain policy xml.
+			w.Write([]byte(crossDomainXML))
+			// Request completed, no need to serve to other handlers.
+			return
+		}
+		h.ServeHTTP(w, r)
+	})
 }
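The refactor drops the named handler type in favor of a closure-based middleware. A hedged sketch of how such a wrapper composes with a mux (hypothetical server setup, not taken from this diff):

package main

import (
	"fmt"
	"log"
	"net/http"
)

// wrap intercepts one well-known path and passes everything else through.
func wrap(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/crossdomain.xml" {
			fmt.Fprint(w, `<?xml version="1.0"?><cross-domain-policy/>`)
			return
		}
		h.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	log.Fatal(http.ListenAndServe(":8080", wrap(mux)))
}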
@@ -22,6 +22,7 @@ import (
 	"context"
 	"encoding/binary"
 	"errors"
+	"io/fs"
 	"math"
 	"math/rand"
 	"net/http"

@@ -816,14 +817,13 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int

 // scannerItem represents each file while walking.
 type scannerItem struct {
-	Path string
-	Typ  os.FileMode
-
+	Path        string
 	bucket      string // Bucket.
 	prefix      string // Only the prefix if any, does not have final object name.
 	objectName  string // Only the object name without prefixes.
-	lifeCycle   *lifecycle.Lifecycle
 	replication replicationConfig
+	lifeCycle   *lifecycle.Lifecycle
+	Typ         fs.FileMode
 	heal        bool // Has the object been selected for heal check?
 	debug       bool
 }

@@ -838,6 +838,7 @@ type sizeSummary struct {
 	pendingCount    uint64
 	failedCount     uint64
 	replTargetStats map[string]replTargetSizeSummary
+	tiers           map[string]tierStats
 }

 // replTargetSizeSummary holds summary of replication stats by target
@@ -31,6 +31,7 @@ import (

 	"github.com/cespare/xxhash/v2"
 	"github.com/klauspost/compress/zstd"
+	"github.com/minio/madmin-go"
 	"github.com/minio/minio/internal/bucket/lifecycle"
 	"github.com/minio/minio/internal/hash"
 	"github.com/minio/minio/internal/logger"

@@ -45,16 +46,70 @@ type dataUsageHash string
 // sizeHistogram is a size histogram.
 type sizeHistogram [dataUsageBucketLen]uint64

 //msgp:tuple dataUsageEntry
 type dataUsageEntry struct {
-	Children dataUsageHashMap
+	Children dataUsageHashMap `msg:"ch"`
 	// These fields do not include any children.
-	Size             int64
-	Objects          uint64
-	Versions         uint64 // Versions that are not delete markers.
-	ObjSizes         sizeHistogram
-	ReplicationStats *replicationAllStats
-	Compacted        bool
+	Size             int64                `msg:"sz"`
+	Objects          uint64               `msg:"os"`
+	Versions         uint64               `msg:"vs"` // Versions that are not delete markers.
+	ObjSizes         sizeHistogram        `msg:"szs"`
+	ReplicationStats *replicationAllStats `msg:"rs,omitempty"`
+	AllTierStats     *allTierStats        `msg:"ats,omitempty"`
+	Compacted        bool                 `msg:"c"`
 }

+// allTierStats is a collection of per-tier stats across all configured remote
+// tiers.
+type allTierStats struct {
+	Tiers map[string]tierStats `msg:"ts"`
+}
+
+func newAllTierStats() *allTierStats {
+	return &allTierStats{
+		Tiers: make(map[string]tierStats),
+	}
+}
+
+func (ats *allTierStats) addSizes(sz sizeSummary) {
+	for tier, st := range sz.tiers {
+		ats.Tiers[tier] = ats.Tiers[tier].add(st)
+	}
+}
+
+func (ats *allTierStats) merge(other *allTierStats) {
+	for tier, st := range other.Tiers {
+		ats.Tiers[tier] = ats.Tiers[tier].add(st)
+	}
+}
+
+func (ats *allTierStats) adminStats(stats map[string]madmin.TierStats) map[string]madmin.TierStats {
+	if ats == nil {
+		return stats
+	}
+
+	// Update stats for tiers as they become available.
+	for tier, st := range ats.Tiers {
+		stats[tier] = madmin.TierStats{
+			TotalSize:   st.TotalSize,
+			NumVersions: st.NumVersions,
+			NumObjects:  st.NumObjects,
+		}
+	}
+	return stats
+}
+
+// tierStats holds per-tier stats of a remote tier.
+type tierStats struct {
+	TotalSize   uint64 `msg:"ts"`
+	NumVersions int    `msg:"nv"`
+	NumObjects  int    `msg:"no"`
+}
+
+func (ts tierStats) add(u tierStats) tierStats {
+	ts.TotalSize += u.TotalSize
+	ts.NumVersions += u.NumVersions
+	ts.NumObjects += u.NumObjects
+	return ts
+}
+
 //msgp:tuple replicationStatsV1
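tierStats.add uses a value receiver and returns the sum, so ats.Tiers[tier] = ats.Tiers[tier].add(st) works even when the key is absent: the zero value acts as the identity. A small self-contained sketch of that accumulation, with the type trimmed to its essentials:

package main

import "fmt"

type tierStats struct {
	TotalSize   uint64
	NumVersions int
	NumObjects  int
}

// add returns the element-wise sum; the zero value is the identity.
func (ts tierStats) add(u tierStats) tierStats {
	ts.TotalSize += u.TotalSize
	ts.NumVersions += u.NumVersions
	ts.NumObjects += u.NumObjects
	return ts
}

func main() {
	tiers := map[string]tierStats{}
	// Missing keys yield the zero tierStats, so no existence check is needed.
	tiers["WARM"] = tiers["WARM"].add(tierStats{TotalSize: 1 << 20, NumVersions: 1, NumObjects: 1})
	tiers["WARM"] = tiers["WARM"].add(tierStats{TotalSize: 2 << 20, NumVersions: 3, NumObjects: 2})
	fmt.Printf("%+v\n", tiers["WARM"]) // {TotalSize:3145728 NumVersions:4 NumObjects:3}
}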
@@ -96,14 +151,19 @@ func (rs replicationStats) Empty() bool {
 		rs.FailedCount == 0
 }

 //msgp:tuple replicationAllStats
 type replicationAllStats struct {
+	Targets     map[string]replicationStats `msg:"t,omitempty"`
+	ReplicaSize uint64                      `msg:"r,omitempty"`
+}
+
+//msgp:tuple replicationAllStatsV1
+type replicationAllStatsV1 struct {
 	Targets     map[string]replicationStats
 	ReplicaSize uint64 `msg:"ReplicaSize,omitempty"`
 }

-//msgp:encode ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4
-//msgp:marshal ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4
+//msgp:encode ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6
+//msgp:marshal ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6

 //msgp:tuple dataUsageEntryV2
 type dataUsageEntryV2 struct {

@@ -149,6 +209,18 @@ type dataUsageEntryV5 struct {
 	Compacted bool
 }

+//msgp:tuple dataUsageEntryV6
+type dataUsageEntryV6 struct {
+	Children dataUsageHashMap
+	// These fields do not include any children.
+	Size             int64
+	Objects          uint64
+	Versions         uint64 // Versions that are not delete markers.
+	ObjSizes         sizeHistogram
+	ReplicationStats *replicationAllStatsV1
+	Compacted        bool
+}
+
 // dataUsageCache contains a cache of data usage entries latest version.
 type dataUsageCache struct {
 	Info dataUsageCacheInfo

@@ -156,8 +228,8 @@ type dataUsageCache struct {
 	Disks []string
 }

-//msgp:encode ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5
-//msgp:marshal ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5
+//msgp:encode ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5 dataUsageCacheV6
+//msgp:marshal ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5 dataUsageCacheV6

 // dataUsageCacheV2 contains a cache of data usage entries version 2.
 type dataUsageCacheV2 struct {

@@ -166,27 +238,34 @@ type dataUsageCacheV2 struct {
 	Cache map[string]dataUsageEntryV2
 }

-// dataUsageCache contains a cache of data usage entries version 3.
+// dataUsageCacheV3 contains a cache of data usage entries version 3.
 type dataUsageCacheV3 struct {
 	Info  dataUsageCacheInfo
 	Disks []string
 	Cache map[string]dataUsageEntryV3
 }

-// dataUsageCache contains a cache of data usage entries version 4.
+// dataUsageCacheV4 contains a cache of data usage entries version 4.
 type dataUsageCacheV4 struct {
 	Info  dataUsageCacheInfo
 	Disks []string
 	Cache map[string]dataUsageEntryV4
 }

-// dataUsageCache contains a cache of data usage entries version 5.
+// dataUsageCacheV5 contains a cache of data usage entries version 5.
 type dataUsageCacheV5 struct {
 	Info  dataUsageCacheInfo
 	Disks []string
 	Cache map[string]dataUsageEntryV5
 }

+// dataUsageCacheV6 contains a cache of data usage entries version 6.
+type dataUsageCacheV6 struct {
+	Info  dataUsageCacheInfo
+	Disks []string
+	Cache map[string]dataUsageEntryV6
+}
+
 //msgp:ignore dataUsageEntryInfo
 type dataUsageEntryInfo struct {
 	Name string

@@ -242,6 +321,12 @@ func (e *dataUsageEntry) addSizes(summary sizeSummary) {
 			e.ReplicationStats.Targets[arn] = tgtStat
 		}
 	}
+	if summary.tiers != nil {
+		if e.AllTierStats == nil {
+			e.AllTierStats = newAllTierStats()
+		}
+		e.AllTierStats.addSizes(summary)
+	}
 }

 // merge other data usage entry into this, excluding children.

@@ -271,6 +356,13 @@ func (e *dataUsageEntry) merge(other dataUsageEntry) {
 	for i, v := range other.ObjSizes[:] {
 		e.ObjSizes[i] += v
 	}
+
+	if other.AllTierStats != nil {
+		if e.AllTierStats == nil {
+			e.AllTierStats = newAllTierStats()
+		}
+		e.AllTierStats.merge(other.AllTierStats)
+	}
 }

 // mod returns true if the hash mod cycles == cycle.

@@ -317,6 +409,11 @@ func (e dataUsageEntry) clone() dataUsageEntry {
 		r := *e.ReplicationStats
 		e.ReplicationStats = &r
 	}
+	if e.AllTierStats != nil {
+		ats := newAllTierStats()
+		ats.merge(e.AllTierStats)
+		e.AllTierStats = ats
+	}
 	return e
 }

@@ -438,6 +535,7 @@ func (d *dataUsageCache) dui(path string, buckets []BucketInfo) DataUsageInfo {
 		ObjectsTotalSize: uint64(flat.Size),
 		BucketsCount:     uint64(len(e.Children)),
 		BucketsUsage:     d.bucketsUsageInfo(buckets),
+		TierStats:        d.tiersUsageInfo(buckets),
 	}
 	return dui
 }

@@ -654,6 +752,25 @@ func (h *sizeHistogram) toMap() map[string]uint64 {
 	return res
 }

+func (d *dataUsageCache) tiersUsageInfo(buckets []BucketInfo) *allTierStats {
+	dst := newAllTierStats()
+	for _, bucket := range buckets {
+		e := d.find(bucket.Name)
+		if e == nil {
+			continue
+		}
+		flat := d.flatten(*e)
+		if flat.AllTierStats == nil {
+			continue
+		}
+		dst.merge(flat.AllTierStats)
+	}
+	if len(dst.Tiers) == 0 {
+		return nil
+	}
+	return dst
+}
+
 // bucketsUsageInfo returns the buckets usage info as a map, with
 // key as bucket name
 func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]BucketUsageInfo {

@@ -857,7 +974,8 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
 // Bumping the cache version will drop data from previous versions
 // and write new data with the new version.
 const (
-	dataUsageCacheVerCurrent = 6
+	dataUsageCacheVerCurrent = 7
+	dataUsageCacheVerV6      = 6
 	dataUsageCacheVerV5      = 5
 	dataUsageCacheVerV4      = 4
 	dataUsageCacheVerV3      = 3

@@ -1086,6 +1204,40 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 			d.Cache[k] = e
 		}
 		return nil
+	case dataUsageCacheVerV6:
+		// Zstd compressed.
+		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
+		if err != nil {
+			return err
+		}
+		defer dec.Close()
+		dold := &dataUsageCacheV6{}
+		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
+			return err
+		}
+		d.Info = dold.Info
+		d.Disks = dold.Disks
+		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
+		for k, v := range dold.Cache {
+			var replicationStats *replicationAllStats
+			if v.ReplicationStats != nil {
+				replicationStats = &replicationAllStats{
+					Targets:     v.ReplicationStats.Targets,
+					ReplicaSize: v.ReplicationStats.ReplicaSize,
+				}
+			}
+			due := dataUsageEntry{
+				Children:         v.Children,
+				Size:             v.Size,
+				Objects:          v.Objects,
+				Versions:         v.Versions,
+				ObjSizes:         v.ObjSizes,
+				ReplicationStats: replicationStats,
+				Compacted:        v.Compacted,
+			}
+			d.Cache[k] = due
+		}
+		return nil
 	case dataUsageCacheVerCurrent:
 		// Zstd compressed.
 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
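The deserializer keeps one case per historical on-disk version and converts each legacy struct into the current one, so bumping dataUsageCacheVerCurrent never strands old caches. A reduced sketch of the same pattern, using JSON instead of msgp+zstd and hypothetical types, just to show the shape:

package main

import (
	"encoding/json"
	"fmt"
)

type entryV1 struct{ Size int64 }

type entry struct {
	Size  int64
	Tiers map[string]uint64 // field added in v2
}

// decode dispatches on a version number and upgrades old payloads in place.
func decode(ver uint8, data []byte) (entry, error) {
	switch ver {
	case 1:
		var old entryV1
		if err := json.Unmarshal(data, &old); err != nil {
			return entry{}, err
		}
		return entry{Size: old.Size}, nil // new fields stay zero
	case 2:
		var e entry
		err := json.Unmarshal(data, &e)
		return e, err
	}
	return entry{}, fmt.Errorf("unknown version %d", ver)
}

func main() {
	e, err := decode(1, []byte(`{"Size":42}`))
	fmt.Println(e, err) // {42 map[]} <nil>
}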
File diff suppressed because it is too large
@@ -9,6 +9,119 @@ import (
 	"github.com/tinylib/msgp/msgp"
 )

+func TestMarshalUnmarshalallTierStats(t *testing.T) {
+	v := allTierStats{}
+	bts, err := v.MarshalMsg(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	left, err := v.UnmarshalMsg(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+	}
+
+	left, err = msgp.Skip(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+	}
+}
+
+func BenchmarkMarshalMsgallTierStats(b *testing.B) {
+	v := allTierStats{}
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.MarshalMsg(nil)
+	}
+}
+
+func BenchmarkAppendMsgallTierStats(b *testing.B) {
+	v := allTierStats{}
+	bts := make([]byte, 0, v.Msgsize())
+	bts, _ = v.MarshalMsg(bts[0:0])
+	b.SetBytes(int64(len(bts)))
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		bts, _ = v.MarshalMsg(bts[0:0])
+	}
+}
+
+func BenchmarkUnmarshalallTierStats(b *testing.B) {
+	v := allTierStats{}
+	bts, _ := v.MarshalMsg(nil)
+	b.ReportAllocs()
+	b.SetBytes(int64(len(bts)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := v.UnmarshalMsg(bts)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func TestEncodeDecodeallTierStats(t *testing.T) {
+	v := allTierStats{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+
+	m := v.Msgsize()
+	if buf.Len() > m {
+		t.Log("WARNING: TestEncodeDecodeallTierStats Msgsize() is inaccurate")
+	}
+
+	vn := allTierStats{}
+	err := msgp.Decode(&buf, &vn)
+	if err != nil {
+		t.Error(err)
+	}
+
+	buf.Reset()
+	msgp.Encode(&buf, &v)
+	err = msgp.NewReader(&buf).Skip()
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+func BenchmarkEncodeallTierStats(b *testing.B) {
+	v := allTierStats{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	en := msgp.NewWriter(msgp.Nowhere)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.EncodeMsg(en)
+	}
+	en.Flush()
+}
+
+func BenchmarkDecodeallTierStats(b *testing.B) {
+	v := allTierStats{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	rd := msgp.NewEndlessReader(buf.Bytes(), b)
+	dc := msgp.NewReader(rd)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		err := v.DecodeMsg(dc)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
 func TestMarshalUnmarshaldataUsageCache(t *testing.T) {
 	v := dataUsageCache{}
 	bts, err := v.MarshalMsg(nil)
@@ -348,119 +461,6 @@ func BenchmarkDecodedataUsageEntry(b *testing.B) {
 	}
 }

-func TestMarshalUnmarshaldataUsageEntryV5(t *testing.T) {
-	v := dataUsageEntryV5{}
-	bts, err := v.MarshalMsg(nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	left, err := v.UnmarshalMsg(bts)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(left) > 0 {
-		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
-	}
-
-	left, err = msgp.Skip(bts)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(left) > 0 {
-		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
-	}
-}
-
-func BenchmarkMarshalMsgdataUsageEntryV5(b *testing.B) {
-	v := dataUsageEntryV5{}
-	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		v.MarshalMsg(nil)
-	}
-}
-
-func BenchmarkAppendMsgdataUsageEntryV5(b *testing.B) {
-	v := dataUsageEntryV5{}
-	bts := make([]byte, 0, v.Msgsize())
-	bts, _ = v.MarshalMsg(bts[0:0])
-	b.SetBytes(int64(len(bts)))
-	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		bts, _ = v.MarshalMsg(bts[0:0])
-	}
-}
-
-func BenchmarkUnmarshaldataUsageEntryV5(b *testing.B) {
-	v := dataUsageEntryV5{}
-	bts, _ := v.MarshalMsg(nil)
-	b.ReportAllocs()
-	b.SetBytes(int64(len(bts)))
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		_, err := v.UnmarshalMsg(bts)
-		if err != nil {
-			b.Fatal(err)
-		}
-	}
-}
-
-func TestEncodeDecodedataUsageEntryV5(t *testing.T) {
-	v := dataUsageEntryV5{}
-	var buf bytes.Buffer
-	msgp.Encode(&buf, &v)
-
-	m := v.Msgsize()
-	if buf.Len() > m {
-		t.Log("WARNING: TestEncodeDecodedataUsageEntryV5 Msgsize() is inaccurate")
-	}
-
-	vn := dataUsageEntryV5{}
-	err := msgp.Decode(&buf, &vn)
-	if err != nil {
-		t.Error(err)
-	}
-
-	buf.Reset()
-	msgp.Encode(&buf, &v)
-	err = msgp.NewReader(&buf).Skip()
-	if err != nil {
-		t.Error(err)
-	}
-}
-
-func BenchmarkEncodedataUsageEntryV5(b *testing.B) {
-	v := dataUsageEntryV5{}
-	var buf bytes.Buffer
-	msgp.Encode(&buf, &v)
-	b.SetBytes(int64(buf.Len()))
-	en := msgp.NewWriter(msgp.Nowhere)
-	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		v.EncodeMsg(en)
-	}
-	en.Flush()
-}
-
-func BenchmarkDecodedataUsageEntryV5(b *testing.B) {
-	v := dataUsageEntryV5{}
-	var buf bytes.Buffer
-	msgp.Encode(&buf, &v)
-	b.SetBytes(int64(buf.Len()))
-	rd := msgp.NewEndlessReader(buf.Bytes(), b)
-	dc := msgp.NewReader(rd)
-	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		err := v.DecodeMsg(dc)
-		if err != nil {
-			b.Fatal(err)
-		}
-	}
-}
-
 func TestMarshalUnmarshalreplicationAllStats(t *testing.T) {
 	v := replicationAllStats{}
 	bts, err := v.MarshalMsg(nil)
@@ -574,6 +574,119 @@ func BenchmarkDecodereplicationAllStats(b *testing.B) {
 	}
 }

+func TestMarshalUnmarshalreplicationAllStatsV1(t *testing.T) {
+	v := replicationAllStatsV1{}
+	bts, err := v.MarshalMsg(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	left, err := v.UnmarshalMsg(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+	}
+
+	left, err = msgp.Skip(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+	}
+}
+
+func BenchmarkMarshalMsgreplicationAllStatsV1(b *testing.B) {
+	v := replicationAllStatsV1{}
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.MarshalMsg(nil)
+	}
+}
+
+func BenchmarkAppendMsgreplicationAllStatsV1(b *testing.B) {
+	v := replicationAllStatsV1{}
+	bts := make([]byte, 0, v.Msgsize())
+	bts, _ = v.MarshalMsg(bts[0:0])
+	b.SetBytes(int64(len(bts)))
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		bts, _ = v.MarshalMsg(bts[0:0])
+	}
+}
+
+func BenchmarkUnmarshalreplicationAllStatsV1(b *testing.B) {
+	v := replicationAllStatsV1{}
+	bts, _ := v.MarshalMsg(nil)
+	b.ReportAllocs()
+	b.SetBytes(int64(len(bts)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := v.UnmarshalMsg(bts)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func TestEncodeDecodereplicationAllStatsV1(t *testing.T) {
+	v := replicationAllStatsV1{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+
+	m := v.Msgsize()
+	if buf.Len() > m {
+		t.Log("WARNING: TestEncodeDecodereplicationAllStatsV1 Msgsize() is inaccurate")
+	}
+
+	vn := replicationAllStatsV1{}
+	err := msgp.Decode(&buf, &vn)
+	if err != nil {
+		t.Error(err)
+	}
+
+	buf.Reset()
+	msgp.Encode(&buf, &v)
+	err = msgp.NewReader(&buf).Skip()
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+func BenchmarkEncodereplicationAllStatsV1(b *testing.B) {
+	v := replicationAllStatsV1{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	en := msgp.NewWriter(msgp.Nowhere)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.EncodeMsg(en)
+	}
+	en.Flush()
+}
+
+func BenchmarkDecodereplicationAllStatsV1(b *testing.B) {
+	v := replicationAllStatsV1{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	rd := msgp.NewEndlessReader(buf.Bytes(), b)
+	dc := msgp.NewReader(rd)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		err := v.DecodeMsg(dc)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
 func TestMarshalUnmarshalreplicationStats(t *testing.T) {
 	v := replicationStats{}
 	bts, err := v.MarshalMsg(nil)
@@ -912,3 +1025,116 @@ func BenchmarkDecodesizeHistogram(b *testing.B) {
 		}
 	}
 }
+
+func TestMarshalUnmarshaltierStats(t *testing.T) {
+	v := tierStats{}
+	bts, err := v.MarshalMsg(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	left, err := v.UnmarshalMsg(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+	}
+
+	left, err = msgp.Skip(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+	}
+}
+
+func BenchmarkMarshalMsgtierStats(b *testing.B) {
+	v := tierStats{}
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.MarshalMsg(nil)
+	}
+}
+
+func BenchmarkAppendMsgtierStats(b *testing.B) {
+	v := tierStats{}
+	bts := make([]byte, 0, v.Msgsize())
+	bts, _ = v.MarshalMsg(bts[0:0])
+	b.SetBytes(int64(len(bts)))
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		bts, _ = v.MarshalMsg(bts[0:0])
+	}
+}
+
+func BenchmarkUnmarshaltierStats(b *testing.B) {
+	v := tierStats{}
+	bts, _ := v.MarshalMsg(nil)
+	b.ReportAllocs()
+	b.SetBytes(int64(len(bts)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := v.UnmarshalMsg(bts)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func TestEncodeDecodetierStats(t *testing.T) {
+	v := tierStats{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+
+	m := v.Msgsize()
+	if buf.Len() > m {
+		t.Log("WARNING: TestEncodeDecodetierStats Msgsize() is inaccurate")
+	}
+
+	vn := tierStats{}
+	err := msgp.Decode(&buf, &vn)
+	if err != nil {
+		t.Error(err)
+	}
+
+	buf.Reset()
+	msgp.Encode(&buf, &v)
+	err = msgp.NewReader(&buf).Skip()
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+func BenchmarkEncodetierStats(b *testing.B) {
+	v := tierStats{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	en := msgp.NewWriter(msgp.Nowhere)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.EncodeMsg(en)
+	}
+	en.Flush()
+}
+
+func BenchmarkDecodetierStats(b *testing.B) {
+	v := tierStats{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	rd := msgp.NewEndlessReader(buf.Bytes(), b)
+	dc := msgp.NewReader(rd)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		err := v.DecodeMsg(dc)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
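All of these generated tests share one round-trip shape: marshal, unmarshal, and assert no trailing bytes. If you add a struct of your own to the msgp generation set, the equivalent hand-written check looks roughly like the sketch below; demoStats stands in for a generated type, with the two methods written by hand only so the example is self-contained (real code would rely on go:generate with msgp):

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

// demoStats stands in for a msgp-generated type.
type demoStats struct{ TotalSize int64 }

func (d demoStats) MarshalMsg(b []byte) ([]byte, error) {
	return msgp.AppendInt64(b, d.TotalSize), nil
}

func (d *demoStats) UnmarshalMsg(b []byte) ([]byte, error) {
	v, left, err := msgp.ReadInt64Bytes(b)
	d.TotalSize = v
	return left, err
}

func main() {
	v := demoStats{TotalSize: 1 << 30}
	bts, _ := v.MarshalMsg(nil)
	var out demoStats
	left, err := out.UnmarshalMsg(bts)
	// the generated tests assert exactly this: no error, no trailing bytes
	fmt.Println(out.TotalSize, len(left), err) // 1073741824 0 <nil>
}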
@@ -18,7 +18,10 @@
 package cmd

 import (
+	"sort"
 	"time"

+	"github.com/minio/madmin-go"
 )

 // BucketTargetUsageInfo - bucket target usage info provides

@@ -81,7 +84,50 @@ type DataUsageInfo struct {
 	// - total objects in a bucket
 	// - object size histogram per bucket
 	BucketsUsage map[string]BucketUsageInfo `json:"bucketsUsageInfo"`

 	// Deprecated kept here for backward compatibility reasons.
 	BucketSizes map[string]uint64 `json:"bucketsSizes"`
+
+	// TierStats contains per-tier stats of all configured remote tiers
+	TierStats *allTierStats `json:"tierStats,omitempty"`
 }

+func (dui DataUsageInfo) tierStats() []madmin.TierInfo {
+	if globalTierConfigMgr.Empty() {
+		return nil
+	}
+
+	ts := make(map[string]madmin.TierStats)
+	// Add configured remote tiers
+	for tier := range globalTierConfigMgr.Tiers {
+		ts[tier] = madmin.TierStats{}
+	}
+	// Add STANDARD (hot-tier)
+	ts[minioHotTier] = madmin.TierStats{}
+
+	ts = dui.TierStats.adminStats(ts)
+	infos := make([]madmin.TierInfo, 0, len(ts))
+	for tier, st := range ts {
+		var tierType string
+		if tier == minioHotTier {
+			tierType = "internal"
+		} else {
+			tierType = globalTierConfigMgr.Tiers[tier].Type.String()
+		}
+		infos = append(infos, madmin.TierInfo{
+			Name:  tier,
+			Type:  tierType,
+			Stats: st,
+		})
+	}
+
+	sort.Slice(infos, func(i, j int) bool {
+		if infos[i].Type == "internal" {
+			return true
+		}
+		if infos[j].Type == "internal" {
+			return false
+		}
+		return infos[i].Name < infos[j].Name
+	})
+	return infos
+}
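The comparator pins the internal hot tier to the front of the listing and orders the remote tiers by name. A compact standalone version of the same "pin one class first" idiom; the XOR-style guard here additionally keeps the comparison consistent if more than one entry were ever marked internal:

package main

import (
	"fmt"
	"sort"
)

type tierInfo struct{ Name, Type string }

func main() {
	infos := []tierInfo{
		{"WARM", "s3"}, {"STANDARD", "internal"}, {"COLD", "azure"},
	}
	sort.Slice(infos, func(i, j int) bool {
		// internal sorts before everything else; ties fall through to name order
		if (infos[i].Type == "internal") != (infos[j].Type == "internal") {
			return infos[i].Type == "internal"
		}
		return infos[i].Name < infos[j].Name
	})
	fmt.Println(infos) // [{STANDARD internal} {COLD azure} {WARM s3}]
}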
@@ -29,6 +29,8 @@ import (
 	"io/ioutil"
 	"net/http"
 	"os"
+	"path"
+	"strconv"
 	"strings"
 	"sync"
 	"sync/atomic"

@@ -39,6 +41,7 @@ import (
 	"github.com/minio/minio/internal/crypto"
 	"github.com/minio/minio/internal/disk"
+	"github.com/minio/minio/internal/fips"
 	"github.com/minio/minio/internal/hash"
 	xhttp "github.com/minio/minio/internal/http"
 	xioutil "github.com/minio/minio/internal/ioutil"
 	"github.com/minio/minio/internal/kms"

@@ -48,13 +51,18 @@ import (

 const (
 	// cache.json object metadata for cached objects.
-	cacheMetaJSONFile = "cache.json"
-	cacheDataFile     = "part.1"
-	cacheMetaVersion  = "1.0.0"
-	cacheExpiryDays   = 90 * time.Hour * 24 // defaults to 90 days
+	cacheMetaJSONFile   = "cache.json"
+	cacheDataFile       = "part.1"
+	cacheDataFilePrefix = "part"
+
+	cacheMetaVersion = "1.0.0"
+	cacheExpiryDays  = 90 * time.Hour * 24 // defaults to 90 days
 	// SSECacheEncrypted is the metadata key indicating that the object
 	// is a cache entry encrypted with cache KMS master key in globalCacheKMS.
-	SSECacheEncrypted = "X-Minio-Internal-Encrypted-Cache"
+	SSECacheEncrypted               = "X-Minio-Internal-Encrypted-Cache"
+	cacheMultipartDir               = "multipart"
+	cacheStaleUploadCleanupInterval = time.Hour * 24
+	cacheStaleUploadExpiry          = time.Hour * 24
 )

 // CacheChecksumInfoV1 - carries checksums of individual blocks on disk.

@@ -78,6 +86,11 @@ type cacheMeta struct {
 	Hits   int    `json:"hits,omitempty"`
 	Bucket string `json:"bucket,omitempty"`
 	Object string `json:"object,omitempty"`
+	// for multipart upload
+	PartNumbers     []int    `json:"partNums,omitempty"`   // Part Numbers
+	PartETags       []string `json:"partETags,omitempty"`  // Part ETags
+	PartSizes       []int64  `json:"partSizes,omitempty"`  // Part Sizes
+	PartActualSizes []int64  `json:"partASizes,omitempty"` // Part ActualSizes (compression)
 }

 // RangeInfo has the range, file and range length information for a cached range.

@@ -104,13 +117,13 @@ func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) {
 		CacheStatus:       CacheHit,
 		CacheLookupStatus: CacheHit,
 	}

+	meta := cloneMSS(m.Meta)
 	// We set file info only if it's valid.
 	o.Size = m.Stat.Size
-	o.ETag = extractETag(m.Meta)
-	o.ContentType = m.Meta["content-type"]
-	o.ContentEncoding = m.Meta["content-encoding"]
-	if storageClass, ok := m.Meta[xhttp.AmzStorageClass]; ok {
+	o.ETag = extractETag(meta)
+	o.ContentType = meta["content-type"]
+	o.ContentEncoding = meta["content-encoding"]
+	if storageClass, ok := meta[xhttp.AmzStorageClass]; ok {
 		o.StorageClass = storageClass
 	} else {
 		o.StorageClass = globalMinioDefaultStorageClass

@@ -119,20 +132,26 @@ func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) {
 		t time.Time
 		e error
 	)
-	if exp, ok := m.Meta["expires"]; ok {
+	if exp, ok := meta["expires"]; ok {
 		if t, e = time.Parse(http.TimeFormat, exp); e == nil {
 			o.Expires = t.UTC()
 		}
 	}
-	if mtime, ok := m.Meta["last-modified"]; ok {
+	if mtime, ok := meta["last-modified"]; ok {
 		if t, e = time.Parse(http.TimeFormat, mtime); e == nil {
 			o.ModTime = t.UTC()
 		}
 	}
+
+	o.Parts = make([]ObjectPartInfo, len(m.PartNumbers))
+	for i := range m.PartNumbers {
+		o.Parts[i].Number = m.PartNumbers[i]
+		o.Parts[i].Size = m.PartSizes[i]
+		o.Parts[i].ETag = m.PartETags[i]
+		o.Parts[i].ActualSize = m.PartActualSizes[i]
+	}
 	// etag/md5Sum has already been extracted. We need to
 	// remove it to avoid it appearing as part of user-defined metadata
-	o.UserDefined = cleanMetadata(m.Meta)
+	o.UserDefined = cleanMetadata(meta)
 	return o
 }
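ToObjectInfo now works on cloneMSS(m.Meta), so later deletions (such as stripping the etag for cleanMetadata) cannot mutate the cached metadata map that other readers share. The underlying idiom, assuming cloneMSS is a plain map copy:

package main

import "fmt"

// cloneMSS-style copy: a fresh map so the caller can delete keys safely.
func cloneMSS(m map[string]string) map[string]string {
	out := make(map[string]string, len(m))
	for k, v := range m {
		out[k] = v
	}
	return out
}

func main() {
	shared := map[string]string{"etag": "abc", "content-type": "text/plain"}
	local := cloneMSS(shared)
	delete(local, "etag") // scrub from the response copy only
	fmt.Println(shared["etag"], local["etag"]) // "abc" and ""
}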
@@ -142,16 +161,18 @@ type diskCache struct {
 	online       uint32 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG
 	purgeRunning int32

-	triggerGC       chan struct{}
-	dir             string         // caching directory
-	stats           CacheDiskStats // disk cache stats for prometheus
-	quotaPct        int            // max usage in %
-	pool            sync.Pool
-	after           int // minimum accesses before an object is cached.
-	lowWatermark    int
-	highWatermark   int
-	enableRange     bool
-	commitWriteback bool
+	triggerGC          chan struct{}
+	dir                string         // caching directory
+	stats              CacheDiskStats // disk cache stats for prometheus
+	quotaPct           int            // max usage in %
+	pool               sync.Pool
+	after              int // minimum accesses before an object is cached.
+	lowWatermark       int
+	highWatermark      int
+	enableRange        bool
+	commitWriteback    bool
+	commitWritethrough bool

 	retryWritebackCh chan ObjectInfo
 	// nsMutex namespace lock
 	nsMutex *nsLockMap

@@ -170,15 +191,17 @@ func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCa
 		return nil, fmt.Errorf("Unable to initialize '%s' dir, %w", dir, err)
 	}
 	cache := diskCache{
-		dir:             dir,
-		triggerGC:       make(chan struct{}, 1),
-		stats:           CacheDiskStats{Dir: dir},
-		quotaPct:        quotaPct,
-		after:           config.After,
-		lowWatermark:    config.WatermarkLow,
-		highWatermark:   config.WatermarkHigh,
-		enableRange:     config.Range,
-		commitWriteback: config.CommitWriteback,
+		dir:                dir,
+		triggerGC:          make(chan struct{}, 1),
+		stats:              CacheDiskStats{Dir: dir},
+		quotaPct:           quotaPct,
+		after:              config.After,
+		lowWatermark:       config.WatermarkLow,
+		highWatermark:      config.WatermarkHigh,
+		enableRange:        config.Range,
+		commitWriteback:    config.CacheCommitMode == CommitWriteBack,
+		commitWritethrough: config.CacheCommitMode == CommitWriteThrough,

 		retryWritebackCh: make(chan ObjectInfo, 10000),
 		online:           1,
 		pool: sync.Pool{

@@ -190,6 +213,7 @@ func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCa
 		nsMutex: newNSLock(false),
 	}
 	go cache.purgeWait(ctx)
+	go cache.cleanupStaleUploads(ctx)
 	if cache.commitWriteback {
 		go cache.scanCacheWritebackFailures(ctx)
 	}
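commitWriteback and commitWritethrough are now both derived from a single CacheCommitMode value, which keeps the two flags mutually exclusive by construction. A sketch of that derivation (the constant names mirror the diff but the enum type is assumed, not verified against the config package):

package main

import "fmt"

type commitMode int

const (
	CommitWriteBack commitMode = iota
	CommitWriteThrough
)

type diskCacheFlags struct {
	commitWriteback    bool
	commitWritethrough bool
}

// newFlags derives both booleans from one mode, so they can never both be true.
func newFlags(mode commitMode) diskCacheFlags {
	return diskCacheFlags{
		commitWriteback:    mode == CommitWriteBack,
		commitWritethrough: mode == CommitWriteThrough,
	}
}

func main() {
	fmt.Printf("%+v\n", newFlags(CommitWriteThrough)) // {commitWriteback:false commitWritethrough:true}
}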
@@ -303,16 +327,11 @@ func (c *diskCache) purge(ctx context.Context) {
 	// ignore error we know what value we are passing.
 	scorer, _ := newFileScorer(toFree, time.Now().Unix(), 100)

-	// this function returns FileInfo for cached range files and cache data file.
-	fiStatFn := func(ranges map[string]string, dataFile, pathPrefix string) map[string]os.FileInfo {
+	// this function returns FileInfo for cached range files.
+	fiStatRangesFn := func(ranges map[string]string, pathPrefix string) map[string]os.FileInfo {
 		fm := make(map[string]os.FileInfo)
-		fname := pathJoin(pathPrefix, dataFile)
-		if fi, err := os.Stat(fname); err == nil {
-			fm[fname] = fi
-		}
-
 		for _, rngFile := range ranges {
-			fname = pathJoin(pathPrefix, rngFile)
+			fname := pathJoin(pathPrefix, rngFile)
 			if fi, err := os.Stat(fname); err == nil {
 				fm[fname] = fi
 			}

@@ -320,6 +339,26 @@ func (c *diskCache) purge(ctx context.Context) {
 		return fm
 	}

+	// this function returns the most recent Atime among cached part files.
+	lastAtimeFn := func(partNums []int, pathPrefix string) time.Time {
+		lastATime := timeSentinel
+		for _, pnum := range partNums {
+			fname := pathJoin(pathPrefix, fmt.Sprintf("%s.%d", cacheDataFilePrefix, pnum))
+			if fi, err := os.Stat(fname); err == nil {
+				if atime.Get(fi).After(lastATime) {
+					lastATime = atime.Get(fi)
+				}
+			}
+		}
+		if len(partNums) == 0 {
+			fname := pathJoin(pathPrefix, cacheDataFile)
+			if fi, err := os.Stat(fname); err == nil {
+				lastATime = atime.Get(fi)
+			}
+		}
+		return lastATime
+	}
+
 	filterFn := func(name string, typ os.FileMode) error {
 		if name == minioMetaBucket {
 			// Proceed to next file.

@@ -334,9 +373,10 @@ func (c *diskCache) purge(ctx context.Context) {
 			// Proceed to next file.
 			return nil
 		}
-		// stat all cached file ranges and cacheDataFile.
-		cachedFiles := fiStatFn(meta.Ranges, cacheDataFile, pathJoin(c.dir, name))
+		// get last access time of cache part files
+		lastAtime := lastAtimeFn(meta.PartNumbers, pathJoin(c.dir, name))
+		// stat all cached file ranges.
+		cachedRngFiles := fiStatRangesFn(meta.Ranges, pathJoin(c.dir, name))
 		objInfo := meta.ToObjectInfo("", "")
 		// prevent gc from clearing un-synced commits. This metadata is present when
 		// cache writeback commit setting is enabled.

@@ -345,7 +385,27 @@ func (c *diskCache) purge(ctx context.Context) {
 			return nil
 		}
 		cc := cacheControlOpts(objInfo)
-		for fname, fi := range cachedFiles {
+		switch {
+		case cc != nil:
+			if cc.isStale(objInfo.ModTime) {
+				if err = removeAll(cacheDir); err != nil {
+					logger.LogIf(ctx, err)
+				}
+				scorer.adjustSaveBytes(-objInfo.Size)
+				// break early if sufficient disk space reclaimed.
+				if c.diskUsageLow() {
+					// disk usage is already low, filtering is complete.
+					return errDoneForNow
+				}
+			}
+		case lastAtime != timeSentinel:
+			// cached multipart or single part
+			objInfo.AccTime = lastAtime
+			objInfo.Name = pathJoin(c.dir, name, cacheDataFile)
+			scorer.addFileWithObjInfo(objInfo, numHits)
+		}
+
+		for fname, fi := range cachedRngFiles {
 			if cc != nil {
 				if cc.isStale(objInfo.ModTime) {
 					if err = removeAll(fname); err != nil {

@@ -365,7 +425,7 @@ func (c *diskCache) purge(ctx context.Context) {
 		}
 		// clean up stale cache.json files for objects that never got cached but access count was maintained in cache.json
 		fi, err := os.Stat(pathJoin(cacheDir, cacheMetaJSONFile))
-		if err != nil || (fi.ModTime().Before(expiry) && len(cachedFiles) == 0) {
+		if err != nil || (fi.ModTime().Before(expiry) && len(cachedRngFiles) == 0) {
 			removeAll(cacheDir)
 			scorer.adjustSaveBytes(-fi.Size())
 			// Proceed to next file.
@@ -581,8 +641,11 @@ func (c *diskCache) saveMetadata(ctx context.Context, bucket, object string, met
 	if m.Meta == nil {
 		m.Meta = make(map[string]string)
 	}
-	if etag, ok := meta["etag"]; ok {
-		m.Meta["etag"] = etag
+	// save etag in m.Meta if missing
+	if _, ok := m.Meta["etag"]; !ok {
+		if etag, ok := meta["etag"]; ok {
+			m.Meta["etag"] = etag
+		}
 	}
 	}
 	m.Hits++

@@ -591,6 +654,50 @@ func (c *diskCache) saveMetadata(ctx context.Context, bucket, object string, met
 	return jsonSave(f, m)
 }

+// updates the ETag and ModTime on cache with ETag from backend
+func (c *diskCache) updateMetadata(ctx context.Context, bucket, object, etag string, modTime time.Time, size int64) error {
+	cachedPath := getCacheSHADir(c.dir, bucket, object)
+	metaPath := pathJoin(cachedPath, cacheMetaJSONFile)
+	// Create cache directory if needed
+	if err := os.MkdirAll(cachedPath, 0777); err != nil {
+		return err
+	}
+	f, err := os.OpenFile(metaPath, os.O_RDWR, 0666)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	m := &cacheMeta{
+		Version: cacheMetaVersion,
+		Bucket:  bucket,
+		Object:  object,
+	}
+	if err := jsonLoad(f, m); err != nil && err != io.EOF {
+		return err
+	}
+	if m.Meta == nil {
+		m.Meta = make(map[string]string)
+	}
+	var key []byte
+	var objectEncryptionKey crypto.ObjectKey
+
+	if globalCacheKMS != nil {
+		// Calculating object encryption key
+		key, err = decryptObjectInfo(key, bucket, object, m.Meta)
+		if err != nil {
+			return err
+		}
+		copy(objectEncryptionKey[:], key)
+		m.Meta["etag"] = hex.EncodeToString(objectEncryptionKey.SealETag([]byte(etag)))
+	} else {
+		m.Meta["etag"] = etag
+	}
+	m.Meta["last-modified"] = modTime.UTC().Format(http.TimeFormat)
+	m.Meta["Content-Length"] = strconv.Itoa(int(size))
+	return jsonSave(f, m)
+}
+
 func getCacheSHADir(dir, bucket, object string) string {
 	return pathJoin(dir, getSHA256Hash([]byte(pathJoin(bucket, object))))
 }
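getCacheSHADir derives a fixed-depth directory from the bucket/object pair, so code like updateMetadata can re-locate an entry without any directory walk. Assuming getSHA256Hash is a hex-encoded SHA-256 (as its name suggests), a standalone equivalent:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"path"
)

// cacheSHADir mirrors the getCacheSHADir idea: dir/<sha256hex(bucket/object)>.
func cacheSHADir(dir, bucket, object string) string {
	sum := sha256.Sum256([]byte(path.Join(bucket, object)))
	return path.Join(dir, hex.EncodeToString(sum[:]))
}

func main() {
	fmt.Println(cacheSHADir("/mnt/cache", "photos", "2021/cat.png"))
}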
@@ -639,7 +746,7 @@ func (c *diskCache) bitrotWriteToCache(cachePath, fileName string, reader io.Rea
 	}
 	hashBytes := h.Sum(nil)
 	// compute md5Hash of original data stream if writeback commit to cache
-	if c.commitWriteback {
+	if c.commitWriteback || c.commitWritethrough {
 		if _, err = md5Hash.Write((*bufp)[:n]); err != nil {
 			return 0, "", err
 		}

@@ -655,8 +762,9 @@ func (c *diskCache) bitrotWriteToCache(cachePath, fileName string, reader io.Rea
 			break
 		}
 	}
+	md5sumCurr := md5Hash.Sum(nil)

-	return bytesWritten, base64.StdEncoding.EncodeToString(md5Hash.Sum(nil)), nil
+	return bytesWritten, base64.StdEncoding.EncodeToString(md5sumCurr), nil
 }

 func newCacheEncryptReader(content io.Reader, bucket, object string, metadata map[string]string) (r io.Reader, err error) {

@@ -691,22 +799,36 @@ func newCacheEncryptMetadata(bucket, object string, metadata map[string]string)
 	metadata[SSECacheEncrypted] = ""
 	return objectKey[:], nil
 }
+
+func (c *diskCache) GetLockContext(ctx context.Context, bucket, object string) (RWLocker, LockContext, error) {
+	cachePath := getCacheSHADir(c.dir, bucket, object)
+	cLock := c.NewNSLockFn(cachePath)
+	lkctx, err := cLock.GetLock(ctx, globalOperationTimeout)
+	return cLock, lkctx, err
+}

 // Caches the object to disk
-func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Reader, size int64, rs *HTTPRangeSpec, opts ObjectOptions, incHitsOnly bool) (oi ObjectInfo, err error) {
+func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Reader, size int64, rs *HTTPRangeSpec, opts ObjectOptions, incHitsOnly, writeback bool) (oi ObjectInfo, err error) {
 	if !c.diskSpaceAvailable(size) {
 		io.Copy(ioutil.Discard, data)
 		return oi, errDiskFull
 	}
-	cachePath := getCacheSHADir(c.dir, bucket, object)
-	cLock := c.NewNSLockFn(cachePath)
-	lkctx, err := cLock.GetLock(ctx, globalOperationTimeout)
+	cLock, lkctx, err := c.GetLockContext(ctx, bucket, object)
 	if err != nil {
 		return oi, err
 	}
 	ctx = lkctx.Context()
 	defer cLock.Unlock(lkctx.Cancel)
+
+	return c.put(ctx, bucket, object, data, size, rs, opts, incHitsOnly, writeback)
+}
+
+// Caches the object to disk
+func (c *diskCache) put(ctx context.Context, bucket, object string, data io.Reader, size int64, rs *HTTPRangeSpec, opts ObjectOptions, incHitsOnly, writeback bool) (oi ObjectInfo, err error) {
+	if !c.diskSpaceAvailable(size) {
+		io.Copy(ioutil.Discard, data)
+		return oi, errDiskFull
+	}
+	cachePath := getCacheSHADir(c.dir, bucket, object)
+	meta, _, numHits, err := c.statCache(ctx, cachePath)
+	// Case where object not yet cached
+	if osIsNotExist(err) && c.after >= 1 {

@@ -755,7 +877,7 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read
 		removeAll(cachePath)
 		return oi, IncompleteBody{Bucket: bucket, Object: object}
 	}
-	if c.commitWriteback {
+	if writeback {
 		metadata["content-md5"] = md5sum
 		if md5bytes, err := base64.StdEncoding.DecodeString(md5sum); err == nil {
 			metadata["etag"] = hex.EncodeToString(md5bytes)

@@ -897,7 +1019,7 @@ func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, of
 		return err
 	}

-	if _, err := io.Copy(writer, bytes.NewReader((*bufp)[blockOffset:blockOffset+blockLength])); err != nil {
+	if _, err = io.Copy(writer, bytes.NewReader((*bufp)[blockOffset:blockOffset+blockLength])); err != nil {
 		if err != io.ErrClosedPipe {
 			logger.LogIf(ctx, err)
 			return err
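Splitting Put into a lock-acquiring wrapper plus an unexported put lets other code paths (such as a multipart commit that already holds the namespace lock) reuse the body without re-locking. The shape of that refactor, reduced to its essentials with an illustrative mutex in place of the namespace lock:

package main

import (
	"fmt"
	"sync"
)

type cache struct{ mu sync.Mutex }

// Put is the public entry point: take the lock, then delegate.
func (c *cache) Put(key, val string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.put(key, val)
}

// put assumes the caller holds the lock, so composite operations
// (e.g. a multipart commit) can call it without deadlocking.
func (c *cache) put(key, val string) error {
	fmt.Println("store", key, "=", val)
	return nil
}

func main() {
	var c cache
	_ = c.Put("bucket/object", "data")
}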
@@ -950,19 +1072,78 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
 		gr, gerr := NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts)
 		return gr, numHits, gerr
 	}
-	fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts)
+	fn, startOffset, length, nErr := NewGetObjectReader(rs, objInfo, opts)
 	if nErr != nil {
 		return nil, numHits, nErr
 	}
-	filePath := pathJoin(cacheObjPath, cacheFile)
+	var totalBytesRead int64

 	pr, pw := xioutil.WaitPipe()
-	go func() {
-		err := c.bitrotReadFromCache(ctx, filePath, off, length, pw)
-		if err != nil {
-			removeAll(cacheObjPath)
+	if len(objInfo.Parts) > 0 {
+		// For negative length read everything.
+		if length < 0 {
+			length = objInfo.Size - startOffset
 		}
-		pw.CloseWithError(err)
-	}()

+		// Reply back invalid range if the input offset and length fall out of range.
+		if startOffset > objInfo.Size || startOffset+length > objInfo.Size {
+			logger.LogIf(ctx, InvalidRange{startOffset, length, objInfo.Size}, logger.Application)
+			return nil, numHits, InvalidRange{startOffset, length, objInfo.Size}
+		}
+		// Get start part index and offset.
+		partIndex, partOffset, err := cacheObjectToPartOffset(objInfo, startOffset)
+		if err != nil {
+			return nil, numHits, InvalidRange{startOffset, length, objInfo.Size}
+		}
+		// Calculate endOffset according to length
+		endOffset := startOffset
+		if length > 0 {
+			endOffset += length - 1
+		}
+
+		// Get last part index to read given length.
+		lastPartIndex, _, err := cacheObjectToPartOffset(objInfo, endOffset)
+		if err != nil {
+			return nil, numHits, InvalidRange{startOffset, length, objInfo.Size}
+		}
+		go func() {
+			for ; partIndex <= lastPartIndex; partIndex++ {
+				if length == totalBytesRead {
+					break
+				}
+				partNumber := objInfo.Parts[partIndex].Number
+				// Save the current part name and size.
+				partSize := objInfo.Parts[partIndex].Size
+				partLength := partSize - partOffset
+				// partLength should be adjusted so that we don't write more data than what was requested.
+				if partLength > (length - totalBytesRead) {
+					partLength = length - totalBytesRead
+				}
+				filePath := pathJoin(cacheObjPath, fmt.Sprintf("part.%d", partNumber))
+				err := c.bitrotReadFromCache(ctx, filePath, partOffset, partLength, pw)
+				if err != nil {
+					removeAll(cacheObjPath)
+					pw.CloseWithError(err)
+					break
+				}
+				totalBytesRead += partLength
+				// partOffset will be valid only for the first part, hence reset it to 0 for
+				// the remaining parts.
+				partOffset = 0
+			} // End of read all parts loop.
+			pw.CloseWithError(err)
+		}()
+	} else {
+		go func() {
+			filePath := pathJoin(cacheObjPath, cacheFile)
+			err := c.bitrotReadFromCache(ctx, filePath, startOffset, length, pw)
+			if err != nil {
+				removeAll(cacheObjPath)
+			}
+			pw.CloseWithError(err)
+		}()
+	}

 	// Cleanup function to cause the go routine above to exit, in
 	// case of incomplete read.
 	pipeCloser := func() { pr.CloseWithError(nil) }
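The range read walks parts from partIndex to lastPartIndex, resetting partOffset to zero after the first part. cacheObjectToPartOffset itself is not shown in this hunk; a plausible implementation is a running-sum scan over part sizes, sketched here under the assumption that parts are stored in order:

package main

import (
	"errors"
	"fmt"
)

// partToOffset finds which part an absolute offset lands in and the
// remaining offset within that part (a sketch of cacheObjectToPartOffset).
func partToOffset(sizes []int64, offset int64) (idx int, partOffset int64, err error) {
	for i, sz := range sizes {
		if offset < sz {
			return i, offset, nil
		}
		offset -= sz
	}
	return 0, 0, errors.New("offset beyond object size")
}

func main() {
	sizes := []int64{5 << 20, 5 << 20, 3 << 20} // three cached parts
	idx, off, _ := partToOffset(sizes, 6<<20)
	fmt.Println(idx, off) // 1 1048576: 6 MiB falls 1 MiB into the second part
}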
@@ -980,11 +1161,17 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
 		gr.ObjInfo.Size = objSize
 	}
 	return gr, numHits, nil
 }

+// deletes the cached object - caller should have taken write lock
+func (c *diskCache) delete(bucket, object string) (err error) {
+	cacheObjPath := getCacheSHADir(c.dir, bucket, object)
+	return removeAll(cacheObjPath)
+}
+
 // Deletes the cached object
-func (c *diskCache) delete(ctx context.Context, cacheObjPath string) (err error) {
+func (c *diskCache) Delete(ctx context.Context, bucket, object string) (err error) {
+	cacheObjPath := getCacheSHADir(c.dir, bucket, object)
 	cLock := c.NewNSLockFn(cacheObjPath)
 	lkctx, err := cLock.GetLock(ctx, globalOperationTimeout)
 	if err != nil {

@@ -994,12 +1181,6 @@ func (c *diskCache) delete(ctx context.Context, cacheObjPath string) (err error)
 	return removeAll(cacheObjPath)
 }

-// Deletes the cached object
-func (c *diskCache) Delete(ctx context.Context, bucket, object string) (err error) {
-	cacheObjPath := getCacheSHADir(c.dir, bucket, object)
-	return c.delete(ctx, cacheObjPath)
-}
-
 // convenience function to check if object is cached on this diskCache
 func (c *diskCache) Exists(ctx context.Context, bucket, object string) bool {
 	if _, err := os.Stat(getCacheSHADir(c.dir, bucket, object)); err != nil {
@@ -1040,3 +1221,394 @@ func (c *diskCache) scanCacheWritebackFailures(ctx context.Context) {
 		return
 	}
 }
+
+// NewMultipartUpload caches multipart uploads when MINIO_CACHE_COMMIT is set to writethrough mode.
+// multiparts are saved in .minio.sys/multipart/cachePath/uploadID dir until finalized. Then the individual parts
+// are moved from the upload dir to cachePath/ directory.
+func (c *diskCache) NewMultipartUpload(ctx context.Context, bucket, object, uID string, opts ObjectOptions) (uploadID string, err error) {
+	uploadID = uID
+	if uploadID == "" {
+		return "", InvalidUploadID{
+			Bucket:   bucket,
+			Object:   object,
+			UploadID: uploadID,
+		}
+	}
+
+	cachePath := getMultipartCacheSHADir(c.dir, bucket, object)
+	uploadIDDir := path.Join(cachePath, uploadID)
+	if err := os.MkdirAll(uploadIDDir, 0777); err != nil {
+		return uploadID, err
+	}
+	metaPath := pathJoin(uploadIDDir, cacheMetaJSONFile)
+
+	f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0666)
+	if err != nil {
+		return uploadID, err
+	}
+	defer f.Close()
+
+	m := &cacheMeta{
+		Version: cacheMetaVersion,
+		Bucket:  bucket,
+		Object:  object,
+	}
+	if err := jsonLoad(f, m); err != nil && err != io.EOF {
+		return uploadID, err
+	}
+
+	m.Meta = opts.UserDefined
+
+	m.Checksum = CacheChecksumInfoV1{Algorithm: HighwayHash256S.String(), Blocksize: cacheBlkSize}
+	m.Stat.ModTime = UTCNow()
+	if globalCacheKMS != nil {
+		m.Meta[ReservedMetadataPrefix+"Encrypted-Multipart"] = ""
+		if _, err := newCacheEncryptMetadata(bucket, object, m.Meta); err != nil {
+			return uploadID, err
+		}
+	}
+	err = jsonSave(f, m)
+	return uploadID, err
+}
+
+// PutObjectPart caches part to cache multipart path.
+func (c *diskCache) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, opts ObjectOptions) (partInfo PartInfo, err error) {
+	oi := PartInfo{}
+	if !c.diskSpaceAvailable(size) {
+		io.Copy(ioutil.Discard, data)
+		return oi, errDiskFull
+	}
+	cachePath := getMultipartCacheSHADir(c.dir, bucket, object)
+	uploadIDDir := path.Join(cachePath, uploadID)
+
+	partIDLock := c.NewNSLockFn(pathJoin(uploadIDDir, strconv.Itoa(partID)))
+	lkctx, err := partIDLock.GetLock(ctx, globalOperationTimeout)
+	if err != nil {
+		return oi, err
+	}
+
+	ctx = lkctx.Context()
+	defer partIDLock.Unlock(lkctx.Cancel)
+	meta, _, _, err := c.statCache(ctx, uploadIDDir)
+	// Case where object not yet cached
+	if err != nil {
+		return oi, err
+	}
+
+	if !c.diskSpaceAvailable(size) {
+		return oi, errDiskFull
+	}
+	reader := data
+	var actualSize = uint64(size)
+	if globalCacheKMS != nil {
+		reader, err = newCachePartEncryptReader(ctx, bucket, object, partID, data, size, meta.Meta)
+		if err != nil {
+			return oi, err
+		}
+		actualSize, _ = sio.EncryptedSize(uint64(size))
+	}
+	n, md5sum, err := c.bitrotWriteToCache(uploadIDDir, fmt.Sprintf("part.%d", partID), reader, actualSize)
+	if IsErr(err, baseErrs...) {
+		// take the cache drive offline
+		c.setOffline()
+	}
+	if err != nil {
+		return oi, err
+	}
+
+	if actualSize != uint64(n) {
+		return oi, IncompleteBody{Bucket: bucket, Object: object}
+	}
+	var md5hex string
+	if md5bytes, err := base64.StdEncoding.DecodeString(md5sum); err == nil {
+		md5hex = hex.EncodeToString(md5bytes)
+	}
+
+	pInfo := PartInfo{
+		PartNumber:   partID,
+		ETag:         md5hex,
+		Size:         n,
+		ActualSize:   int64(actualSize),
+		LastModified: UTCNow(),
+	}
+	return pInfo, nil
+}
+
+// SavePartMetadata saves part upload metadata to uploadID directory on disk cache
+func (c *diskCache) SavePartMetadata(ctx context.Context, bucket, object, uploadID string, partID int, pinfo PartInfo) error {
+	cachePath := getMultipartCacheSHADir(c.dir, bucket, object)
+	uploadDir := path.Join(cachePath, uploadID)
+
+	// acquire a write lock at upload path to update cache.json
+	uploadLock := c.NewNSLockFn(uploadDir)
+	ulkctx, err := uploadLock.GetLock(ctx, globalOperationTimeout)
+	if err != nil {
+		return err
+	}
+	defer uploadLock.Unlock(ulkctx.Cancel)
+
+	metaPath := pathJoin(uploadDir, cacheMetaJSONFile)
+	f, err := os.OpenFile(metaPath, os.O_RDWR, 0666)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	m := &cacheMeta{}
+	if err := jsonLoad(f, m); err != nil && err != io.EOF {
+		return err
+	}
+	var key []byte
+	var objectEncryptionKey crypto.ObjectKey
+	if globalCacheKMS != nil {
+		// Calculating object encryption key
+		key, err = decryptObjectInfo(key, bucket, object, m.Meta)
+		if err != nil {
+			return err
+		}
+		copy(objectEncryptionKey[:], key)
+		pinfo.ETag = hex.EncodeToString(objectEncryptionKey.SealETag([]byte(pinfo.ETag)))
+	}
+
+	pIdx := cacheObjPartIndex(m, partID)
+	if pIdx == -1 {
+		m.PartActualSizes = append(m.PartActualSizes, pinfo.ActualSize)
+		m.PartNumbers = append(m.PartNumbers, pinfo.PartNumber)
+		m.PartETags = append(m.PartETags, pinfo.ETag)
+		m.PartSizes = append(m.PartSizes, pinfo.Size)
+	} else {
+		m.PartActualSizes[pIdx] = pinfo.ActualSize
+		m.PartNumbers[pIdx] = pinfo.PartNumber
+		m.PartETags[pIdx] = pinfo.ETag
+		m.PartSizes[pIdx] = pinfo.Size
+	}
+	return jsonSave(f, m)
+}
+
+// newCachePartEncryptReader returns encrypted cache part reader, with part data encrypted with part encryption key
+func newCachePartEncryptReader(ctx context.Context, bucket, object string, partID int, content io.Reader, size int64, metadata map[string]string) (r io.Reader, err error) {
+	var key []byte
+	var objectEncryptionKey, partEncryptionKey crypto.ObjectKey
+
+	// Calculating object encryption key
+	key, err = decryptObjectInfo(key, bucket, object, metadata)
+	if err != nil {
+		return nil, err
+	}
+	copy(objectEncryptionKey[:], key)
+
+	partEnckey := objectEncryptionKey.DerivePartKey(uint32(partID))
+	copy(partEncryptionKey[:], partEnckey[:])
+	wantSize := int64(-1)
+	if size >= 0 {
+		info := ObjectInfo{Size: size}
+		wantSize = info.EncryptedSize()
+	}
+	hReader, err := hash.NewReader(content, wantSize, "", "", size)
+	if err != nil {
+		return nil, err
+	}
+
+	pReader := NewPutObjReader(hReader)
+	content, err = pReader.WithEncryption(hReader, &partEncryptionKey)
+	if err != nil {
+		return nil, err
+	}
+
+	reader, err := sio.EncryptReader(content, sio.Config{Key: partEncryptionKey[:], MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()})
+	if err != nil {
+		return nil, crypto.ErrInvalidCustomerKey
+	}
+	return reader, nil
+}
+
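newCachePartEncryptReader derives a distinct key per part from the object key (DerivePartKey(partID)), so one part can be re-uploaded or read without touching the others' ciphertext. The general technique is per-chunk subkey derivation; an illustrative HKDF-based version follows, which is not MinIO's actual KDF, only the shape of the idea:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

// partKey derives a 32-byte subkey for one part from the object key.
func partKey(objectKey []byte, partID uint32) ([32]byte, error) {
	var info [4]byte
	binary.BigEndian.PutUint32(info[:], partID) // domain-separate by part number
	var out [32]byte
	_, err := io.ReadFull(hkdf.New(sha256.New, objectKey, nil, info[:]), out[:])
	return out, err
}

func main() {
	objKey := []byte("0123456789abcdef0123456789abcdef")
	k1, _ := partKey(objKey, 1)
	k2, _ := partKey(objKey, 2)
	fmt.Println(hex.EncodeToString(k1[:4]), hex.EncodeToString(k2[:4])) // distinct subkeys
}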
// uploadIDExists returns error if uploadID is not being cached.
func (c *diskCache) uploadIDExists(bucket, object, uploadID string) (err error) {
    mpartCachePath := getMultipartCacheSHADir(c.dir, bucket, object)
    uploadIDDir := path.Join(mpartCachePath, uploadID)
    if _, err := os.Stat(uploadIDDir); err != nil {
        return err
    }
    return nil
}

// CompleteMultipartUpload completes multipart upload on cache. The parts and cache.json are moved from the temporary location in
// .minio.sys/multipart/cacheSHA/.. to cacheSHA path after part verification succeeds.
func (c *diskCache) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, roi ObjectInfo, opts ObjectOptions) (oi ObjectInfo, err error) {
    cachePath := getCacheSHADir(c.dir, bucket, object)
    cLock := c.NewNSLockFn(cachePath)
    lkctx, err := cLock.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return oi, err
    }

    ctx = lkctx.Context()
    defer cLock.Unlock(lkctx.Cancel)
    mpartCachePath := getMultipartCacheSHADir(c.dir, bucket, object)
    uploadIDDir := path.Join(mpartCachePath, uploadID)

    uploadMeta, _, _, uerr := c.statCache(ctx, uploadIDDir)
    if uerr != nil {
        return oi, errUploadIDNotFound
    }

    // Case where object not yet cached
    // Calculate full object size.
    var objectSize int64

    // Calculate consolidated actual size.
    var objectActualSize int64

    var partETags []string
    partETags, err = decryptCachePartETags(uploadMeta)
    if err != nil {
        return oi, err
    }
    for i, pi := range uploadedParts {
        pIdx := cacheObjPartIndex(uploadMeta, pi.PartNumber)
        if pIdx == -1 {
            invp := InvalidPart{
                PartNumber: pi.PartNumber,
                GotETag:    pi.ETag,
            }
            return oi, invp
        }
        pi.ETag = canonicalizeETag(pi.ETag)
        if partETags[pIdx] != pi.ETag {
            invp := InvalidPart{
                PartNumber: pi.PartNumber,
                ExpETag:    partETags[pIdx],
                GotETag:    pi.ETag,
            }
            return oi, invp
        }
        // All parts except the last part have to be at least 5MB.
        if (i < len(uploadedParts)-1) && !isMinAllowedPartSize(uploadMeta.PartActualSizes[pIdx]) {
            return oi, PartTooSmall{
                PartNumber: pi.PartNumber,
                PartSize:   uploadMeta.PartActualSizes[pIdx],
                PartETag:   pi.ETag,
            }
        }

        // Save for total object size.
        objectSize += uploadMeta.PartSizes[pIdx]

        // Save the consolidated actual size.
        objectActualSize += uploadMeta.PartActualSizes[pIdx]
    }
    uploadMeta.Stat.Size = objectSize
    uploadMeta.Stat.ModTime = roi.ModTime
    // if encrypted - make sure the ETag is updated
    uploadMeta.Meta["etag"] = roi.ETag
    uploadMeta.Meta[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10)
    var cpartETags []string
    var cpartNums []int
    var cpartSizes, cpartActualSizes []int64
    for _, pi := range uploadedParts {
        pIdx := cacheObjPartIndex(uploadMeta, pi.PartNumber)
        if pIdx != -1 {
            cpartETags = append(cpartETags, uploadMeta.PartETags[pIdx])
            cpartNums = append(cpartNums, uploadMeta.PartNumbers[pIdx])
            cpartSizes = append(cpartSizes, uploadMeta.PartSizes[pIdx])
            cpartActualSizes = append(cpartActualSizes, uploadMeta.PartActualSizes[pIdx])
        }
    }
    uploadMeta.PartETags = cpartETags
    uploadMeta.PartSizes = cpartSizes
    uploadMeta.PartActualSizes = cpartActualSizes
    uploadMeta.PartNumbers = cpartNums
    uploadMeta.Hits++
    metaPath := pathJoin(uploadIDDir, cacheMetaJSONFile)

    f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0666)
    if err != nil {
        return oi, err
    }
    defer f.Close()
    jsonSave(f, uploadMeta)
    for _, pi := range uploadedParts {
        part := fmt.Sprintf("part.%d", pi.PartNumber)
        renameAll(pathJoin(uploadIDDir, part), pathJoin(cachePath, part))
    }
    renameAll(pathJoin(uploadIDDir, cacheMetaJSONFile), pathJoin(cachePath, cacheMetaJSONFile))
    removeAll(uploadIDDir) // clean up any unused parts in the uploadIDDir
    return uploadMeta.ToObjectInfo(bucket, object), nil
}

func (c *diskCache) AbortUpload(bucket, object, uploadID string) (err error) {
    mpartCachePath := getMultipartCacheSHADir(c.dir, bucket, object)
    uploadDir := path.Join(mpartCachePath, uploadID)
    return removeAll(uploadDir)
}

// cacheObjPartIndex - returns the index of matching object part number.
func cacheObjPartIndex(m *cacheMeta, partNumber int) int {
    for i, part := range m.PartNumbers {
        if partNumber == part {
            return i
        }
    }
    return -1
}

// cacheObjectToPartOffset calculates part index and part offset for requested offset for content on cache.
func cacheObjectToPartOffset(objInfo ObjectInfo, offset int64) (partIndex int, partOffset int64, err error) {
    if offset == 0 {
        // Special case - if offset is 0, then partIndex and partOffset are always 0.
        return 0, 0, nil
    }
    partOffset = offset
    // Seek until object offset maps to a particular part offset.
    for i, part := range objInfo.Parts {
        partIndex = i
        // Offset is smaller than the part size; we have reached the proper part offset.
        if partOffset < part.Size {
            return partIndex, partOffset, nil
        }
        // Continue towards the next part.
        partOffset -= part.Size
    }
    // Offset beyond the size of the object, return InvalidRange.
    return 0, 0, InvalidRange{}
}

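For intuition, here is a minimal sketch of the offset walk above. The three 5 MiB part sizes are hypothetical values chosen only to exercise cacheObjectToPartOffset, and ObjectPartInfo is assumed to be the element type of ObjectInfo.Parts as used elsewhere in this package.

    // Hypothetical object made of three 5 MiB parts.
    // A requested offset of 12 MiB consumes part 0 (5 MiB) and part 1 (5 MiB),
    // leaving 2 MiB into part index 2.
    oi := ObjectInfo{Parts: []ObjectPartInfo{{Size: 5 << 20}, {Size: 5 << 20}, {Size: 5 << 20}}}
    idx, off, err := cacheObjectToPartOffset(oi, 12<<20)
    // idx == 2, off == 2<<20 (2 MiB), err == nil
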
// get path of on-going multipart caching
func getMultipartCacheSHADir(dir, bucket, object string) string {
    return pathJoin(dir, minioMetaBucket, cacheMultipartDir, getSHA256Hash([]byte(pathJoin(bucket, object))))
}

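Assuming minioMetaBucket resolves to ".minio.sys" and cacheMultipartDir to "multipart" (their names suggest as much, but the constants are not shown in this diff), an in-progress upload is laid out under <cache-drive>/.minio.sys/multipart/<sha256(bucket/object)>/<uploadID>/, holding the part.N files plus the cache.json that SavePartMetadata updates.
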
// clean up stale cache multipart uploads according to cleanup interval.
func (c *diskCache) cleanupStaleUploads(ctx context.Context) {
    if !c.commitWritethrough {
        return
    }
    timer := time.NewTimer(cacheStaleUploadCleanupInterval)
    defer timer.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-timer.C:
            // Reset for the next interval
            timer.Reset(cacheStaleUploadCleanupInterval)
            now := time.Now()
            readDirFn(pathJoin(c.dir, minioMetaBucket, cacheMultipartDir), func(shaDir string, typ os.FileMode) error {
                return readDirFn(pathJoin(c.dir, minioMetaBucket, cacheMultipartDir, shaDir), func(uploadIDDir string, typ os.FileMode) error {
                    uploadIDPath := pathJoin(c.dir, minioMetaBucket, cacheMultipartDir, shaDir, uploadIDDir)
                    fi, err := os.Stat(uploadIDPath)
                    if err != nil {
                        return nil
                    }
                    if now.Sub(fi.ModTime()) > cacheStaleUploadExpiry {
                        removeAll(uploadIDPath)
                    }
                    return nil
                })
            })
        }
    }
}

@@ -110,7 +110,7 @@ func cacheControlOpts(o ObjectInfo) *cacheControl {
    var headerVal string
    for k, v := range m {
        if strings.ToLower(k) == "cache-control" {
        if strings.EqualFold(k, "cache-control") {
            headerVal = v
        }

@@ -246,6 +246,9 @@ func decryptCacheObjectETag(info *ObjectInfo) error {
    if globalCacheKMS == nil {
        return errKMSNotConfigured
    }
    if len(info.Parts) > 0 { // multipart ETag is not encrypted since it is not md5sum
        return nil
    }
    keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(info.UserDefined)
    if err != nil {
        return err

@@ -271,6 +274,43 @@ func decryptCacheObjectETag(info *ObjectInfo) error {
    return nil
}

// decryptCachePartETags tries to decrypt the part ETags saved in encrypted format using the cache KMS
func decryptCachePartETags(c *cacheMeta) ([]string, error) {
    var partETags []string
    encrypted := crypto.S3.IsEncrypted(c.Meta) && isCacheEncrypted(c.Meta)

    switch {
    case encrypted:
        if globalCacheKMS == nil {
            return partETags, errKMSNotConfigured
        }
        keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(c.Meta)
        if err != nil {
            return partETags, err
        }
        extKey, err := globalCacheKMS.DecryptKey(keyID, kmsKey, kms.Context{c.Bucket: path.Join(c.Bucket, c.Object)})
        if err != nil {
            return partETags, err
        }
        var objectKey crypto.ObjectKey
        if err = objectKey.Unseal(extKey, sealedKey, crypto.S3.String(), c.Bucket, c.Object); err != nil {
            return partETags, err
        }
        for i := range c.PartETags {
            etagStr := tryDecryptETag(objectKey[:], c.PartETags[i], false)
            // backend ETag was hex encoded before encrypting, so hex decode to get actual ETag
            etag, err := hex.DecodeString(etagStr)
            if err != nil {
                return []string{}, err
            }
            partETags = append(partETags, string(etag))
        }
        return partETags, nil
    default:
        return c.PartETags, nil
    }
}

func isMetadataSame(m1, m2 map[string]string) bool {
    if m1 == nil && m2 == nil {
        return true

@@ -506,3 +546,38 @@ func bytesToClear(total, free int64, quotaPct, lowWatermark, highWatermark uint64) uint64 {
    lowWMUsage := total * (int64)(lowWatermark*quotaPct) / (100 * 100)
    return (uint64)(math.Min(float64(quotaAllowed), math.Max(0.0, float64(used-lowWMUsage))))
}

type multiWriter struct {
    backendWriter io.Writer
    cacheWriter   *io.PipeWriter
    pipeClosed    bool
}

// multiWriter writes to backend and cache - if cache write
// fails close the pipe, but continue writing to the backend
func (t *multiWriter) Write(p []byte) (n int, err error) {
    n, err = t.backendWriter.Write(p)
    if err == nil && n != len(p) {
        err = io.ErrShortWrite
        return
    }
    if err != nil {
        if !t.pipeClosed {
            t.cacheWriter.CloseWithError(err)
        }
        return
    }

    // ignore errors writing to cache
    if !t.pipeClosed {
        _, cerr := t.cacheWriter.Write(p)
        if cerr != nil {
            t.pipeClosed = true
            t.cacheWriter.CloseWithError(cerr)
        }
    }
    return len(p), nil
}

func cacheMultiWriter(w1 io.Writer, w2 *io.PipeWriter) io.Writer {
    return &multiWriter{backendWriter: w1, cacheWriter: w2}
}

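A minimal usage sketch of the tee helper above; backendWriter and uploadBody are hypothetical placeholders, and the pipe's read end stands in for the disk-cache consumer that the writethrough PutObject path wires up later in this diff.

    // Tee an upload to the backend while streaming a copy to the cache side.
    rPipe, wPipe := io.Pipe()
    go func() {
        // Stand-in for the cache writer; an error here only closes the pipe,
        // the backend copy keeps going.
        io.Copy(io.Discard, rPipe)
    }()
    mw := cacheMultiWriter(backendWriter, wPipe)
    _, err := io.Copy(mw, uploadBody) // backend errors abort the copy; cache errors do not
    wPipe.Close()
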
@@ -59,6 +59,13 @@ const (
    CommitFailed cacheCommitStatus = "failed"
)

const (
    // CommitWriteBack allows staging and write back of cached content for single object uploads
    CommitWriteBack string = "writeback"
    // CommitWriteThrough allows caching multipart uploads to disk synchronously
    CommitWriteThrough string = "writethrough"
)

// String returns string representation of status
func (s cacheCommitStatus) String() string {
    return string(s)

@@ -80,6 +87,13 @@ type CacheObjectLayer interface {
    DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error)
    PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
    CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
    // Multipart operations.
    NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error)
    PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
    AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error
    CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
    CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error)

    // Storage operations.
    StorageInfo(ctx context.Context) CacheStorageInfo
    CacheStats() *CacheStats

@@ -94,21 +108,26 @@ type cacheObjects struct {
    // number of accesses after which to cache an object
    after int
    // commit objects in async manner
    commitWriteback bool
    commitWriteback    bool
    commitWritethrough bool

    // if true migration is in progress from v1 to v2
    migrating bool
    // mutex to protect migration bool
    migMutex sync.Mutex
    // retry queue for writeback cache mode to reattempt upload to backend
    wbRetryCh chan ObjectInfo
    // Cache stats
    cacheStats *CacheStats

    InnerGetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
    InnerGetObjectInfoFn  func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
    InnerDeleteObjectFn   func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
    InnerPutObjectFn      func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
    InnerCopyObjectFn     func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
    InnerGetObjectNInfoFn          func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
    InnerGetObjectInfoFn           func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
    InnerDeleteObjectFn            func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
    InnerPutObjectFn               func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
    InnerCopyObjectFn              func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
    InnerNewMultipartUploadFn      func(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error)
    InnerPutObjectPartFn           func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
    InnerAbortMultipartUploadFn    func(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error
    InnerCompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
    InnerCopyObjectPartFn          func(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error)
}

func (c *cacheObjects) incHitsToMeta(ctx context.Context, dcache *diskCache, bucket, object string, size int64, eTag string, rs *HTTPRangeSpec) error {

@@ -349,7 +368,7 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
            // use a new context to avoid locker prematurely timing out operation when the GetObjectNInfo returns.
            dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, rs, ObjectOptions{
                UserDefined: getMetadata(bReader.ObjInfo),
            }, false)
            }, false, false)
            return
        }
    }()

@@ -367,7 +386,7 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
            io.LimitReader(pr, bkReader.ObjInfo.Size),
            bkReader.ObjInfo.Size, rs, ObjectOptions{
                UserDefined: userDefined,
            }, false)
            }, false, false)
        // close the read end of the pipe, so the error gets
        // propagated to teeReader
        pr.CloseWithError(putErr)

@@ -488,8 +507,6 @@ func (c *cacheObjects) CacheStats() (cs *CacheStats) {

// skipCache() returns true if cache migration is in progress
func (c *cacheObjects) skipCache() bool {
    c.migMutex.Lock()
    defer c.migMutex.Unlock()
    return c.migrating
}

@@ -619,8 +636,6 @@ func (c *cacheObjects) migrateCacheFromV1toV2(ctx context.Context) {
    }

    // update migration status
    c.migMutex.Lock()
    defer c.migMutex.Unlock()
    c.migrating = false
    logStartupMessage(color.Blue("Cache migration completed successfully."))
}

@@ -663,31 +678,82 @@ func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
        return putObjectFn(ctx, bucket, object, r, opts)
    }
    if c.commitWriteback {
        oi, err := dcache.Put(ctx, bucket, object, r, r.Size(), nil, opts, false)
        oi, err := dcache.Put(ctx, bucket, object, r, r.Size(), nil, opts, false, true)
        if err != nil {
            return ObjectInfo{}, err
        }
        go c.uploadObject(GlobalContext, oi)
        return oi, nil
    }
    objInfo, err = putObjectFn(ctx, bucket, object, r, opts)

    if err == nil {
        go func() {
            // fill cache in the background
            bReader, bErr := c.InnerGetObjectNInfoFn(GlobalContext, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
            if bErr != nil {
                return
            }
            defer bReader.Close()
            oi, _, err := dcache.Stat(GlobalContext, bucket, object)
            // avoid cache overwrite if another background routine filled cache
            if err != nil || oi.ETag != bReader.ObjInfo.ETag {
                dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, nil, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)}, false)
            }
        }()
    if !c.commitWritethrough {
        objInfo, err = putObjectFn(ctx, bucket, object, r, opts)
        if err == nil {
            go func() {
                // fill cache in the background
                bReader, bErr := c.InnerGetObjectNInfoFn(GlobalContext, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
                if bErr != nil {
                    return
                }
                defer bReader.Close()
                oi, _, err := dcache.Stat(GlobalContext, bucket, object)
                // avoid cache overwrite if another background routine filled cache
                if err != nil || oi.ETag != bReader.ObjInfo.ETag {
                    dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, nil, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)}, false, true)
                }
            }()
        }
        return objInfo, err
    }
    return objInfo, err
    cLock, lkctx, cerr := dcache.GetLockContext(GlobalContext, bucket, object)
    if cerr != nil {
        return putObjectFn(ctx, bucket, object, r, opts)
    }
    defer cLock.Unlock(lkctx.Cancel)
    // Initialize pipe to stream data to backend
    pipeReader, pipeWriter := io.Pipe()
    hashReader, err := hash.NewReader(pipeReader, size, "", "", r.ActualSize())
    if err != nil {
        return
    }
    // Initialize pipe to stream data to cache
    rPipe, wPipe := io.Pipe()
    infoCh := make(chan ObjectInfo)
    errorCh := make(chan error)
    go func() {
        info, err := putObjectFn(ctx, bucket, object, NewPutObjReader(hashReader), opts)
        if err != nil {
            close(infoCh)
            pipeReader.CloseWithError(err)
            rPipe.CloseWithError(err)
            errorCh <- err
            return
        }
        close(errorCh)
        infoCh <- info
    }()

    go func() {
        _, err := dcache.put(lkctx.Context(), bucket, object, rPipe, r.Size(), nil, opts, false, false)
        if err != nil {
            rPipe.CloseWithError(err)
            return
        }
    }()

    mwriter := cacheMultiWriter(pipeWriter, wPipe)
    _, err = io.Copy(mwriter, r)
    pipeWriter.Close()
    wPipe.Close()

    if err != nil {
        err = <-errorCh
        return ObjectInfo{}, err
    }
    info := <-infoCh
    if cerr = dcache.updateMetadata(lkctx.Context(), bucket, object, info.ETag, info.ModTime, info.Size); cerr != nil {
        dcache.delete(bucket, object)
    }
    return info, err
}

// upload cached object to backend in async commit mode.

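Design note on the writethrough path above: a single request body is fanned out through two pipes, one feeding the backend via a hash-verifying reader and the other feeding dcache.put, while errorCh and infoCh serialize the outcome so cache metadata is finalized (or the cache entry deleted) only after the backend result is known.
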
@@ -759,13 +825,14 @@ func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjectLayer, error) {
        return nil, err
    }
    c := &cacheObjects{
        cache:           cache,
        exclude:         config.Exclude,
        after:           config.After,
        migrating:       migrateSw,
        migMutex:        sync.Mutex{},
        commitWriteback: config.CommitWriteback,
        cacheStats:      newCacheStats(),
        cache:              cache,
        exclude:            config.Exclude,
        after:              config.After,
        migrating:          migrateSw,
        commitWriteback:    config.CacheCommitMode == CommitWriteBack,
        commitWritethrough: config.CacheCommitMode == CommitWriteThrough,

        cacheStats: newCacheStats(),
        InnerGetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
            return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts)
        },

@@ -781,6 +848,21 @@ func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjectLayer, error) {
        InnerCopyObjectFn: func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
            return newObjectLayerFn().CopyObject(ctx, srcBucket, srcObject, destBucket, destObject, srcInfo, srcOpts, dstOpts)
        },
        InnerNewMultipartUploadFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
            return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, opts)
        },
        InnerPutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
            return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
        },
        InnerAbortMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
            return newObjectLayerFn().AbortMultipartUpload(ctx, bucket, object, uploadID, opts)
        },
        InnerCompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
            return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
        },
        InnerCopyObjectPartFn: func(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
            return newObjectLayerFn().CopyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID, startOffset, length, srcInfo, srcOpts, dstOpts)
        },
    }
    c.cacheStats.GetDiskStats = func() []CacheDiskStats {
        cacheDiskStats := make([]CacheDiskStats, len(c.cache))

@@ -859,3 +941,247 @@ func (c *cacheObjects) queuePendingWriteback(ctx context.Context) {
            }
        }
    }

// NewMultipartUpload - Starts a new multipart upload operation to backend - if writethrough mode is enabled, starts caching the multipart.
func (c *cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
    newMultipartUploadFn := c.InnerNewMultipartUploadFn
    dcache, err := c.getCacheToLoc(ctx, bucket, object)
    if err != nil {
        // disk cache could not be located, execute backend call.
        return newMultipartUploadFn(ctx, bucket, object, opts)
    }
    if c.skipCache() {
        return newMultipartUploadFn(ctx, bucket, object, opts)
    }

    if opts.ServerSideEncryption != nil { // avoid caching encrypted objects
        dcache.Delete(ctx, bucket, object)
        return newMultipartUploadFn(ctx, bucket, object, opts)
    }

    // skip cache for objects with locks
    objRetention := objectlock.GetObjectRetentionMeta(opts.UserDefined)
    legalHold := objectlock.GetObjectLegalHoldMeta(opts.UserDefined)
    if objRetention.Mode.Valid() || legalHold.Status.Valid() {
        dcache.Delete(ctx, bucket, object)
        return newMultipartUploadFn(ctx, bucket, object, opts)
    }

    // fetch from backend if cache exclude pattern or cache-control
    // directive set to exclude
    if c.isCacheExclude(bucket, object) {
        dcache.Delete(ctx, bucket, object)
        return newMultipartUploadFn(ctx, bucket, object, opts)
    }
    if !c.commitWritethrough && !c.commitWriteback {
        return newMultipartUploadFn(ctx, bucket, object, opts)
    }

    // perform multipart upload on backend and cache simultaneously
    uploadID, err = newMultipartUploadFn(ctx, bucket, object, opts)
    dcache.NewMultipartUpload(GlobalContext, bucket, object, uploadID, opts)
    return uploadID, err
}

// PutObjectPart streams part to cache concurrently if writethrough mode is enabled. Otherwise redirects the call to remote
func (c *cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
    putObjectPartFn := c.InnerPutObjectPartFn
    dcache, err := c.getCacheToLoc(ctx, bucket, object)
    if err != nil {
        // disk cache could not be located, execute backend call.
        return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
    }

    if !c.commitWritethrough && !c.commitWriteback {
        return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
    }
    if c.skipCache() {
        return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
    }
    size := data.Size()

    // avoid caching part if space unavailable
    if !dcache.diskSpaceAvailable(size) {
        return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
    }

    if opts.ServerSideEncryption != nil {
        dcache.Delete(ctx, bucket, object)
        return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
    }

    // skip cache for objects with locks
    objRetention := objectlock.GetObjectRetentionMeta(opts.UserDefined)
    legalHold := objectlock.GetObjectLegalHoldMeta(opts.UserDefined)
    if objRetention.Mode.Valid() || legalHold.Status.Valid() {
        dcache.Delete(ctx, bucket, object)
        return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
    }

    // fetch from backend if cache exclude pattern or cache-control
    // directive set to exclude
    if c.isCacheExclude(bucket, object) {
        dcache.Delete(ctx, bucket, object)
        return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
    }

    info = PartInfo{}
    // Initialize pipe to stream data to backend
    pipeReader, pipeWriter := io.Pipe()
    hashReader, err := hash.NewReader(pipeReader, size, "", "", data.ActualSize())
    if err != nil {
        return
    }
    // Initialize pipe to stream data to cache
    rPipe, wPipe := io.Pipe()
    pinfoCh := make(chan PartInfo)
    cinfoCh := make(chan PartInfo)

    errorCh := make(chan error)
    go func() {
        info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, NewPutObjReader(hashReader), opts)
        if err != nil {
            close(pinfoCh)
            pipeReader.CloseWithError(err)
            rPipe.CloseWithError(err)
            errorCh <- err
            return
        }
        close(errorCh)
        pinfoCh <- info
    }()
    go func() {
        pinfo, perr := dcache.PutObjectPart(GlobalContext, bucket, object, uploadID, partID, rPipe, data.Size(), opts)
        if perr != nil {
            rPipe.CloseWithError(perr)
            close(cinfoCh)
            // clean up upload
            dcache.AbortUpload(bucket, object, uploadID)
            return
        }
        cinfoCh <- pinfo
    }()

    mwriter := cacheMultiWriter(pipeWriter, wPipe)
    _, err = io.Copy(mwriter, data)
    pipeWriter.Close()
    wPipe.Close()

    if err != nil {
        err = <-errorCh
        return PartInfo{}, err
    }
    info = <-pinfoCh
    cachedInfo := <-cinfoCh
    if info.PartNumber == cachedInfo.PartNumber {
        cachedInfo.ETag = info.ETag
        cachedInfo.LastModified = info.LastModified
        dcache.SavePartMetadata(GlobalContext, bucket, object, uploadID, partID, cachedInfo)
    }
    return info, err
}

// CopyObjectPart behaves similar to PutObjectPart - caches part to upload dir if writethrough mode is enabled.
func (c *cacheObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
    copyObjectPartFn := c.InnerCopyObjectPartFn
    dcache, err := c.getCacheToLoc(ctx, dstBucket, dstObject)
    if err != nil {
        // disk cache could not be located, execute backend call.
        return copyObjectPartFn(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID, startOffset, length, srcInfo, srcOpts, dstOpts)
    }

    if !c.commitWritethrough && !c.commitWriteback {
        return copyObjectPartFn(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID, startOffset, length, srcInfo, srcOpts, dstOpts)
    }
    if err := dcache.uploadIDExists(dstBucket, dstObject, uploadID); err != nil {
        return copyObjectPartFn(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID, startOffset, length, srcInfo, srcOpts, dstOpts)
    }
    partInfo, err := copyObjectPartFn(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID, startOffset, length, srcInfo, srcOpts, dstOpts)
    if err != nil {
        return pi, toObjectErr(err, dstBucket, dstObject)
    }
    go func() {
        isSuffixLength := false
        if startOffset < 0 {
            isSuffixLength = true
        }

        rs := &HTTPRangeSpec{
            IsSuffixLength: isSuffixLength,
            Start:          startOffset,
            End:            startOffset + length,
        }
        // fill cache in the background
        bReader, bErr := c.InnerGetObjectNInfoFn(GlobalContext, srcBucket, srcObject, rs, http.Header{}, readLock, ObjectOptions{})
        if bErr != nil {
            return
        }
        defer bReader.Close()
        // avoid cache overwrite if another background routine filled cache
        dcache.PutObjectPart(GlobalContext, dstBucket, dstObject, uploadID, partID, bReader, length, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)})
    }()
    // Success.
    return partInfo, nil
}

// CompleteMultipartUpload - completes multipart upload operation on the backend. If writethrough mode is enabled, this also
// finalizes the upload saved in cache multipart dir.
func (c *cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (oi ObjectInfo, err error) {
    completeMultipartUploadFn := c.InnerCompleteMultipartUploadFn
    if !c.commitWritethrough && !c.commitWriteback {
        return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
    }
    dcache, err := c.getCacheToLoc(ctx, bucket, object)
    if err != nil {
        // disk cache could not be located, execute backend call.
        return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
    }

    // perform multipart upload on backend and cache simultaneously
    oi, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
    if err == nil {
        // fill cache in the background
        go func() {
            _, err := dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, oi, opts)
            if err != nil {
                // fill cache in the background
                bReader, bErr := c.InnerGetObjectNInfoFn(GlobalContext, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
                if bErr != nil {
                    return
                }
                defer bReader.Close()
                oi, _, err := dcache.Stat(GlobalContext, bucket, object)
                // avoid cache overwrite if another background routine filled cache
                if err != nil || oi.ETag != bReader.ObjInfo.ETag {
                    dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, nil, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)}, false, false)
                }
            }
        }()
    }
    return
}

// AbortMultipartUpload - aborts multipart upload on backend and cache.
func (c *cacheObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
    abortMultipartUploadFn := c.InnerAbortMultipartUploadFn
    if !c.commitWritethrough && !c.commitWriteback {
        return abortMultipartUploadFn(ctx, bucket, object, uploadID, opts)
    }
    dcache, err := c.getCacheToLoc(ctx, bucket, object)
    if err != nil {
        // disk cache could not be located, execute backend call.
        return abortMultipartUploadFn(ctx, bucket, object, uploadID, opts)
    }
    if err = dcache.uploadIDExists(bucket, object, uploadID); err != nil {
        return toObjectErr(err, bucket, object, uploadID)
    }

    // execute backend operation
    err = abortMultipartUploadFn(ctx, bucket, object, uploadID, opts)
    if err != nil {
        return err
    }
    // abort multipart upload on cache
    go dcache.AbortUpload(bucket, object, uploadID)
    return nil
}

@@ -164,7 +164,6 @@ func (api objectAPIHandlers) GetBucketLoggingHandler(w http.ResponseWriter, r *http.Request) {
// DeleteBucketWebsiteHandler - DELETE bucket website, a dummy api
func (api objectAPIHandlers) DeleteBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
    writeSuccessResponseHeadersOnly(w)
    w.(http.Flusher).Flush()
}

// GetBucketCorsHandler - GET bucket cors, a dummy api

@@ -18,8 +18,6 @@
package cmd

import (
    "context"
    "errors"
    "fmt"
    "net"
    "net/http"

@@ -36,7 +34,6 @@ import (
    "github.com/dustin/go-humanize"
    "github.com/minio/minio-go/v7/pkg/set"
    "github.com/minio/minio/internal/config"
    xhttp "github.com/minio/minio/internal/http"
    "github.com/minio/minio/internal/logger"
    "github.com/minio/minio/internal/mountinfo"
    "github.com/minio/pkg/env"

@@ -743,72 +740,6 @@ func GetProxyEndpointLocalIndex(proxyEps []ProxyEndpoint) int {
    return -1
}

func httpDo(clnt *http.Client, req *http.Request, f func(*http.Response, error) error) error {
    ctx, cancel := context.WithTimeout(GlobalContext, 200*time.Millisecond)
    defer cancel()

    // Run the HTTP request in a goroutine and pass the response to f.
    c := make(chan error, 1)
    req = req.WithContext(ctx)
    go func() { c <- f(clnt.Do(req)) }()
    select {
    case <-ctx.Done():
        <-c // Wait for f to return.
        return ctx.Err()
    case err := <-c:
        return err
    }
}

func getOnlineProxyEndpointIdx() int {
    type reqIndex struct {
        Request *http.Request
        Idx     int
    }

    proxyRequests := make(map[*http.Client]reqIndex, len(globalProxyEndpoints))
    for i, proxyEp := range globalProxyEndpoints {
        proxyEp := proxyEp
        serverURL := &url.URL{
            Scheme: proxyEp.Scheme,
            Host:   proxyEp.Host,
            Path:   pathJoin(healthCheckPathPrefix, healthCheckLivenessPath),
        }

        req, err := http.NewRequest(http.MethodGet, serverURL.String(), nil)
        if err != nil {
            continue
        }

        proxyRequests[&http.Client{
            Transport: proxyEp.Transport,
        }] = reqIndex{
            Request: req,
            Idx:     i,
        }
    }

    for c, r := range proxyRequests {
        if err := httpDo(c, r.Request, func(resp *http.Response, err error) error {
            if err != nil {
                return err
            }
            xhttp.DrainBody(resp.Body)
            if resp.StatusCode != http.StatusOK {
                return errors.New(resp.Status)
            }
            if v := resp.Header.Get(xhttp.MinIOServerStatus); v == unavailable {
                return errors.New(v)
            }
            return nil
        }); err != nil {
            continue
        }
        return r.Idx
    }
    return -1
}

// GetProxyEndpoints - get all endpoints that can be used to proxy list request.
func GetProxyEndpoints(endpointServerPools EndpointServerPools) []ProxyEndpoint {
    var proxyEps []ProxyEndpoint

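Note the pattern in httpDo above: each liveness probe is bounded to 200ms by racing the request goroutine against a context deadline, so a single hung proxy endpoint cannot stall getOnlineProxyEndpointIdx while it scans for a healthy peer.
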
@@ -172,19 +172,25 @@ func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
    if err == errErasureWriteQuorum && !opts.NoRecreate {
        undoDeleteBucket(storageDisks, bucket)
    }
    if err != nil {
        return toObjectErr(err, bucket)
    }

    // At this point we have `err == nil` but some errors might be `errVolumeNotEmpty`
    // we should proceed to attempt a force delete of such buckets.
    for index, err := range dErrs {
        if err == errVolumeNotEmpty && storageDisks[index] != nil {
            storageDisks[index].RenameFile(ctx, bucket, "", minioMetaTmpDeletedBucket, mustGetUUID())
    if err == nil || errors.Is(err, errVolumeNotFound) {
        var purgedDangling bool
        // At this point we have `err == nil` but some errors might be `errVolumeNotEmpty`
        // we should proceed to attempt a force delete of such buckets.
        for index, err := range dErrs {
            if err == errVolumeNotEmpty && storageDisks[index] != nil {
                storageDisks[index].RenameFile(ctx, bucket, "", minioMetaTmpDeletedBucket, mustGetUUID())
                purgedDangling = true
            }
        }
        // if we purged dangling buckets, ignore errVolumeNotFound error.
        if purgedDangling {
            err = nil
        }

    }

    return nil
    return toObjectErr(err, bucket)
}

// IsNotificationSupported returns whether bucket notification is applicable for this layer.

@@ -1088,37 +1088,64 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
        writeQuorums[i] = getWriteQuorum(len(storageDisks))
    }

    versions := make([]FileInfo, len(objects))
    versionsMap := make(map[string]FileInfoVersions, len(objects))
    for i := range objects {
        if objects[i].VersionID == "" {
            modTime := opts.MTime
            if opts.MTime.IsZero() {
                modTime = UTCNow()
            }
            uuid := opts.VersionID
            if uuid == "" {
                uuid = mustGetUUID()
            }
            if opts.Versioned || opts.VersionSuspended {
                versions[i] = FileInfo{
                    Name:             objects[i].ObjectName,
                    ModTime:          modTime,
                    Deleted:          true, // delete marker
                    ReplicationState: objects[i].ReplicationState(),
                }
                versions[i].SetTierFreeVersionID(mustGetUUID())
                if opts.Versioned {
                    versions[i].VersionID = uuid
                }
                continue
            }
        }
        versions[i] = FileInfo{
        // Construct the FileInfo data that needs to be preserved on the disk.
        vr := FileInfo{
            Name:             objects[i].ObjectName,
            VersionID:        objects[i].VersionID,
            ReplicationState: objects[i].ReplicationState(),
            // save the index to set correct error at this index.
            Idx: i,
        }
        versions[i].SetTierFreeVersionID(mustGetUUID())
        vr.SetTierFreeVersionID(mustGetUUID())
        // VersionID is not set means delete is not specific about
        // any version, look for if the bucket is versioned or not.
        if objects[i].VersionID == "" {
            if opts.Versioned || opts.VersionSuspended {
                // Bucket is versioned and no version was explicitly
                // mentioned for deletes, create a delete marker instead.
                vr.ModTime = UTCNow()
                vr.Deleted = true
                // Versioning suspended means that we add a `null` version
                // delete marker, if not add a new version for this delete
                // marker.
                if opts.Versioned {
                    vr.VersionID = mustGetUUID()
                }
            }
        }
        // De-dup same object name to collect multiple versions for same object.
        v, ok := versionsMap[objects[i].ObjectName]
        if ok {
            v.Versions = append(v.Versions, vr)
        } else {
            v = FileInfoVersions{
                Name:     vr.Name,
                Versions: []FileInfo{vr},
            }
        }
        if vr.Deleted {
            dobjects[i] = DeletedObject{
                DeleteMarker:          vr.Deleted,
                DeleteMarkerVersionID: vr.VersionID,
                DeleteMarkerMTime:     DeleteMarkerMTime{vr.ModTime},
                ObjectName:            vr.Name,
                ReplicationState:      vr.ReplicationState,
            }
        } else {
            dobjects[i] = DeletedObject{
                ObjectName:       vr.Name,
                VersionID:        vr.VersionID,
                ReplicationState: vr.ReplicationState,
            }
        }
        versionsMap[objects[i].ObjectName] = v
    }

    dedupVersions := make([]FileInfoVersions, 0, len(versionsMap))
    for _, version := range versionsMap {
        dedupVersions = append(dedupVersions, version)
    }

    // Initialize list of errors.

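For intuition (hypothetical inputs): a delete batch of {"a", v1}, {"a", v2}, {"b", ""} collapses in versionsMap to two FileInfoVersions entries ("a" with two versions, "b" with one), so each disk later receives one DeleteVersions entry per object name, while the saved Idx field maps any per-version error back to the caller's original slice position.
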
@@ -1130,17 +1157,24 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
        wg.Add(1)
        go func(index int, disk StorageAPI) {
            defer wg.Done()
            delObjErrs[index] = make([]error, len(objects))
            if disk == nil {
                delObjErrs[index] = make([]error, len(versions))
                for i := range versions {
                for i := range objects {
                    delObjErrs[index][i] = errDiskNotFound
                }
                return
            }
            delObjErrs[index] = disk.DeleteVersions(ctx, bucket, versions)
            errs := disk.DeleteVersions(ctx, bucket, dedupVersions)
            for i, err := range errs {
                if err == nil {
                    continue
                }
                for _, v := range dedupVersions[i].Versions {
                    delObjErrs[index][v.Idx] = err
                }
            }
        }(index, disk)
    }

    wg.Wait()

    // Reduce errors for each object

@@ -1162,28 +1196,17 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
    }

    if errs[objIndex] == nil {
        NSUpdated(bucket, objects[objIndex].ObjectName)
    }

    if versions[objIndex].Deleted {
        dobjects[objIndex] = DeletedObject{
            DeleteMarker:          versions[objIndex].Deleted,
            DeleteMarkerVersionID: versions[objIndex].VersionID,
            DeleteMarkerMTime:     DeleteMarkerMTime{versions[objIndex].ModTime},
            ObjectName:            versions[objIndex].Name,
            ReplicationState:      versions[objIndex].ReplicationState,
        }
    } else {
        dobjects[objIndex] = DeletedObject{
            ObjectName:       versions[objIndex].Name,
            VersionID:        versions[objIndex].VersionID,
            ReplicationState: versions[objIndex].ReplicationState,
        }
        defer NSUpdated(bucket, objects[objIndex].ObjectName)
    }
    }

    // Check failed deletes across multiple objects
    for _, version := range versions {
    for i, dobj := range dobjects {
        // This object errored, no need to attempt a heal.
        if errs[i] != nil {
            continue
        }

        // Check if there is any offline disk and add it to the MRF list
        for _, disk := range storageDisks {
            if disk != nil && disk.IsOnline() {

@@ -1193,7 +1216,7 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {

            // all other direct versionId references we should
            // ensure no dangling file is left over.
            er.addPartial(bucket, version.Name, version.VersionID, -1)
            er.addPartial(bucket, dobj.ObjectName, dobj.VersionID, -1)
            break
        }
    }

@@ -1440,13 +1463,21 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
        return ObjectInfo{}, toObjectErr(err, bucket, object)
    }
    if fi.Deleted {
        if opts.VersionID == "" {
            return ObjectInfo{}, toObjectErr(errFileNotFound, bucket, object)
        }
        return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
    }

    for k, v := range opts.UserDefined {
    // if version-id is not specified retention is supposed to be set on the latest object.
    if opts.VersionID == "" {
        opts.VersionID = fi.VersionID
    }

    objInfo := fi.ToObjectInfo(bucket, object)
    if opts.EvalMetadataFn != nil {
        if err := opts.EvalMetadataFn(objInfo); err != nil {
            return ObjectInfo{}, err
        }
    }
    for k, v := range objInfo.UserDefined {
        fi.Metadata[k] = v
    }
    fi.ModTime = opts.MTime

@@ -1456,9 +1487,7 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
        return ObjectInfo{}, toObjectErr(err, bucket, object)
    }

    objInfo := fi.ToObjectInfo(bucket, object)
    return objInfo, nil

    return fi.ToObjectInfo(bucket, object), nil
}

// PutObjectTags - replace or add tags to an existing object

@@ -202,13 +202,6 @@ func TestErasureDeleteObjectsErasureSet(t *testing.T) {
}

func TestErasureDeleteObjectDiskNotFound(t *testing.T) {
    restoreGlobalStorageClass := globalStorageClass
    defer func() {
        globalStorageClass = restoreGlobalStorageClass
    }()

    globalStorageClass = storageclass.Config{}

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

@@ -278,13 +271,6 @@ func TestErasureDeleteObjectDiskNotFound(t *testing.T) {
}

func TestErasureDeleteObjectDiskNotFoundErasure4(t *testing.T) {
    restoreGlobalStorageClass := globalStorageClass
    defer func() {
        globalStorageClass = restoreGlobalStorageClass
    }()

    globalStorageClass = storageclass.Config{}

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

@@ -345,13 +331,6 @@ func TestErasureDeleteObjectDiskNotFoundErasure4(t *testing.T) {
}

func TestErasureDeleteObjectDiskNotFoundErr(t *testing.T) {
    restoreGlobalStorageClass := globalStorageClass
    defer func() {
        globalStorageClass = restoreGlobalStorageClass
    }()

    globalStorageClass = storageclass.Config{}

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

@@ -807,13 +786,6 @@ func TestObjectQuorumFromMeta(t *testing.T) {
}

func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string, t TestErrHandler) {
    restoreGlobalStorageClass := globalStorageClass
    defer func() {
        globalStorageClass = restoreGlobalStorageClass
    }()

    globalStorageClass = storageclass.Config{}

    bucket := getRandomBucketName()

    var opts ObjectOptions

@@ -516,7 +516,7 @@ func (a *auditObjectErasureMap) MarshalJSON() ([]byte, error) {
}

func auditObjectErasureSet(ctx context.Context, object string, set *erasureObjects) {
    if len(logger.AuditTargets) == 0 {
    if len(logger.AuditTargets()) == 0 {
        return
    }

@@ -1213,12 +1213,7 @@ func markRootDisksAsDown(storageDisks []StorageAPI, errs []error) {

// HealFormat - heals missing `format.json` on fresh unformatted disks.
func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealResultItem, err error) {
    storageDisks, errs := initStorageDisksWithErrorsWithoutHealthCheck(s.endpoints)
    for i, derr := range errs {
        if derr != nil && derr != errDiskNotFound {
            return madmin.HealResultItem{}, fmt.Errorf("Disk %s: %w", s.endpoints[i], derr)
        }
    }
    storageDisks, _ := initStorageDisksWithErrorsWithoutHealthCheck(s.endpoints)

    defer func(storageDisks []StorageAPI) {
        if err != nil {

@@ -1279,8 +1274,14 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealResultItem, err error) {
    }

    // Save new formats `format.json` on unformatted disks.
    if err = saveUnformattedFormat(ctx, storageDisks, tmpNewFormats); err != nil {
        return madmin.HealResultItem{}, err
    for index, format := range tmpNewFormats {
        if storageDisks[index] == nil || format == nil {
            continue
        }
        if err := saveFormatErasure(storageDisks[index], format, true); err != nil {
            logger.LogIf(ctx, fmt.Errorf("Disk %s failed to write updated 'format.json': %v", storageDisks[index], err))
            tmpNewFormats[index] = nil // this disk failed to write new format
        }
    }

    s.erasureDisksMu.Lock()

@@ -24,6 +24,7 @@ import (
    "io"

    "github.com/klauspost/reedsolomon"
    xioutil "github.com/minio/minio/internal/ioutil"
    "github.com/minio/minio/internal/logger"
)

@@ -82,7 +83,7 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, dataBlocks int, offset int64, length int64) (int64, error) {

        // We have written all the blocks, write the last remaining block.
        if write < int64(len(block)) {
            n, err := io.Copy(dst, bytes.NewReader(block[:write]))
            n, err := xioutil.Copy(dst, bytes.NewReader(block[:write]))
            if err != nil {
                // The writer will be closed in case of range queries, which will emit ErrClosedPipe.
                // The reader pipe might be closed at ListObjects io.EOF; ignore it.

@@ -96,7 +97,7 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, dataBlocks int, offset int64, length int64) (int64, error) {
        }

        // Copy the block.
        n, err := io.Copy(dst, bytes.NewReader(block))
        n, err := xioutil.Copy(dst, bytes.NewReader(block))
        if err != nil {
            // The writer will be closed in case of range queries, which will emit ErrClosedPipe.
            // The reader pipe might be closed at ListObjects io.EOF; ignore it.

@@ -698,22 +698,6 @@ func initErasureMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatErasureV3) error {
    return nil
}

// saveUnformattedFormat - populates `format.json` on unformatted disks.
// also adds `.healing.bin` on the disks which are being actively healed.
func saveUnformattedFormat(ctx context.Context, storageDisks []StorageAPI, formats []*formatErasureV3) error {
    for index, format := range formats {
        if format == nil {
            continue
        }
        if storageDisks[index] != nil {
            if err := saveFormatErasure(storageDisks[index], format, true); err != nil {
                return err
            }
        }
    }
    return nil
}

// saveFormatErasureAll - populates `format.json` on disks in its order.
func saveFormatErasureAll(ctx context.Context, storageDisks []StorageAPI, formats []*formatErasureV3) error {
    g := errgroup.WithNErrs(len(storageDisks))

@@ -26,6 +26,7 @@ import (
    "strings"
    "time"

    xioutil "github.com/minio/minio/internal/ioutil"
    "github.com/minio/minio/internal/lock"
    "github.com/minio/minio/internal/logger"
)

@@ -334,7 +335,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, fallocSize int64) (int64, error) {
    }
    defer writer.Close()

    bytesWritten, err := io.Copy(writer, reader)
    bytesWritten, err := xioutil.Copy(writer, reader)
    if err != nil {
        logger.LogIf(ctx, err)
        return 0, err

58
cmd/fs-v1.go
@ -56,9 +56,6 @@ var defaultEtag = "00000000000000000000000000000000-1"
|
|||
type FSObjects struct {
|
||||
GatewayUnsupported
|
||||
|
||||
// The count of concurrent calls on FSObjects API
|
||||
activeIOCount int64
|
||||
|
||||
// Path to be exported over S3 API.
|
 	fsPath string

 	// meta json filename, varies by fs / cache backend.

@@ -215,11 +212,6 @@ func (fs *FSObjects) LocalStorageInfo(ctx context.Context) (StorageInfo, []error

 // StorageInfo - returns underlying storage statistics.
 func (fs *FSObjects) StorageInfo(ctx context.Context) (StorageInfo, []error) {
-	atomic.AddInt64(&fs.activeIOCount, 1)
-	defer func() {
-		atomic.AddInt64(&fs.activeIOCount, -1)
-	}()
-
 	di, err := getDiskInfo(fs.fsPath)
 	if err != nil {
 		return StorageInfo{}, []error{err}
@@ -444,11 +436,6 @@ func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket string,
 	defer NSUpdated(bucket, slashSeparator)

-	atomic.AddInt64(&fs.activeIOCount, 1)
-	defer func() {
-		atomic.AddInt64(&fs.activeIOCount, -1)
-	}()
-
 	bucketDir, err := fs.getBucketDir(ctx, bucket)
 	if err != nil {
 		return toObjectErr(err, bucket)
@@ -509,11 +496,6 @@ func (fs *FSObjects) DeleteBucketPolicy(ctx context.Context, bucket string) erro

 // GetBucketInfo - fetch bucket metadata info.
 func (fs *FSObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) {
-	atomic.AddInt64(&fs.activeIOCount, 1)
-	defer func() {
-		atomic.AddInt64(&fs.activeIOCount, -1)
-	}()
-
 	st, err := fs.statBucketDir(ctx, bucket)
 	if err != nil {
 		return bi, toObjectErr(err, bucket)
@@ -538,11 +520,6 @@ func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
 		return nil, err
 	}

-	atomic.AddInt64(&fs.activeIOCount, 1)
-	defer func() {
-		atomic.AddInt64(&fs.activeIOCount, -1)
-	}()
-
 	entries, err := readDirWithOpts(fs.fsPath, readDirOpts{count: -1, followDirSymlink: true})
 	if err != nil {
 		logger.LogIf(ctx, errDiskNotFound)
@@ -591,11 +568,6 @@ func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
 func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
 	defer NSUpdated(bucket, slashSeparator)

-	atomic.AddInt64(&fs.activeIOCount, 1)
-	defer func() {
-		atomic.AddInt64(&fs.activeIOCount, -1)
-	}()
-
 	bucketDir, err := fs.getBucketDir(ctx, bucket)
 	if err != nil {
 		return toObjectErr(err, bucket)
@@ -656,11 +628,6 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
 		defer objectDWLock.Unlock(lkctx.Cancel)
 	}

-	atomic.AddInt64(&fs.activeIOCount, 1)
-	defer func() {
-		atomic.AddInt64(&fs.activeIOCount, -1)
-	}()
-
 	if _, err := fs.statBucketDir(ctx, srcBucket); err != nil {
 		return oi, toObjectErr(err, srcBucket)
 	}
@@ -734,11 +701,6 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
 		return nil, err
 	}

-	atomic.AddInt64(&fs.activeIOCount, 1)
-	defer func() {
-		atomic.AddInt64(&fs.activeIOCount, -1)
-	}()
-
 	if _, err = fs.statBucketDir(ctx, bucket); err != nil {
 		return nil, toObjectErr(err, bucket)
 	}
@@ -998,11 +960,6 @@ func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, o
 		}
 	}

-	atomic.AddInt64(&fs.activeIOCount, 1)
-	defer func() {
-		atomic.AddInt64(&fs.activeIOCount, -1)
-	}()
-
 	oi, err := fs.getObjectInfoWithLock(ctx, bucket, object)
 	if err == errCorruptedFormat || err == io.EOF {
 		lk := fs.NewNSLock(bucket, object)
@@ -1049,11 +1006,6 @@ func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string
 	ctx = lkctx.Context()
 	defer lk.Unlock(lkctx.Cancel)

-	atomic.AddInt64(&fs.activeIOCount, 1)
-	defer func() {
-		atomic.AddInt64(&fs.activeIOCount, -1)
-	}()
-
 	return fs.putObject(ctx, bucket, object, r, opts)
 }

@@ -1221,11 +1173,6 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string, op
 		return objInfo, err
 	}

-	atomic.AddInt64(&fs.activeIOCount, 1)
-	defer func() {
-		atomic.AddInt64(&fs.activeIOCount, -1)
-	}()
-
 	if _, err = fs.statBucketDir(ctx, bucket); err != nil {
 		return objInfo, toObjectErr(err, bucket)
 	}
@@ -1315,11 +1262,6 @@ func (fs *FSObjects) ListObjectVersions(ctx context.Context, bucket, prefix, mar
 // ListObjects - list all objects at prefix upto maxKeys., optionally delimited by '/'. Maintains the list pool
 // state for future re-entrant list requests.
 func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
-	atomic.AddInt64(&fs.activeIOCount, 1)
-	defer func() {
-		atomic.AddInt64(&fs.activeIOCount, -1)
-	}()
-
 	return listObjects(ctx, fs, bucket, prefix, marker, delimiter, maxKeys, fs.listPool,
 		fs.listDirFactory(), fs.isLeaf, fs.isLeafDir, fs.getObjectInfoNoFSLock, fs.getObjectInfoNoFSLock)
 }
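Every hunk above deletes the same five-line idiom: an atomic increment of `activeIOCount` paired with a deferred decrement around each operation. For reference, a minimal self-contained sketch of that idiom; the `fsBackend` type and `trackIO` helper are illustrative stand-ins, not part of the diff:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type fsBackend struct {
	activeIOCount int64 // must only be touched via sync/atomic
}

// trackIO increments the in-flight counter and returns the matching
// decrement to be deferred by the caller.
func (fs *fsBackend) trackIO() func() {
	atomic.AddInt64(&fs.activeIOCount, 1)
	return func() { atomic.AddInt64(&fs.activeIOCount, -1) }
}

func main() {
	fs := &fsBackend{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer fs.trackIO()() // counted while the operation runs
		}()
	}
	wg.Wait()
	fmt.Println("in-flight:", atomic.LoadInt64(&fs.activeIOCount)) // 0
}
```

The `defer fs.trackIO()()` line works because Go evaluates `fs.trackIO()` immediately at the defer statement (the increment) and defers only the returned closure (the decrement).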
@@ -287,7 +287,7 @@ func ErrorRespToObjectError(err error, params ...string) error {
 	}

 	if xnet.IsNetworkOrHostDown(err, false) {
-		return BackendDown{}
+		return BackendDown{Err: err.Error()}
 	}

 	minioErr, ok := err.(minio.ErrorResponse)
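The one-line change above makes `BackendDown` carry the underlying cause as text rather than being an empty marker. A hedged sketch of that shape; only the `Err` field appears in the diff, so the `Error` method body here is an assumption:

```go
package main

import (
	"errors"
	"fmt"
)

// BackendDown mirrors the field shown in the hunk; the rest is illustrative.
type BackendDown struct {
	Err string
}

func (e BackendDown) Error() string {
	if e.Err == "" {
		return "backend is down"
	}
	return "backend is down: " + e.Err
}

func main() {
	cause := errors.New("dial tcp 10.0.0.1:9000: i/o timeout")
	err := BackendDown{Err: cause.Error()}
	fmt.Println(err) // backend is down: dial tcp 10.0.0.1:9000: i/o timeout
}
```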
@@ -269,7 +269,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
 		addrs = append(addrs, globalMinioAddr)
 	}

-	httpServer := xhttp.NewServer(addrs, criticalErrorHandler{corsHandler(router)}, getCert)
+	httpServer := xhttp.NewServer(addrs, setCriticalErrorHandler(corsHandler(router)), getCert)
 	httpServer.BaseContext = func(listener net.Listener) context.Context {
 		return GlobalContext
 	}
@@ -306,7 +306,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
 		logger.FatalIf(globalNotificationSys.Init(GlobalContext, buckets, newObject), "Unable to initialize notification system")
 	}

-	go globalIAMSys.Init(GlobalContext, newObject, globalEtcdClient)
+	go globalIAMSys.Init(GlobalContext, newObject, globalEtcdClient, globalRefreshIAMInterval)

 	if globalCacheConfig.Enabled {
 		// initialize the new disk cache objects.
@@ -28,6 +28,7 @@ import (
 	"path"
+	"sort"
 	"strings"
 	"sync"
 	"syscall"
 	"time"

@@ -407,6 +408,7 @@ func (n *hdfsObjects) listDirFactory() minio.ListDirFunc {

 // ListObjects lists all blobs in HDFS bucket filtered by prefix.
 func (n *hdfsObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
+	var mutex sync.Mutex
 	fileInfos := make(map[string]os.FileInfo)
 	targetPath := n.hdfsPathJoin(bucket, prefix)

@@ -430,6 +432,9 @@ func (n *hdfsObjects) ListObjects(ctx context.Context, bucket, prefix, marker, d
 	}

 	getObjectInfo := func(ctx context.Context, bucket, entry string) (minio.ObjectInfo, error) {
+		mutex.Lock()
+		defer mutex.Unlock()
+
 		filePath := path.Clean(n.hdfsPathJoin(bucket, entry))
 		fi, ok := fileInfos[filePath]
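The hunks above serialize access to the shared `fileInfos` map with a single `sync.Mutex`, since the `getObjectInfo` callbacks may run concurrently. A runnable sketch of the same pattern, with a simplified map type standing in for `map[string]os.FileInfo`:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.Mutex
	fileInfos := make(map[string]int64) // path -> size, stand-in for os.FileInfo

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Every read or write of the shared map holds the mutex.
			mu.Lock()
			defer mu.Unlock()
			fileInfos[fmt.Sprintf("bucket/obj-%d", i)] = int64(i)
		}(i)
	}
	wg.Wait()
	fmt.Println(len(fileInfos)) // 8
}
```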
@@ -18,10 +18,10 @@
 package cmd

 import (
-	"context"
 	"net"
 	"net/http"
 	"path"
+	"runtime/debug"
 	"strings"
 	"sync/atomic"
 	"time"

@@ -29,7 +29,7 @@ import (
 	"github.com/minio/minio-go/v7/pkg/set"
 	xnet "github.com/minio/pkg/net"

-	humanize "github.com/dustin/go-humanize"
+	"github.com/dustin/go-humanize"
 	"github.com/minio/minio/internal/config/dns"
 	"github.com/minio/minio/internal/crypto"
 	xhttp "github.com/minio/minio/internal/http"

@@ -37,42 +37,39 @@ import (
 	"github.com/minio/minio/internal/logger"
 )

-// Adds limiting body size middleware
-
-// Maximum allowed form data field values. 64MiB is a guessed practical value
-// which is more than enough to accommodate any form data fields and headers.
-const requestFormDataSize = 64 * humanize.MiByte
-
-// For any HTTP request, request body should be not more than 16GiB + requestFormDataSize
-// where, 16GiB is the maximum allowed object size for object upload.
-const requestMaxBodySize = globalMaxObjectSize + requestFormDataSize
-
-func setRequestSizeLimitHandler(h http.Handler) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		// Restricting read data to a given maximum length
-		r.Body = http.MaxBytesReader(w, r.Body, requestMaxBodySize)
-		h.ServeHTTP(w, r)
-	})
-}
-
-// ServeHTTP restricts the size of the http header to 8 KB and the size
-// of the user-defined metadata to 2 KB.
-func setRequestHeaderSizeLimitHandler(h http.Handler) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if isHTTPHeaderSizeTooLarge(r.Header) {
-			writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrMetadataTooLarge), r.URL)
-			atomic.AddUint64(&globalHTTPStats.rejectedRequestsHeader, 1)
-			return
-		}
-		h.ServeHTTP(w, r)
-	})
-}
+const (
+	// Maximum allowed form data field values. 64MiB is a guessed practical value
+	// which is more than enough to accommodate any form data fields and headers.
+	requestFormDataSize = 64 * humanize.MiByte
+
+	// For any HTTP request, request body should be not more than 16GiB + requestFormDataSize
+	// where, 16GiB is the maximum allowed object size for object upload.
+	requestMaxBodySize = globalMaxObjectSize + requestFormDataSize
+
+	// Maximum size for http headers - See: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+	maxHeaderSize = 8 * 1024
+
+	// Maximum size for user-defined metadata - See: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+	maxUserDataSize = 2 * 1024
+)
+
+// ReservedMetadataPrefix is the prefix of a metadata key which
+// is reserved and for internal use only.
+const (
+	ReservedMetadataPrefix      = "X-Minio-Internal-"
+	ReservedMetadataPrefixLower = "x-minio-internal-"
+)
+
+// containsReservedMetadata returns true if the http.Header contains
+// keys which are treated as metadata but are reserved for internal use
+// and must not set by clients
+func containsReservedMetadata(header http.Header) bool {
+	for key := range header {
+		if strings.HasPrefix(strings.ToLower(key), ReservedMetadataPrefixLower) {
+			return true
+		}
+	}
+	return false
+}

 // isHTTPHeaderSizeTooLarge returns true if the provided

@@ -96,37 +93,25 @@ func isHTTPHeaderSizeTooLarge(header http.Header) bool {
 	return false
 }

-// ReservedMetadataPrefix is the prefix of a metadata key which
-// is reserved and for internal use only.
-const (
-	ReservedMetadataPrefix      = "X-Minio-Internal-"
-	ReservedMetadataPrefixLower = "x-minio-internal-"
-)
-
-// ServeHTTP fails if the request contains at least one reserved header which
-// would be treated as metadata.
-func filterReservedMetadata(h http.Handler) http.Handler {
+// Limits body and header to specific allowed maximum limits as per S3/MinIO API requirements.
+func setRequestLimitHandler(h http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Reject unsupported reserved metadata first before validation.
 		if containsReservedMetadata(r.Header) {
 			writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrUnsupportedMetadata), r.URL)
 			return
 		}
+		if isHTTPHeaderSizeTooLarge(r.Header) {
+			writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrMetadataTooLarge), r.URL)
+			atomic.AddUint64(&globalHTTPStats.rejectedRequestsHeader, 1)
+			return
+		}
+		// Restricting read data to a given maximum length
+		r.Body = http.MaxBytesReader(w, r.Body, requestMaxBodySize)
 		h.ServeHTTP(w, r)
 	})
 }

-// containsReservedMetadata returns true if the http.Header contains
-// keys which are treated as metadata but are reserved for internal use
-// and must not set by clients
-func containsReservedMetadata(header http.Header) bool {
-	for key := range header {
-		if strings.HasPrefix(strings.ToLower(key), ReservedMetadataPrefixLower) {
-			return true
-		}
-	}
-	return false
-}
-
 // Reserved bucket.
 const (
 	minioReservedBucket = "minio"

@@ -134,24 +119,6 @@ const (
 	loginPathPrefix = SlashSeparator + "login"
 )

-func setRedirectHandler(h http.Handler) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if !shouldProxy() || guessIsRPCReq(r) || guessIsBrowserReq(r) ||
-			guessIsHealthCheckReq(r) || guessIsMetricsReq(r) || isAdminReq(r) {
-			h.ServeHTTP(w, r)
-			return
-		}
-		// if this server is still initializing, proxy the request
-		// to any other online servers to avoid 503 for any incoming
-		// API calls.
-		if idx := getOnlineProxyEndpointIdx(); idx >= 0 {
-			proxyRequest(context.TODO(), w, r, globalProxyEndpoints[idx])
-			return
-		}
-		h.ServeHTTP(w, r)
-	})
-}
-
 func guessIsBrowserReq(r *http.Request) bool {
 	aType := getRequestAuthType(r)
 	return strings.Contains(r.Header.Get("User-Agent"), "Mozilla") &&

@@ -174,10 +141,6 @@ func setBrowserRedirectHandler(h http.Handler) http.Handler {
 	})
 }

-func shouldProxy() bool {
-	return newObjectLayerFn() == nil
-}
-
 // Fetch redirect location if urlPath satisfies certain
 // criteria. Some special names are considered to be
 // redirectable, this is purely internal function and

@@ -261,31 +224,21 @@ func isAdminReq(r *http.Request) bool {
 	return strings.HasPrefix(r.URL.Path, adminPathPrefix)
 }

-// Adds verification for incoming paths.
-func setReservedBucketHandler(h http.Handler) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		// For all other requests reject access to reserved buckets
-		bucketName, _ := request2BucketObjectName(r)
-		if isMinioReservedBucket(bucketName) || isMinioMetaBucket(bucketName) {
-			if !guessIsRPCReq(r) && !guessIsBrowserReq(r) && !guessIsHealthCheckReq(r) && !guessIsMetricsReq(r) && !isAdminReq(r) {
-				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrAllAccessDisabled), r.URL)
-				return
-			}
-		}
-		h.ServeHTTP(w, r)
-	})
-}
-
-// Supported Amz date formats.
+// Supported amz date formats.
 var amzDateFormats = []string{
+	// Do not change this order, x-amz-date format is usually in
+	// iso8601Format rest are meant for relaxed handling of other
+	// odd SDKs that might be out there.
+	iso8601Format,
 	time.RFC1123,
 	time.RFC1123Z,
-	iso8601Format,
+	// Add new AMZ date formats here.
 }

 // Supported Amz date headers.
 var amzDateHeaders = []string{
 	// Do not chane this order, x-amz-date value should be
 	// validated first.
 	"x-amz-date",
 	"date",
 }

@@ -314,34 +267,6 @@ func parseAmzDateHeader(req *http.Request) (time.Time, APIErrorCode) {
 	return time.Time{}, ErrMissingDateHeader
 }

-// setTimeValidityHandler to validate parsable time over http header
-func setTimeValidityHandler(h http.Handler) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		aType := getRequestAuthType(r)
-		if aType == authTypeSigned || aType == authTypeSignedV2 || aType == authTypeStreamingSigned {
-			// Verify if date headers are set, if not reject the request
-			amzDate, errCode := parseAmzDateHeader(r)
-			if errCode != ErrNone {
-				// All our internal APIs are sensitive towards Date
-				// header, for all requests where Date header is not
-				// present we will reject such clients.
-				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(errCode), r.URL)
-				atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
-				return
-			}
-			// Verify if the request date header is shifted by less than globalMaxSkewTime parameter in the past
-			// or in the future, reject request otherwise.
-			curTime := UTCNow()
-			if curTime.Sub(amzDate) > globalMaxSkewTime || amzDate.Sub(curTime) > globalMaxSkewTime {
-				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrRequestTimeTooSkewed), r.URL)
-				atomic.AddUint64(&globalHTTPStats.rejectedRequestsTime, 1)
-				return
-			}
-		}
-		h.ServeHTTP(w, r)
-	})
-}
-
 // setHttpStatsHandler sets a http Stats handler to gather HTTP statistics
 func setHTTPStatsHandler(h http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

@@ -354,11 +279,11 @@ func setHTTPStatsHandler(h http.Handler) http.Handler {
 		h.ServeHTTP(meteredResponse, r)

 		if strings.HasPrefix(r.URL.Path, minioReservedBucketPath) {
-			globalConnStats.incInputBytes(meteredRequest.BytesCount())
-			globalConnStats.incOutputBytes(meteredResponse.BytesCount())
+			globalConnStats.incInputBytes(meteredRequest.BytesRead())
+			globalConnStats.incOutputBytes(meteredResponse.BytesWritten())
 		} else {
-			globalConnStats.incS3InputBytes(meteredRequest.BytesCount())
-			globalConnStats.incS3OutputBytes(meteredResponse.BytesCount())
+			globalConnStats.incS3InputBytes(meteredRequest.BytesRead())
+			globalConnStats.incS3OutputBytes(meteredResponse.BytesWritten())
 		}
 	})
 }

@@ -420,6 +345,23 @@ func setRequestValidityHandler(h http.Handler) http.Handler {
 			atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
 			return
 		}
+		// For all other requests reject access to reserved buckets
+		bucketName, _ := request2BucketObjectName(r)
+		if isMinioReservedBucket(bucketName) || isMinioMetaBucket(bucketName) {
+			if !guessIsRPCReq(r) && !guessIsBrowserReq(r) && !guessIsHealthCheckReq(r) && !guessIsMetricsReq(r) && !isAdminReq(r) {
+				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrAllAccessDisabled), r.URL)
+				return
+			}
+		}
+		// Deny SSE-C requests if not made over TLS
+		if !globalIsTLS && (crypto.SSEC.IsRequested(r.Header) || crypto.SSECopy.IsRequested(r.Header)) {
+			if r.Method == http.MethodHead {
+				writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInsecureSSECustomerRequest))
+			} else {
+				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInsecureSSECustomerRequest), r.URL)
+			}
+			return
+		}
 		h.ServeHTTP(w, r)
 	})
 }

@@ -429,8 +371,7 @@ func setRequestValidityHandler(h http.Handler) http.Handler {
 // is obtained from centralized etcd configuration service.
 func setBucketForwardingHandler(h http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if globalDNSConfig == nil || len(globalDomainNames) == 0 || !globalBucketFederation ||
-			guessIsHealthCheckReq(r) || guessIsMetricsReq(r) ||
+		if guessIsHealthCheckReq(r) || guessIsMetricsReq(r) ||
 			guessIsRPCReq(r) || guessIsLoginSTSReq(r) || isAdminReq(r) {
 			h.ServeHTTP(w, r)
 			return

@@ -491,62 +432,46 @@ func setBucketForwardingHandler(h http.Handler) http.Handler {
 	})
 }

-// customHeaderHandler sets x-amz-request-id header.
-// Previously, this value was set right before a response was sent to
-// the client. So, logger and Error response XML were not using this
-// value. This is set here so that this header can be logged as
-// part of the log entry, Error response XML and auditing.
-func addCustomHeaders(h http.Handler) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		// Set custom headers such as x-amz-request-id for each request.
-		w.Header().Set(xhttp.AmzRequestID, mustGetRequestID(UTCNow()))
-		h.ServeHTTP(logger.NewResponseWriter(w), r)
-	})
-}
-
-// addSecurityHeaders adds various HTTP(S) response headers.
+// addCustomHeaders adds various HTTP(S) response headers.
 // Security Headers enable various security protections behaviors in the client's browser.
-func addSecurityHeaders(h http.Handler) http.Handler {
+func addCustomHeaders(h http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		header := w.Header()
 		header.Set("X-XSS-Protection", "1; mode=block")                                // Prevents against XSS attacks
 		header.Set("Content-Security-Policy", "block-all-mixed-content")               // prevent mixed (HTTP / HTTPS content)
 		header.Set("X-Content-Type-Options", "nosniff")                                // Prevent mime-sniff
 		header.Set("Strict-Transport-Security", "max-age=31536000; includeSubDomains") // HSTS mitigates variants of MITM attacks
-		h.ServeHTTP(w, r)
+
+		// Previously, this value was set right before a response was sent to
+		// the client. So, logger and Error response XML were not using this
+		// value. This is set here so that this header can be logged as
+		// part of the log entry, Error response XML and auditing.
+		// Set custom headers such as x-amz-request-id for each request.
+		w.Header().Set(xhttp.AmzRequestID, mustGetRequestID(UTCNow()))
+		h.ServeHTTP(logger.NewResponseWriter(w), r)
 	})
 }

-// criticalErrorHandler handles critical server failures caused by
+// criticalErrorHandler handles panics and fatal errors by
 // `panic(logger.ErrCritical)` as done by `logger.CriticalIf`.
 //
 // It should be always the first / highest HTTP handler.
-type criticalErrorHandler struct{ handler http.Handler }
-
-func (h criticalErrorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	defer func() {
-		if err := recover(); err == logger.ErrCritical { // handle
-			writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
-			return
-		} else if err != nil {
-			panic(err) // forward other panic calls
-		}
-	}()
-	h.handler.ServeHTTP(w, r)
-}
-
-// sseTLSHandler enforces certain rules for SSE requests which are made / must be made over TLS.
-func setSSETLSHandler(h http.Handler) http.Handler {
+func setCriticalErrorHandler(h http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		// Deny SSE-C requests if not made over TLS
-		if !globalIsTLS && (crypto.SSEC.IsRequested(r.Header) || crypto.SSECopy.IsRequested(r.Header)) {
-			if r.Method == http.MethodHead {
-				writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInsecureSSECustomerRequest))
-			} else {
-				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInsecureSSECustomerRequest), r.URL)
-			}
-			return
-		}
+		defer func() {
+			if rec := recover(); rec == logger.ErrCritical { // handle
+				stack := debug.Stack()
+				logger.Error("critical: \"%s %s\": %v\n%s", r.Method, r.URL, rec, string(stack))
+				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
+				return
+			} else if rec != nil {
+				stack := debug.Stack()
+				logger.Error("panic: \"%s %s\": %v\n%s", r.Method, r.URL, rec, string(stack))
+				// Try to write an error response, upstream may not have written header.
+				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
+				return
+			}
+		}()
 		h.ServeHTTP(w, r)
 	})
 }
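The reworked `setCriticalErrorHandler` above recovers panics, logs a stack trace, and attempts a 500 response instead of crashing the server. A self-contained sketch of that middleware shape, with `log.Printf` and `http.Error` standing in for `logger.Error` and `writeErrorResponse`:

```go
package main

import (
	"log"
	"net/http"
	"runtime/debug"
)

// setCriticalErrorHandler converts a panic anywhere downstream into a
// logged stack trace plus a best-effort 500 response.
func setCriticalErrorHandler(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if rec := recover(); rec != nil {
				log.Printf("panic: %q %q: %v\n%s", r.Method, r.URL.Path, rec, debug.Stack())
				// Upstream may not have written a header yet; try a 500.
				http.Error(w, "internal error", http.StatusInternalServerError)
			}
		}()
		h.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/boom", func(http.ResponseWriter, *http.Request) { panic("kaboom") })
	log.Fatal(http.ListenAndServe(":8080", setCriticalErrorHandler(mux)))
}
```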
@@ -155,7 +155,7 @@ func TestSSETLSHandler(t *testing.T) {
 	r.Header = test.Header
 	r.URL = test.URL

-	h := setSSETLSHandler(okHandler)
+	h := setRequestValidityHandler(okHandler)
 	h.ServeHTTP(w, r)

 	switch {
@@ -180,7 +180,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []BucketIn
 	// If we resume to the same bucket, forward to last known item.
-	if tracker.Bucket != "" {
-		forwardTo = tracker.Bucket
+	if tracker.Bucket == bucket.Name {
+		forwardTo = tracker.Object
 	} else {
 		// Reset to where last bucket ended if resuming.
 		tracker.resume()
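The fix above resumes healing from the tracked object only when the tracker still points at the bucket being healed; before, it mistakenly forwarded to the bucket name itself. A tiny sketch of the corrected decision, with illustrative stand-in types:

```go
package main

import "fmt"

type tracker struct{ Bucket, Object string }

// forwardTo returns the object to resume from, or "" for a fresh start.
func forwardTo(t tracker, bucket string) string {
	if t.Bucket == bucket {
		return t.Object // resume within the same bucket
	}
	return "" // different bucket: start from the beginning
}

func main() {
	t := tracker{Bucket: "photos", Object: "2021/07/img.jpg"}
	fmt.Println(forwardTo(t, "photos")) // 2021/07/img.jpg
	fmt.Println(forwardTo(t, "videos")) // "" (start of bucket)
}
```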
@@ -28,6 +28,10 @@ import (

 const unavailable = "offline"

+func shouldProxy() bool {
+	return newObjectLayerFn() == nil
+}
+
 // ClusterCheckHandler returns if the server is ready for requests.
 func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) {
 	if globalIsGateway {
@@ -178,8 +178,8 @@ func (st *HTTPStats) toServerHTTPStats() ServerHTTPStats {

 // Update statistics from http request and response data
 func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.ResponseWriter) {
-	// A successful request has a 2xx response code
-	successReq := w.StatusCode >= 200 && w.StatusCode < 300
+	// A successful request has a 2xx response code or < 4xx response
+	successReq := w.StatusCode >= 200 && w.StatusCode < 400

 	if !strings.HasSuffix(r.URL.Path, prometheusMetricsPathLegacy) ||
 		!strings.HasSuffix(r.URL.Path, prometheusMetricsV2ClusterPath) ||
@@ -189,7 +189,7 @@ func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.Response
 	switch w.StatusCode {
 	case 0:
 	case 499:
-		// 499 is a good error, shall be counted at canceled.
+		// 499 is a good error, shall be counted as canceled.
 		st.totalS3Canceled.Inc(api)
 	default:
 		st.totalS3Errors.Inc(api)
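The first hunk widens what counts as success from 2xx only to anything below 400, so 3xx responses such as 304 Not Modified no longer land in the error counters. A minimal sketch of the classification:

```go
package main

import "fmt"

// isSuccess mirrors the updated predicate: 2xx and 3xx are successes.
func isSuccess(status int) bool {
	return status >= 200 && status < 400 // previously: status < 300
}

func main() {
	for _, code := range []int{200, 206, 304, 404, 499} {
		fmt.Println(code, isSuccess(code))
	}
}
```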
@@ -27,22 +27,37 @@ import (

 type iamDummyStore struct {
 	sync.RWMutex
+	*iamCache
+	usersSysType UsersSysType
 }

-func (ids *iamDummyStore) lock() {
+func newIAMDummyStore(usersSysType UsersSysType) *iamDummyStore {
+	return &iamDummyStore{
+		iamCache:     newIamCache(),
+		usersSysType: usersSysType,
+	}
+}
+
+func (ids *iamDummyStore) rlock() *iamCache {
+	ids.RLock()
+	return ids.iamCache
+}
+
+func (ids *iamDummyStore) runlock() {
+	ids.RUnlock()
+}
+
+func (ids *iamDummyStore) lock() *iamCache {
 	ids.Lock()
+	return ids.iamCache
 }

 func (ids *iamDummyStore) unlock() {
 	ids.Unlock()
 }

-func (ids *iamDummyStore) rlock() {
-	ids.RLock()
-}
-
-func (ids *iamDummyStore) runlock() {
-	ids.RUnlock()
+func (ids *iamDummyStore) getUsersSysType() UsersSysType {
+	return ids.usersSysType
 }

 func (ids *iamDummyStore) migrateBackendFormat(context.Context) error {
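The refactor gives every IAM store the same shape: an embedded `*iamCache` plus `lock`/`rlock` methods that return the cache, so callers can only reach it while holding the lock. A trimmed, runnable sketch of the pattern with the cache reduced to one map:

```go
package main

import (
	"fmt"
	"sync"
)

type iamCache struct{ policies map[string]string }

func newIamCache() *iamCache { return &iamCache{policies: map[string]string{}} }

type iamDummyStore struct {
	sync.RWMutex
	*iamCache
}

// lock/rlock hand back the guarded cache; unlock/runlock release it.
func (ids *iamDummyStore) lock() *iamCache  { ids.Lock(); return ids.iamCache }
func (ids *iamDummyStore) unlock()          { ids.Unlock() }
func (ids *iamDummyStore) rlock() *iamCache { ids.RLock(); return ids.iamCache }
func (ids *iamDummyStore) runlock()         { ids.RUnlock() }

func main() {
	ids := &iamDummyStore{iamCache: newIamCache()}

	cache := ids.lock()
	cache.policies["readonly"] = "s3:GetObject"
	ids.unlock()

	cache = ids.rlock()
	fmt.Println(cache.policies["readonly"])
	ids.runlock()
}
```

The design nudge is that the only way to obtain the `*iamCache` is through a method that has already taken the right lock.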
@@ -62,27 +62,41 @@ func extractPathPrefixAndSuffix(s string, prefix string, suffix string) string {
 type IAMEtcdStore struct {
 	sync.RWMutex

+	*iamCache
+
 	usersSysType UsersSysType

 	client *etcd.Client
 }

-func newIAMEtcdStore(client *etcd.Client) *IAMEtcdStore {
-	return &IAMEtcdStore{client: client}
+func newIAMEtcdStore(client *etcd.Client, usersSysType UsersSysType) *IAMEtcdStore {
+	return &IAMEtcdStore{
+		iamCache:     newIamCache(),
+		client:       client,
+		usersSysType: usersSysType,
+	}
 }

-func (ies *IAMEtcdStore) lock() {
+func (ies *IAMEtcdStore) rlock() *iamCache {
+	ies.RLock()
+	return ies.iamCache
+}
+
+func (ies *IAMEtcdStore) runlock() {
+	ies.RUnlock()
+}
+
+func (ies *IAMEtcdStore) lock() *iamCache {
 	ies.Lock()
+	return ies.iamCache
 }

 func (ies *IAMEtcdStore) unlock() {
 	ies.Unlock()
 }

-func (ies *IAMEtcdStore) rlock() {
-	ies.RLock()
-}
-
-func (ies *IAMEtcdStore) runlock() {
-	ies.RUnlock()
+func (ies *IAMEtcdStore) getUsersSysType() UsersSysType {
+	return ies.usersSysType
 }

 func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item interface{}, itemPath string, opts ...options) error {

@@ -244,6 +258,8 @@ func (ies *IAMEtcdStore) migrateToV1(ctx context.Context) error {

 // Should be called under config migration lock
 func (ies *IAMEtcdStore) migrateBackendFormat(ctx context.Context) error {
+	ies.Lock()
+	defer ies.Unlock()
 	return ies.migrateToV1(ctx)
 }

@@ -260,7 +276,7 @@ func (ies *IAMEtcdStore) loadPolicyDoc(ctx context.Context, policy string, m map
 	return nil
 }

-func (ies *IAMEtcdStore) getPolicyDoc(ctx context.Context, kvs *mvccpb.KeyValue, m map[string]iampolicy.Policy) error {
+func (ies *IAMEtcdStore) getPolicyDocKV(ctx context.Context, kvs *mvccpb.KeyValue, m map[string]iampolicy.Policy) error {
 	var p iampolicy.Policy
 	err := getIAMConfig(&p, kvs.Value, string(kvs.Key))
 	if err != nil {

@@ -286,14 +302,14 @@ func (ies *IAMEtcdStore) loadPolicyDocs(ctx context.Context, m map[string]iampol

 	// Parse all values to construct the policies data model.
 	for _, kvs := range r.Kvs {
-		if err = ies.getPolicyDoc(ctx, kvs, m); err != nil && err != errNoSuchPolicy {
+		if err = ies.getPolicyDocKV(ctx, kvs, m); err != nil && err != errNoSuchPolicy {
 			return err
 		}
 	}
 	return nil
 }

-func (ies *IAMEtcdStore) getUser(ctx context.Context, userkv *mvccpb.KeyValue, userType IAMUserType, m map[string]auth.Credentials, basePrefix string) error {
+func (ies *IAMEtcdStore) getUserKV(ctx context.Context, userkv *mvccpb.KeyValue, userType IAMUserType, m map[string]auth.Credentials, basePrefix string) error {
 	var u UserIdentity
 	err := getIAMConfig(&u, userkv.Value, string(userkv.Key))
 	if err != nil {

@@ -355,7 +371,7 @@ func (ies *IAMEtcdStore) loadUsers(ctx context.Context, userType IAMUserType, m

 	// Parse all users values to create the proper data model
 	for _, userKv := range r.Kvs {
-		if err = ies.getUser(ctx, userKv, userType, m, basePrefix); err != nil && err != errNoSuchUser {
+		if err = ies.getUserKV(ctx, userKv, userType, m, basePrefix); err != nil && err != errNoSuchUser {
 			return err
 		}
 	}
@@ -34,30 +34,44 @@ import (

 // IAMObjectStore implements IAMStorageAPI
 type IAMObjectStore struct {
-	// Protect assignment to objAPI
+	// Protect access to storage within the current server.
 	sync.RWMutex

+	*iamCache
+
+	usersSysType UsersSysType
+
 	objAPI ObjectLayer
 }

-func newIAMObjectStore(objAPI ObjectLayer) *IAMObjectStore {
-	return &IAMObjectStore{objAPI: objAPI}
+func newIAMObjectStore(objAPI ObjectLayer, usersSysType UsersSysType) *IAMObjectStore {
+	return &IAMObjectStore{
+		iamCache:     newIamCache(),
+		objAPI:       objAPI,
+		usersSysType: usersSysType,
+	}
 }

-func (iamOS *IAMObjectStore) lock() {
+func (iamOS *IAMObjectStore) rlock() *iamCache {
+	iamOS.RLock()
+	return iamOS.iamCache
+}
+
+func (iamOS *IAMObjectStore) runlock() {
+	iamOS.RUnlock()
+}
+
+func (iamOS *IAMObjectStore) lock() *iamCache {
 	iamOS.Lock()
+	return iamOS.iamCache
 }

 func (iamOS *IAMObjectStore) unlock() {
 	iamOS.Unlock()
 }

-func (iamOS *IAMObjectStore) rlock() {
-	iamOS.RLock()
-}
-
-func (iamOS *IAMObjectStore) runlock() {
-	iamOS.RUnlock()
+func (iamOS *IAMObjectStore) getUsersSysType() UsersSysType {
+	return iamOS.usersSysType
 }

 // Migrate users directory in a single scan.

@@ -182,6 +196,8 @@ func (iamOS *IAMObjectStore) migrateToV1(ctx context.Context) error {

 // Should be called under config migration lock
 func (iamOS *IAMObjectStore) migrateBackendFormat(ctx context.Context) error {
+	iamOS.Lock()
+	defer iamOS.Unlock()
	return iamOS.migrateToV1(ctx)
 }
cmd/iam-store.go (new file, 1716 lines) — file diff suppressed because it is too large
cmd/iam.go (1864 lines) — file diff suppressed because it is too large
@@ -22,8 +22,8 @@ import (
 	"net/http"
 	"time"

-	jwtgo "github.com/golang-jwt/jwt"
-	jwtreq "github.com/golang-jwt/jwt/request"
+	jwtgo "github.com/golang-jwt/jwt/v4"
+	jwtreq "github.com/golang-jwt/jwt/v4/request"
 	"github.com/minio/minio/internal/auth"
 	xjwt "github.com/minio/minio/internal/jwt"
 	"github.com/minio/minio/internal/logger"
@@ -22,7 +22,7 @@ import (
 	"os"
 	"testing"

-	jwtgo "github.com/golang-jwt/jwt"
+	jwtgo "github.com/golang-jwt/jwt/v4"
 	"github.com/minio/minio/internal/auth"
 	xjwt "github.com/minio/minio/internal/jwt"
 )
@@ -190,12 +190,12 @@ func (l *localLocker) RLock(ctx context.Context, args dsync.LockArgs) (reply boo
 		if reply = !isWriteLock(lri); reply {
 			// Unless there is a write lock
 			l.lockMap[resource] = append(l.lockMap[resource], lrInfo)
-			l.lockUID[args.UID] = formatUUID(resource, 0)
+			l.lockUID[formatUUID(args.UID, 0)] = resource
 		}
 	} else {
 		// No locks held on the given name, so claim (first) read lock
 		l.lockMap[resource] = []lockRequesterInfo{lrInfo}
-		l.lockUID[args.UID] = formatUUID(resource, 0)
+		l.lockUID[formatUUID(args.UID, 0)] = resource
 		reply = true
 	}
 	return reply, nil
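The swap above turns `lockUID` into a map keyed by UID-plus-index rather than by bare UID, so one UID can own several entries without the later write clobbering the earlier one. A sketch of why that matters; `formatUUID` here is a plausible reimplementation assumed from its use in the diff, not the upstream definition:

```go
package main

import "fmt"

// formatUUID suffixes a UID with an index (assumed shape, for illustration).
func formatUUID(s string, idx int) string {
	return fmt.Sprintf("%s:%d", s, idx)
}

func main() {
	lockUID := make(map[string]string) // uid:idx -> resource

	uid := "0ea1c365-a5a3-4bb4-9df5-0e4d46b4e838"
	for i, res := range []string{"bucket/a", "bucket/b"} {
		lockUID[formatUUID(uid, i)] = res // distinct keys per resource
	}
	fmt.Println(len(lockUID)) // 2 entries for the same UID
}
```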
cmd/local-locker_test.go (new file, 256 lines)

@@ -0,0 +1,256 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"testing"
	"time"

	"github.com/minio/minio/internal/dsync"
)

func TestLocalLockerExpire(t *testing.T) {
	wResources := make([]string, 1000)
	rResources := make([]string, 1000)
	l := newLocker()
	ctx := context.Background()
	for i := range wResources {
		arg := dsync.LockArgs{
			UID:       mustGetUUID(),
			Resources: []string{mustGetUUID()},
			Source:    t.Name(),
			Owner:     "owner",
			Quorum:    0,
		}
		ok, err := l.Lock(ctx, arg)
		if err != nil {
			t.Fatal(err)
		}
		if !ok {
			t.Fatal("did not get write lock")
		}
		wResources[i] = arg.Resources[0]
	}
	for i := range rResources {
		name := mustGetUUID()
		arg := dsync.LockArgs{
			UID:       mustGetUUID(),
			Resources: []string{name},
			Source:    t.Name(),
			Owner:     "owner",
			Quorum:    0,
		}
		ok, err := l.RLock(ctx, arg)
		if err != nil {
			t.Fatal(err)
		}
		if !ok {
			t.Fatal("did not get write lock")
		}
		// RLock twice
		ok, err = l.RLock(ctx, arg)
		if err != nil {
			t.Fatal(err)
		}
		if !ok {
			t.Fatal("did not get write lock")
		}

		rResources[i] = arg.Resources[0]
	}
	if len(l.lockMap) != len(rResources)+len(wResources) {
		t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources))
	}
	if len(l.lockUID) != len(rResources)+len(wResources) {
		t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources), len(wResources))
	}
	// Expire an hour from now, should keep all
	l.expireOldLocks(time.Hour)
	if len(l.lockMap) != len(rResources)+len(wResources) {
		t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources))
	}
	if len(l.lockUID) != len(rResources)+len(wResources) {
		t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources), len(wResources))
	}

	// Expire a minute ago.
	l.expireOldLocks(-time.Minute)
	if len(l.lockMap) != 0 {
		t.Fatalf("after cleanup should be empty, got %d", len(l.lockMap))
	}
	if len(l.lockUID) != 0 {
		t.Fatalf("lockUID len, got %d, want %d", len(l.lockUID), 0)
	}
}

func TestLocalLockerUnlock(t *testing.T) {
	const n = 1000
	const m = 5
	wResources := make([][m]string, n)
	rResources := make([]string, n)
	wUIDs := make([]string, n)
	rUIDs := make([]string, 0, n*2)
	l := newLocker()
	ctx := context.Background()
	for i := range wResources {
		names := [m]string{}
		for j := range names {
			names[j] = mustGetUUID()
		}
		uid := mustGetUUID()
		arg := dsync.LockArgs{
			UID:       uid,
			Resources: names[:],
			Source:    t.Name(),
			Owner:     "owner",
			Quorum:    0,
		}
		ok, err := l.Lock(ctx, arg)
		if err != nil {
			t.Fatal(err)
		}
		if !ok {
			t.Fatal("did not get write lock")
		}
		wResources[i] = names
		wUIDs[i] = uid

	}
	for i := range rResources {
		name := mustGetUUID()
		uid := mustGetUUID()
		arg := dsync.LockArgs{
			UID:       uid,
			Resources: []string{name},
			Source:    t.Name(),
			Owner:     "owner",
			Quorum:    0,
		}
		ok, err := l.RLock(ctx, arg)
		if err != nil {
			t.Fatal(err)
		}
		if !ok {
			t.Fatal("did not get write lock")
		}
		rUIDs = append(rUIDs, uid)

		// RLock twice, different uid
		uid = mustGetUUID()
		arg.UID = uid
		ok, err = l.RLock(ctx, arg)
		if err != nil {
			t.Fatal(err)
		}
		if !ok {
			t.Fatal("did not get write lock")
		}
		rResources[i] = name
		rUIDs = append(rUIDs, uid)
	}
	// Each Lock has m entries
	if len(l.lockMap) != len(rResources)+len(wResources)*m {
		t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources)*m)
	}
	// A UID is added for every resource
	if len(l.lockUID) != len(rResources)*2+len(wResources)*m {
		t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources)*2, len(wResources)*m)
	}
	// RUnlock once...
	for i, name := range rResources {
		arg := dsync.LockArgs{
			UID:       rUIDs[i*2],
			Resources: []string{name},
			Source:    t.Name(),
			Owner:     "owner",
			Quorum:    0,
		}
		ok, err := l.RUnlock(ctx, arg)
		if err != nil {
			t.Fatal(err)
		}
		if !ok {
			t.Fatal("did not get write lock")
		}
	}

	// Each Lock has m entries
	if len(l.lockMap) != len(rResources)+len(wResources)*m {
		t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources)*m)
	}
	// A UID is added for every resource.
	// We removed len(rResources) read sources.
	if len(l.lockUID) != len(rResources)+len(wResources)*m {
		t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources), len(wResources)*m)
	}
	// RUnlock again, different uids
	for i, name := range rResources {
		arg := dsync.LockArgs{
			UID:       rUIDs[i*2+1],
			Resources: []string{name},
			Source:    "minio",
			Owner:     "owner",
			Quorum:    0,
		}
		ok, err := l.RUnlock(ctx, arg)
		if err != nil {
			t.Fatal(err)
		}
		if !ok {
			t.Fatal("did not get write lock")
		}
	}

	// Each Lock has m entries
	if len(l.lockMap) != 0+len(wResources)*m {
		t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), 0, len(wResources)*m)
	}
	// A UID is added for every resource.
	// We removed Add Rlocked entries
	if len(l.lockUID) != len(wResources)*m {
		t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), 0, len(wResources)*m)
	}

	// Remove write locked
	for i, names := range wResources {
		arg := dsync.LockArgs{
			UID:       wUIDs[i],
			Resources: names[:],
			Source:    "minio",
			Owner:     "owner",
			Quorum:    0,
		}
		ok, err := l.Unlock(ctx, arg)
		if err != nil {
			t.Fatal(err)
		}
		if !ok {
			t.Fatal("did not get write lock")
		}
	}

	// All should be gone now...
	// Each Lock has m entries
	if len(l.lockMap) != 0 {
		t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), 0, 0)
	}
	if len(l.lockUID) != 0 {
		t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), 0, 0)
	}
}
@@ -56,14 +56,13 @@ const metacacheStreamVersion = 2

 // metacacheWriter provides a serializer of metacache objects.
 type metacacheWriter struct {
-	streamErr   error
 	mw          *msgp.Writer
 	creator     func() error
 	closer      func() error
 	blockSize   int
-	streamWg    sync.WaitGroup
 	reuseBlocks bool
+
+	streamErr error
+	streamWg  sync.WaitGroup
 }

 // newMetacacheWriter will create a serializer that will write objects in given order to the output.
@@ -19,9 +19,11 @@ package cmd

 import (
 	"context"
+	"fmt"
 	"io"
 	"net/http"
 	"net/url"
+	"runtime/debug"
 	"sort"
 	"strconv"
 	"strings"

@@ -114,6 +116,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
 	forward := ""
 	if len(opts.ForwardTo) > 0 && strings.HasPrefix(opts.ForwardTo, current) {
 		forward = strings.TrimPrefix(opts.ForwardTo, current)
+		// Trim further directories and trailing slash.
 		if idx := strings.IndexByte(forward, '/'); idx > 0 {
 			forward = forward[:idx]
 		}

@@ -161,7 +164,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
 			entries[i] = entry
 			continue
 		}
-		// Trim slash, maybe compiler is clever?
+		// Trim slash, since we don't know if this is folder or object.
 		entries[i] = entries[i][:len(entry)-1]
 		continue
 	}

@@ -212,9 +215,12 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
 	dirStack := make([]string, 0, 5)
 	prefix = "" // Remove prefix after first level as we have already filtered the list.
 	if len(forward) > 0 {
-		idx := sort.SearchStrings(entries, forward)
-		if idx > 0 {
-			entries = entries[idx:]
+		// Conservative forwarding. Entries may be either objects or prefixes.
+		for i, entry := range entries {
+			if entry >= forward || strings.HasPrefix(forward, entry) {
+				entries = entries[i:]
+				break
+			}
 		}
 	}

@@ -357,6 +363,12 @@ func (s *storageRESTServer) WalkDirHandler(w http.ResponseWriter, r *http.Reques
 	prefix := r.Form.Get(storageRESTPrefixFilter)
 	forward := r.Form.Get(storageRESTForwardFilter)
 	writer := streamHTTPResponse(w)
+	defer func() {
+		if r := recover(); r != nil {
+			debug.PrintStack()
+			writer.CloseWithError(fmt.Errorf("panic: %v", r))
+		}
+	}()
 	writer.CloseWithError(s.storage.WalkDir(r.Context(), WalkDirOptions{
 		Bucket:  volume,
 		BaseDir: dirPath,
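The conservative-forwarding loop above replaces a plain binary search: an entry survives either when it sorts at or after the resume point, or when the resume point lies inside it (the entry is a parent prefix that a binary search could skip past). A standalone sketch of that logic:

```go
package main

import (
	"fmt"
	"strings"
)

// forwardEntries returns the suffix of sorted entries to keep when
// resuming a listing at "forward".
func forwardEntries(entries []string, forward string) []string {
	for i, entry := range entries {
		if entry >= forward || strings.HasPrefix(forward, entry) {
			return entries[i:]
		}
	}
	return nil
}

func main() {
	entries := []string{"a/", "b/", "c.txt", "d/"}
	// Resuming at "b/x": "b/" sorts before "b/x" but is a prefix of it,
	// so it must be kept for the walk to descend into it.
	fmt.Println(forwardEntries(entries, "b/x")) // [b/ c.txt d/]
}
```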
@@ -56,18 +56,21 @@ const (

 // metacache contains a tracked cache entry.
 type metacache struct {
-	id           string     `msg:"id"`
-	bucket       string     `msg:"b"`
-	root         string     `msg:"root"`
-	recursive    bool       `msg:"rec"`
-	filter       string     `msg:"flt"`
-	status       scanStatus `msg:"stat"`
-	fileNotFound bool       `msg:"fnf"`
-	error        string     `msg:"err"`
-	started      time.Time  `msg:"st"`
+	// do not re-arrange the struct this struct has been ordered to use less
+	// space - if you do so please run https://github.com/orijtech/structslop
+	// and verify if your changes are optimal.
 	ended        time.Time  `msg:"end"`
-	lastUpdate   time.Time  `msg:"u"`
+	started      time.Time  `msg:"st"`
 	lastHandout  time.Time  `msg:"lh"`
+	lastUpdate   time.Time  `msg:"u"`
+	bucket       string     `msg:"b"`
+	filter       string     `msg:"flt"`
+	id           string     `msg:"id"`
+	error        string     `msg:"err"`
+	root         string     `msg:"root"`
+	fileNotFound bool       `msg:"fnf"`
+	status       scanStatus `msg:"stat"`
+	recursive    bool       `msg:"rec"`
 	dataVersion  uint8      `msg:"v"`
 }
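The struct reshuffle above follows the structslop advice quoted in the comment: grouping fields of equal size removes alignment padding. A quick, runnable demonstration with simplified field types (not the metacache fields themselves):

```go
package main

import (
	"fmt"
	"unsafe"
)

// sloppy interleaves 1-byte and 8-byte fields, forcing padding.
type sloppy struct {
	a bool
	b int64
	c bool
	d int64
	e bool
}

// packed groups the 8-byte fields first, then the 1-byte fields.
type packed struct {
	b int64
	d int64
	a bool
	c bool
	e bool
}

func main() {
	fmt.Println(unsafe.Sizeof(sloppy{})) // 40 on 64-bit: padding after each bool
	fmt.Println(unsafe.Sizeof(packed{})) // 24: the bools share one word
}
```

Since the `msg:` tags name each field, the serialized format stays compatible; only the in-memory layout (and the order of the generated code below) changes.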
@@ -24,10 +24,28 @@ func (z *metacache) DecodeMsg(dc *msgp.Reader) (err error) {
 			return
 		}
 		switch msgp.UnsafeString(field) {
-		case "id":
-			z.id, err = dc.ReadString()
+		case "end":
+			z.ended, err = dc.ReadTime()
 			if err != nil {
-				err = msgp.WrapError(err, "id")
+				err = msgp.WrapError(err, "ended")
 				return
 			}
+		case "st":
+			z.started, err = dc.ReadTime()
+			if err != nil {
+				err = msgp.WrapError(err, "started")
+				return
+			}
+		case "lh":
+			z.lastHandout, err = dc.ReadTime()
+			if err != nil {
+				err = msgp.WrapError(err, "lastHandout")
+				return
+			}
+		case "u":
+			z.lastUpdate, err = dc.ReadTime()
+			if err != nil {
+				err = msgp.WrapError(err, "lastUpdate")
+				return
+			}
 		case "b":

@@ -36,22 +54,34 @@ func (z *metacache) DecodeMsg(dc *msgp.Reader) (err error) {
 				err = msgp.WrapError(err, "bucket")
 				return
 			}
+		case "flt":
+			z.filter, err = dc.ReadString()
+			if err != nil {
+				err = msgp.WrapError(err, "filter")
+				return
+			}
+		case "id":
+			z.id, err = dc.ReadString()
+			if err != nil {
+				err = msgp.WrapError(err, "id")
+				return
+			}
+		case "err":
+			z.error, err = dc.ReadString()
+			if err != nil {
+				err = msgp.WrapError(err, "error")
+				return
+			}
 		case "root":
 			z.root, err = dc.ReadString()
 			if err != nil {
 				err = msgp.WrapError(err, "root")
 				return
 			}
-		case "rec":
-			z.recursive, err = dc.ReadBool()
+		case "fnf":
+			z.fileNotFound, err = dc.ReadBool()
 			if err != nil {
-				err = msgp.WrapError(err, "recursive")
+				err = msgp.WrapError(err, "fileNotFound")
 				return
 			}
-		case "flt":
-			z.filter, err = dc.ReadString()
-			if err != nil {
-				err = msgp.WrapError(err, "filter")
-				return
-			}
 		case "stat":

@@ -64,40 +94,10 @@ func (z *metacache) DecodeMsg(dc *msgp.Reader) (err error) {
 			}
 			z.status = scanStatus(zb0002)
 		}
-		case "fnf":
-			z.fileNotFound, err = dc.ReadBool()
-			if err != nil {
-				err = msgp.WrapError(err, "fileNotFound")
-				return
-			}
-		case "err":
-			z.error, err = dc.ReadString()
-			if err != nil {
-				err = msgp.WrapError(err, "error")
-				return
-			}
-		case "st":
-			z.started, err = dc.ReadTime()
-			if err != nil {
-				err = msgp.WrapError(err, "started")
-				return
-			}
-		case "end":
-			z.ended, err = dc.ReadTime()
-			if err != nil {
-				err = msgp.WrapError(err, "ended")
-				return
-			}
-		case "u":
-			z.lastUpdate, err = dc.ReadTime()
-			if err != nil {
-				err = msgp.WrapError(err, "lastUpdate")
-				return
-			}
-		case "lh":
-			z.lastHandout, err = dc.ReadTime()
+		case "rec":
+			z.recursive, err = dc.ReadBool()
 			if err != nil {
-				err = msgp.WrapError(err, "lastHandout")
+				err = msgp.WrapError(err, "recursive")
 				return
 			}
 		case "v":

@@ -120,84 +120,14 @@ func (z *metacache) DecodeMsg(dc *msgp.Reader) (err error) {
 // EncodeMsg implements msgp.Encodable
 func (z *metacache) EncodeMsg(en *msgp.Writer) (err error) {
 	// map header, size 13
-	// write "id"
-	err = en.Append(0x8d, 0xa2, 0x69, 0x64)
+	// write "end"
+	err = en.Append(0x8d, 0xa3, 0x65, 0x6e, 0x64)
 	if err != nil {
 		return
 	}
-	err = en.WriteString(z.id)
-	if err != nil {
-		err = msgp.WrapError(err, "id")
-		return
-	}
-	// write "b"
-	err = en.Append(0xa1, 0x62)
-	if err != nil {
-		return
-	}
-	err = en.WriteString(z.bucket)
-	if err != nil {
-		err = msgp.WrapError(err, "bucket")
-		return
-	}
-	// write "root"
-	err = en.Append(0xa4, 0x72, 0x6f, 0x6f, 0x74)
-	if err != nil {
-		return
-	}
-	err = en.WriteString(z.root)
-	if err != nil {
-		err = msgp.WrapError(err, "root")
-		return
-	}
-	// write "rec"
-	err = en.Append(0xa3, 0x72, 0x65, 0x63)
-	if err != nil {
-		return
-	}
-	err = en.WriteBool(z.recursive)
-	if err != nil {
-		err = msgp.WrapError(err, "recursive")
-		return
-	}
-	// write "flt"
-	err = en.Append(0xa3, 0x66, 0x6c, 0x74)
-	if err != nil {
-		return
-	}
-	err = en.WriteString(z.filter)
-	if err != nil {
-		err = msgp.WrapError(err, "filter")
-		return
-	}
-	// write "stat"
-	err = en.Append(0xa4, 0x73, 0x74, 0x61, 0x74)
-	if err != nil {
-		return
-	}
-	err = en.WriteUint8(uint8(z.status))
-	if err != nil {
-		err = msgp.WrapError(err, "status")
-		return
-	}
-	// write "fnf"
-	err = en.Append(0xa3, 0x66, 0x6e, 0x66)
-	if err != nil {
-		return
-	}
-	err = en.WriteBool(z.fileNotFound)
-	if err != nil {
-		err = msgp.WrapError(err, "fileNotFound")
-		return
-	}
-	// write "err"
-	err = en.Append(0xa3, 0x65, 0x72, 0x72)
-	if err != nil {
-		return
-	}
-	err = en.WriteString(z.error)
+	err = en.WriteTime(z.ended)
 	if err != nil {
-		err = msgp.WrapError(err, "error")
+		err = msgp.WrapError(err, "ended")
 		return
 	}
 	// write "st"

@@ -210,14 +140,14 @@ func (z *metacache) EncodeMsg(en *msgp.Writer) (err error) {
 		err = msgp.WrapError(err, "started")
 		return
 	}
-	// write "end"
-	err = en.Append(0xa3, 0x65, 0x6e, 0x64)
+	// write "lh"
+	err = en.Append(0xa2, 0x6c, 0x68)
 	if err != nil {
 		return
 	}
-	err = en.WriteTime(z.ended)
+	err = en.WriteTime(z.lastHandout)
 	if err != nil {
-		err = msgp.WrapError(err, "ended")
+		err = msgp.WrapError(err, "lastHandout")
 		return
 	}
 	// write "u"

@@ -230,14 +160,84 @@ func (z *metacache) EncodeMsg(en *msgp.Writer) (err error) {
 		err = msgp.WrapError(err, "lastUpdate")
 		return
 	}
-	// write "lh"
-	err = en.Append(0xa2, 0x6c, 0x68)
+	// write "b"
+	err = en.Append(0xa1, 0x62)
 	if err != nil {
 		return
 	}
-	err = en.WriteTime(z.lastHandout)
+	err = en.WriteString(z.bucket)
 	if err != nil {
-		err = msgp.WrapError(err, "lastHandout")
+		err = msgp.WrapError(err, "bucket")
 		return
 	}
+	// write "flt"
+	err = en.Append(0xa3, 0x66, 0x6c, 0x74)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.filter)
+	if err != nil {
+		err = msgp.WrapError(err, "filter")
+		return
+	}
+	// write "id"
+	err = en.Append(0xa2, 0x69, 0x64)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.id)
+	if err != nil {
+		err = msgp.WrapError(err, "id")
+		return
+	}
+	// write "err"
+	err = en.Append(0xa3, 0x65, 0x72, 0x72)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.error)
+	if err != nil {
+		err = msgp.WrapError(err, "error")
+		return
+	}
+	// write "root"
+	err = en.Append(0xa4, 0x72, 0x6f, 0x6f, 0x74)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.root)
+	if err != nil {
+		err = msgp.WrapError(err, "root")
+		return
+	}
+	// write "fnf"
+	err = en.Append(0xa3, 0x66, 0x6e, 0x66)
+	if err != nil {
+		return
+	}
+	err = en.WriteBool(z.fileNotFound)
+	if err != nil {
+		err = msgp.WrapError(err, "fileNotFound")
+		return
+	}
+	// write "stat"
+	err = en.Append(0xa4, 0x73, 0x74, 0x61, 0x74)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint8(uint8(z.status))
+	if err != nil {
+		err = msgp.WrapError(err, "status")
+		return
+	}
+	// write "rec"
+	err = en.Append(0xa3, 0x72, 0x65, 0x63)
+	if err != nil {
+		return
+	}
+	err = en.WriteBool(z.recursive)
+	if err != nil {
+		err = msgp.WrapError(err, "recursive")
+		return
+	}
 	// write "v"

@@ -257,42 +257,42 @@ func (z *metacache) EncodeMsg(en *msgp.Writer) (err error) {
 func (z *metacache) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
 	// map header, size 13
-	// string "id"
-	o = append(o, 0x8d, 0xa2, 0x69, 0x64)
-	o = msgp.AppendString(o, z.id)
-	// string "b"
-	o = append(o, 0xa1, 0x62)
-	o = msgp.AppendString(o, z.bucket)
-	// string "root"
-	o = append(o, 0xa4, 0x72, 0x6f, 0x6f, 0x74)
-	o = msgp.AppendString(o, z.root)
-	// string "rec"
-	o = append(o, 0xa3, 0x72, 0x65, 0x63)
-	o = msgp.AppendBool(o, z.recursive)
-	// string "flt"
-	o = append(o, 0xa3, 0x66, 0x6c, 0x74)
-	o = msgp.AppendString(o, z.filter)
-	// string "stat"
-	o = append(o, 0xa4, 0x73, 0x74, 0x61, 0x74)
-	o = msgp.AppendUint8(o, uint8(z.status))
-	// string "fnf"
-	o = append(o, 0xa3, 0x66, 0x6e, 0x66)
-	o = msgp.AppendBool(o, z.fileNotFound)
-	// string "err"
-	o = append(o, 0xa3, 0x65, 0x72, 0x72)
-	o = msgp.AppendString(o, z.error)
+	// string "end"
+	o = append(o, 0x8d, 0xa3, 0x65, 0x6e, 0x64)
+	o = msgp.AppendTime(o, z.ended)
 	// string "st"
 	o = append(o, 0xa2, 0x73, 0x74)
 	o = msgp.AppendTime(o, z.started)
-	// string "end"
-	o = append(o, 0xa3, 0x65, 0x6e, 0x64)
-	o = msgp.AppendTime(o, z.ended)
-	// string "u"
-	o = append(o, 0xa1, 0x75)
-	o = msgp.AppendTime(o, z.lastUpdate)
 	// string "lh"
 	o = append(o, 0xa2, 0x6c, 0x68)
 	o = msgp.AppendTime(o, z.lastHandout)
+	// string "u"
+	o = append(o, 0xa1, 0x75)
+	o = msgp.AppendTime(o, z.lastUpdate)
+	// string "b"
+	o = append(o, 0xa1, 0x62)
+	o = msgp.AppendString(o, z.bucket)
+	// string "flt"
+	o = append(o, 0xa3, 0x66, 0x6c, 0x74)
+	o = msgp.AppendString(o, z.filter)
+	// string "id"
+	o = append(o, 0xa2, 0x69, 0x64)
+	o = msgp.AppendString(o, z.id)
+	// string "err"
+	o = append(o, 0xa3, 0x65, 0x72, 0x72)
+	o = msgp.AppendString(o, z.error)
+	// string "root"
+	o = append(o, 0xa4, 0x72, 0x6f, 0x6f, 0x74)
+	o = msgp.AppendString(o, z.root)
+	// string "fnf"
+	o = append(o, 0xa3, 0x66, 0x6e, 0x66)
+	o = msgp.AppendBool(o, z.fileNotFound)
+	// string "stat"
+	o = append(o, 0xa4, 0x73, 0x74, 0x61, 0x74)
+	o = msgp.AppendUint8(o, uint8(z.status))
+	// string "rec"
+	o = append(o, 0xa3, 0x72, 0x65, 0x63)
+	o = msgp.AppendBool(o, z.recursive)
 	// string "v"
 	o = append(o, 0xa1, 0x76)
 	o = msgp.AppendUint8(o, z.dataVersion)

@@ -317,10 +317,28 @@ func (z *metacache) UnmarshalMsg(bts []byte) (o []byte, err error) {
 			return
 		}
 		switch msgp.UnsafeString(field) {
-		case "id":
-			z.id, bts, err = msgp.ReadStringBytes(bts)
+		case "end":
+			z.ended, bts, err = msgp.ReadTimeBytes(bts)
 			if err != nil {
-				err = msgp.WrapError(err, "id")
+				err = msgp.WrapError(err, "ended")
 				return
 			}
+		case "st":
+			z.started, bts, err = msgp.ReadTimeBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "started")
+				return
+			}
+		case "lh":
+			z.lastHandout, bts, err = msgp.ReadTimeBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "lastHandout")
+				return
+			}
+		case "u":
+			z.lastUpdate, bts, err = msgp.ReadTimeBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "lastUpdate")
+				return
+			}
 		case "b":

@@ -329,22 +347,34 @@ func (z *metacache) UnmarshalMsg(bts []byte) (o []byte, err error) {
 				err = msgp.WrapError(err, "bucket")
 				return
 			}
+		case "flt":
+			z.filter, bts, err = msgp.ReadStringBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "filter")
+				return
+			}
+		case "id":
+			z.id, bts, err = msgp.ReadStringBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "id")
+				return
+			}
+		case "err":
+			z.error, bts, err = msgp.ReadStringBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "error")
+				return
+			}
 		case "root":
 			z.root, bts, err = msgp.ReadStringBytes(bts)
 			if err != nil {
 				err = msgp.WrapError(err, "root")
 				return
 			}
-		case "rec":
-			z.recursive, bts, err = msgp.ReadBoolBytes(bts)
+		case "fnf":
+			z.fileNotFound, bts, err = msgp.ReadBoolBytes(bts)
 			if err != nil {
-				err = msgp.WrapError(err, "recursive")
+				err = msgp.WrapError(err, "fileNotFound")
 				return
 			}
-		case "flt":
-			z.filter, bts, err = msgp.ReadStringBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "filter")
-				return
-			}
 		case "stat":

@@ -357,40 +387,10 @@ func (z *metacache) UnmarshalMsg(bts []byte) (o []byte, err error) {
 			}
 			z.status = scanStatus(zb0002)
 		}
-		case "fnf":
-			z.fileNotFound, bts, err = msgp.ReadBoolBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "fileNotFound")
-				return
-			}
-		case "err":
-			z.error, bts, err = msgp.ReadStringBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "error")
-				return
-			}
-		case "st":
-			z.started, bts, err = msgp.ReadTimeBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "started")
-				return
-			}
-		case "end":
-			z.ended, bts, err = msgp.ReadTimeBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "ended")
-				return
-			}
-		case "u":
-			z.lastUpdate, bts, err = msgp.ReadTimeBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "lastUpdate")
-				return
-			}
-		case "lh":
-			z.lastHandout, bts, err = msgp.ReadTimeBytes(bts)
+		case "rec":
+			z.recursive, bts, err = msgp.ReadBoolBytes(bts)
 			if err != nil {
-				err = msgp.WrapError(err, "lastHandout")
+				err = msgp.WrapError(err, "recursive")
 				return
 			}
 		case "v":

@@ -413,7 +413,7 @@ func (z *metacache) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *metacache) Msgsize() (s int) {
-	s = 1 + 3 + msgp.StringPrefixSize + len(z.id) + 2 + msgp.StringPrefixSize + len(z.bucket) + 5 + msgp.StringPrefixSize + len(z.root) + 4 + msgp.BoolSize + 4 + msgp.StringPrefixSize + len(z.filter) + 5 + msgp.Uint8Size + 4 + msgp.BoolSize + 4 + msgp.StringPrefixSize + len(z.error) + 3 + msgp.TimeSize + 4 + msgp.TimeSize + 2 + msgp.TimeSize + 3 + msgp.TimeSize + 2 + msgp.Uint8Size
+	s = 1 + 4 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 2 + msgp.TimeSize + 2 + msgp.StringPrefixSize + len(z.bucket) + 4 + msgp.StringPrefixSize + len(z.filter) + 3 + msgp.StringPrefixSize + len(z.id) + 4 + msgp.StringPrefixSize + len(z.error) + 5 + msgp.StringPrefixSize + len(z.root) + 4 + msgp.BoolSize + 5 + msgp.Uint8Size + 4 + msgp.BoolSize + 2 + msgp.Uint8Size
 	return
 }
|
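The reordered `case` arms above stay wire-compatible because this codec is map-keyed: the decoder dispatches on the key name, not on field position. A minimal sketch of that pattern, assuming the tinylib/msgp runtime used by this generated code:

```go
package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Encode two fields in one order...
	var b []byte
	b = msgp.AppendMapHeader(b, 2)
	b = msgp.AppendString(b, "id")
	b = msgp.AppendString(b, "cache-1")
	b = msgp.AppendString(b, "b")
	b = msgp.AppendString(b, "mybucket")

	// ...and decode by key name, in whatever order the keys arrive.
	var id, bucket string
	sz, bts, _ := msgp.ReadMapHeaderBytes(b) // error handling elided for brevity
	for i := uint32(0); i < sz; i++ {
		var field []byte
		field, bts, _ = msgp.ReadMapKeyZC(bts)
		switch msgp.UnsafeString(field) {
		case "id":
			id, bts, _ = msgp.ReadStringBytes(bts)
		case "b":
			bucket, bts, _ = msgp.ReadStringBytes(bts)
		default:
			bts, _ = msgp.Skip(bts) // unknown keys are skipped, not fatal
		}
	}
	fmt.Println(id, bucket) // cache-1 mybucket
}
```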
@@ -108,6 +108,10 @@ func (c *minioCollector) Collect(ch chan<- prometheus.Metric) {
}

func nodeHealthMetricsPrometheus(ch chan<- prometheus.Metric) {
if globalIsGateway {
return
}

nodesUp, nodesDown := GetPeerOnlineCount()
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(

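For context, this is roughly how the node-health gauges introduced here flow through the Prometheus collector; the metric names below are illustrative stand-ins, not necessarily the exact names MinIO registers:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// pushNodeCounts emits one gauge per node state into a collector's
// metric channel, mirroring the pattern in nodeHealthMetricsPrometheus.
func pushNodeCounts(ch chan<- prometheus.Metric, up, down float64) {
	ch <- prometheus.MustNewConstMetric(
		prometheus.NewDesc("minio_nodes_online", "Number of MinIO nodes online", nil, nil),
		prometheus.GaugeValue, up,
	)
	ch <- prometheus.MustNewConstMetric(
		prometheus.NewDesc("minio_nodes_offline", "Number of MinIO nodes offline", nil, nil),
		prometheus.GaugeValue, down,
	)
}
```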
@@ -225,7 +225,7 @@ func (d *naughtyDisk) Delete(ctx context.Context, volume string, path string, re
return d.disk.Delete(ctx, volume, path, recursive)
}

-func (d *naughtyDisk) DeleteVersions(ctx context.Context, volume string, versions []FileInfo) []error {
+func (d *naughtyDisk) DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions) []error {
if err := d.calcError(); err != nil {
errs := make([]error, len(versions))
for i := range errs {

@@ -225,6 +225,18 @@ func (o ObjectInfo) Clone() (cinfo ObjectInfo) {
return cinfo
}

+func (o ObjectInfo) tierStats() tierStats {
+ts := tierStats{
+TotalSize: uint64(o.Size),
+NumVersions: 1,
+}
+// the current version of an object is accounted towards objects count
+if o.IsLatest {
+ts.NumObjects = 1
+}
+return ts
+}
+
// ReplicateObjectInfo represents object info to be replicated
type ReplicateObjectInfo struct {
ObjectInfo

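The tierStats accounting above generalizes naturally to a whole version stack: every version contributes its size and a version count, but only the latest one counts as an object. A hedged sketch with simplified types (the add helper is hypothetical, not code from this diff):

```go
package main

import "fmt"

type tierStats struct {
	TotalSize   uint64
	NumVersions int
	NumObjects  int
}

type version struct {
	Size     int64
	IsLatest bool
}

// add folds one version into the running totals, following the same
// rule as ObjectInfo.tierStats: only the latest version is an object.
func (ts tierStats) add(v version) tierStats {
	ts.TotalSize += uint64(v.Size)
	ts.NumVersions++
	if v.IsLatest {
		ts.NumObjects++
	}
	return ts
}

func main() {
	var ts tierStats
	for _, v := range []version{{100, false}, {120, true}} {
		ts = ts.add(v)
	}
	fmt.Printf("%+v\n", ts) // {TotalSize:220 NumVersions:2 NumObjects:1}
}
```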
@@ -291,6 +291,13 @@ func (e MethodNotAllowed) Error() string {
return "Method not allowed: " + e.Bucket + "/" + e.Object
}

+// ObjectLocked object is currently WORM protected.
+type ObjectLocked GenericError
+
+func (e ObjectLocked) Error() string {
+return "Object is WORM protected and cannot be overwritten: " + e.Bucket + "/" + e.Object + "(" + e.VersionID + ")"
+}
+
// ObjectAlreadyExists object already exists.
type ObjectAlreadyExists GenericError

@@ -638,10 +645,12 @@ func (e UnsupportedMetadata) Error() string {
}

// BackendDown is returned for network errors or if the gateway's backend is down.
-type BackendDown struct{}
+type BackendDown struct {
+Err string
+}

func (e BackendDown) Error() string {
-return "Backend down"
+return e.Err
}

// isErrBucketNotFound - Check if error type is BucketNotFound.

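Giving BackendDown an Err field, as above, lets callers preserve the underlying network error instead of a fixed string. A minimal sketch of how a call site might wrap a failure (the wrapBackendErr helper is an illustration, not code from this diff):

```go
package main

import (
	"errors"
	"fmt"
)

type BackendDown struct{ Err string }

func (e BackendDown) Error() string { return e.Err }

// wrapBackendErr converts any backend failure into a BackendDown that
// still carries the original cause in its message.
func wrapBackendErr(err error) error {
	if err == nil {
		return nil
	}
	return BackendDown{Err: err.Error()}
}

func main() {
	err := wrapBackendErr(errors.New("dial tcp 10.0.0.5:9000: i/o timeout"))
	fmt.Println(err) // the cause survives instead of a bare "Backend down"
}
```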
@@ -26,13 +26,18 @@ import (
"github.com/minio/madmin-go"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
-"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/pkg/bucket/policy"

+"github.com/minio/minio/internal/bucket/replication"
+xioutil "github.com/minio/minio/internal/ioutil"
)

// CheckPreconditionFn returns true if precondition check failed.
type CheckPreconditionFn func(o ObjectInfo) bool

+// EvalMetadataFn validates input objInfo and returns an updated metadata
+type EvalMetadataFn func(o ObjectInfo) error
+
// GetObjectInfoFn is the signature of GetObjectInfo function.
type GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)

@@ -50,6 +55,7 @@ type ObjectOptions struct {
UserDefined map[string]string // only set in case of POST/PUT operations
PartNumber int // only useful in case of GetObject/HeadObject
CheckPrecondFn CheckPreconditionFn // only set during GetObject/HeadObject/CopyObjectPart precondition evaluation
+EvalMetadataFn EvalMetadataFn // only set for retention settings, meant to be used only when updating metadata in-place.
DeleteReplication ReplicationState // Represents internal replication state needed for Delete replication
Transition TransitionOptions
Expiration ExpirationOptions

@@ -257,6 +263,6 @@ func GetObject(ctx context.Context, api ObjectLayer, bucket, object string, star
}
defer reader.Close()

-_, err = io.Copy(writer, reader)
+_, err = xioutil.Copy(writer, reader)
return err
}

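The swap from io.Copy to xioutil.Copy recurs throughout this changeset. xioutil's implementation is not shown in the diff, but the usual motivation for such a helper is buffer reuse. The following is only a plausible sketch under that assumption — the standard sync.Pool plus io.CopyBuffer pattern — not MinIO's actual code:

```go
package xioutil

import (
	"io"
	"sync"
)

// copyBufPool hands out reusable 32 KiB buffers (io.Copy's default
// chunk size), avoiding one buffer allocation per copy.
var copyBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024)
		return &b
	},
}

// Copy behaves like io.Copy but reuses pooled buffers across calls.
func Copy(dst io.Writer, src io.Reader) (int64, error) {
	bufp := copyBufPool.Get().(*[]byte)
	defer copyBufPool.Put(bufp)
	return io.CopyBuffer(dst, src, *bufp)
}
```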
@@ -1378,6 +1378,152 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
}
}

// Wrapper for calling ListObjects continuation tests for both Erasure multiple disks and single node setup.
func TestListObjectsContinuation(t *testing.T) {
ExecObjectLayerTest(t, testListObjectsContinuation)
}

// Unit test for ListObjects continuation.
func testListObjectsContinuation(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
t, _ := t1.(*testing.T)
testBuckets := []string{
// This bucket is used for testing ListObject operations.
"test-bucket-list-object-continuation-1",
"test-bucket-list-object-continuation-2",
}
for _, bucket := range testBuckets {
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
}

var err error
testObjects := []struct {
parentBucket string
name string
content string
meta map[string]string
}{
{testBuckets[0], "a/1.txt", "contentstring", nil},
{testBuckets[0], "a-1.txt", "contentstring", nil},
{testBuckets[0], "a.txt", "contentstring", nil},
{testBuckets[0], "apache2-doc/1.txt", "contentstring", nil},
{testBuckets[0], "apache2/1.txt", "contentstring", nil},
{testBuckets[0], "apache2/-sub/2.txt", "contentstring", nil},
{testBuckets[1], "azerty/1.txt", "contentstring", nil},
{testBuckets[1], "apache2-doc/1.txt", "contentstring", nil},
{testBuckets[1], "apache2/1.txt", "contentstring", nil},
}
for _, object := range testObjects {
md5Bytes := md5.Sum([]byte(object.content))
_, err = obj.PutObject(context.Background(), object.parentBucket, object.name, mustGetPutObjReader(t, bytes.NewBufferString(object.content),
int64(len(object.content)), hex.EncodeToString(md5Bytes[:]), ""), ObjectOptions{UserDefined: object.meta})
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}

}

// Formulating the result data set to be expected from ListObjects call inside the tests,
// This will be used in testCases and used for asserting the correctness of ListObjects output in the tests.

resultCases := []ListObjectsInfo{
{
Objects: []ObjectInfo{
{Name: "a-1.txt"},
{Name: "a.txt"},
{Name: "a/1.txt"},
{Name: "apache2-doc/1.txt"},
{Name: "apache2/-sub/2.txt"},
{Name: "apache2/1.txt"},
},
},
{
Objects: []ObjectInfo{
{Name: "apache2-doc/1.txt"},
{Name: "apache2/1.txt"},
},
},
{
Prefixes: []string{"apache2-doc/", "apache2/", "azerty/"},
},
}

testCases := []struct {
// Inputs to ListObjects.
bucketName string
prefix string
delimiter string
page int
// Expected output of ListObjects.
result ListObjectsInfo
}{
{testBuckets[0], "", "", 1, resultCases[0]},
{testBuckets[0], "a", "", 1, resultCases[0]},
{testBuckets[1], "apache", "", 1, resultCases[1]},
{testBuckets[1], "", "/", 1, resultCases[2]},
}

for i, testCase := range testCases {
testCase := testCase
t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) {
var foundObjects []ObjectInfo
var foundPrefixes []string
var marker = ""
for {
result, err := obj.ListObjects(context.Background(), testCase.bucketName,
testCase.prefix, marker, testCase.delimiter, testCase.page)
if err != nil {
t.Fatalf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
}
foundObjects = append(foundObjects, result.Objects...)
foundPrefixes = append(foundPrefixes, result.Prefixes...)
if !result.IsTruncated {
break
}
marker = result.NextMarker
if len(result.Objects) > 0 {
// Discard marker, so it cannot resume listing.
marker = result.Objects[len(result.Objects)-1].Name
}
}

if len(testCase.result.Objects) != len(foundObjects) {
t.Logf("want: %v", objInfoNames(testCase.result.Objects))
t.Logf("got: %v", objInfoNames(foundObjects))
t.Errorf("Test %d: %s: Expected number of objects in the result to be '%d', but found '%d' objects instead",
i+1, instanceType, len(testCase.result.Objects), len(foundObjects))
}
for j := 0; j < len(testCase.result.Objects); j++ {
if j >= len(foundObjects) {
t.Errorf("Test %d: %s: Expected object name to be \"%s\", but found nothing instead", i+1, instanceType, testCase.result.Objects[j].Name)
continue
}
if testCase.result.Objects[j].Name != foundObjects[j].Name {
t.Errorf("Test %d: %s: Expected object name to be \"%s\", but found \"%s\" instead", i+1, instanceType, testCase.result.Objects[j].Name, foundObjects[j].Name)
}
}

if len(testCase.result.Prefixes) != len(foundPrefixes) {
t.Logf("want: %v", testCase.result.Prefixes)
t.Logf("got: %v", foundPrefixes)
t.Errorf("Test %d: %s: Expected number of prefixes in the result to be '%d', but found '%d' prefixes instead",
i+1, instanceType, len(testCase.result.Prefixes), len(foundPrefixes))
}
for j := 0; j < len(testCase.result.Prefixes); j++ {
if j >= len(foundPrefixes) {
t.Errorf("Test %d: %s: Expected prefix name to be \"%s\", but found no result", i+1, instanceType, testCase.result.Prefixes[j])
continue
}
if testCase.result.Prefixes[j] != foundPrefixes[j] {
t.Errorf("Test %d: %s: Expected prefix name to be \"%s\", but found \"%s\" instead", i+1, instanceType, testCase.result.Prefixes[j], foundPrefixes[j])
}
}
})
}
}

// Initialize FS backend for the benchmark.
func initFSObjectsB(disk string, t *testing.B) (obj ObjectLayer) {
var err error

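The loop at the heart of that test is plain marker-based pagination: call ListObjects, accumulate results, and resume from the last object name until the listing is no longer truncated. Distilled into a standalone sketch (listFn stands in for ObjectLayer.ListObjects):

```go
package listing

type listResult struct {
	Names       []string
	NextMarker  string
	IsTruncated bool
}

// listAll drains a paginated listing the same way the test does,
// deliberately resuming from the last object name rather than the
// opaque NextMarker, to exercise name-based continuation.
func listAll(listFn func(marker string, max int) listResult, pageSize int) []string {
	var all []string
	marker := ""
	for {
		res := listFn(marker, pageSize)
		all = append(all, res.Names...)
		if !res.IsTruncated {
			return all
		}
		marker = res.NextMarker
		if n := len(res.Names); n > 0 {
			marker = res.Names[n-1]
		}
	}
}
```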
@@ -54,7 +54,7 @@ import (
"github.com/minio/minio/internal/handlers"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
-"github.com/minio/minio/internal/ioutil"
+xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/s3select"

@@ -504,14 +504,14 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
setHeadGetRespHeaders(w, r.Form)

statusCodeWritten := false
-httpWriter := ioutil.WriteOnClose(w)
+httpWriter := xioutil.WriteOnClose(w)
if rs != nil || opts.PartNumber > 0 {
statusCodeWritten = true
w.WriteHeader(http.StatusPartialContent)
}

// Write object content to response body
-if _, err = io.Copy(httpWriter, gr); err != nil {
+if _, err = xioutil.Copy(httpWriter, gr); err != nil {
if !httpWriter.HasWritten() && !statusCodeWritten {
// write error response only if no data or headers have been written to client yet
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)

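The httpWriter logic above works because WriteOnClose remembers whether anything was already sent before an error occurred; only a fully unwritten response may still be turned into an XML error. A simplified sketch of that wrapper (not the exact internal/ioutil implementation):

```go
package ioutilsketch

import "io"

// writeOnClose records whether any bytes have been written, so a
// handler can still send a proper error response when nothing was.
type writeOnClose struct {
	w       io.Writer
	written bool
}

func (w *writeOnClose) Write(p []byte) (int, error) {
	w.written = true
	return w.w.Write(p)
}

func (w *writeOnClose) HasWritten() bool { return w.written }
```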
@@ -2283,6 +2283,9 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
return
}
+newMultipartUpload := objectAPI.NewMultipartUpload
+if api.CacheAPI() != nil {
+newMultipartUpload = api.CacheAPI().NewMultipartUpload
+}

uploadID, err := newMultipartUpload(ctx, bucket, object, opts)
if err != nil {

@@ -2597,9 +2600,13 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
}

srcInfo.PutObjReader = pReader
+copyObjectPart := objectAPI.CopyObjectPart
+if api.CacheAPI() != nil {
+copyObjectPart = api.CacheAPI().CopyObjectPart
+}
// Copy source object to destination, if source and destination
// object is same then only metadata is updated.
-partInfo, err := objectAPI.CopyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID,
+partInfo, err := copyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID,
startOffset, length, srcInfo, srcOpts, dstOpts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)

@@ -2854,6 +2861,9 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
}

+putObjectPart := objectAPI.PutObjectPart
+if api.CacheAPI() != nil {
+putObjectPart = api.CacheAPI().PutObjectPart
+}

partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, pReader, opts)
if err != nil {

@@ -2907,6 +2917,9 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
return
}
+abortMultipartUpload := objectAPI.AbortMultipartUpload
+if api.CacheAPI() != nil {
+abortMultipartUpload = api.CacheAPI().AbortMultipartUpload
+}

if s3Error := checkRequestAuthType(ctx, r, policy.AbortMultipartUploadAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)

@@ -3191,7 +3204,9 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
s3MD5 := getCompleteMultipartMD5(originalCompleteParts)

completeMultiPartUpload := objectAPI.CompleteMultipartUpload

if api.CacheAPI() != nil {
completeMultiPartUpload = api.CacheAPI().CompleteMultipartUpload
}
// This code is specifically to handle the requirements for slow
// complete multipart upload operations on FS mode.
writeErrorResponseWithoutXMLHeader := func(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {

@@ -3209,7 +3224,6 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
setCommonHeaders(w)
w.Header().Set(xhttp.ContentType, string(mimeXML))
w.Write(encodedErrorResponse)
-w.(http.Flusher).Flush()
}

versioned := globalBucketVersioningSys.Enabled(bucket)

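All of the multipart hunks above apply the same delegation pattern: resolve the operation to either the cache layer or the object layer once, then call through the chosen function value. Reduced to its core, with interfaces simplified for illustration:

```go
package delegate

// op is any multipart operation resolved up front, mirroring
// `newMultipartUpload := objectAPI.NewMultipartUpload` followed by the
// optional CacheAPI override in the handlers above.
type op func(bucket, object string) (string, error)

// pick returns the cache-layer operation when a cache is configured,
// falling back to the object layer otherwise.
func pick(objectOp, cacheOp op, cacheEnabled bool) op {
	if cacheEnabled && cacheOp != nil {
		return cacheOp
	}
	return objectOp
}
```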
@@ -3522,53 +3536,39 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r
return
}

-getObjectInfo := objectAPI.GetObjectInfo
-if api.CacheAPI() != nil {
-getObjectInfo = api.CacheAPI().GetObjectInfo
-}
-
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

-objInfo, err := getObjectInfo(ctx, bucket, object, opts)
+popts := ObjectOptions{
+MTime: opts.MTime,
+VersionID: opts.VersionID,
+EvalMetadataFn: func(oi ObjectInfo) error {
+oi.UserDefined[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = strings.ToUpper(string(legalHold.Status))
+oi.UserDefined[ReservedMetadataPrefixLower+ObjectLockLegalHoldTimestamp] = UTCNow().Format(time.RFC3339Nano)
+
+dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(oi, replication.MetadataReplicationType, opts))
+if dsc.ReplicateAny() {
+oi.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
+oi.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
+}
+return nil
+},
+}
+
+objInfo, err := objectAPI.PutObjectMetadata(ctx, bucket, object, popts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if objInfo.DeleteMarker {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
-objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = strings.ToUpper(string(legalHold.Status))
-objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockLegalHoldTimestamp] = UTCNow().Format(time.RFC3339Nano)
-
-dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(objInfo, replication.MetadataReplicationType, opts))
-if dsc.ReplicateAny() {
-objInfo.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
-objInfo.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
-}
-// if version-id is not specified retention is supposed to be set on the latest object.
-if opts.VersionID == "" {
-opts.VersionID = objInfo.VersionID
-}
-popts := ObjectOptions{
-MTime: opts.MTime,
-VersionID: opts.VersionID,
-UserDefined: make(map[string]string, len(objInfo.UserDefined)),
-}
-for k, v := range objInfo.UserDefined {
-popts.UserDefined[k] = v
-}
-if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil {
-writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
-return
-}
if dsc.ReplicateAny() {
scheduleReplication(ctx, objInfo.Clone(), objectAPI, dsc, replication.MetadataReplicationType)
}

writeSuccessResponseHeadersOnly(w)

// Notify object event.

@@ -3704,50 +3704,37 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r
return
}

-getObjectInfo := objectAPI.GetObjectInfo
-if api.CacheAPI() != nil {
-getObjectInfo = api.CacheAPI().GetObjectInfo
-}
-
-objInfo, s3Err := enforceRetentionBypassForPut(ctx, r, bucket, object, getObjectInfo, objRetention, cred, owner)
-if s3Err != ErrNone {
-writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
-return
-}
-if objInfo.DeleteMarker {
-writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-return
-}
-if objRetention.Mode.Valid() {
-objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = string(objRetention.Mode)
-objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = objRetention.RetainUntilDate.UTC().Format(time.RFC3339)
-} else {
-objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = ""
-objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = ""
-}
-objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockRetentionTimestamp] = UTCNow().Format(time.RFC3339Nano)
-
-dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(objInfo, replication.MetadataReplicationType, opts))
-if dsc.ReplicateAny() {
-objInfo.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
-objInfo.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
-}
-// if version-id is not specified retention is supposed to be set on the latest object.
-if opts.VersionID == "" {
-opts.VersionID = objInfo.VersionID
-}
popts := ObjectOptions{
-MTime: opts.MTime,
-VersionID: opts.VersionID,
-UserDefined: make(map[string]string, len(objInfo.UserDefined)),
+MTime: opts.MTime,
+VersionID: opts.VersionID,
+EvalMetadataFn: func(oi ObjectInfo) error {
+if err := enforceRetentionBypassForPut(ctx, r, oi, objRetention, cred, owner); err != nil {
+return err
+}
+if objRetention.Mode.Valid() {
+oi.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = string(objRetention.Mode)
+oi.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = objRetention.RetainUntilDate.UTC().Format(time.RFC3339)
+} else {
+oi.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = ""
+oi.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = ""
+}
+oi.UserDefined[ReservedMetadataPrefixLower+ObjectLockRetentionTimestamp] = UTCNow().Format(time.RFC3339Nano)
+dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(oi, replication.MetadataReplicationType, opts))
+if dsc.ReplicateAny() {
+oi.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
+oi.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
+}
+return nil
+},
}
-for k, v := range objInfo.UserDefined {
-popts.UserDefined[k] = v
-}
-if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil {

+objInfo, err := objectAPI.PutObjectMetadata(ctx, bucket, object, popts)
+if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

+dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(objInfo, replication.MetadataReplicationType, opts))
if dsc.ReplicateAny() {
scheduleReplication(ctx, objInfo.Clone(), objectAPI, dsc, replication.MetadataReplicationType)
}

@@ -32,7 +32,7 @@ import (
"sync/atomic"
"time"

-humanize "github.com/dustin/go-humanize"
+"github.com/dustin/go-humanize"
"github.com/google/uuid"
"github.com/gorilla/mux"
"github.com/minio/madmin-go"

@@ -55,9 +55,6 @@ func (s *peerRESTServer) GetLocksHandler(w http.ResponseWriter, r *http.Request)

ctx := newContext(r, w, "GetLocks")
logger.LogIf(ctx, gob.NewEncoder(w).Encode(globalLockServer.DupLockMap()))
-
-w.(http.Flusher).Flush()
-
}

// DeletePolicyHandler - deletes a policy on the server.

@@ -84,8 +81,6 @@ func (s *peerRESTServer) DeletePolicyHandler(w http.ResponseWriter, r *http.Requ
s.writeErrorResponse(w, err)
return
}
-
-w.(http.Flusher).Flush()
}

// LoadPolicyHandler - reloads a policy on the server.

@@ -112,8 +107,6 @@ func (s *peerRESTServer) LoadPolicyHandler(w http.ResponseWriter, r *http.Reques
s.writeErrorResponse(w, err)
return
}
-
-w.(http.Flusher).Flush()
}

// LoadPolicyMappingHandler - reloads a policy mapping on the server.

@@ -141,8 +134,6 @@ func (s *peerRESTServer) LoadPolicyMappingHandler(w http.ResponseWriter, r *http
s.writeErrorResponse(w, err)
return
}
-
-w.(http.Flusher).Flush()
}

// DeleteServiceAccountHandler - deletes a service account on the server.

@@ -169,8 +160,6 @@ func (s *peerRESTServer) DeleteServiceAccountHandler(w http.ResponseWriter, r *h
s.writeErrorResponse(w, err)
return
}
-
-w.(http.Flusher).Flush()
}

// LoadServiceAccountHandler - reloads a service account on the server.

@@ -197,8 +186,6 @@ func (s *peerRESTServer) LoadServiceAccountHandler(w http.ResponseWriter, r *htt
s.writeErrorResponse(w, err)
return
}
-
-w.(http.Flusher).Flush()
}

// DeleteUserHandler - deletes a user on the server.

@@ -225,8 +212,6 @@ func (s *peerRESTServer) DeleteUserHandler(w http.ResponseWriter, r *http.Reques
s.writeErrorResponse(w, err)
return
}
-
-w.(http.Flusher).Flush()
}

// LoadUserHandler - reloads a user on the server.

@@ -264,8 +249,6 @@ func (s *peerRESTServer) LoadUserHandler(w http.ResponseWriter, r *http.Request)
s.writeErrorResponse(w, err)
return
}
-
-w.(http.Flusher).Flush()
}

// LoadGroupHandler - reloads group along with members list.

@@ -288,8 +271,6 @@ func (s *peerRESTServer) LoadGroupHandler(w http.ResponseWriter, r *http.Request
s.writeErrorResponse(w, err)
return
}
-
-w.(http.Flusher).Flush()
}

// StartProfilingHandler - Issues the start profiling command.

@@ -329,8 +310,6 @@ func (s *peerRESTServer) StartProfilingHandler(w http.ResponseWriter, r *http.Re
}
globalProfiler[profiler] = prof
}
-
-w.(http.Flusher).Flush()
}

// DownloadProfilingDataHandler - returns profiled data.

@@ -346,8 +325,6 @@ func (s *peerRESTServer) DownloadProfilingDataHandler(w http.ResponseWriter, r *
s.writeErrorResponse(w, err)
return
}

defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(profileData))
}

@@ -361,7 +338,6 @@ func (s *peerRESTServer) ServerInfoHandler(w http.ResponseWriter, r *http.Reques
ctx := newContext(r, w, "ServerInfo")
info := getLocalServerProperty(globalEndpoints, r)

defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

@@ -395,7 +371,6 @@ func (s *peerRESTServer) NetInfoHandler(w http.ResponseWriter, r *http.Request)
return
}
w.Header().Set("FinalStatus", "Success")
-w.(http.Flusher).Flush()
}

func (s *peerRESTServer) DispatchNetInfoHandler(w http.ResponseWriter, r *http.Request) {

@@ -411,7 +386,6 @@ func (s *peerRESTServer) DispatchNetInfoHandler(w http.ResponseWriter, r *http.R

done(nil)
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
-w.(http.Flusher).Flush()
}

// GetDrivePerfInfosHandler - returns all disk's serial/parallel performance information.

@@ -426,7 +400,6 @@ func (s *peerRESTServer) GetDrivePerfInfosHandler(w http.ResponseWriter, r *http

info := getDrivePerfInfos(ctx, r.Host)

defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

@@ -442,7 +415,6 @@ func (s *peerRESTServer) GetCPUsHandler(w http.ResponseWriter, r *http.Request)

info := madmin.GetCPUs(ctx, r.Host)

defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

@@ -458,7 +430,6 @@ func (s *peerRESTServer) GetPartitionsHandler(w http.ResponseWriter, r *http.Req

info := madmin.GetPartitions(ctx, r.Host)

defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

@@ -474,7 +445,6 @@ func (s *peerRESTServer) GetOSInfoHandler(w http.ResponseWriter, r *http.Request

info := madmin.GetOSInfo(ctx, r.Host)

defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

@@ -490,7 +460,6 @@ func (s *peerRESTServer) GetProcInfoHandler(w http.ResponseWriter, r *http.Reque

info := madmin.GetProcInfo(ctx, r.Host)

defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

@@ -506,7 +475,6 @@ func (s *peerRESTServer) GetMemInfoHandler(w http.ResponseWriter, r *http.Reques

info := madmin.GetMemInfo(ctx, r.Host)

defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

@@ -523,7 +491,6 @@ func (s *peerRESTServer) GetSysConfigHandler(w http.ResponseWriter, r *http.Requ

info := madmin.GetSysConfig(ctx, r.Host)

defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

@@ -540,7 +507,6 @@ func (s *peerRESTServer) GetSysServicesHandler(w http.ResponseWriter, r *http.Re

info := madmin.GetSysServices(ctx, r.Host)

defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

@@ -556,7 +522,6 @@ func (s *peerRESTServer) GetSysErrorsHandler(w http.ResponseWriter, r *http.Requ

info := madmin.GetSysErrors(ctx, r.Host)

defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

@@ -618,7 +583,6 @@ func (s *peerRESTServer) GetBucketStatsHandler(w http.ResponseWriter, r *http.Re
bs := BucketStats{
ReplicationStats: globalReplicationStats.Get(bucketName),
}
defer w.(http.Flusher).Flush()
logger.LogIf(r.Context(), msgp.Encode(w, &bs))
}

@@ -749,7 +713,6 @@ func (s *peerRESTServer) PutBucketNotificationHandler(w http.ResponseWriter, r *
}

globalNotificationSys.AddRulesMap(bucketName, rulesMap)
-w.(http.Flusher).Flush()
}

// Return disk IDs of all the local disks.

@@ -810,7 +773,6 @@ func (s *peerRESTServer) GetLocalDiskIDs(w http.ResponseWriter, r *http.Request)

ids := getLocalDiskIDs(z)
logger.LogIf(ctx, gob.NewEncoder(w).Encode(ids))
-w.(http.Flusher).Flush()
}

// ServerUpdateHandler - updates the current server.

@@ -1060,7 +1022,6 @@ func (s *peerRESTServer) BackgroundHealStatusHandler(w http.ResponseWriter, r *h
return
}

defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(state))
}

@@ -1144,7 +1105,6 @@ func (s *peerRESTServer) GetBandwidth(w http.ResponseWriter, r *http.Request) {
s.writeErrorResponse(w, errors.New("Encoding report failed: "+err.Error()))
return
}
-w.(http.Flusher).Flush()
}

// GetPeerMetrics gets the metrics to be federated across peers.

@@ -1167,7 +1127,6 @@ func (s *peerRESTServer) GetPeerMetrics(w http.ResponseWriter, r *http.Request)
return
}
}
-w.(http.Flusher).Flush()
}

// SpeedtestResult return value of the speedtest function

@@ -1352,7 +1311,6 @@ func (s *peerRESTServer) SpeedtestHandler(w http.ResponseWriter, r *http.Request

done(nil)
logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result))
-w.(http.Flusher).Flush()
}

// registerPeerRESTHandlers - register peer rest router.

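Nearly every removed line in this file is an unchecked `w.(http.Flusher).Flush()`; that type assertion panics if the ResponseWriter does not implement http.Flusher, and net/http flushes any buffered response when the handler returns anyway. When an explicit flush really is needed, the defensive form looks like this:

```go
package handlers

import "net/http"

// flushIfPossible flushes buffered response data only when the
// underlying writer actually supports it, avoiding the panicking
// unchecked type assertion.
func flushIfPossible(w http.ResponseWriter) {
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}
}
```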
@@ -30,7 +30,7 @@ import (
"testing"
"time"

-humanize "github.com/dustin/go-humanize"
+"github.com/dustin/go-humanize"
)

const (

@@ -267,6 +267,16 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)},
policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"],["eq", "$x-amz-meta-uuid", "1234"]]}`,
},
+// Success case, big body.
+{
+objectName: "test",
+data: bytes.Repeat([]byte("a"), 10<<20),
+expectedRespStatus: http.StatusNoContent,
+accessKey: credentials.AccessKey,
+secretKey: credentials.SecretKey,
+dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)},
+policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"],["eq", "$x-amz-meta-uuid", "1234"]]}`,
+},
// Corrupted Base 64 result
{
objectName: "test",

@@ -40,41 +40,24 @@ func registerDistErasureRouters(router *mux.Router, endpointServerPools Endpoint

// List of some generic handlers which are applied for all incoming requests.
var globalHandlers = []mux.MiddlewareFunc{
// filters HTTP headers which are treated as metadata and are reserved
// for internal use only.
filterReservedMetadata,
// Enforce rules specific for TLS requests
setSSETLSHandler,
-// Auth handler verifies incoming authorization headers and
-// routes them accordingly. Client receives a HTTP error for
-// invalid/unsupported signatures.
-setAuthHandler,
-//
// Validates all incoming requests to have a valid date header.
setTimeValidityHandler,
// Validates if incoming request is for restricted buckets.
setReservedBucketHandler,
+setAuthHandler,
// Redirect some pre-defined browser request paths to a static location prefix.
setBrowserRedirectHandler,
// Adds 'crossdomain.xml' policy handler to serve legacy flash clients.
setCrossDomainPolicy,
-// Limits all header sizes to a maximum fixed limit
-setRequestHeaderSizeLimitHandler,
-// Limits all requests size to a maximum fixed limit
-setRequestSizeLimitHandler,
+// Limits all body and header sizes to a maximum fixed limit
+setRequestLimitHandler,
// Network statistics
setHTTPStatsHandler,
// Validate all the incoming requests.
setRequestValidityHandler,
-// Forward path style requests to actual host in a bucket federated setup.
-setBucketForwardingHandler,
// set HTTP security headers such as Content-Security-Policy.
addSecurityHeaders,
// set x-amz-request-id header.
addCustomHeaders,
// add redirect handler to redirect
// requests when object layer is not
// initialized.
setRedirectHandler,
// Add new handlers here.
}

@@ -104,6 +87,10 @@ func configureServerHandler(endpointServerPools EndpointServerPools) (http.Handl
// Add API router
registerAPIRouter(router)

+// Enable bucket forwarding handler only if bucket federation is enabled.
+if globalDNSConfig != nil && globalBucketFederation {
+globalHandlers = append(globalHandlers, setBucketForwardingHandler)
+}
router.Use(globalHandlers...)

return router, nil

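The second hunk shows why setBucketForwardingHandler left the static list: middleware that only matters for federated deployments is now appended conditionally, before the single router.Use call. A condensed sketch of that shape with gorilla/mux (names simplified):

```go
package routersketch

import "github.com/gorilla/mux"

// buildRouter mirrors configureServerHandler: start from the static
// middleware chain, append the forwarding handler only when bucket
// federation is configured, then register the chain once.
func buildRouter(base []mux.MiddlewareFunc, forward mux.MiddlewareFunc, federated bool) *mux.Router {
	r := mux.NewRouter()
	chain := base
	if federated {
		chain = append(chain, forward)
	}
	r.Use(chain...)
	return r
}
```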
@@ -23,13 +23,13 @@ import (
"errors"
"fmt"
"io"
-stdioutil "io/ioutil"
+"io/ioutil"
"net/http"
"sort"
"strings"

"github.com/minio/minio/internal/crypto"
-"github.com/minio/minio/internal/ioutil"
+xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/bucket/policy"
xnet "github.com/minio/pkg/net"

@@ -187,7 +187,7 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context,
return
}
} else {
-rc = stdioutil.NopCloser(bytes.NewReader([]byte{}))
+rc = ioutil.NopCloser(bytes.NewReader([]byte{}))
}

defer rc.Close()

@@ -199,10 +199,10 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context,

setHeadGetRespHeaders(w, r.Form)

-httpWriter := ioutil.WriteOnClose(w)
+httpWriter := xioutil.WriteOnClose(w)

// Write object content to response body
-if _, err = io.Copy(httpWriter, rc); err != nil {
+if _, err = xioutil.Copy(httpWriter, rc); err != nil {
if !httpWriter.HasWritten() {
// write error response only if no data or headers have been written to client yet
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)

@@ -324,7 +324,7 @@ func getFilesListFromZIPObject(ctx context.Context, objectAPI ObjectLayer, bucke
if err != nil {
return nil, ObjectInfo{}, err
}
-b, err := stdioutil.ReadAll(gr)
+b, err := ioutil.ReadAll(gr)
if err != nil {
gr.Close()
return nil, ObjectInfo{}, err

@@ -511,7 +511,7 @@ func serverMain(ctx *cli.Context) {
addrs = append(addrs, globalMinioAddr)
}

-httpServer := xhttp.NewServer(addrs, criticalErrorHandler{corsHandler(handler)}, getCert)
+httpServer := xhttp.NewServer(addrs, setCriticalErrorHandler(corsHandler(handler)), getCert)
httpServer.BaseContext = func(listener net.Listener) context.Context {
return GlobalContext
}

@@ -570,7 +570,7 @@ func serverMain(ctx *cli.Context) {
}

// Initialize users credentials and policies in background right after config has initialized.
-go globalIAMSys.Init(GlobalContext, newObject, globalEtcdClient)
+go globalIAMSys.Init(GlobalContext, newObject, globalEtcdClient, globalRefreshIAMInterval)

initDataScanner(GlobalContext, newObject)

@@ -19,6 +19,7 @@ package cmd

import (
"bytes"
+"context"
"encoding/xml"
"fmt"
"io"

@@ -158,6 +159,27 @@ func (s *TestSuiteCommon) SetUpSuite(c *check) {
s.secretKey = s.testServer.SecretKey
}

+func (s *TestSuiteCommon) RestartTestServer(c *check) {
+// Shutdown.
+s.testServer.cancel()
+s.testServer.Server.Close()
+s.testServer.Obj.Shutdown(context.Background())
+
+// Restart.
+ctx, cancel := context.WithCancel(context.Background())
+
+s.testServer.cancel = cancel
+s.testServer = initTestServerWithBackend(ctx, c, s.testServer, s.testServer.Obj, s.testServer.rawDiskPaths)
+if s.secure {
+s.testServer.Server.StartTLS()
+} else {
+s.testServer.Server.Start()
+}
+
+s.client = s.testServer.Server.Client()
+s.endPoint = s.testServer.Server.URL
+}
+
func (s *TestSuiteCommon) TearDownSuite(c *check) {
s.testServer.Stop()
}

@@ -37,9 +37,6 @@ const (
// Global service signal channel.
var globalServiceSignalCh chan serviceSignal

-// GlobalServiceDoneCh - Global service done channel.
-var GlobalServiceDoneCh <-chan struct{}
-
// GlobalContext context that is canceled when server is requested to shut down.
var GlobalContext context.Context

@@ -48,7 +45,6 @@ var cancelGlobalContext context.CancelFunc

func initGlobalContext() {
GlobalContext, cancelGlobalContext = context.WithCancel(context.Background())
-GlobalServiceDoneCh = GlobalContext.Done()
globalServiceSignalCh = make(chan serviceSignal)
}

@@ -20,7 +20,6 @@ package cmd
import (
"bytes"
"context"
-"crypto/rand"
"crypto/tls"
"encoding/base64"
"encoding/json"

@@ -355,20 +354,12 @@ func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, sites []madmin

// Generate a secret key for the service account.
-var secretKey string
-{
-secretKeyBuf := make([]byte, 40)
-n, err := rand.Read(secretKeyBuf)
-if err == nil && n != 40 {
-err = fmt.Errorf("Unable to read 40 random bytes to generate secret key")
+_, secretKey, err := auth.GenerateCredentials()
+if err != nil {
+return madmin.ReplicateAddStatus{}, SRError{
+Cause: err,
+Code: ErrInternalError,
+}
-if err != nil {
-return madmin.ReplicateAddStatus{}, SRError{
-Cause: err,
-Code: ErrInternalError,
-}
-}
-secretKey = strings.Replace(string([]byte(base64.StdEncoding.EncodeToString(secretKeyBuf))[:40]),
-"/", "+", -1)
}

svcCred, err := globalIAMSys.NewServiceAccount(ctx, sites[selfIdx].AccessKey, nil, newServiceAccountOpts{

@@ -1270,9 +1261,7 @@ func (c *SiteReplicationSys) getAdminClient(ctx context.Context, deploymentID st
}

func (c *SiteReplicationSys) getPeerCreds() (*auth.Credentials, error) {
-globalIAMSys.store.rlock()
-defer globalIAMSys.store.runlock()
-creds, ok := globalIAMSys.iamUsersMap[c.state.ServiceAccountAccessKey]
+creds, ok := globalIAMSys.store.GetUser(c.state.ServiceAccountAccessKey)
if !ok {
return nil, errors.New("site replication service account not found!")
}

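For comparison, the hand-rolled block deleted above boiled down to the following, reconstructed from the removed lines (auth.GenerateCredentials now replaces it with a vetted helper):

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"strings"
)

// legacySecretKey reproduces the deleted logic: 40 random bytes,
// base64-encoded, truncated to 40 characters, with "/" mapped to "+"
// so the key stays URL- and shell-friendly.
func legacySecretKey() (string, error) {
	buf := make([]byte, 40)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	s := base64.StdEncoding.EncodeToString(buf)[:40]
	return strings.ReplaceAll(s, "/", "+"), nil
}

func main() {
	key, err := legacySecretKey()
	fmt.Println(key, err)
}
```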
@@ -80,20 +80,20 @@ type FilesInfoVersions struct {
}

// FileInfoVersions represent a list of versions for a given file.
+//msgp:tuple FileInfoVersions
+// The above means that any added/deleted fields are incompatible.
type FileInfoVersions struct {
// Name of the volume.
-Volume string
+Volume string `msg:"v,omitempty"`

// Name of the file.
-Name string
-
-IsEmptyDir bool
+Name string `msg:"n,omitempty"`

// Represents the latest mod time of the
// latest version.
-LatestModTime time.Time
+LatestModTime time.Time `msg:"lm"`

-Versions []FileInfo
+Versions []FileInfo `msg:"vs"`
}

// findVersionIndex will return the version index where the version

@@ -115,69 +115,74 @@ func (f *FileInfoVersions) findVersionIndex(v string) int {
// The above means that any added/deleted fields are incompatible.
type FileInfo struct {
// Name of the volume.
-Volume string
+Volume string `msg:"v,omitempty"`

// Name of the file.
-Name string
+Name string `msg:"n,omitempty"`

// Version of the file.
-VersionID string
+VersionID string `msg:"vid,omitempty"`

// Indicates if the version is the latest
-IsLatest bool
+IsLatest bool `msg:"is"`

// Deleted is set when this FileInfo represents
// a deleted marker for a versioned bucket.
-Deleted bool
+Deleted bool `msg:"del"`

// TransitionStatus is set to Pending/Complete for transitioned
// entries based on state of transition
-TransitionStatus string
+TransitionStatus string `msg:"ts"`
// TransitionedObjName is the object name on the remote tier corresponding
// to object (version) on the source tier.
-TransitionedObjName string
+TransitionedObjName string `msg:"to"`
// TransitionTier is the storage class label assigned to remote tier.
-TransitionTier string
+TransitionTier string `msg:"tt"`
// TransitionVersionID stores a version ID of the object associate
// with the remote tier.
-TransitionVersionID string
+TransitionVersionID string `msg:"tv"`
// ExpireRestored indicates that the restored object is to be expired.
-ExpireRestored bool
+ExpireRestored bool `msg:"exp"`

// DataDir of the file
-DataDir string
+DataDir string `msg:"dd"`

// Indicates if this object is still in V1 format.
-XLV1 bool
+XLV1 bool `msg:"v1"`

// Date and time when the file was last modified, if Deleted
// is 'true' this value represents when it was deleted.
-ModTime time.Time
+ModTime time.Time `msg:"mt"`

// Total file size.
-Size int64
+Size int64 `msg:"sz"`

// File mode bits.
-Mode uint32
+Mode uint32 `msg:"m"`

// File metadata
-Metadata map[string]string
+Metadata map[string]string `msg:"meta"`

// All the parts per object.
-Parts []ObjectPartInfo
+Parts []ObjectPartInfo `msg:"parts"`

// Erasure info for all objects.
-Erasure ErasureInfo
+Erasure ErasureInfo `msg:"ei"`

-MarkDeleted bool // mark this version as deleted
-ReplicationState ReplicationState // Internal replication state to be passed back in ObjectInfo
+MarkDeleted bool `msg:"md"` // mark this version as deleted
+ReplicationState ReplicationState `msg:"rs"` // Internal replication state to be passed back in ObjectInfo

-Data []byte // optionally carries object data
+Data []byte `msg:"d,allownil"` // optionally carries object data

-NumVersions int
-SuccessorModTime time.Time
+NumVersions int `msg:"nv"`
+SuccessorModTime time.Time `msg:"smt"`

-Fresh bool // indicates this is a first time call to write FileInfo.
+Fresh bool `msg:"fr"` // indicates this is a first time call to write FileInfo.

+// Position of this version or object in a multi-object delete call,
+// no other caller must set this value other than multi-object delete call.
+// usage in other calls is undefined, please avoid.
+Idx int `msg:"i"`
}

// InlineData returns true if object contents are inlined alongside its metadata.

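The new `msg:"...,omitempty"` tags shrink every serialized FileInfo twice over: single-letter keys replace full field names on the wire, and zero-value strings are skipped entirely. The tag semantics are easiest to see with encoding/json, which uses the same convention (shown purely for illustration; msgp applies the same idea to MessagePack):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type fileInfo struct {
	Volume string `json:"v,omitempty"`
	Name   string `json:"n,omitempty"`
}

func main() {
	b, _ := json.Marshal(fileInfo{Name: "obj.txt"})
	fmt.Println(string(b)) // {"n":"obj.txt"} — short key, empty Volume omitted
}
```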
@ -550,8 +550,8 @@ func (z *FileInfo) DecodeMsg(dc *msgp.Reader) (err error) {
|
|||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
if zb0001 != 24 {
|
||||
err = msgp.ArrayError{Wanted: 24, Got: zb0001}
|
||||
if zb0001 != 25 {
|
||||
err = msgp.ArrayError{Wanted: 25, Got: zb0001}
|
||||
return
|
||||
}
|
||||
z.Volume, err = dc.ReadString()
|
||||
|
@ -711,13 +711,18 @@ func (z *FileInfo) DecodeMsg(dc *msgp.Reader) (err error) {
|
|||
err = msgp.WrapError(err, "Fresh")
|
||||
return
|
||||
}
|
||||
z.Idx, err = dc.ReadInt()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Idx")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *FileInfo) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// array header, size 24
|
||||
err = en.Append(0xdc, 0x0, 0x18)
|
||||
// array header, size 25
|
||||
err = en.Append(0xdc, 0x0, 0x19)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
@ -860,14 +865,19 @@ func (z *FileInfo) EncodeMsg(en *msgp.Writer) (err error) {
|
|||
err = msgp.WrapError(err, "Fresh")
|
||||
return
|
||||
}
|
||||
err = en.WriteInt(z.Idx)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Idx")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *FileInfo) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// array header, size 24
|
||||
o = append(o, 0xdc, 0x0, 0x18)
|
||||
// array header, size 25
|
||||
o = append(o, 0xdc, 0x0, 0x19)
|
||||
o = msgp.AppendString(o, z.Volume)
|
||||
o = msgp.AppendString(o, z.Name)
|
||||
o = msgp.AppendString(o, z.VersionID)
|
||||
|
@ -911,6 +921,7 @@ func (z *FileInfo) MarshalMsg(b []byte) (o []byte, err error) {
|
|||
o = msgp.AppendInt(o, z.NumVersions)
|
||||
o = msgp.AppendTime(o, z.SuccessorModTime)
|
||||
o = msgp.AppendBool(o, z.Fresh)
|
||||
o = msgp.AppendInt(o, z.Idx)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -922,8 +933,8 @@ func (z *FileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
|||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
if zb0001 != 24 {
|
||||
err = msgp.ArrayError{Wanted: 24, Got: zb0001}
|
||||
if zb0001 != 25 {
|
||||
err = msgp.ArrayError{Wanted: 25, Got: zb0001}
|
||||
return
|
||||
}
|
||||
z.Volume, bts, err = msgp.ReadStringBytes(bts)
|
||||
|
@ -1083,6 +1094,11 @@ func (z *FileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
|||
err = msgp.WrapError(err, "Fresh")
|
||||
return
|
||||
}
|
||||
z.Idx, bts, err = msgp.ReadIntBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Idx")
|
||||
return
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}
|
||||
|
@ -1100,87 +1116,62 @@ func (z *FileInfo) Msgsize() (s int) {
|
|||
for za0003 := range z.Parts {
|
||||
s += z.Parts[za0003].Msgsize()
|
||||
}
|
||||
s += z.Erasure.Msgsize() + msgp.BoolSize + z.ReplicationState.Msgsize() + msgp.BytesPrefixSize + len(z.Data) + msgp.IntSize + msgp.TimeSize + msgp.BoolSize
|
||||
s += z.Erasure.Msgsize() + msgp.BoolSize + z.ReplicationState.Msgsize() + msgp.BytesPrefixSize + len(z.Data) + msgp.IntSize + msgp.TimeSize + msgp.BoolSize + msgp.IntSize
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *FileInfoVersions) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, err = dc.ReadMapHeader()
|
||||
zb0001, err = dc.ReadArrayHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, err = dc.ReadMapKeyPtr()
|
||||
if zb0001 != 4 {
|
||||
err = msgp.ArrayError{Wanted: 4, Got: zb0001}
|
||||
return
|
||||
}
|
||||
z.Volume, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Volume")
|
||||
return
|
||||
}
|
||||
z.Name, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Name")
|
||||
return
|
||||
}
|
||||
z.LatestModTime, err = dc.ReadTime()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LatestModTime")
|
||||
return
|
||||
}
|
||||
var zb0002 uint32
|
||||
zb0002, err = dc.ReadArrayHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Versions")
|
||||
return
|
||||
}
|
||||
if cap(z.Versions) >= int(zb0002) {
|
||||
z.Versions = (z.Versions)[:zb0002]
|
||||
} else {
|
||||
z.Versions = make([]FileInfo, zb0002)
|
||||
}
|
||||
for za0001 := range z.Versions {
|
||||
err = z.Versions[za0001].DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
err = msgp.WrapError(err, "Versions", za0001)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "Volume":
|
||||
z.Volume, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Volume")
|
||||
return
|
||||
}
|
||||
case "Name":
|
||||
z.Name, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Name")
|
||||
return
|
||||
}
|
||||
case "IsEmptyDir":
|
||||
z.IsEmptyDir, err = dc.ReadBool()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "IsEmptyDir")
|
||||
return
|
||||
}
|
||||
case "LatestModTime":
|
||||
z.LatestModTime, err = dc.ReadTime()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LatestModTime")
|
||||
return
|
||||
}
|
||||
case "Versions":
|
||||
var zb0002 uint32
|
||||
zb0002, err = dc.ReadArrayHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Versions")
|
||||
return
|
||||
}
|
||||
if cap(z.Versions) >= int(zb0002) {
|
||||
z.Versions = (z.Versions)[:zb0002]
|
||||
} else {
|
||||
z.Versions = make([]FileInfo, zb0002)
|
||||
}
|
||||
for za0001 := range z.Versions {
|
||||
err = z.Versions[za0001].DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Versions", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z *FileInfoVersions) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 5
	// write "Volume"
	err = en.Append(0x85, 0xa6, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65)
	// array header, size 4
	err = en.Append(0x94)
	if err != nil {
		return
	}

@@ -1189,41 +1180,16 @@ func (z *FileInfoVersions) EncodeMsg(en *msgp.Writer) (err error) {
		err = msgp.WrapError(err, "Volume")
		return
	}
	// write "Name"
	err = en.Append(0xa4, 0x4e, 0x61, 0x6d, 0x65)
	if err != nil {
		return
	}
	err = en.WriteString(z.Name)
	if err != nil {
		err = msgp.WrapError(err, "Name")
		return
	}
	// write "IsEmptyDir"
	err = en.Append(0xaa, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x44, 0x69, 0x72)
	if err != nil {
		return
	}
	err = en.WriteBool(z.IsEmptyDir)
	if err != nil {
		err = msgp.WrapError(err, "IsEmptyDir")
		return
	}
	// write "LatestModTime"
	err = en.Append(0xad, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65)
	if err != nil {
		return
	}
	err = en.WriteTime(z.LatestModTime)
	if err != nil {
		err = msgp.WrapError(err, "LatestModTime")
		return
	}
	// write "Versions"
	err = en.Append(0xa8, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
	if err != nil {
		return
	}
	err = en.WriteArrayHeader(uint32(len(z.Versions)))
	if err != nil {
		err = msgp.WrapError(err, "Versions")

@@ -1242,21 +1208,11 @@ func (z *FileInfoVersions) EncodeMsg(en *msgp.Writer) (err error) {
// MarshalMsg implements msgp.Marshaler
func (z *FileInfoVersions) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 5
	// string "Volume"
	o = append(o, 0x85, 0xa6, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65)
	// array header, size 4
	o = append(o, 0x94)
	o = msgp.AppendString(o, z.Volume)
	// string "Name"
	o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
	o = msgp.AppendString(o, z.Name)
	// string "IsEmptyDir"
	o = append(o, 0xaa, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x44, 0x69, 0x72)
	o = msgp.AppendBool(o, z.IsEmptyDir)
	// string "LatestModTime"
	o = append(o, 0xad, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65)
	o = msgp.AppendTime(o, z.LatestModTime)
	// string "Versions"
	o = append(o, 0xa8, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
	o = msgp.AppendArrayHeader(o, uint32(len(z.Versions)))
	for za0001 := range z.Versions {
		o, err = z.Versions[za0001].MarshalMsg(o)

@@ -1270,72 +1226,48 @@ func (z *FileInfoVersions) MarshalMsg(b []byte) (o []byte, err error) {

// UnmarshalMsg implements msgp.Unmarshaler
func (z *FileInfoVersions) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
	if zb0001 != 4 {
		err = msgp.ArrayError{Wanted: 4, Got: zb0001}
		return
	}
	z.Volume, bts, err = msgp.ReadStringBytes(bts)
	if err != nil {
		err = msgp.WrapError(err, "Volume")
		return
	}
	z.Name, bts, err = msgp.ReadStringBytes(bts)
	if err != nil {
		err = msgp.WrapError(err, "Name")
		return
	}
	z.LatestModTime, bts, err = msgp.ReadTimeBytes(bts)
	if err != nil {
		err = msgp.WrapError(err, "LatestModTime")
		return
	}
	var zb0002 uint32
	zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err, "Versions")
		return
	}
	if cap(z.Versions) >= int(zb0002) {
		z.Versions = (z.Versions)[:zb0002]
	} else {
		z.Versions = make([]FileInfo, zb0002)
	}
	for za0001 := range z.Versions {
		bts, err = z.Versions[za0001].UnmarshalMsg(bts)
		if err != nil {
			err = msgp.WrapError(err)
			err = msgp.WrapError(err, "Versions", za0001)
			return
		}
		switch msgp.UnsafeString(field) {
		case "Volume":
			z.Volume, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Volume")
				return
			}
		case "Name":
			z.Name, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Name")
				return
			}
		case "IsEmptyDir":
			z.IsEmptyDir, bts, err = msgp.ReadBoolBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "IsEmptyDir")
				return
			}
		case "LatestModTime":
			z.LatestModTime, bts, err = msgp.ReadTimeBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "LatestModTime")
				return
			}
		case "Versions":
			var zb0002 uint32
			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Versions")
				return
			}
			if cap(z.Versions) >= int(zb0002) {
				z.Versions = (z.Versions)[:zb0002]
			} else {
				z.Versions = make([]FileInfo, zb0002)
			}
			for za0001 := range z.Versions {
				bts, err = z.Versions[za0001].UnmarshalMsg(bts)
				if err != nil {
					err = msgp.WrapError(err, "Versions", za0001)
					return
				}
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return

@@ -1343,7 +1275,7 @@ func (z *FileInfoVersions) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *FileInfoVersions) Msgsize() (s int) {
	s = 1 + 7 + msgp.StringPrefixSize + len(z.Volume) + 5 + msgp.StringPrefixSize + len(z.Name) + 11 + msgp.BoolSize + 14 + msgp.TimeSize + 9 + msgp.ArrayHeaderSize
	s = 1 + msgp.StringPrefixSize + len(z.Volume) + msgp.StringPrefixSize + len(z.Name) + msgp.TimeSize + msgp.ArrayHeaderSize
	for za0001 := range z.Versions {
		s += z.Versions[za0001].Msgsize()
	}
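The regenerated code above is the visible effect of switching FileInfoVersions from msgp's map encoding (field names on the wire) to tuple encoding (a four-element array, fields identified by position), which is also why the new Msgsize drops the per-key byte counts. A minimal sketch of how such a change is typically requested from the msgp generator; the field set shown is an assumption for illustration, not the full MinIO definition:

package cmd

import "time"

//go:generate msgp

// Tuple encoding drops the per-field key strings ("Volume", "Name", ...)
// from the wire; that is why the generated code now writes an array
// header (0x94) instead of a map header (0x85).
//msgp:tuple FileInfoVersions

// FileInfoVersions groups all versions of one object (illustrative sketch;
// FileInfo is the type defined elsewhere in this package).
type FileInfoVersions struct {
	Volume        string
	Name          string
	LatestModTime time.Time
	Versions      []FileInfo
}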
@@ -57,7 +57,7 @@ type StorageAPI interface {

	// Metadata operations
	DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool) error
	DeleteVersions(ctx context.Context, volume string, versions []FileInfo) []error
	DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions) []error
	WriteMetadata(ctx context.Context, volume, path string, fi FileInfo) error
	UpdateMetadata(ctx context.Context, volume, path string, fi FileInfo) error
	ReadVersion(ctx context.Context, volume, path, versionID string, readData bool) (FileInfo, error)
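The signature change above is the heart of the optimization: callers now hand the disk layer all versions of an object bundled into one FileInfoVersions, so xl.meta can be read and rewritten once per object instead of once per version. A hedged sketch of the kind of grouping a caller might do; groupVersionsByObject is a hypothetical helper, and the struct fields are pared-down stand-ins:

package main

import "fmt"

// Minimal stand-ins for the MinIO types involved (illustrative only).
type FileInfo struct {
	Name      string
	VersionID string
}

type FileInfoVersions struct {
	Volume   string
	Name     string
	Versions []FileInfo
}

// groupVersionsByObject bundles all versions of the same object into a
// single FileInfoVersions entry, the shape the new DeleteVersions expects.
func groupVersionsByObject(volume string, fis []FileInfo) []FileInfoVersions {
	idx := make(map[string]int)
	var out []FileInfoVersions
	for _, fi := range fis {
		i, ok := idx[fi.Name]
		if !ok {
			i = len(out)
			out = append(out, FileInfoVersions{Volume: volume, Name: fi.Name})
			idx[fi.Name] = i
		}
		out[i].Versions = append(out[i].Versions, fi)
	}
	return out
}

func main() {
	fis := []FileInfo{{"a.txt", "v1"}, {"a.txt", "v2"}, {"b.txt", "v1"}}
	for _, fiv := range groupVersionsByObject("bucket", fis) {
		fmt.Println(fiv.Name, len(fiv.Versions)) // a.txt 2, b.txt 1
	}
}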
@@ -590,7 +590,7 @@ func (client *storageRESTClient) Delete(ctx context.Context, volume string, path
}

// DeleteVersions - deletes list of specified versions if present
func (client *storageRESTClient) DeleteVersions(ctx context.Context, volume string, versions []FileInfo) (errs []error) {
func (client *storageRESTClient) DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions) (errs []error) {
	if len(versions) == 0 {
		return errs
	}

@@ -18,7 +18,7 @@
package cmd

const (
	storageRESTVersion = "v40" // Add ReplicationState field
	storageRESTVersion = "v41" // Optimized DeleteVersions API
	storageRESTVersionPrefix = SlashSeparator + storageRESTVersion
	storageRESTPrefix        = minioReservedBucketPath + "/storage"
)
@@ -30,6 +30,7 @@ import (
	"net/http"
	"os/user"
	"path"
	"runtime/debug"
	"strconv"
	"strings"
	"sync"

@@ -37,10 +38,11 @@ import (

	"github.com/tinylib/msgp/msgp"

	jwtreq "github.com/golang-jwt/jwt/request"
	jwtreq "github.com/golang-jwt/jwt/v4/request"
	"github.com/gorilla/mux"
	"github.com/minio/minio/internal/config"
	xhttp "github.com/minio/minio/internal/http"
	xioutil "github.com/minio/minio/internal/ioutil"
	xjwt "github.com/minio/minio/internal/jwt"
	"github.com/minio/minio/internal/logger"
	xnet "github.com/minio/pkg/net"

@@ -60,7 +62,6 @@ func (s *storageRESTServer) writeErrorResponse(w http.ResponseWriter, err error)
		w.WriteHeader(http.StatusForbidden)
	}
	w.Write([]byte(err.Error()))
	w.(http.Flusher).Flush()
}

// DefaultSkewTime - skew time is 15 minutes between minio peers.

@@ -156,7 +157,6 @@ func (s *storageRESTServer) DiskInfoHandler(w http.ResponseWriter, r *http.Reque
	if err != nil {
		info.Error = err.Error()
	}
	defer w.(http.Flusher).Flush()
	logger.LogIf(r.Context(), msgp.Encode(w, &info))
}

@@ -178,6 +178,12 @@ func (s *storageRESTServer) NSScannerHandler(w http.ResponseWriter, r *http.Requ
	ctx, cancel := context.WithCancel(r.Context())
	defer cancel()
	resp := streamHTTPResponse(w)
	defer func() {
		if r := recover(); r != nil {
			debug.PrintStack()
			resp.CloseWithError(fmt.Errorf("panic: %v", r))
		}
	}()
	respW := msgp.NewWriter(resp)

	// Collect updates, stream them before the full cache is sent.
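The recover block added to NSScannerHandler turns a panic in the scanner into an explicit error on the HTTP stream, so the peer sees a failed response rather than a silently truncated one. A self-contained sketch of the same pattern; guard and report are stand-ins for MinIO's streamHTTPResponse plumbing:

package main

import (
	"fmt"
	"runtime/debug"
)

// guard runs fn and converts a panic into an error via report, mirroring
// the recover/CloseWithError pattern added to NSScannerHandler above.
func guard(fn func(), report func(error)) {
	defer func() {
		if r := recover(); r != nil {
			debug.PrintStack()
			report(fmt.Errorf("panic: %v", r))
		}
	}()
	fn()
}

func main() {
	guard(func() { panic("boom") }, func(err error) {
		fmt.Println("stream closed with:", err)
	})
}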
@@ -255,7 +261,6 @@ func (s *storageRESTServer) ListVolsHandler(w http.ResponseWriter, r *http.Reque
		s.writeErrorResponse(w, err)
		return
	}
	defer w.(http.Flusher).Flush()
	logger.LogIf(r.Context(), msgp.Encode(w, VolsInfo(infos)))
}

@@ -271,7 +276,6 @@ func (s *storageRESTServer) StatVolHandler(w http.ResponseWriter, r *http.Reques
		s.writeErrorResponse(w, err)
		return
	}
	defer w.(http.Flusher).Flush()
	logger.LogIf(r.Context(), msgp.Encode(w, &info))
}

@@ -501,9 +505,10 @@ func (s *storageRESTServer) ReadAllHandler(w http.ResponseWriter, r *http.Reques
		s.writeErrorResponse(w, err)
		return
	}
	// Reuse after return.
	defer metaDataPoolPut(buf)
	w.Header().Set(xhttp.ContentLength, strconv.Itoa(len(buf)))
	w.Write(buf)
	w.(http.Flusher).Flush()
}

// ReadFileHandler - read section of a file.

@@ -540,6 +545,7 @@ func (s *storageRESTServer) ReadFileHandler(w http.ResponseWriter, r *http.Reque
		verifier = NewBitrotVerifier(BitrotAlgorithmFromString(vars[storageRESTBitrotAlgo]), hash)
	}
	buf := make([]byte, length)
	defer metaDataPoolPut(buf) // Reuse if we can.
	_, err = s.storage.ReadFile(r.Context(), volume, filePath, int64(offset), buf, verifier)
	if err != nil {
		s.writeErrorResponse(w, err)

@@ -547,7 +553,6 @@ func (s *storageRESTServer) ReadFileHandler(w http.ResponseWriter, r *http.Reque
	}
	w.Header().Set(xhttp.ContentLength, strconv.Itoa(len(buf)))
	w.Write(buf)
	w.(http.Flusher).Flush()
}

// ReadFileHandler - read section of a file.

@@ -577,13 +582,12 @@ func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http
	defer rc.Close()

	w.Header().Set(xhttp.ContentLength, strconv.Itoa(length))
	if _, err = io.Copy(w, rc); err != nil {
	if _, err = xioutil.Copy(w, rc); err != nil {
		if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
			logger.LogIf(r.Context(), err)
		}
		return
	}
	w.(http.Flusher).Flush()
}

// ListDirHandler - list a directory.

@@ -606,7 +610,6 @@ func (s *storageRESTServer) ListDirHandler(w http.ResponseWriter, r *http.Reques
		return
	}
	gob.NewEncoder(w).Encode(&entries)
	w.(http.Flusher).Flush()
}

// DeleteFileHandler - delete a file.

@@ -648,7 +651,7 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http
		return
	}

	versions := make([]FileInfo, totalVersions)
	versions := make([]FileInfoVersions, totalVersions)
	decoder := msgp.NewReader(r.Body)
	for i := 0; i < totalVersions; i++ {
		dst := &versions[i]

@@ -671,7 +674,6 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http
		}
	}
	encoder.Encode(dErrsResp)
	w.(http.Flusher).Flush()
}

// RenameDataHandler - renames a meta object and data dir to destination.
@@ -960,13 +962,22 @@ func streamHTTPResponse(w http.ResponseWriter) *httpStreamResponse {
	return &h
}

var poolBuf8k = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 8192)
		return &b
	},
}

// waitForHTTPStream will wait for responses where
// streamHTTPResponse has been used.
// The returned reader contains the payload and must be closed if no error is returned.
func waitForHTTPStream(respBody io.ReadCloser, w io.Writer) error {
	var tmp [1]byte
	// 8K copy buffer, reused for less allocs...
	var buf [8 << 10]byte
	bufp := poolBuf8k.Get().(*[]byte)
	buf := *bufp
	defer poolBuf8k.Put(bufp)
	for {
		_, err := io.ReadFull(respBody, tmp[:])
		if err != nil {

@@ -976,7 +987,7 @@ func waitForHTTPStream(respBody io.ReadCloser, w io.Writer) error {
		switch tmp[0] {
		case 0:
			// 0 is unbuffered, copy the rest.
			_, err := io.CopyBuffer(w, respBody, buf[:])
			_, err := io.CopyBuffer(w, respBody, buf)
			if err == io.EOF {
				return nil
			}

@@ -995,7 +1006,7 @@ func waitForHTTPStream(respBody io.ReadCloser, w io.Writer) error {
			return err
		}
		length := binary.LittleEndian.Uint32(tmp[:])
		_, err = io.CopyBuffer(w, io.LimitReader(respBody, int64(length)), buf[:])
		_, err = io.CopyBuffer(w, io.LimitReader(respBody, int64(length)), buf)
		if err != nil {
			return err
		}
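The change above replaces a fresh 8 KiB array per waitForHTTPStream call with a shared sync.Pool. A minimal standalone sketch of the pattern; note the pool stores *[]byte rather than []byte, which avoids allocating a new interface box every time the slice header is put back:

package main

import (
	"fmt"
	"sync"
)

// Pool of reusable 8 KiB copy buffers.
var bufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 8<<10)
		return &b
	},
}

func main() {
	bufp := bufPool.Get().(*[]byte)
	defer bufPool.Put(bufp)
	buf := *bufp
	fmt.Println(len(buf)) // 8192, reused across calls when the pool retains it
}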
@@ -1043,7 +1054,6 @@ func (s *storageRESTServer) VerifyFileHandler(w http.ResponseWriter, r *http.Req
		vresp.Err = StorageErr(err.Error())
	}
	encoder.Encode(vresp)
	w.(http.Flusher).Flush()
}

// A single function to write certain errors to be fatal
@@ -234,14 +234,16 @@ func (sts *stsAPIHandlers) AssumeRole(w http.ResponseWriter, r *http.Request) {
		}
	}

	var err error
	m := make(map[string]interface{})
	m[expClaim], err = openid.GetDefaultExpiration(r.Form.Get(stsDurationSeconds))
	duration, err := openid.GetDefaultExpiration(r.Form.Get(stsDurationSeconds))
	if err != nil {
		writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
		return
	}

	m := map[string]interface{}{
		expClaim: UTCNow().Add(duration).Unix(),
	}

	policies, err := globalIAMSys.PolicyDBGet(user.AccessKey, false)
	if err != nil {
		writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
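The refactor above computes the requested duration once and then builds the claims map with an absolute expiry timestamp, instead of assigning into the map mid-validation. A compact sketch of the resulting flow; UTCNow is MinIO's wrapper for time.Now().UTC(), and the 15-minute duration is a stand-in for the value openid.GetDefaultExpiration would return:

package main

import (
	"fmt"
	"time"
)

func main() {
	duration := 15 * time.Minute // stand-in for openid.GetDefaultExpiration(...)
	m := map[string]interface{}{
		"exp": time.Now().UTC().Add(duration).Unix(), // absolute expiry claim
	}
	fmt.Println(m["exp"])
}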
@@ -742,6 +744,31 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h
			writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidClientCertificate, err)
			return
		}
	} else {
		// Technically, there is no security argument for verifying the key usage
		// when we don't verify that the certificate has been issued by a trusted CA.
		// Any client can create a certificate with arbitrary key usage settings.
		//
		// However, this check ensures that a certificate with an invalid key usage
		// gets rejected even when we skip certificate verification. This helps
		// clients detect malformed certificates during testing instead of e.g.
		// a self-signed certificate that works while a comparable certificate
		// issued by a trusted CA fails due to the MinIO server being less strict
		// w.r.t. key usage verification.
		//
		// Basically, MinIO is more consistent (from a client perspective) when
		// we verify the key usage all the time.
		var validKeyUsage bool
		for _, usage := range certificate.ExtKeyUsage {
			if usage == x509.ExtKeyUsageAny || usage == x509.ExtKeyUsageClientAuth {
				validKeyUsage = true
				break
			}
		}
		if !validKeyUsage {
			writeSTSErrorResponse(ctx, w, true, ErrSTSMissingParameter, errors.New("certificate is not valid for client authentication"))
			return
		}
	}

	// We map the X.509 subject common name to the policy. So, a client

@@ -773,7 +800,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h
	parentUser := "tls:" + certificate.Subject.CommonName

	tmpCredentials, err := auth.GetNewCredentialsWithMetadata(map[string]interface{}{
		expClaim: time.Now().UTC().Add(expiry).Unix(),
		expClaim:    UTCNow().Add(expiry).Unix(),
		parentClaim: parentUser,
		subClaim:    certificate.Subject.CommonName,
		audClaim:    certificate.Subject.Organization,

340 cmd/sts-handlers_test.go Normal file
@@ -0,0 +1,340 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"fmt"
	"os"
	"strings"
	"testing"

	"github.com/minio/madmin-go"
	minio "github.com/minio/minio-go/v7"
	cr "github.com/minio/minio-go/v7/pkg/credentials"
)

func runAllIAMSTSTests(suite *TestSuiteIAM, c *check) {
	suite.SetUpSuite(c)
	suite.TestSTS(c)
	suite.TearDownSuite(c)
}

func TestIAMInternalIDPSTSServerSuite(t *testing.T) {
	baseTestCases := []TestSuiteCommon{
		// Init and run test on FS backend with signature v4.
		{serverType: "FS", signer: signerV4},
		// Init and run test on FS backend, with tls enabled.
		{serverType: "FS", signer: signerV4, secure: true},
		// Init and run test on Erasure backend.
		{serverType: "Erasure", signer: signerV4},
		// Init and run test on ErasureSet backend.
		{serverType: "ErasureSet", signer: signerV4},
	}
	testCases := []*TestSuiteIAM{}
	for _, bt := range baseTestCases {
		testCases = append(testCases,
			newTestSuiteIAM(bt, false),
			newTestSuiteIAM(bt, true),
		)
	}
	for i, testCase := range testCases {
		etcdStr := ""
		if testCase.withEtcdBackend {
			etcdStr = " (with etcd backend)"
		}
		t.Run(
			fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
			func(t *testing.T) {
				runAllIAMSTSTests(testCase, &check{t, testCase.serverType})
			},
		)
	}
}

func (s *TestSuiteIAM) TestSTS(c *check) {
	ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
	defer cancel()

	bucket := getRandomBucketName()
	err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
	if err != nil {
		c.Fatalf("bucket create error: %v", err)
	}

	// Create policy, user and associate policy
	policy := "mypolicy"
	policyBytes := []byte(fmt.Sprintf(`{
 "Version": "2012-10-17",
 "Statement": [
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject",
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
	if err != nil {
		c.Fatalf("policy add error: %v", err)
	}

	accessKey, secretKey := mustGenerateCredentials(c)
	err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
	if err != nil {
		c.Fatalf("Unable to set user: %v", err)
	}

	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
	if err != nil {
		c.Fatalf("Unable to set policy: %v", err)
	}

	// confirm that the user is able to access the bucket
	uClient := s.getUserClient(c, accessKey, secretKey, "")
	c.mustListObjects(ctx, uClient, bucket)

	assumeRole := cr.STSAssumeRole{
		Client:      s.TestSuiteCommon.client,
		STSEndpoint: s.endPoint,
		Options: cr.STSAssumeRoleOptions{
			AccessKey: accessKey,
			SecretKey: secretKey,
			Location:  "",
		},
	}

	value, err := assumeRole.Retrieve()
	if err != nil {
		c.Fatalf("err calling assumeRole: %v", err)
	}

	minioClient, err := minio.New(s.endpoint, &minio.Options{
		Creds:     cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
		Secure:    s.secure,
		Transport: s.TestSuiteCommon.client.Transport,
	})
	if err != nil {
		c.Fatalf("Error initializing client: %v", err)
	}

	// Validate that the client from sts creds can access the bucket.
	c.mustListObjects(ctx, minioClient, bucket)

	// Validate that the client cannot remove any objects
	err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{})
	if err.Error() != "Access Denied." {
		c.Fatalf("unexpected non-access-denied err: %v", err)
	}
}

func (s *TestSuiteIAM) GetLDAPServer(c *check) string {
	return os.Getenv(EnvTestLDAPServer)
}

// SetUpLDAP - expects to setup an LDAP test server using the test LDAP
// container and canned data from https://github.com/minio/minio-ldap-testing
func (s *TestSuiteIAM) SetUpLDAP(c *check, serverAddr string) {
	ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
	defer cancel()

	configCmds := []string{
		"identity_ldap",
		fmt.Sprintf("server_addr=%s", serverAddr),
		"server_insecure=on",
		"lookup_bind_dn=cn=admin,dc=min,dc=io",
		"lookup_bind_password=admin",
		"user_dn_search_base_dn=dc=min,dc=io",
		"user_dn_search_filter=(uid=%s)",
		"group_search_base_dn=ou=swengg,dc=min,dc=io",
		"group_search_filter=(&(objectclass=groupofnames)(member=%d))",
	}
	_, err := s.adm.SetConfigKV(ctx, strings.Join(configCmds, " "))
	if err != nil {
		c.Fatalf("unable to setup LDAP for tests: %v", err)
	}

	s.RestartIAMSuite(c)
}

const (
	EnvTestLDAPServer = "LDAP_TEST_SERVER"
)

func TestIAMWithLDAPServerSuite(t *testing.T) {
	baseTestCases := []TestSuiteCommon{
		// Init and run test on FS backend with signature v4.
		{serverType: "FS", signer: signerV4},
		// Init and run test on FS backend, with tls enabled.
		{serverType: "FS", signer: signerV4, secure: true},
		// Init and run test on Erasure backend.
		{serverType: "Erasure", signer: signerV4},
		// Init and run test on ErasureSet backend.
		{serverType: "ErasureSet", signer: signerV4},
	}
	testCases := []*TestSuiteIAM{}
	for _, bt := range baseTestCases {
		testCases = append(testCases,
			newTestSuiteIAM(bt, false),
			newTestSuiteIAM(bt, true),
		)
	}
	for i, testCase := range testCases {
		etcdStr := ""
		if testCase.withEtcdBackend {
			etcdStr = " (with etcd backend)"
		}
		t.Run(
			fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
			func(t *testing.T) {
				c := &check{t, testCase.serverType}
				suite := testCase

				ldapServer := os.Getenv(EnvTestLDAPServer)
				if ldapServer == "" {
					c.Skip("Skipping LDAP test as no LDAP server is provided.")
				}

				suite.SetUpSuite(c)
				suite.SetUpLDAP(c, ldapServer)
				suite.TestLDAPSTS(c)
				suite.TearDownSuite(c)
			},
		)
	}
}

func (s *TestSuiteIAM) TestLDAPSTS(c *check) {
	ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
	defer cancel()

	bucket := getRandomBucketName()
	err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
	if err != nil {
		c.Fatalf("bucket create error: %v", err)
	}

	// Create policy, user and associate policy
	policy := "mypolicy"
	policyBytes := []byte(fmt.Sprintf(`{
 "Version": "2012-10-17",
 "Statement": [
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject",
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
	if err != nil {
		c.Fatalf("policy add error: %v", err)
	}

	ldapID := cr.LDAPIdentity{
		Client:       s.TestSuiteCommon.client,
		STSEndpoint:  s.endPoint,
		LDAPUsername: "dillon",
		LDAPPassword: "dillon",
	}

	_, err = ldapID.Retrieve()
	if err == nil {
		c.Fatalf("Expected to fail to create a user with no associated policy!")
	}

	userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
	err = s.adm.SetPolicy(ctx, policy, userDN, false)
	if err != nil {
		c.Fatalf("Unable to set policy: %v", err)
	}

	value, err := ldapID.Retrieve()
	if err != nil {
		c.Fatalf("Expected to generate STS creds, got err: %#v", err)
	}

	minioClient, err := minio.New(s.endpoint, &minio.Options{
		Creds:     cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
		Secure:    s.secure,
		Transport: s.TestSuiteCommon.client.Transport,
	})
	if err != nil {
		c.Fatalf("Error initializing client: %v", err)
	}

	// Validate that the client from sts creds can access the bucket.
	c.mustListObjects(ctx, minioClient, bucket)

	// Validate that the client cannot remove any objects
	err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{})
	if err.Error() != "Access Denied." {
		c.Fatalf("unexpected non-access-denied err: %v", err)
	}

	// Remove the policy assignment on the user DN:
	err = s.adm.SetPolicy(ctx, "", userDN, false)
	if err != nil {
		c.Fatalf("Unable to remove policy setting: %v", err)
	}

	_, err = ldapID.Retrieve()
	if err == nil {
		c.Fatalf("Expected to fail to create a user with no associated policy!")
	}

	// Set policy via group and validate policy assignment.
	groupDN := "cn=projectb,ou=groups,ou=swengg,dc=min,dc=io"
	err = s.adm.SetPolicy(ctx, policy, groupDN, true)
	if err != nil {
		c.Fatalf("Unable to set group policy: %v", err)
	}

	value, err = ldapID.Retrieve()
	if err != nil {
		c.Fatalf("Expected to generate STS creds, got err: %#v", err)
	}

	minioClient, err = minio.New(s.endpoint, &minio.Options{
		Creds:     cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
		Secure:    s.secure,
		Transport: s.TestSuiteCommon.client.Transport,
	})
	if err != nil {
		c.Fatalf("Error initializing client: %v", err)
	}

	// Validate that the client from sts creds can access the bucket.
	c.mustListObjects(ctx, minioClient, bucket)

	// Validate that the client cannot remove any objects
	err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{})
	c.Assert(err.Error(), "Access Denied.")
}
@@ -292,13 +292,14 @@ func isSameType(obj1, obj2 interface{}) bool {
// s := StartTestServer(t,"Erasure")
// defer s.Stop()
type TestServer struct {
	Root      string
	Disks     EndpointServerPools
	AccessKey string
	SecretKey string
	Server    *httptest.Server
	Obj       ObjectLayer
	cancel    context.CancelFunc
	Root         string
	Disks        EndpointServerPools
	AccessKey    string
	SecretKey    string
	Server       *httptest.Server
	Obj          ObjectLayer
	cancel       context.CancelFunc
	rawDiskPaths []string
}

// UnstartedTestServer - Configures a temp FS/Erasure backend,

@@ -314,16 +315,22 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
		t.Fatal(err)
	}

	// set the server configuration.
	// set new server configuration.
	if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
		t.Fatalf("%s", err)
	}

	return initTestServerWithBackend(ctx, t, testServer, objLayer, disks)
}

// initializes a test server with the given object layer and disks.
func initTestServerWithBackend(ctx context.Context, t TestErrHandler, testServer TestServer, objLayer ObjectLayer, disks []string) TestServer {
	// Test Server needs to start before formatting of disks.
	// Get credential.
	credentials := globalActiveCred

	testServer.Obj = objLayer
	testServer.rawDiskPaths = disks
	testServer.Disks = mustGetPoolEndpoints(disks...)
	testServer.AccessKey = credentials.AccessKey
	testServer.SecretKey = credentials.SecretKey

@@ -334,7 +341,7 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
	}

	// Run TestServer.
	testServer.Server = httptest.NewUnstartedServer(criticalErrorHandler{corsHandler(httpHandler)})
	testServer.Server = httptest.NewUnstartedServer(setCriticalErrorHandler(corsHandler(httpHandler)))

	globalObjLayerMutex.Lock()
	globalObjectAPI = objLayer

@@ -348,9 +355,11 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {

	newAllSubsystems()

	globalEtcdClient = nil

	initAllSubsystems(ctx, objLayer)

	globalIAMSys.Init(ctx, objLayer, globalEtcdClient)
	globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)

	return testServer
}
@@ -25,6 +25,7 @@ import (
	"github.com/gorilla/mux"
	jsoniter "github.com/json-iterator/go"
	"github.com/minio/madmin-go"
	"github.com/minio/minio/internal/config/storageclass"
	"github.com/minio/minio/internal/logger"
	iampolicy "github.com/minio/pkg/iam/policy"
)

@@ -60,6 +61,12 @@ var (
		Message:    "Invalid remote tier credentials",
		StatusCode: http.StatusBadRequest,
	}
	// error returned when reserved internal names are used.
	errTierReservedName = AdminError{
		Code:       "XMinioAdminTierReserved",
		Message:    "Cannot use reserved tier name",
		StatusCode: http.StatusBadRequest,
	}
)

func (api adminAPIHandlers) AddTierHandler(w http.ResponseWriter, r *http.Request) {

@@ -92,6 +99,12 @@ func (api adminAPIHandlers) AddTierHandler(w http.ResponseWriter, r *http.Reques
		return
	}

	// Disallow remote tiers with internal storage class names
	switch cfg.Name {
	case storageclass.STANDARD, storageclass.RRS:
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errTierReservedName), r.URL)
		return
	}

	// Refresh from the disk in case we had missed notifications about edits from peers.
	if err := globalTierConfigMgr.Reload(ctx, objAPI); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

@@ -191,3 +204,33 @@ func (api adminAPIHandlers) EditTierHandler(w http.ResponseWriter, r *http.Reque

	writeSuccessNoContent(w)
}

func (api adminAPIHandlers) TierStatsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "TierStats")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	if !globalIsErasure {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	objAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListTierAction)
	if objAPI == nil || globalNotificationSys == nil || globalTierConfigMgr == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	dui, err := loadDataUsageFromBackend(ctx, objAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	data, err := json.Marshal(dui.tierStats())
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	writeSuccessResponseJSON(w, data)
}

@@ -46,6 +46,8 @@ const (
	tierConfigFile    = "tier-config.bin"
	tierConfigFormat  = 1
	tierConfigVersion = 1

	minioHotTier = "STANDARD"
)

// tierConfigPath refers to remote tier config object name

@@ -85,7 +87,6 @@ func (config *TierConfigMgr) Add(ctx context.Context, tier madmin.TierConfig) er
	defer config.Unlock()

	// check if tier name is in all caps

	tierName := tier.Name
	if tierName != strings.ToUpper(tierName) {
		return errTierNameNotUppercase

@@ -81,6 +81,9 @@ var errGroupNotEmpty = errors.New("Specified group is not empty - cannot remove
// error returned in IAM subsystem when policy doesn't exist.
var errNoSuchPolicy = errors.New("Specified canned policy does not exist")

// error returned when policy to be deleted is in use.
var errPolicyInUse = errors.New("Specified policy is in use and cannot be deleted.")

// error returned in IAM subsystem when an external users systems is configured.
var errIAMActionNotAllowed = errors.New("Specified IAM action is not allowed")
219 cmd/utils.go

@@ -973,73 +973,74 @@ func auditLogInternal(ctx context.Context, bucket, object string, opts AuditLogO
}

// Get the max throughput and iops numbers.
func speedTest(ctx context.Context, throughputSize, iopsSize int, concurrencyStart int, duration time.Duration, autotune bool) (madmin.SpeedTestResult, error) {
	var result madmin.SpeedTestResult
func speedTest(ctx context.Context, throughputSize, concurrencyStart int, duration time.Duration, autotune bool) chan madmin.SpeedTestResult {
	ch := make(chan madmin.SpeedTestResult, 1)
	go func() {
		defer close(ch)

	objAPI := newObjectLayerFn()
	if objAPI == nil {
		return result, errServerNotInitialized
	}

	concurrency := concurrencyStart

	throughputHighestGet := uint64(0)
	throughputHighestPut := uint64(0)
	var throughputHighestPutResults []SpeedtestResult
	var throughputHighestGetResults []SpeedtestResult

	for {
		select {
		case <-ctx.Done():
			// If the client got disconnected stop the speedtest.
			return result, errUnexpected
		default:
		objAPI := newObjectLayerFn()
		if objAPI == nil {
			return
		}

		results := globalNotificationSys.Speedtest(ctx, throughputSize, concurrency, duration)
		sort.Slice(results, func(i, j int) bool {
			return results[i].Endpoint < results[j].Endpoint
		})
		totalPut := uint64(0)
		totalGet := uint64(0)
		for _, result := range results {
			totalPut += result.Uploads
			totalGet += result.Downloads
		}
		if totalPut < throughputHighestPut && totalGet < throughputHighestGet {
			break
		concurrency := concurrencyStart

		throughputHighestGet := uint64(0)
		throughputHighestPut := uint64(0)
		var throughputHighestResults []SpeedtestResult

		sendResult := func() {
			var result madmin.SpeedTestResult

			durationSecs := duration.Seconds()

			result.GETStats.ThroughputPerSec = throughputHighestGet / uint64(durationSecs)
			result.GETStats.ObjectsPerSec = throughputHighestGet / uint64(throughputSize) / uint64(durationSecs)
			result.PUTStats.ThroughputPerSec = throughputHighestPut / uint64(durationSecs)
			result.PUTStats.ObjectsPerSec = throughputHighestPut / uint64(throughputSize) / uint64(durationSecs)
			for i := 0; i < len(throughputHighestResults); i++ {
				errStr := ""
				if throughputHighestResults[i].Error != "" {
					errStr = throughputHighestResults[i].Error
				}
				result.PUTStats.Servers = append(result.PUTStats.Servers, madmin.SpeedTestStatServer{
					Endpoint:         throughputHighestResults[i].Endpoint,
					ThroughputPerSec: throughputHighestResults[i].Uploads / uint64(durationSecs),
					ObjectsPerSec:    throughputHighestResults[i].Uploads / uint64(throughputSize) / uint64(durationSecs),
					Err:              errStr,
				})
				result.GETStats.Servers = append(result.GETStats.Servers, madmin.SpeedTestStatServer{
					Endpoint:         throughputHighestResults[i].Endpoint,
					ThroughputPerSec: throughputHighestResults[i].Downloads / uint64(durationSecs),
					ObjectsPerSec:    throughputHighestResults[i].Downloads / uint64(throughputSize) / uint64(durationSecs),
					Err:              errStr,
				})
			}

			numDisks := 0
			if pools, ok := objAPI.(*erasureServerPools); ok {
				for _, set := range pools.serverPools {
					numDisks = set.setCount * set.setDriveCount
				}
			}
			result.Disks = numDisks
			result.Servers = len(globalNotificationSys.peerClients) + 1
			result.Version = Version
			result.Size = throughputSize
			result.Concurrent = concurrency

			ch <- result
		}

		if totalPut > throughputHighestPut {
			throughputHighestPut = totalPut
			throughputHighestPutResults = results
		}
		if totalGet > throughputHighestGet {
			throughputHighestGet = totalGet
			throughputHighestGetResults = results
		}
		if !autotune {
			break
		}
		// Try with a higher concurrency to see if we get better throughput
		concurrency += (concurrency + 1) / 2
	}

	concurrency = concurrencyStart
	iopsHighestPut := uint64(0)
	iopsHighestGet := uint64(0)
	var iopsHighestPutResults []SpeedtestResult
	var iopsHighestGetResults []SpeedtestResult

	if autotune {
		for {
			select {
			case <-ctx.Done():
				// If the client got disconnected stop the speedtest.
				return result, errUnexpected
				return
			default:
			}
			results := globalNotificationSys.Speedtest(ctx, iopsSize, concurrency, duration)

			results := globalNotificationSys.Speedtest(ctx, throughputSize, concurrency, duration)
			sort.Slice(results, func(i, j int) bool {
				return results[i].Endpoint < results[j].Endpoint
			})

@@ -1049,85 +1050,49 @@ func speedTest(ctx context.Context, throughputSize, iopsSize int, concurrencySta
			totalPut += result.Uploads
			totalGet += result.Downloads
		}
		if totalPut < iopsHighestPut && totalGet < iopsHighestGet {

		if totalGet < throughputHighestGet {
			// Following check is for situations
			// when Writes() scale higher than Reads()
			// - practically speaking this never happens
			// and should never happen - however it has
			// been seen recently due to hardware issues
			// causes Reads() to go slower than Writes().
			//
			// Send such results anyways as this shall
			// expose a problem underneath.
			if totalPut > throughputHighestPut {
				throughputHighestResults = results
				throughputHighestPut = totalPut
				// let the client see lower value as well
				throughputHighestGet = totalGet
			}
			sendResult()
			break
		}
		if totalPut > iopsHighestPut {
			iopsHighestPut = totalPut
			iopsHighestPutResults = results

		doBreak := false
		if float64(totalGet-throughputHighestGet)/float64(totalGet) < 0.025 {
			doBreak = true
		}
		if totalGet > iopsHighestGet {
			iopsHighestGet = totalGet
			iopsHighestGetResults = results

		throughputHighestGet = totalGet
		throughputHighestResults = results
		throughputHighestPut = totalPut

		if doBreak {
			sendResult()
			break
		}

		if !autotune {
			sendResult()
			break
		}
		sendResult()
		// Try with a higher concurrency to see if we get better throughput
		concurrency += (concurrency + 1) / 2
	}
	} else {
		iopsHighestPut = throughputHighestPut
		iopsHighestGet = throughputHighestGet
		iopsHighestPutResults = throughputHighestPutResults
		iopsHighestGetResults = throughputHighestGetResults
	}

	if len(throughputHighestPutResults) != len(iopsHighestPutResults) {
		return result, errors.New("throughput and iops differ in number of nodes")
	}

	if len(throughputHighestGetResults) != len(iopsHighestGetResults) {
		return result, errors.New("throughput and iops differ in number of nodes")
	}

	durationSecs := duration.Seconds()

	result.PUTStats.ThroughputPerSec = throughputHighestPut / uint64(durationSecs)
	result.PUTStats.ObjectsPerSec = iopsHighestPut / uint64(iopsSize) / uint64(durationSecs)
	for i := 0; i < len(throughputHighestPutResults); i++ {
		errStr := ""
		if throughputHighestPutResults[i].Error != "" {
			errStr = throughputHighestPutResults[i].Error
		}
		if iopsHighestPutResults[i].Error != "" {
			errStr = iopsHighestPutResults[i].Error
		}
		result.PUTStats.Servers = append(result.PUTStats.Servers, madmin.SpeedTestStatServer{
			Endpoint:         throughputHighestPutResults[i].Endpoint,
			ThroughputPerSec: throughputHighestPutResults[i].Uploads / uint64(durationSecs),
			ObjectsPerSec:    iopsHighestPutResults[i].Uploads / uint64(iopsSize) / uint64(durationSecs),
			Err:              errStr,
		})
	}

	result.GETStats.ThroughputPerSec = throughputHighestGet / uint64(durationSecs)
	result.GETStats.ObjectsPerSec = iopsHighestGet / uint64(iopsSize) / uint64(durationSecs)
	for i := 0; i < len(throughputHighestGetResults); i++ {
		errStr := ""
		if throughputHighestGetResults[i].Error != "" {
			errStr = throughputHighestGetResults[i].Error
		}
		if iopsHighestGetResults[i].Error != "" {
			errStr = iopsHighestGetResults[i].Error
		}
		result.GETStats.Servers = append(result.GETStats.Servers, madmin.SpeedTestStatServer{
			Endpoint:         throughputHighestGetResults[i].Endpoint,
			ThroughputPerSec: throughputHighestGetResults[i].Downloads / uint64(durationSecs),
			ObjectsPerSec:    iopsHighestGetResults[i].Downloads / uint64(iopsSize) / uint64(durationSecs),
			Err:              errStr,
		})
	}

	numDisks := 0
	if pools, ok := objAPI.(*erasureServerPools); ok {
		for _, set := range pools.serverPools {
			numDisks = set.setCount * set.setDriveCount
		}
	}
	result.Disks = numDisks
	result.Servers = len(globalNotificationSys.peerClients) + 1
	result.Version = Version

	return result, nil
	}()
	return ch
}
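The refactor turns speedTest from a blocking call that returned one final result into a producer goroutine that streams intermediate results over a channel, closing it when the test ends or the client disconnects. A minimal standalone sketch of that pattern; types and the concurrency ramp are simplified stand-ins:

package main

import (
	"context"
	"fmt"
	"time"
)

type result struct{ concurrency int }

// run streams one result per autotune round; the channel is closed when the
// test finishes or the context is canceled, so callers can range over it.
func run(ctx context.Context, start int) chan result {
	ch := make(chan result, 1)
	go func() {
		defer close(ch)
		for c := start; c < start*8; c += (c + 1) / 2 {
			select {
			case <-ctx.Done():
				return
			case ch <- result{concurrency: c}:
			}
			time.Sleep(10 * time.Millisecond) // stand-in for one measurement round
		}
	}()
	return ch
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	for r := range run(ctx, 4) {
		fmt.Println("round at concurrency", r.concurrency)
	}
}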
@@ -47,7 +47,7 @@ func (az *warmBackendAzure) getDest(object string) string {
}
func (az *warmBackendAzure) tier() azblob.AccessTierType {
	for _, t := range azblob.PossibleAccessTierTypeValues() {
		if strings.ToLower(az.StorageClass) == strings.ToLower(string(t)) {
		if strings.EqualFold(az.StorageClass, string(t)) {
			return t
		}
	}

@@ -27,6 +27,8 @@ import (
	"google.golang.org/api/googleapi"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"

	xioutil "github.com/minio/minio/internal/ioutil"
)

type warmBackendGCS struct {

@@ -54,7 +56,7 @@ func (gcs *warmBackendGCS) Put(ctx context.Context, key string, data io.Reader,
	if gcs.StorageClass != "" {
		w.ObjectAttrs.StorageClass = gcs.StorageClass
	}
	if _, err := io.Copy(w, data); err != nil {
	if _, err := xioutil.Copy(w, data); err != nil {
		return "", gcsToObjectError(err, gcs.Bucket, key)
	}
@@ -50,6 +50,10 @@ func checkWarmBackend(ctx context.Context, w WarmBackend) error {
	var empty bytes.Reader
	rv, err := w.Put(ctx, probeObject, &empty, 0)
	if err != nil {
		switch err.(type) {
		case BackendDown:
			return err
		}
		return tierPermErr{
			Op:  tierPut,
			Err: err,

@@ -58,6 +62,10 @@ func checkWarmBackend(ctx context.Context, w WarmBackend) error {

	_, err = w.Get(ctx, probeObject, rv, WarmBackendGetOpts{})
	if err != nil {
		switch err.(type) {
		case BackendDown:
			return err
		}
		switch {
		case isErrBucketNotFound(err):
			return errTierBucketNotFound

@@ -72,6 +80,10 @@ func checkWarmBackend(ctx context.Context, w WarmBackend) error {
	}

	if err = w.Remove(ctx, probeObject, rv); err != nil {
		switch err.(type) {
		case BackendDown:
			return err
		}
		return tierPermErr{
			Op:  tierDelete,
			Err: err,
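The type switches added to checkWarmBackend let transport-level outages (BackendDown) surface unchanged instead of being misreported as permission errors on the probe. A self-contained sketch of the pass-through; backendDown and permErr here are hypothetical stand-ins for MinIO's BackendDown and tierPermErr:

package main

import (
	"errors"
	"fmt"
)

type backendDown struct{ msg string }

func (b backendDown) Error() string { return b.msg }

type permErr struct {
	op  string
	err error
}

func (p permErr) Error() string { return p.op + ": " + p.err.Error() }

// classify mirrors the added checks: outages are returned as-is, everything
// else is wrapped as a permission-style probe failure with the op attached.
func classify(op string, err error) error {
	if err == nil {
		return nil
	}
	var bd backendDown
	if errors.As(err, &bd) {
		return err // network outage: don't blame credentials
	}
	return permErr{op: op, err: err}
}

func main() {
	fmt.Println(classify("PUT", backendDown{"tier unreachable"}))
	fmt.Println(classify("PUT", errors.New("403 forbidden")))
}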
@@ -421,8 +421,8 @@ func (p *xlStorageDiskIDCheck) Delete(ctx context.Context, volume string, path s

// DeleteVersions deletes slice of versions, it can be same object
// or multiple objects.
func (p *xlStorageDiskIDCheck) DeleteVersions(ctx context.Context, volume string, versions []FileInfo) (errs []error) {
	// Mererly for tracing storage
func (p *xlStorageDiskIDCheck) DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions) (errs []error) {
	// Merely for tracing storage
	path := ""
	if len(versions) > 0 {
		path = versions[0].Name
@@ -463,6 +463,10 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates
			return sizeSummary{}, errSkipFile
		}
		sizeS := sizeSummary{}
		var noTiers bool
		if noTiers = globalTierConfigMgr.Empty(); !noTiers {
			sizeS.tiers = make(map[string]tierStats)
		}
		atomic.AddUint64(&globalScannerStats.accTotalObjects, 1)
		for _, version := range fivs.Versions {
			atomic.AddUint64(&globalScannerStats.accTotalVersions, 1)

@@ -472,6 +476,21 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates
				sizeS.versions++
			}
			sizeS.totalSize += sz

			// Skip tier accounting if,
			// 1. no tiers configured
			// 2. object version is a delete-marker or a free-version
			//    tracking deleted transitioned objects
			switch {
			case noTiers, oi.DeleteMarker, oi.TransitionedObject.FreeVersion:
				continue
			}
			tier := minioHotTier
			if oi.TransitionedObject.Status == lifecycle.TransitionComplete {
				tier = oi.TransitionedObject.Tier
			}
			sizeS.tiers[tier] = sizeS.tiers[tier].add(oi.tierStats())
		}
		return sizeS, nil
	})
@@ -802,13 +821,102 @@ func (s *xlStorage) ListDir(ctx context.Context, volume, dirPath string, count i
	return entries, nil
}

func (s *xlStorage) deleteVersions(ctx context.Context, volume, path string, fis ...FileInfo) error {
	buf, err := s.ReadAll(ctx, volume, pathJoin(path, xlStorageFormatFile))
	if err != nil {
		if err != errFileNotFound {
			return err
		}
		metaDataPoolPut(buf) // Never used, return it

		buf, err = s.ReadAll(ctx, volume, pathJoin(path, xlStorageFormatFileV1))
		if err != nil {
			return err
		}
	}

	if len(buf) == 0 {
		return errFileNotFound
	}

	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	if !isXL2V1Format(buf) {
		// Delete the meta file, if there are no more versions the
		// top level parent is automatically removed.
		return s.deleteFile(volumeDir, pathJoin(volumeDir, path), true)
	}

	var xlMeta xlMetaV2
	if err = xlMeta.Load(buf); err != nil {
		return err
	}

	var (
		dataDir     string
		lastVersion bool
	)

	for _, fi := range fis {
		dataDir, lastVersion, err = xlMeta.DeleteVersion(fi)
		if err != nil {
			return err
		}
		if dataDir != "" {
			versionID := fi.VersionID
			if versionID == "" {
				versionID = nullVersionID
			}
			// PR #11758 used DataDir, preserve it
			// for users who might have used master
			// branch
			if !xlMeta.data.remove(versionID, dataDir) {
				filePath := pathJoin(volumeDir, path, dataDir)
				if err = checkPathLength(filePath); err != nil {
					return err
				}
				if err = s.moveToTrash(filePath, true); err != nil {
					if err != errFileNotFound {
						return err
					}
				}
			}
		}
	}

	if !lastVersion {
		buf, err = xlMeta.AppendTo(metaDataPoolGet())
		defer metaDataPoolPut(buf)
		if err != nil {
			return err
		}

		return s.WriteAll(ctx, volume, pathJoin(path, xlStorageFormatFile), buf)
	}

	// Move xl.meta to trash
	filePath := pathJoin(volumeDir, path, xlStorageFormatFile)
	if err = checkPathLength(filePath); err != nil {
		return err
	}

	err = s.moveToTrash(filePath, false)
	if err == nil || err == errFileNotFound {
		s.deleteFile(volumeDir, pathJoin(volumeDir, path), false)
	}
	return err
}

// DeleteVersions deletes slice of versions, it can be same object
// or multiple objects.
func (s *xlStorage) DeleteVersions(ctx context.Context, volume string, versions []FileInfo) []error {
func (s *xlStorage) DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions) []error {
	errs := make([]error, len(versions))

	for i, version := range versions {
		if err := s.DeleteVersion(ctx, volume, version.Name, version, false); err != nil {
	for i, fiv := range versions {
		if err := s.deleteVersions(ctx, volume, fiv.Name, fiv.Versions...); err != nil {
			errs[i] = err
		}
	}

@@ -840,6 +948,8 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
		// Create a new xl.meta with a delete marker in it
		return s.WriteMetadata(ctx, volume, path, fi)
	}
	metaDataPoolPut(buf) // Never used, return it

	buf, err = s.ReadAll(ctx, volume, pathJoin(path, xlStorageFormatFileV1))
	if err != nil {
		if err == errFileNotFound && fi.VersionID != "" {

@@ -1213,12 +1323,29 @@ func (s *xlStorage) readAllData(volumeDir string, filePath string) (buf []byte,
		SmallFile: true,
	}
	defer r.Close()
	buf, err = ioutil.ReadAll(r)

	// Get size for precise allocation.
	stat, err := f.Stat()
	if err != nil {
		err = osErrToFileErr(err)
		buf, err = ioutil.ReadAll(r)
		return buf, osErrToFileErr(err)
	}
	if stat.IsDir() {
		return nil, errFileNotFound
	}

	return buf, err
	// Read into appropriate buffer.
	sz := stat.Size()
	if sz <= metaDataReadDefault {
		buf = metaDataPoolGet()
		buf = buf[:sz]
	} else {
		buf = make([]byte, sz)
	}
	// Read file...
	_, err = io.ReadFull(r, buf)

	return buf, osErrToFileErr(err)
}

// ReadAll reads from r until an error or EOF and returns the data it read.
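The readAllData change swaps ioutil.ReadAll's grow-and-copy loop for a stat-then-ReadFull read into an exactly sized (or pooled) buffer, avoiding repeated reallocations for files whose size is known up front. A standalone sketch of the sized-read pattern, without MinIO's metadata pool; the path is illustrative:

package main

import (
	"fmt"
	"io"
	"os"
)

// readAllSized stats the file first and allocates a buffer of exactly the
// right size, then fills it with io.ReadFull.
func readAllSized(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	buf := make([]byte, st.Size()) // a pool could serve small sizes instead
	_, err = io.ReadFull(f, buf)
	return buf, err
}

func main() {
	b, err := readAllSized("/etc/hostname")
	fmt.Println(len(b), err)
}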
@@ -443,7 +443,8 @@ func TestXLStorageReadAll(t *testing.T) {
	for i, testCase := range testCases {
		dataRead, err = xlStorage.ReadAll(context.Background(), testCase.volume, testCase.path)
		if err != testCase.err {
			t.Fatalf("TestXLStorage %d: Expected err \"%s\", got err \"%s\"", i+1, testCase.err, err)
			t.Errorf("TestXLStorage %d: Expected err \"%v\", got err \"%v\"", i+1, testCase.err, err)
			continue
		}
		if err == nil {
			if string(dataRead) != string([]byte("Hello, World")) {
@ -1,10 +1,22 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
trap 'catch' ERR
|
||||
trap 'catch $LINENO' ERR
|
||||
|
||||
# shellcheck disable=SC2120
|
||||
catch() {
|
||||
if [ $# -ne 0 ]; then
|
||||
echo "error on line $1"
|
||||
for site in sitea siteb sitec; do
|
||||
echo "$site server logs ========="
|
||||
cat "/tmp/${site}_1.log"
|
||||
echo "==========================="
|
||||
cat "/tmp/${site}_2.log"
|
||||
done
|
||||
fi
|
||||
|
||||
echo "Cleaning up instances of MinIO"
|
||||
pkill minio
|
||||
pkill -9 minio
|
||||
rm -rf /tmp/multisitea
|
||||
rm -rf /tmp/multisiteb
|
||||
rm -rf /tmp/multisitec
|
||||
|
@ -13,47 +25,47 @@ catch() {
|
|||
catch
|
||||
|
||||
set -e
|
||||
go install -v
|
||||
export MINIO_BROWSER=off
|
||||
export MINIO_ROOT_USER="minio"
|
||||
export MINIO_ROOT_PASSWORD="minio123"
|
||||
export MINIO_PROMETHEUS_AUTH_TYPE=public
|
||||
export PATH=${GOPATH}/bin:${PATH}
|
||||
|
||||
minio server --address :9001 "http://localhost:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
|
||||
"http://localhost:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 &
|
||||
minio server --address :9002 "http://localhost:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
|
||||
"http://localhost:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 &
|
||||
minio server --address 127.0.0.1:9001 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
|
||||
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 &
|
||||
minio server --address 127.0.0.1:9002 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
|
||||
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 &
|
||||
|
||||
minio server --address :9003 "http://localhost:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
|
||||
"http://localhost:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_1.log 2>&1 &
|
||||
minio server --address :9004 "http://localhost:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
|
||||
"http://localhost:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
|
||||
minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
|
||||
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_1.log 2>&1 &
|
||||
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
|
||||
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
|
||||
|
||||
minio server --address :9005 "http://localhost:9005/tmp/multisitec/data/disterasure/xl{1...4}" \
|
||||
"http://localhost:9006/tmp/multisitec/data/disterasure/xl{5...8}" >/tmp/sitec_1.log 2>&1 &
|
||||
minio server --address :9006 "http://localhost:9005/tmp/multisitec/data/disterasure/xl{1...4}" \
|
||||
"http://localhost:9006/tmp/multisitec/data/disterasure/xl{5...8}" >/tmp/sitec_2.log 2>&1 &
|
||||
minio server --address 127.0.0.1:9005 "http://127.0.0.1:9005/tmp/multisitec/data/disterasure/xl{1...4}" \
|
||||
"http://127.0.0.1:9006/tmp/multisitec/data/disterasure/xl{5...8}" >/tmp/sitec_1.log 2>&1 &
|
||||
minio server --address 127.0.0.1:9006 "http://127.0.0.1:9005/tmp/multisitec/data/disterasure/xl{1...4}" \
|
||||
"http://127.0.0.1:9006/tmp/multisitec/data/disterasure/xl{5...8}" >/tmp/sitec_2.log 2>&1 &
|
||||
|
||||
sleep 30
|
||||
|
||||
mc alias set sitea http://localhost:9001 minio minio123
|
||||
mc alias set sitea http://127.0.0.1:9001 minio minio123
|
||||
mc mb sitea/bucket
|
||||
mc version enable sitea/bucket
|
||||
mc mb -l sitea/olockbucket
|
||||
|
||||
mc alias set siteb http://localhost:9004 minio minio123
|
||||
mc alias set siteb http://127.0.0.1:9004 minio minio123
|
||||
mc mb siteb/bucket/
|
||||
mc version enable siteb/bucket/
|
||||
mc mb -l siteb/olockbucket/
|
||||
|
||||
mc alias set sitec http://localhost:9006 minio minio123
|
||||
mc alias set sitec http://127.0.0.1:9006 minio minio123
|
||||
mc mb sitec/bucket/
|
||||
mc version enable sitec/bucket/
|
||||
mc mb -l sitec/olockbucket
|
||||
|
||||
echo "adding replication config for site a -> site b"
|
||||
remote_arn=$(mc admin bucket remote add sitea/bucket/ \
|
||||
http://minio:minio123@localhost:9004/bucket \
|
||||
http://minio:minio123@127.0.0.1:9004/bucket \
|
||||
--service "replication" --json | jq -r ".RemoteARN")
|
||||
echo "adding replication rule for a -> b : ${remote_arn}"
|
||||
sleep 1
|
||||
|
@ -64,7 +76,7 @@ sleep 1
|
|||
|
||||
echo "adding replication config for site b -> site a"
|
||||
remote_arn=$(mc admin bucket remote add siteb/bucket/ \
|
||||
http://minio:minio123@localhost:9001/bucket \
|
||||
http://minio:minio123@127.0.0.1:9001/bucket \
|
||||
--service "replication" --json | jq -r ".RemoteARN")
|
||||
sleep 1
|
||||
echo "adding replication rule for b -> a : ${remote_arn}"
|
||||
|
@ -75,7 +87,7 @@ sleep 1
|
|||
|
||||
echo "adding replication config for site a -> site c"
|
||||
remote_arn=$(mc admin bucket remote add sitea/bucket/ \
|
||||
http://minio:minio123@localhost:9006/bucket \
|
||||
http://minio:minio123@127.0.0.1:9006/bucket \
|
||||
--service "replication" --json | jq -r ".RemoteARN")
|
||||
sleep 1
|
||||
echo "adding replication rule for a -> c : ${remote_arn}"
|
||||
|
@ -85,7 +97,7 @@ mc replicate add sitea/bucket/ \
|
|||
sleep 1
echo "adding replication config for site c -> site a"
remote_arn=$(mc admin bucket remote add sitec/bucket/ \
	http://minio:minio123@localhost:9001/bucket \
	http://minio:minio123@127.0.0.1:9001/bucket \
	--service "replication" --json | jq -r ".RemoteARN")
sleep 1
echo "adding replication rule for c -> a : ${remote_arn}"
@@ -95,7 +107,7 @@ mc replicate add sitec/bucket/ \
sleep 1
echo "adding replication config for site b -> site c"
remote_arn=$(mc admin bucket remote add siteb/bucket/ \
	http://minio:minio123@localhost:9006/bucket \
	http://minio:minio123@127.0.0.1:9006/bucket \
	--service "replication" --json | jq -r ".RemoteARN")
sleep 1
echo "adding replication rule for b -> c : ${remote_arn}"
@@ -106,7 +118,7 @@ sleep 1
echo "adding replication config for site c -> site b"
|
||||
remote_arn=$(mc admin bucket remote add sitec/bucket \
|
||||
http://minio:minio123@localhost:9004/bucket \
|
||||
http://minio:minio123@127.0.0.1:9004/bucket \
|
||||
--service "replication" --json | jq -r ".RemoteARN")
|
||||
sleep 1
|
||||
echo "adding replication rule for c -> b : ${remote_arn}"
|
||||
|
@@ -116,7 +128,7 @@ mc replicate add sitec/bucket/ \
sleep 1
echo "adding replication config for olockbucket site a -> site b"
remote_arn=$(mc admin bucket remote add sitea/olockbucket/ \
	http://minio:minio123@localhost:9004/olockbucket \
	http://minio:minio123@127.0.0.1:9004/olockbucket \
	--service "replication" --json | jq -r ".RemoteARN")
sleep 1
echo "adding replication rule for olockbucket a -> b : ${remote_arn}"
@@ -126,7 +138,7 @@ mc replicate add sitea/olockbucket/ \
sleep 1
echo "adding replication config for site b -> site a"
remote_arn=$(mc admin bucket remote add siteb/olockbucket/ \
	http://minio:minio123@localhost:9001/olockbucket \
	http://minio:minio123@127.0.0.1:9001/olockbucket \
	--service "replication" --json | jq -r ".RemoteARN")
sleep 1
echo "adding replication rule for olockbucket b -> a : ${remote_arn}"
@@ -136,7 +148,7 @@ mc replicate add siteb/olockbucket/ \
sleep 1
echo "adding replication config for olockbucket site a -> site c"
remote_arn=$(mc admin bucket remote add sitea/olockbucket/ \
	http://minio:minio123@localhost:9006/olockbucket \
	http://minio:minio123@127.0.0.1:9006/olockbucket \
	--service "replication" --json | jq -r ".RemoteARN")
sleep 1
echo "adding replication rule for olockbucket a -> c : ${remote_arn}"
@@ -146,7 +158,7 @@ mc replicate add sitea/olockbucket/ \
sleep 1
echo "adding replication config for site c -> site a"
remote_arn=$(mc admin bucket remote add sitec/olockbucket/ \
	http://minio:minio123@localhost:9001/olockbucket \
	http://minio:minio123@127.0.0.1:9001/olockbucket \
	--service "replication" --json | jq -r ".RemoteARN")
sleep 1
echo "adding replication rule for olockbucket c -> a : ${remote_arn}"
@@ -156,7 +168,7 @@ mc replicate add sitec/olockbucket/ \
sleep 1
echo "adding replication config for site b -> site c"
remote_arn=$(mc admin bucket remote add siteb/olockbucket/ \
	http://minio:minio123@localhost:9006/olockbucket \
	http://minio:minio123@127.0.0.1:9006/olockbucket \
	--service "replication" --json | jq -r ".RemoteARN")
sleep 1
echo "adding replication rule for olockbucket b -> c : ${remote_arn}"
@@ -166,7 +178,7 @@ mc replicate add siteb/olockbucket/ \
sleep 1
echo "adding replication config for site c -> site b"
remote_arn=$(mc admin bucket remote add sitec/olockbucket \
	http://minio:minio123@localhost:9004/olockbucket \
	http://minio:minio123@127.0.0.1:9004/olockbucket \
	--service "replication" --json | jq -r ".RemoteARN")
sleep 1
echo "adding replication rule for olockbucket c -> b : ${remote_arn}"
@@ -175,11 +187,33 @@ mc replicate add sitec/olockbucket/ \
--replicate "existing-objects,delete,delete-marker,replica-metadata-sync" --priority 3
|
||||
sleep 1
|
||||
|
||||
echo "Set default governance retention 30d"
|
||||
mc retention set --default governance 30d sitea/olockbucket
|
||||
|
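# Note (editorial): with a default governance retention of 30d, every object
# written to sitea/olockbucket is WORM-protected for 30 days; a specific
# object version can only be removed early by a caller allowed to bypass
# governance. A sketch, assuming mc's --bypass flag and a hypothetical
# ${vid} version id:
mc rm --version-id "${vid}" sitea/olockbucket/hosts          # refused while under retention
mc rm --bypass --version-id "${vid}" sitea/olockbucket/hosts # allowed with s3:BypassGovernanceRetention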
echo "Copying data to source sitea"
echo "Copying data to source sitea/bucket"
mc cp --quiet /etc/hosts sitea/bucket
sleep 1

echo "Copying data to source sitea/olockbucket"
mc cp --quiet /etc/hosts sitea/olockbucket
sleep 1
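# Note (editorial): replication is asynchronous, hence the short sleeps
# before the verification below. For anything larger than /etc/hosts,
# polling the replication backlog would be more robust; a sketch, assuming
# the `mc replicate status` subcommand of current mc releases:
mc replicate status sitea/bucket
mc replicate status sitea/olockbucket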
echo "Verifying the metadata difference between source and target"
diff -pruN <(mc stat --json sitea/bucket/hosts | jq .) <(mc stat --json siteb/bucket/hosts | jq .)
diff -pruN <(mc stat --json sitea/bucket/hosts | jq .) <(mc stat --json sitec/bucket/hosts | jq .)
if diff -pruN <(mc stat --json sitea/bucket/hosts | jq .) <(mc stat --json siteb/bucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then
	echo "verified sitea-> COMPLETED, siteb-> REPLICA"
fi

if diff -pruN <(mc stat --json sitea/bucket/hosts | jq .) <(mc stat --json sitec/bucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then
	echo "verified sitea-> COMPLETED, sitec-> REPLICA"
fi

echo "Verifying the metadata difference between source and target"
if diff -pruN <(mc stat --json sitea/olockbucket/hosts | jq .) <(mc stat --json siteb/olockbucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then
	echo "verified sitea-> COMPLETED, siteb-> REPLICA"
fi

if diff -pruN <(mc stat --json sitea/olockbucket/hosts | jq .) <(mc stat --json sitec/olockbucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then
	echo "verified sitea-> COMPLETED, sitec-> REPLICA"
fi
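# Note (editorial): the checks above only echo a confirmation when the
# replication status metadata differs as expected (COMPLETED on the source,
# REPLICA on the target); they never fail the script on a mismatch. A
# stricter variant could exit non-zero instead; a sketch, with the jq field
# path into `mc stat --json` output being an assumption:
src_status=$(mc stat --json sitea/bucket/hosts | jq -r '.replicationStatus')
dst_status=$(mc stat --json siteb/bucket/hosts | jq -r '.replicationStatus')
if [ "${src_status}" != "COMPLETED" ] || [ "${dst_status}" != "REPLICA" ]; then
	echo "replication verification failed: got ${src_status}/${dst_status}"
	exit 1
fi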
catch
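# Note (editorial): `catch` is presumably an error handler defined in the
# part of the script elided from this diff. A typical shape for such a
# handler, shown only for context (an assumption, not the actual elided
# definition):
#
#   catch() {
#       if [ $? -ne 0 ]; then
#           echo "script failed; dumping server logs"
#           cat /tmp/siteb_1.log /tmp/siteb_2.log /tmp/sitec_1.log /tmp/sitec_2.log
#           exit 1
#       fi
#   }
#   trap catch EXIT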