Merge branch 'master' into cput

Commit 2604e8d54c by Harshavardhana, 2021-11-09 16:43:51 -08:00 (committed by GitHub)
25 changed files with 813 additions and 358 deletions

.github/workflows/iam-integrations.yaml (new file, 125 lines)

@ -0,0 +1,125 @@
name: IAM integration with external systems
on:
pull_request:
branches:
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
jobs:
ldap-test:
name: LDAP Tests with Go ${{ matrix.go-version }}
runs-on: ubuntu-latest
services:
openldap:
image: quay.io/minio/openldap
ports:
- "389:389"
- "636:636"
env:
LDAP_ORGANIZATION: "MinIO Inc"
LDAP_DOMAIN: "min.io"
LDAP_ADMIN_PASSWORD: "admin"
strategy:
matrix:
go-version: [1.16.x, 1.17.x]
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Test LDAP
env:
LDAP_TEST_SERVER: "localhost:389"
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-iam
etcd-test:
name: Etcd Backend Tests with Go ${{ matrix.go-version }}
runs-on: ubuntu-latest
services:
etcd:
image: "quay.io/coreos/etcd:v3.5.1"
env:
ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379"
ETCD_ADVERTISE_CLIENT_URLS: "http://0.0.0.0:2379"
ports:
- "2379:2379"
options: >-
--health-cmd "etcdctl endpoint health"
--health-interval 10s
--health-timeout 5s
--health-retries 5
strategy:
matrix:
go-version: [1.16.x, 1.17.x]
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Test Etcd IAM backend
env:
ETCD_SERVER: "http://localhost:2379"
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-iam
iam-etcd-test:
name: Etcd Backend + LDAP Tests with Go ${{ matrix.go-version }}
runs-on: ubuntu-latest
services:
openldap:
image: quay.io/minio/openldap
ports:
- "389:389"
- "636:636"
env:
LDAP_ORGANIZATION: "MinIO Inc"
LDAP_DOMAIN: "min.io"
LDAP_ADMIN_PASSWORD: "admin"
etcd:
image: "quay.io/coreos/etcd:v3.5.1"
env:
ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379"
ETCD_ADVERTISE_CLIENT_URLS: "http://0.0.0.0:2379"
ports:
- "2379:2379"
options: >-
--health-cmd "etcdctl endpoint health"
--health-interval 10s
--health-timeout 5s
--health-retries 5
strategy:
matrix:
go-version: [1.16.x, 1.17.x]
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Test Etcd IAM backend with LDAP IDP
env:
ETCD_SERVER: "http://localhost:2379"
LDAP_TEST_SERVER: "localhost:389"
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-iam


@ -1,45 +0,0 @@
name: External IDP Integration Tests
on:
pull_request:
branches:
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
jobs:
ldap-test:
name: LDAP Tests with Go ${{ matrix.go-version }}
runs-on: ubuntu-latest
services:
openldap:
image: quay.io/minio/openldap
ports:
- 389:389
- 636:636
env:
LDAP_ORGANIZATION: "MinIO Inc"
LDAP_DOMAIN: "min.io"
LDAP_ADMIN_PASSWORD: "admin"
strategy:
matrix:
go-version: [1.16.x, 1.17.x]
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Test LDAP
env:
LDAP_TEST_SERVER: "localhost:389"
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-ldap


@ -46,11 +46,11 @@ test-race: verifiers build
@echo "Running unit tests under -race"
@(env bash $(PWD)/buildscripts/race.sh)
test-ldap: build
@echo "Running tests for LDAP integration"
@CGO_ENABLED=0 go test -tags kqueue -v -run TestIAMWithLDAPServerSuite ./cmd
@echo "Running tests for LDAP integration with -race"
@CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAMWithLDAPServerSuite ./cmd
test-iam: build
@echo "Running tests for IAM (external IDP, etcd backends)"
@CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
@echo "Running tests for IAM (external IDP, etcd backends) with -race"
@CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
verify: ## verify minio various setups
@echo "Verifying build with race"


@ -15,6 +15,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//go:build !race
// +build !race
// Tests in this file are not run under the `-race` flag as they are too slow
@ -40,20 +41,34 @@ func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {
}
func TestIAMInternalIDPConcurrencyServerSuite(t *testing.T) {
testCases := []*TestSuiteIAM{
baseTestCases := []TestSuiteCommon{
// Init and run test on FS backend with signature v4.
newTestSuiteIAM(TestSuiteCommon{serverType: "FS", signer: signerV4}),
{serverType: "FS", signer: signerV4},
// Init and run test on FS backend, with tls enabled.
newTestSuiteIAM(TestSuiteCommon{serverType: "FS", signer: signerV4, secure: true}),
{serverType: "FS", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
newTestSuiteIAM(TestSuiteCommon{serverType: "Erasure", signer: signerV4}),
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
newTestSuiteIAM(TestSuiteCommon{serverType: "ErasureSet", signer: signerV4}),
{serverType: "ErasureSet", signer: signerV4},
}
testCases := []*TestSuiteIAM{}
for _, bt := range baseTestCases {
testCases = append(testCases,
newTestSuiteIAM(bt, false),
newTestSuiteIAM(bt, true),
)
}
for i, testCase := range testCases {
t.Run(fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.serverType), func(t *testing.T) {
runAllIAMConcurrencyTests(testCase, &check{t, testCase.serverType})
})
etcdStr := ""
if testCase.withEtcdBackend {
etcdStr = " (with etcd backend)"
}
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
func(t *testing.T) {
runAllIAMConcurrencyTests(testCase, &check{t, testCase.serverType})
},
)
}
}


@ -20,6 +20,7 @@ package cmd
import (
"context"
"fmt"
"os"
"strings"
"testing"
"time"
@ -39,13 +40,16 @@ const (
type TestSuiteIAM struct {
TestSuiteCommon
// Flag to turn on tests for etcd backend IAM
withEtcdBackend bool
endpoint string
adm *madmin.AdminClient
client *minio.Client
}
func newTestSuiteIAM(c TestSuiteCommon) *TestSuiteIAM {
return &TestSuiteIAM{TestSuiteCommon: c}
func newTestSuiteIAM(c TestSuiteCommon, withEtcdBackend bool) *TestSuiteIAM {
return &TestSuiteIAM{TestSuiteCommon: c, withEtcdBackend: withEtcdBackend}
}
func (s *TestSuiteIAM) iamSetup(c *check) {
@ -73,10 +77,42 @@ func (s *TestSuiteIAM) iamSetup(c *check) {
}
}
const (
EnvTestEtcdBackend = "ETCD_SERVER"
)
func (s *TestSuiteIAM) setUpEtcd(c *check, etcdServer string) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
configCmds := []string{
"etcd",
"endpoints=" + etcdServer,
"path_prefix=" + mustGetUUID(),
}
_, err := s.adm.SetConfigKV(ctx, strings.Join(configCmds, " "))
if err != nil {
c.Fatalf("unable to setup Etcd for tests: %v", err)
}
s.RestartIAMSuite(c)
}
func (s *TestSuiteIAM) SetUpSuite(c *check) {
// If etcd backend is specified and etcd server is not present, the test
// is skipped.
etcdServer := os.Getenv(EnvTestEtcdBackend)
if s.withEtcdBackend && etcdServer == "" {
c.Skip("Skipping etcd backend IAM test as no etcd server is configured.")
}
s.TestSuiteCommon.SetUpSuite(c)
s.iamSetup(c)
if s.withEtcdBackend {
s.setUpEtcd(c, etcdServer)
}
}
func (s *TestSuiteIAM) RestartIAMSuite(c *check) {
@ -108,20 +144,34 @@ func runAllIAMTests(suite *TestSuiteIAM, c *check) {
}
func TestIAMInternalIDPServerSuite(t *testing.T) {
testCases := []*TestSuiteIAM{
baseTestCases := []TestSuiteCommon{
// Init and run test on FS backend with signature v4.
newTestSuiteIAM(TestSuiteCommon{serverType: "FS", signer: signerV4}),
{serverType: "FS", signer: signerV4},
// Init and run test on FS backend, with tls enabled.
newTestSuiteIAM(TestSuiteCommon{serverType: "FS", signer: signerV4, secure: true}),
{serverType: "FS", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
newTestSuiteIAM(TestSuiteCommon{serverType: "Erasure", signer: signerV4}),
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
newTestSuiteIAM(TestSuiteCommon{serverType: "ErasureSet", signer: signerV4}),
{serverType: "ErasureSet", signer: signerV4},
}
testCases := []*TestSuiteIAM{}
for _, bt := range baseTestCases {
testCases = append(testCases,
newTestSuiteIAM(bt, false),
newTestSuiteIAM(bt, true),
)
}
for i, testCase := range testCases {
t.Run(fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.serverType), func(t *testing.T) {
runAllIAMTests(testCase, &check{t, testCase.serverType})
})
etcdStr := ""
if testCase.withEtcdBackend {
etcdStr = " (with etcd backend)"
}
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
func(t *testing.T) {
runAllIAMTests(testCase, &check{t, testCase.serverType})
},
)
}
}


@ -22,6 +22,7 @@ import (
"context"
"encoding/binary"
"errors"
"io/fs"
"math"
"math/rand"
"net/http"
@ -816,14 +817,13 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
// scannerItem represents each file while walking.
type scannerItem struct {
Path string
Typ os.FileMode
Path string
bucket string // Bucket.
prefix string // Only the prefix if any, does not have final object name.
objectName string // Only the object name without prefixes.
lifeCycle *lifecycle.Lifecycle
replication replicationConfig
lifeCycle *lifecycle.Lifecycle
Typ fs.FileMode
heal bool // Has the object been selected for heal check?
debug bool
}


@ -28,6 +28,7 @@ import (
"path"
"sort"
"strings"
"sync"
"syscall"
"time"
@ -407,6 +408,7 @@ func (n *hdfsObjects) listDirFactory() minio.ListDirFunc {
// ListObjects lists all blobs in HDFS bucket filtered by prefix.
func (n *hdfsObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
var mutex sync.Mutex
fileInfos := make(map[string]os.FileInfo)
targetPath := n.hdfsPathJoin(bucket, prefix)
@ -430,6 +432,9 @@ func (n *hdfsObjects) ListObjects(ctx context.Context, bucket, prefix, marker, d
}
getObjectInfo := func(ctx context.Context, bucket, entry string) (minio.ObjectInfo, error) {
mutex.Lock()
defer mutex.Unlock()
filePath := path.Clean(n.hdfsPathJoin(bucket, entry))
fi, ok := fileInfos[filePath]


@ -21,6 +21,7 @@ import (
"net"
"net/http"
"path"
"runtime/debug"
"strings"
"sync/atomic"
"time"
@ -28,7 +29,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
xnet "github.com/minio/pkg/net"
humanize "github.com/dustin/go-humanize"
"github.com/dustin/go-humanize"
"github.com/minio/minio/internal/config/dns"
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
@ -451,18 +452,24 @@ func addCustomHeaders(h http.Handler) http.Handler {
})
}
// criticalErrorHandler handles critical server failures caused by
// criticalErrorHandler handles panics and fatal errors by
// `panic(logger.ErrCritical)` as done by `logger.CriticalIf`.
//
// It should be always the first / highest HTTP handler.
func setCriticalErrorHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err == logger.ErrCritical { // handle
if rec := recover(); rec == logger.ErrCritical { // handle
stack := debug.Stack()
logger.Error("critical: \"%s %s\": %v\n%s", r.Method, r.URL, rec, string(stack))
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
return
} else if rec != nil {
stack := debug.Stack()
logger.Error("panic: \"%s %s\": %v\n%s", r.Method, r.URL, rec, string(stack))
// Try to write an error response, upstream may not have written header.
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
return
} else if err != nil {
panic(err) // forward other panic calls
}
}()
h.ServeHTTP(w, r)
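
The reworked handler no longer re-panics on arbitrary panics: any recovered value other than logger.ErrCritical is now also logged with its stack and converted into a generic internal-error response. A minimal, self-contained sketch of that recover-log-respond middleware pattern follows; it assumes nothing about MinIO's internal helpers, and the handler names and response body are hypothetical.

package main

import (
    "log"
    "net/http"
    "runtime/debug"
)

// recoverMiddleware mirrors the pattern above: recover any panic from the
// wrapped handler, log the stack, and answer with a 500 instead of crashing.
func recoverMiddleware(h http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        defer func() {
            if rec := recover(); rec != nil {
                log.Printf("panic: \"%s %s\": %v\n%s", r.Method, r.URL, rec, debug.Stack())
                // Best effort: the wrapped handler may not have written a header yet.
                http.Error(w, "internal server error", http.StatusInternalServerError)
            }
        }()
        h.ServeHTTP(w, r)
    })
}

func main() {
    mux := http.NewServeMux()
    mux.HandleFunc("/boom", func(w http.ResponseWriter, r *http.Request) { panic("boom") })
    log.Fatal(http.ListenAndServe(":8080", recoverMiddleware(mux)))
}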


@ -70,7 +70,11 @@ type IAMEtcdStore struct {
}
func newIAMEtcdStore(client *etcd.Client, usersSysType UsersSysType) *IAMEtcdStore {
return &IAMEtcdStore{client: client, usersSysType: usersSysType}
return &IAMEtcdStore{
iamCache: newIamCache(),
client: client,
usersSysType: usersSysType,
}
}
func (ies *IAMEtcdStore) rlock() *iamCache {


@ -474,8 +474,8 @@ func (store *IAMStoreSys) GetMappedPolicy(name string, isGroup bool) (MappedPoli
// change (e.g. peer notification for object storage and etcd watch
// notification).
func (store *IAMStoreSys) GroupNotificationHandler(ctx context.Context, group string) error {
cache := store.rlock()
defer store.runlock()
cache := store.lock()
defer store.unlock()
err := store.loadGroup(ctx, group, cache.iamGroupsMap)
if err != nil && err != errNoSuchGroup {
@ -730,8 +730,8 @@ func (store *IAMStoreSys) GetGroupDescription(group string) (gd madmin.GroupDesc
// ListGroups - lists groups. Since this is not going to be a frequent
// operation, we fetch this info from storage, and refresh the cache as well.
func (store *IAMStoreSys) ListGroups(ctx context.Context) (res []string, err error) {
cache := store.rlock()
defer store.runlock()
cache := store.lock()
defer store.unlock()
if store.getUsersSysType() == MinIOUsersSysType {
m := map[string]GroupInfo{}
@ -834,8 +834,8 @@ func (store *IAMStoreSys) PolicyNotificationHandler(ctx context.Context, policy
return errInvalidArgument
}
cache := store.rlock()
defer store.runlock()
cache := store.lock()
defer store.unlock()
err := store.loadPolicyDoc(ctx, policy, cache.iamPolicyDocsMap)
if err == errNoSuchPolicy {
@ -1165,8 +1165,8 @@ func (store *IAMStoreSys) PolicyMappingNotificationHandler(ctx context.Context,
return errInvalidArgument
}
cache := store.rlock()
defer store.runlock()
cache := store.lock()
defer store.unlock()
m := cache.iamGroupPolicyMap
if !isGroup {
@ -1189,8 +1189,8 @@ func (store *IAMStoreSys) UserNotificationHandler(ctx context.Context, accessKey
return errInvalidArgument
}
cache := store.rlock()
defer store.runlock()
cache := store.lock()
defer store.unlock()
err := store.loadUser(ctx, accessKey, userType, cache.iamUsersMap)
if err == errNoSuchUser {
@ -1678,8 +1678,8 @@ func (store *IAMStoreSys) UpdateUserIdentity(ctx context.Context, cred auth.Cred
// LoadUser - attempts to load user info from storage and updates cache.
func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) {
cache := store.rlock()
defer store.runlock()
cache := store.lock()
defer store.unlock()
_, found := cache.iamUsersMap[accessKey]
if !found {


@ -351,13 +351,6 @@ func (sys *IAMSys) loadWatchedEvent(outerCtx context.Context, event iamWatchEven
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
defer cancel()
// We need to read from storage and write to in-memory map, so we need
// only a read lock on storage, however in some cases we modify storage
// too (e.g. when credentials from storage are expired, we delete them),
// so we take write locks for both.
sys.Lock()
defer sys.Unlock()
if event.isCreated {
switch {
case usersPrefix:


@ -190,12 +190,12 @@ func (l *localLocker) RLock(ctx context.Context, args dsync.LockArgs) (reply boo
if reply = !isWriteLock(lri); reply {
// Unless there is a write lock
l.lockMap[resource] = append(l.lockMap[resource], lrInfo)
l.lockUID[args.UID] = formatUUID(resource, 0)
l.lockUID[formatUUID(args.UID, 0)] = resource
}
} else {
// No locks held on the given name, so claim (first) read lock
l.lockMap[resource] = []lockRequesterInfo{lrInfo}
l.lockUID[args.UID] = formatUUID(resource, 0)
l.lockUID[formatUUID(args.UID, 0)] = resource
reply = true
}
return reply, nil
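
The lockUID map is now keyed by the UID combined with a per-resource index (via formatUUID), with the plain resource as the value, instead of being keyed by the bare UID. Presumably this is because a single lock request may cover several resources, so a bare-UID key could only record one of them; the new test file below asserts exactly one lockUID entry per locked resource. A tiny sketch of the idea, using a hypothetical key format that merely stands in for formatUUID:

package main

import "fmt"

func main() {
    // One (hypothetical) lock request covering several resources.
    uid := "3b8f6f9c-0000-0000-0000-000000000000"
    resources := []string{"bucket/obj-1", "bucket/obj-2", "bucket/obj-3"}

    // Keying by UID+index keeps one entry per resource; keying by the bare
    // UID would let each resource overwrite the previous entry.
    lockUID := map[string]string{}
    for idx, res := range resources {
        lockUID[fmt.Sprintf("%s:%d", uid, idx)] = res // stand-in for formatUUID(uid, idx)
    }
    fmt.Println(len(lockUID)) // 3, i.e. len(resources)
}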

cmd/local-locker_test.go (new file, 256 lines)

@ -0,0 +1,256 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"testing"
"time"
"github.com/minio/minio/internal/dsync"
)
func TestLocalLockerExpire(t *testing.T) {
wResources := make([]string, 1000)
rResources := make([]string, 1000)
l := newLocker()
ctx := context.Background()
for i := range wResources {
arg := dsync.LockArgs{
UID: mustGetUUID(),
Resources: []string{mustGetUUID()},
Source: t.Name(),
Owner: "owner",
Quorum: 0,
}
ok, err := l.Lock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
wResources[i] = arg.Resources[0]
}
for i := range rResources {
name := mustGetUUID()
arg := dsync.LockArgs{
UID: mustGetUUID(),
Resources: []string{name},
Source: t.Name(),
Owner: "owner",
Quorum: 0,
}
ok, err := l.RLock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
// RLock twice
ok, err = l.RLock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
rResources[i] = arg.Resources[0]
}
if len(l.lockMap) != len(rResources)+len(wResources) {
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources))
}
if len(l.lockUID) != len(rResources)+len(wResources) {
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources), len(wResources))
}
// Expire an hour from now, should keep all
l.expireOldLocks(time.Hour)
if len(l.lockMap) != len(rResources)+len(wResources) {
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources))
}
if len(l.lockUID) != len(rResources)+len(wResources) {
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources), len(wResources))
}
// Expire a minute ago.
l.expireOldLocks(-time.Minute)
if len(l.lockMap) != 0 {
t.Fatalf("after cleanup should be empty, got %d", len(l.lockMap))
}
if len(l.lockUID) != 0 {
t.Fatalf("lockUID len, got %d, want %d", len(l.lockUID), 0)
}
}
func TestLocalLockerUnlock(t *testing.T) {
const n = 1000
const m = 5
wResources := make([][m]string, n)
rResources := make([]string, n)
wUIDs := make([]string, n)
rUIDs := make([]string, 0, n*2)
l := newLocker()
ctx := context.Background()
for i := range wResources {
names := [m]string{}
for j := range names {
names[j] = mustGetUUID()
}
uid := mustGetUUID()
arg := dsync.LockArgs{
UID: uid,
Resources: names[:],
Source: t.Name(),
Owner: "owner",
Quorum: 0,
}
ok, err := l.Lock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
wResources[i] = names
wUIDs[i] = uid
}
for i := range rResources {
name := mustGetUUID()
uid := mustGetUUID()
arg := dsync.LockArgs{
UID: uid,
Resources: []string{name},
Source: t.Name(),
Owner: "owner",
Quorum: 0,
}
ok, err := l.RLock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
rUIDs = append(rUIDs, uid)
// RLock twice, different uid
uid = mustGetUUID()
arg.UID = uid
ok, err = l.RLock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
rResources[i] = name
rUIDs = append(rUIDs, uid)
}
// Each Lock has m entries
if len(l.lockMap) != len(rResources)+len(wResources)*m {
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources)*m)
}
// A UID is added for every resource
if len(l.lockUID) != len(rResources)*2+len(wResources)*m {
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources)*2, len(wResources)*m)
}
// RUnlock once...
for i, name := range rResources {
arg := dsync.LockArgs{
UID: rUIDs[i*2],
Resources: []string{name},
Source: t.Name(),
Owner: "owner",
Quorum: 0,
}
ok, err := l.RUnlock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
}
// Each Lock has m entries
if len(l.lockMap) != len(rResources)+len(wResources)*m {
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources)*m)
}
// A UID is added for every resource.
// We removed len(rResources) read locks.
if len(l.lockUID) != len(rResources)+len(wResources)*m {
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources), len(wResources)*m)
}
// RUnlock again, different uids
for i, name := range rResources {
arg := dsync.LockArgs{
UID: rUIDs[i*2+1],
Resources: []string{name},
Source: "minio",
Owner: "owner",
Quorum: 0,
}
ok, err := l.RUnlock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
}
// Each Lock has m entries
if len(l.lockMap) != 0+len(wResources)*m {
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), 0, len(wResources)*m)
}
// A UID is added for every resource.
// We removed all the RLocked entries added above.
if len(l.lockUID) != len(wResources)*m {
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), 0, len(wResources)*m)
}
// Remove write locked
for i, names := range wResources {
arg := dsync.LockArgs{
UID: wUIDs[i],
Resources: names[:],
Source: "minio",
Owner: "owner",
Quorum: 0,
}
ok, err := l.Unlock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
}
// All should be gone now...
// Each Lock has m entries
if len(l.lockMap) != 0 {
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), 0, 0)
}
if len(l.lockUID) != 0 {
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), 0, 0)
}
}


@ -56,14 +56,13 @@ const metacacheStreamVersion = 2
// metacacheWriter provides a serializer of metacache objects.
type metacacheWriter struct {
streamErr error
mw *msgp.Writer
creator func() error
closer func() error
blockSize int
streamWg sync.WaitGroup
reuseBlocks bool
streamErr error
streamWg sync.WaitGroup
}
// newMetacacheWriter will create a serializer that will write objects in given order to the output.


@ -19,9 +19,11 @@ package cmd
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"runtime/debug"
"sort"
"strconv"
"strings"
@ -357,6 +359,12 @@ func (s *storageRESTServer) WalkDirHandler(w http.ResponseWriter, r *http.Reques
prefix := r.Form.Get(storageRESTPrefixFilter)
forward := r.Form.Get(storageRESTForwardFilter)
writer := streamHTTPResponse(w)
defer func() {
if r := recover(); r != nil {
debug.PrintStack()
writer.CloseWithError(fmt.Errorf("panic: %v", r))
}
}()
writer.CloseWithError(s.storage.WalkDir(r.Context(), WalkDirOptions{
Bucket: volume,
BaseDir: dirPath,


@ -56,18 +56,21 @@ const (
// metacache contains a tracked cache entry.
type metacache struct {
id string `msg:"id"`
bucket string `msg:"b"`
root string `msg:"root"`
recursive bool `msg:"rec"`
filter string `msg:"flt"`
status scanStatus `msg:"stat"`
fileNotFound bool `msg:"fnf"`
error string `msg:"err"`
started time.Time `msg:"st"`
// do not re-arrange this struct - it has been ordered to use less
// space; if you do change it, please run https://github.com/orijtech/structslop
// and verify that your changes are optimal.
ended time.Time `msg:"end"`
lastUpdate time.Time `msg:"u"`
started time.Time `msg:"st"`
lastHandout time.Time `msg:"lh"`
lastUpdate time.Time `msg:"u"`
bucket string `msg:"b"`
filter string `msg:"flt"`
id string `msg:"id"`
error string `msg:"err"`
root string `msg:"root"`
fileNotFound bool `msg:"fnf"`
status scanStatus `msg:"stat"`
recursive bool `msg:"rec"`
dataVersion uint8 `msg:"v"`
}


@ -24,10 +24,28 @@ func (z *metacache) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
switch msgp.UnsafeString(field) {
case "id":
z.id, err = dc.ReadString()
case "end":
z.ended, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "id")
err = msgp.WrapError(err, "ended")
return
}
case "st":
z.started, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "started")
return
}
case "lh":
z.lastHandout, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "lastHandout")
return
}
case "u":
z.lastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "lastUpdate")
return
}
case "b":
@ -36,22 +54,34 @@ func (z *metacache) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "bucket")
return
}
case "flt":
z.filter, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "filter")
return
}
case "id":
z.id, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "id")
return
}
case "err":
z.error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "error")
return
}
case "root":
z.root, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "root")
return
}
case "rec":
z.recursive, err = dc.ReadBool()
case "fnf":
z.fileNotFound, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "recursive")
return
}
case "flt":
z.filter, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "filter")
err = msgp.WrapError(err, "fileNotFound")
return
}
case "stat":
@ -64,40 +94,10 @@ func (z *metacache) DecodeMsg(dc *msgp.Reader) (err error) {
}
z.status = scanStatus(zb0002)
}
case "fnf":
z.fileNotFound, err = dc.ReadBool()
case "rec":
z.recursive, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "fileNotFound")
return
}
case "err":
z.error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "error")
return
}
case "st":
z.started, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "started")
return
}
case "end":
z.ended, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "ended")
return
}
case "u":
z.lastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "lastUpdate")
return
}
case "lh":
z.lastHandout, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "lastHandout")
err = msgp.WrapError(err, "recursive")
return
}
case "v":
@ -120,84 +120,14 @@ func (z *metacache) DecodeMsg(dc *msgp.Reader) (err error) {
// EncodeMsg implements msgp.Encodable
func (z *metacache) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 13
// write "id"
err = en.Append(0x8d, 0xa2, 0x69, 0x64)
// write "end"
err = en.Append(0x8d, 0xa3, 0x65, 0x6e, 0x64)
if err != nil {
return
}
err = en.WriteString(z.id)
err = en.WriteTime(z.ended)
if err != nil {
err = msgp.WrapError(err, "id")
return
}
// write "b"
err = en.Append(0xa1, 0x62)
if err != nil {
return
}
err = en.WriteString(z.bucket)
if err != nil {
err = msgp.WrapError(err, "bucket")
return
}
// write "root"
err = en.Append(0xa4, 0x72, 0x6f, 0x6f, 0x74)
if err != nil {
return
}
err = en.WriteString(z.root)
if err != nil {
err = msgp.WrapError(err, "root")
return
}
// write "rec"
err = en.Append(0xa3, 0x72, 0x65, 0x63)
if err != nil {
return
}
err = en.WriteBool(z.recursive)
if err != nil {
err = msgp.WrapError(err, "recursive")
return
}
// write "flt"
err = en.Append(0xa3, 0x66, 0x6c, 0x74)
if err != nil {
return
}
err = en.WriteString(z.filter)
if err != nil {
err = msgp.WrapError(err, "filter")
return
}
// write "stat"
err = en.Append(0xa4, 0x73, 0x74, 0x61, 0x74)
if err != nil {
return
}
err = en.WriteUint8(uint8(z.status))
if err != nil {
err = msgp.WrapError(err, "status")
return
}
// write "fnf"
err = en.Append(0xa3, 0x66, 0x6e, 0x66)
if err != nil {
return
}
err = en.WriteBool(z.fileNotFound)
if err != nil {
err = msgp.WrapError(err, "fileNotFound")
return
}
// write "err"
err = en.Append(0xa3, 0x65, 0x72, 0x72)
if err != nil {
return
}
err = en.WriteString(z.error)
if err != nil {
err = msgp.WrapError(err, "error")
err = msgp.WrapError(err, "ended")
return
}
// write "st"
@ -210,14 +140,14 @@ func (z *metacache) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "started")
return
}
// write "end"
err = en.Append(0xa3, 0x65, 0x6e, 0x64)
// write "lh"
err = en.Append(0xa2, 0x6c, 0x68)
if err != nil {
return
}
err = en.WriteTime(z.ended)
err = en.WriteTime(z.lastHandout)
if err != nil {
err = msgp.WrapError(err, "ended")
err = msgp.WrapError(err, "lastHandout")
return
}
// write "u"
@ -230,14 +160,84 @@ func (z *metacache) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "lastUpdate")
return
}
// write "lh"
err = en.Append(0xa2, 0x6c, 0x68)
// write "b"
err = en.Append(0xa1, 0x62)
if err != nil {
return
}
err = en.WriteTime(z.lastHandout)
err = en.WriteString(z.bucket)
if err != nil {
err = msgp.WrapError(err, "lastHandout")
err = msgp.WrapError(err, "bucket")
return
}
// write "flt"
err = en.Append(0xa3, 0x66, 0x6c, 0x74)
if err != nil {
return
}
err = en.WriteString(z.filter)
if err != nil {
err = msgp.WrapError(err, "filter")
return
}
// write "id"
err = en.Append(0xa2, 0x69, 0x64)
if err != nil {
return
}
err = en.WriteString(z.id)
if err != nil {
err = msgp.WrapError(err, "id")
return
}
// write "err"
err = en.Append(0xa3, 0x65, 0x72, 0x72)
if err != nil {
return
}
err = en.WriteString(z.error)
if err != nil {
err = msgp.WrapError(err, "error")
return
}
// write "root"
err = en.Append(0xa4, 0x72, 0x6f, 0x6f, 0x74)
if err != nil {
return
}
err = en.WriteString(z.root)
if err != nil {
err = msgp.WrapError(err, "root")
return
}
// write "fnf"
err = en.Append(0xa3, 0x66, 0x6e, 0x66)
if err != nil {
return
}
err = en.WriteBool(z.fileNotFound)
if err != nil {
err = msgp.WrapError(err, "fileNotFound")
return
}
// write "stat"
err = en.Append(0xa4, 0x73, 0x74, 0x61, 0x74)
if err != nil {
return
}
err = en.WriteUint8(uint8(z.status))
if err != nil {
err = msgp.WrapError(err, "status")
return
}
// write "rec"
err = en.Append(0xa3, 0x72, 0x65, 0x63)
if err != nil {
return
}
err = en.WriteBool(z.recursive)
if err != nil {
err = msgp.WrapError(err, "recursive")
return
}
// write "v"
@ -257,42 +257,42 @@ func (z *metacache) EncodeMsg(en *msgp.Writer) (err error) {
func (z *metacache) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 13
// string "id"
o = append(o, 0x8d, 0xa2, 0x69, 0x64)
o = msgp.AppendString(o, z.id)
// string "b"
o = append(o, 0xa1, 0x62)
o = msgp.AppendString(o, z.bucket)
// string "root"
o = append(o, 0xa4, 0x72, 0x6f, 0x6f, 0x74)
o = msgp.AppendString(o, z.root)
// string "rec"
o = append(o, 0xa3, 0x72, 0x65, 0x63)
o = msgp.AppendBool(o, z.recursive)
// string "flt"
o = append(o, 0xa3, 0x66, 0x6c, 0x74)
o = msgp.AppendString(o, z.filter)
// string "stat"
o = append(o, 0xa4, 0x73, 0x74, 0x61, 0x74)
o = msgp.AppendUint8(o, uint8(z.status))
// string "fnf"
o = append(o, 0xa3, 0x66, 0x6e, 0x66)
o = msgp.AppendBool(o, z.fileNotFound)
// string "err"
o = append(o, 0xa3, 0x65, 0x72, 0x72)
o = msgp.AppendString(o, z.error)
// string "end"
o = append(o, 0x8d, 0xa3, 0x65, 0x6e, 0x64)
o = msgp.AppendTime(o, z.ended)
// string "st"
o = append(o, 0xa2, 0x73, 0x74)
o = msgp.AppendTime(o, z.started)
// string "end"
o = append(o, 0xa3, 0x65, 0x6e, 0x64)
o = msgp.AppendTime(o, z.ended)
// string "u"
o = append(o, 0xa1, 0x75)
o = msgp.AppendTime(o, z.lastUpdate)
// string "lh"
o = append(o, 0xa2, 0x6c, 0x68)
o = msgp.AppendTime(o, z.lastHandout)
// string "u"
o = append(o, 0xa1, 0x75)
o = msgp.AppendTime(o, z.lastUpdate)
// string "b"
o = append(o, 0xa1, 0x62)
o = msgp.AppendString(o, z.bucket)
// string "flt"
o = append(o, 0xa3, 0x66, 0x6c, 0x74)
o = msgp.AppendString(o, z.filter)
// string "id"
o = append(o, 0xa2, 0x69, 0x64)
o = msgp.AppendString(o, z.id)
// string "err"
o = append(o, 0xa3, 0x65, 0x72, 0x72)
o = msgp.AppendString(o, z.error)
// string "root"
o = append(o, 0xa4, 0x72, 0x6f, 0x6f, 0x74)
o = msgp.AppendString(o, z.root)
// string "fnf"
o = append(o, 0xa3, 0x66, 0x6e, 0x66)
o = msgp.AppendBool(o, z.fileNotFound)
// string "stat"
o = append(o, 0xa4, 0x73, 0x74, 0x61, 0x74)
o = msgp.AppendUint8(o, uint8(z.status))
// string "rec"
o = append(o, 0xa3, 0x72, 0x65, 0x63)
o = msgp.AppendBool(o, z.recursive)
// string "v"
o = append(o, 0xa1, 0x76)
o = msgp.AppendUint8(o, z.dataVersion)
@ -317,10 +317,28 @@ func (z *metacache) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
switch msgp.UnsafeString(field) {
case "id":
z.id, bts, err = msgp.ReadStringBytes(bts)
case "end":
z.ended, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "id")
err = msgp.WrapError(err, "ended")
return
}
case "st":
z.started, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "started")
return
}
case "lh":
z.lastHandout, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "lastHandout")
return
}
case "u":
z.lastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "lastUpdate")
return
}
case "b":
@ -329,22 +347,34 @@ func (z *metacache) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "bucket")
return
}
case "flt":
z.filter, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "filter")
return
}
case "id":
z.id, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "id")
return
}
case "err":
z.error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "error")
return
}
case "root":
z.root, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "root")
return
}
case "rec":
z.recursive, bts, err = msgp.ReadBoolBytes(bts)
case "fnf":
z.fileNotFound, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "recursive")
return
}
case "flt":
z.filter, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "filter")
err = msgp.WrapError(err, "fileNotFound")
return
}
case "stat":
@ -357,40 +387,10 @@ func (z *metacache) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
z.status = scanStatus(zb0002)
}
case "fnf":
z.fileNotFound, bts, err = msgp.ReadBoolBytes(bts)
case "rec":
z.recursive, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "fileNotFound")
return
}
case "err":
z.error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "error")
return
}
case "st":
z.started, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "started")
return
}
case "end":
z.ended, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ended")
return
}
case "u":
z.lastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "lastUpdate")
return
}
case "lh":
z.lastHandout, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "lastHandout")
err = msgp.WrapError(err, "recursive")
return
}
case "v":
@ -413,7 +413,7 @@ func (z *metacache) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *metacache) Msgsize() (s int) {
s = 1 + 3 + msgp.StringPrefixSize + len(z.id) + 2 + msgp.StringPrefixSize + len(z.bucket) + 5 + msgp.StringPrefixSize + len(z.root) + 4 + msgp.BoolSize + 4 + msgp.StringPrefixSize + len(z.filter) + 5 + msgp.Uint8Size + 4 + msgp.BoolSize + 4 + msgp.StringPrefixSize + len(z.error) + 3 + msgp.TimeSize + 4 + msgp.TimeSize + 2 + msgp.TimeSize + 3 + msgp.TimeSize + 2 + msgp.Uint8Size
s = 1 + 4 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 2 + msgp.TimeSize + 2 + msgp.StringPrefixSize + len(z.bucket) + 4 + msgp.StringPrefixSize + len(z.filter) + 3 + msgp.StringPrefixSize + len(z.id) + 4 + msgp.StringPrefixSize + len(z.error) + 5 + msgp.StringPrefixSize + len(z.root) + 4 + msgp.BoolSize + 5 + msgp.Uint8Size + 4 + msgp.BoolSize + 2 + msgp.Uint8Size
return
}


@ -108,6 +108,10 @@ func (c *minioCollector) Collect(ch chan<- prometheus.Metric) {
}
func nodeHealthMetricsPrometheus(ch chan<- prometheus.Metric) {
if globalIsGateway {
return
}
nodesUp, nodesDown := GetPeerOnlineCount()
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(


@ -30,6 +30,7 @@ import (
"net/http"
"os/user"
"path"
"runtime/debug"
"strconv"
"strings"
"sync"
@ -177,6 +178,12 @@ func (s *storageRESTServer) NSScannerHandler(w http.ResponseWriter, r *http.Requ
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
resp := streamHTTPResponse(w)
defer func() {
if r := recover(); r != nil {
debug.PrintStack()
resp.CloseWithError(fmt.Errorf("panic: %v", r))
}
}()
respW := msgp.NewWriter(resp)
// Collect updates, stream them before the full cache is sent.


@ -36,20 +36,34 @@ func runAllIAMSTSTests(suite *TestSuiteIAM, c *check) {
}
func TestIAMInternalIDPSTSServerSuite(t *testing.T) {
testCases := []*TestSuiteIAM{
baseTestCases := []TestSuiteCommon{
// Init and run test on FS backend with signature v4.
newTestSuiteIAM(TestSuiteCommon{serverType: "FS", signer: signerV4}),
{serverType: "FS", signer: signerV4},
// Init and run test on FS backend, with tls enabled.
newTestSuiteIAM(TestSuiteCommon{serverType: "FS", signer: signerV4, secure: true}),
{serverType: "FS", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
newTestSuiteIAM(TestSuiteCommon{serverType: "Erasure", signer: signerV4}),
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
newTestSuiteIAM(TestSuiteCommon{serverType: "ErasureSet", signer: signerV4}),
{serverType: "ErasureSet", signer: signerV4},
}
testCases := []*TestSuiteIAM{}
for _, bt := range baseTestCases {
testCases = append(testCases,
newTestSuiteIAM(bt, false),
newTestSuiteIAM(bt, true),
)
}
for i, testCase := range testCases {
t.Run(fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.serverType), func(t *testing.T) {
runAllIAMSTSTests(testCase, &check{t, testCase.serverType})
})
etcdStr := ""
if testCase.withEtcdBackend {
etcdStr = " (with etcd backend)"
}
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
func(t *testing.T) {
runAllIAMSTSTests(testCase, &check{t, testCase.serverType})
},
)
}
}
@ -135,21 +149,16 @@ func (s *TestSuiteIAM) TestSTS(c *check) {
}
}
const (
EnvTestLDAPServer = "LDAP_TEST_SERVER"
)
func (s *TestSuiteIAM) GetLDAPServer(c *check) string {
return os.Getenv(EnvTestLDAPServer)
}
// SetUpLDAP - expects to set up an LDAP test server using the test LDAP
// container and canned data from https://github.com/minio/minio-ldap-testing
func (s *TestSuiteIAM) SetUpLDAP(c *check) {
func (s *TestSuiteIAM) SetUpLDAP(c *check, serverAddr string) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
serverAddr := s.GetLDAPServer(c)
configCmds := []string{
"identity_ldap",
fmt.Sprintf("server_addr=%s", serverAddr),
@ -169,31 +178,50 @@ func (s *TestSuiteIAM) SetUpLDAP(c *check) {
s.RestartIAMSuite(c)
}
const (
EnvTestLDAPServer = "LDAP_TEST_SERVER"
)
func TestIAMWithLDAPServerSuite(t *testing.T) {
testCases := []*TestSuiteIAM{
baseTestCases := []TestSuiteCommon{
// Init and run test on FS backend with signature v4.
newTestSuiteIAM(TestSuiteCommon{serverType: "FS", signer: signerV4}),
{serverType: "FS", signer: signerV4},
// Init and run test on FS backend, with tls enabled.
newTestSuiteIAM(TestSuiteCommon{serverType: "FS", signer: signerV4, secure: true}),
{serverType: "FS", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
newTestSuiteIAM(TestSuiteCommon{serverType: "Erasure", signer: signerV4}),
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
newTestSuiteIAM(TestSuiteCommon{serverType: "ErasureSet", signer: signerV4}),
{serverType: "ErasureSet", signer: signerV4},
}
testCases := []*TestSuiteIAM{}
for _, bt := range baseTestCases {
testCases = append(testCases,
newTestSuiteIAM(bt, false),
newTestSuiteIAM(bt, true),
)
}
for i, testCase := range testCases {
t.Run(fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.serverType), func(t *testing.T) {
c := &check{t, testCase.serverType}
suite := testCase
etcdStr := ""
if testCase.withEtcdBackend {
etcdStr = " (with etcd backend)"
}
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
func(t *testing.T) {
c := &check{t, testCase.serverType}
suite := testCase
if suite.GetLDAPServer(c) == "" {
return
}
ldapServer := os.Getenv(EnvTestLDAPServer)
if ldapServer == "" {
c.Skip("Skipping LDAP test as no LDAP server is provided.")
}
suite.SetUpSuite(c)
suite.SetUpLDAP(c)
suite.TestLDAPSTS(c)
suite.TearDownSuite(c)
})
suite.SetUpSuite(c)
suite.SetUpLDAP(c, ldapServer)
suite.TestLDAPSTS(c)
suite.TearDownSuite(c)
},
)
}
}
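
One behavioural change worth noting in the suite above: the LDAP endpoint is now read directly from the LDAP_TEST_SERVER environment variable and, when it is unset, the case is reported via c.Skip instead of silently returning, so skipped configurations are visible in `go test -v` output. The same env-gated pattern in plain Go testing terms, as a generic sketch rather than MinIO's helpers:

package cmd_test

import (
    "os"
    "testing"
)

// TestNeedsLDAP is reported as skipped by `go test -v` when no external
// LDAP server has been configured via the environment.
func TestNeedsLDAP(t *testing.T) {
    addr := os.Getenv("LDAP_TEST_SERVER")
    if addr == "" {
        t.Skip("Skipping LDAP test as no LDAP server is provided.")
    }
    _ = addr // ...connect to addr and run the real assertions here...
}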


@ -355,6 +355,8 @@ func initTestServerWithBackend(ctx context.Context, t TestErrHandler, testServer
newAllSubsystems()
globalEtcdClient = nil
initAllSubsystems(ctx, objLayer)
globalIAMSys.Init(ctx, objLayer, globalEtcdClient, 2*time.Second)


@ -2,7 +2,7 @@ version: '3.7'
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: quay.io/minio/minio:RELEASE.2021-11-05T09-16-26Z
image: quay.io/minio/minio:RELEASE.2021-11-09T03-21-45Z
command: server --console-address ":9001" http://minio{1...4}/data{1...2}
expose:
- "9000"

go.mod (2 changed lines)

@ -43,7 +43,7 @@ require (
github.com/lib/pq v1.9.0
github.com/miekg/dns v1.1.43
github.com/minio/cli v1.22.0
github.com/minio/console v0.12.1
github.com/minio/console v0.12.2
github.com/minio/csvparser v1.0.0
github.com/minio/highwayhash v1.0.2
github.com/minio/kes v0.14.0

go.sum (4 changed lines)

@ -1068,8 +1068,8 @@ github.com/minio/cli v1.22.0 h1:VTQm7lmXm3quxO917X3p+el1l0Ca5X3S4PM2ruUYO68=
github.com/minio/cli v1.22.0/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY=
github.com/minio/colorjson v1.0.1 h1:+hvfP8C1iMB95AT+ZFDRE+Knn9QPd9lg0CRJY9DRpos=
github.com/minio/colorjson v1.0.1/go.mod h1:oPM3zQQY8Gz9NGtgvuBEjQ+gPZLKAGc7T+kjMlwtOgs=
github.com/minio/console v0.12.1 h1:y5ngZcZ5+T+cGBi3GWgjoWqP28wvkQLyJObf+js10NA=
github.com/minio/console v0.12.1/go.mod h1:7mJoMR1Ki176CAsIsF3T2LmgRWzHDVvOfRVjnO9oyAI=
github.com/minio/console v0.12.2 h1:0MrD+vknaxqVdcyD6bFyWo8EPfyS14NqaY1DIAVx7iI=
github.com/minio/console v0.12.2/go.mod h1:7mJoMR1Ki176CAsIsF3T2LmgRWzHDVvOfRVjnO9oyAI=
github.com/minio/csvparser v1.0.0 h1:xJEHcYK8ZAjeW4hNV9Zu30u+/2o4UyPnYgyjWp8b7ZU=
github.com/minio/csvparser v1.0.0/go.mod h1:lKXskSLzPgC5WQyzP7maKH7Sl1cqvANXo9YCto8zbtM=
github.com/minio/direct-csi v1.3.5-0.20210601185811-f7776f7961bf h1:wylCc/PdvdTIqYqVNEU9LJAZBanvfGY1TwTnjM3zQaA=


@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"go/build"
"hash"
"net/http"
"path/filepath"
"reflect"
@ -39,8 +38,6 @@ import (
var (
// HighwayHash key for logging in anonymous mode
magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")
// HighwayHash hasher for logging in anonymous mode
loggerHighwayHasher hash.Hash
)
// Disable disables all logging, false by default. (used for "go test")
@ -207,8 +204,6 @@ func Init(goPath string, goRoot string) {
// paths like "{GOROOT}/src/github.com/minio/minio"
// and "{GOPATH}/src/github.com/minio/minio"
trimStrings = append(trimStrings, filepath.Join("github.com", "minio", "minio")+string(filepath.Separator))
loggerHighwayHasher, _ = highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
}
func trimTrace(f string) string {
@ -263,10 +258,9 @@ func getTrace(traceLevel int) []string {
// Return the highway hash of the passed string
func hashString(input string) string {
defer loggerHighwayHasher.Reset()
loggerHighwayHasher.Write([]byte(input))
checksum := loggerHighwayHasher.Sum(nil)
return hex.EncodeToString(checksum)
hh, _ := highwayhash.New(magicHighwayHash256Key)
hh.Write([]byte(input))
return hex.EncodeToString(hh.Sum(nil))
}
// Kind specifies the kind of error log
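
The removal of the package-level loggerHighwayHasher means hashString now constructs a fresh hasher on every call. A plausible motivation, stated here as an assumption rather than taken from the commit itself: a single hash.Hash instance is not safe for concurrent Write/Sum/Reset use, so per-call construction removes that shared mutable state. A minimal sketch of the per-call pattern with github.com/minio/highwayhash:

package main

import (
    "encoding/hex"
    "fmt"
    "sync"

    "github.com/minio/highwayhash"
)

// A 256-bit key; highwayhash.New only fails for a key of the wrong length.
var hashKey = make([]byte, 32)

// hashString builds a fresh hasher on every call, so concurrent callers
// never share hashing state.
func hashString(input string) string {
    hh, _ := highwayhash.New(hashKey)
    hh.Write([]byte(input))
    return hex.EncodeToString(hh.Sum(nil))
}

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            fmt.Println(hashString(fmt.Sprintf("log-entry-%d", i)))
        }(i)
    }
    wg.Wait()
}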