tolerate listing with only readQuorum disks (#10357)

We can reduce this further in the future, but this is a good
value to keep around. With the advent of continuous healing,
we can be assured that the namespace will eventually become
consistent, so it is okay to avoid listing across all drives
on all sets (a small sketch of this tolerance check follows
below).

Bonus: Pop()'ing entries in parallel can end up waiting on
large drive setups and cause more slowness instead of gaining
any performance, so remove it for now.

Also, implement load-balanced selection of local disks,
ensuring that local disks have an affinity for

- cleanupStaleMultipartUploads()
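
To make the read-quorum tolerance above concrete, here is a minimal standalone sketch (not the MinIO code itself; acceptEntry and its parameters are invented for the example) of accepting a listed entry once it has been seen on at least half the drives of an erasure set:

package main

import "fmt"

// acceptEntry reports whether an object name, seen on `occurrences`
// drives out of a set of `setDriveCount` drives, should be included
// in the listing result. The tolerance mirrors the read quorum:
// half of the drives in the set.
func acceptEntry(occurrences, setDriveCount int) bool {
	listTolerancePerSet := setDriveCount / 2
	return occurrences >= listTolerancePerSet
}

func main() {
	// With 16 drives per set, an entry present on 8 or more drives is
	// listed; continuous healing is expected to reconcile the rest.
	fmt.Println(acceptEntry(8, 16)) // true
	fmt.Println(acceptEntry(5, 16)) // false
}

Entries below that threshold are skipped and left for continuous healing to reconcile.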
Harshavardhana 2020-08-26 19:29:35 -07:00 committed by GitHub
parent 0a2e6d58a5
commit a359e36e35
14 changed files with 162 additions and 204 deletions


@@ -183,7 +183,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureZones, drivesToHeal
 	// Heal all erasure sets that need
 	for i, erasureSetToHeal := range erasureSetInZoneToHeal {
 		for _, setIndex := range erasureSetToHeal {
-			err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].drivesPerSet)
+			err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].setDriveCount)
 			if err != nil {
 				logger.LogIf(ctx, err)
 			}


@@ -156,7 +156,7 @@ func parseStorageClass(storageClassEnv string) (sc StorageClass, err error) {
 }

 // Validates the parity disks.
-func validateParity(ssParity, rrsParity, drivesPerSet int) (err error) {
+func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
 	if ssParity == 0 && rrsParity == 0 {
 		return nil
 	}
@@ -174,12 +174,12 @@ func validateParity(ssParity, rrsParity, drivesPerSet int) (err error) {
 		return fmt.Errorf("Reduced redundancy storage class parity %d should be greater than or equal to %d", rrsParity, minParityDisks)
 	}

-	if ssParity > drivesPerSet/2 {
-		return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, drivesPerSet/2)
+	if ssParity > setDriveCount/2 {
+		return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, setDriveCount/2)
 	}

-	if rrsParity > drivesPerSet/2 {
-		return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, drivesPerSet/2)
+	if rrsParity > setDriveCount/2 {
+		return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, setDriveCount/2)
 	}

 	if ssParity > 0 && rrsParity > 0 {
@@ -220,9 +220,9 @@ func Enabled(kvs config.KVS) bool {
 }

 // LookupConfig - lookup storage class config and override with valid environment settings if any.
-func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
+func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
 	cfg = Config{}
-	cfg.Standard.Parity = drivesPerSet / 2
+	cfg.Standard.Parity = setDriveCount / 2
 	cfg.RRS.Parity = defaultRRSParity

 	if err = config.CheckValidKeys(config.StorageClassSubSys, kvs, DefaultKVS); err != nil {
@@ -239,7 +239,7 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
 		}
 	}

 	if cfg.Standard.Parity == 0 {
-		cfg.Standard.Parity = drivesPerSet / 2
+		cfg.Standard.Parity = setDriveCount / 2
 	}

 	if rrsc != "" {
@@ -254,7 +254,7 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
 	// Validation is done after parsing both the storage classes. This is needed because we need one
 	// storage class value to deduce the correct value of the other storage class.
-	if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, drivesPerSet); err != nil {
+	if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, setDriveCount); err != nil {
 		return Config{}, err
 	}
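
For intuition, the parity bound enforced above can be exercised with a small self-contained example; checkParity below is an illustrative helper, not the storageclass package's API:

package main

import (
	"errors"
	"fmt"
)

// checkParity mirrors the constraint above: parity for a storage class
// may not exceed setDriveCount/2, otherwise too few data drives remain.
// Illustrative only, not the storageclass package API.
func checkParity(parity, setDriveCount int) error {
	if parity > setDriveCount/2 {
		return errors.New("parity cannot exceed half the drives in a set")
	}
	return nil
}

func main() {
	fmt.Println(checkParity(8, 16)) // <nil>: 8 data + 8 parity is allowed
	fmt.Println(checkParity(9, 16)) // error: parity exceeds setDriveCount/2
}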


@@ -69,10 +69,10 @@ func TestParseStorageClass(t *testing.T) {
 func TestValidateParity(t *testing.T) {
 	tests := []struct {
-		rrsParity    int
-		ssParity     int
-		success      bool
-		drivesPerSet int
+		rrsParity     int
+		ssParity      int
+		success       bool
+		setDriveCount int
 	}{
 		{2, 4, true, 16},
 		{3, 3, true, 16},
@@ -85,7 +85,7 @@ func TestValidateParity(t *testing.T) {
 		{9, 2, false, 16},
 	}
 	for i, tt := range tests {
-		err := validateParity(tt.ssParity, tt.rrsParity, tt.drivesPerSet)
+		err := validateParity(tt.ssParity, tt.rrsParity, tt.setDriveCount)
 		if err != nil && tt.success {
 			t.Errorf("Test %d, Expected success, got %s", i+1, err)
 		}


@@ -193,7 +193,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
 }

 // ZoneEndpoints represent endpoints in a given zone
-// along with its setCount and drivesPerSet.
+// along with its setCount and setDriveCount.
 type ZoneEndpoints struct {
 	SetCount     int
 	DrivesPerSet int


@@ -133,8 +133,7 @@ func (er erasureObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketI
 		if err == nil {
 			// NOTE: The assumption here is that volumes across all disks in
 			// readQuorum have consistent view i.e they all have same number
-			// of buckets. This is essentially not verified since healing
-			// should take care of this.
+			// of buckets.
 			var bucketsInfo []BucketInfo
 			for _, volInfo := range volsInfo {
 				if isReservedOrInvalidBucket(volInfo.Name, true) {


@@ -23,6 +23,17 @@ import (
 	"github.com/minio/minio/pkg/sync/errgroup"
 )

+func (er erasureObjects) getLoadBalancedLocalDisks() (newDisks []StorageAPI) {
+	disks := er.getDisks()
+	// Based on the random shuffling return back randomized disks.
+	for _, i := range hashOrder(UTCNow().String(), len(disks)) {
+		if disks[i-1] != nil && disks[i-1].IsLocal() {
+			newDisks = append(newDisks, disks[i-1])
+		}
+	}
+	return newDisks
+}
+
 // getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice.
 func (er erasureObjects) getLoadBalancedDisks() (newDisks []StorageAPI) {
 	disks := er.getDisks()
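
The disks[i-1] indexing in the added function works because hashOrder yields 1-based positions. As a rough, hedged sketch of the same selection using a plain shuffle instead of the deterministic, time-seeded hash order (localDisk and its fields are stand-ins invented for the example):

package main

import (
	"fmt"
	"math/rand"
)

// localDisk is a stand-in for the StorageAPI values used above.
type localDisk struct {
	name  string
	local bool
}

// loadBalancedLocalDisks returns the local disks in randomized order,
// mirroring the intent of getLoadBalancedLocalDisks but using a plain
// 0-based shuffle instead of the 1-based, time-seeded hash order.
func loadBalancedLocalDisks(disks []*localDisk) (newDisks []*localDisk) {
	for _, i := range rand.Perm(len(disks)) {
		if disks[i] != nil && disks[i].local {
			newDisks = append(newDisks, disks[i])
		}
	}
	return newDisks
}

func main() {
	disks := []*localDisk{
		{name: "d1", local: true},
		{name: "d2", local: false},
		nil,
		{name: "d4", local: true},
	}
	for _, d := range loadBalancedLocalDisks(disks) {
		fmt.Println(d.name) // only local, non-nil disks, shuffled
	}
}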


@@ -81,7 +81,8 @@ func (er erasureObjects) cleanupStaleMultipartUploads(ctx context.Context, clean
 			return
 		case <-ticker.C:
 			var disk StorageAPI
-			for _, d := range er.getLoadBalancedDisks() {
+			// run multiple cleanup's local to this server.
+			for _, d := range er.getLoadBalancedLocalDisks() {
 				if d != nil {
 					disk = d
 					break


@@ -75,7 +75,8 @@ type erasureSets struct {
 	endpointStrings []string

 	// Total number of sets and the number of disks per set.
-	setCount, drivesPerSet int
+	setCount, setDriveCount int
+	listTolerancePerSet     int

 	disksConnectEvent chan diskConnectInfo
@@ -112,7 +113,7 @@ func (s *erasureSets) getDiskMap() map[string]StorageAPI {
 	defer s.erasureDisksMu.RUnlock()

 	for i := 0; i < s.setCount; i++ {
-		for j := 0; j < s.drivesPerSet; j++ {
+		for j := 0; j < s.setDriveCount; j++ {
 			disk := s.erasureDisks[i][j]
 			if disk == nil {
 				continue
@@ -228,7 +229,7 @@ func (s *erasureSets) connectDisks() {
 				s.erasureDisks[setIndex][diskIndex].Close()
 			}
 			s.erasureDisks[setIndex][diskIndex] = disk
-			s.endpointStrings[setIndex*s.drivesPerSet+diskIndex] = disk.String()
+			s.endpointStrings[setIndex*s.setDriveCount+diskIndex] = disk.String()
 			s.erasureDisksMu.Unlock()
 			go func(setIndex int) {
 				// Send a new disk connect event with a timeout
@@ -260,7 +261,7 @@ func (s *erasureSets) monitorAndConnectEndpoints(ctx context.Context, monitorInt
 func (s *erasureSets) GetLockers(setIndex int) func() []dsync.NetLocker {
 	return func() []dsync.NetLocker {
-		lockers := make([]dsync.NetLocker, s.drivesPerSet)
+		lockers := make([]dsync.NetLocker, s.setDriveCount)
 		copy(lockers, s.erasureLockers[setIndex])
 		return lockers
 	}
@@ -271,9 +272,9 @@ func (s *erasureSets) GetEndpoints(setIndex int) func() []string {
 		s.erasureDisksMu.RLock()
 		defer s.erasureDisksMu.RUnlock()

-		eps := make([]string, s.drivesPerSet)
-		for i := 0; i < s.drivesPerSet; i++ {
-			eps[i] = s.endpointStrings[setIndex*s.drivesPerSet+i]
+		eps := make([]string, s.setDriveCount)
+		for i := 0; i < s.setDriveCount; i++ {
+			eps[i] = s.endpointStrings[setIndex*s.setDriveCount+i]
 		}
 		return eps
 	}
@@ -284,7 +285,7 @@ func (s *erasureSets) GetDisks(setIndex int) func() []StorageAPI {
 	return func() []StorageAPI {
 		s.erasureDisksMu.RLock()
 		defer s.erasureDisksMu.RUnlock()
-		disks := make([]StorageAPI, s.drivesPerSet)
+		disks := make([]StorageAPI, s.setDriveCount)
 		copy(disks, s.erasureDisks[setIndex])
 		return disks
 	}
@@ -295,46 +296,47 @@ const defaultMonitorConnectEndpointInterval = time.Second * 10 // Set to 10 secs
 // Initialize new set of erasure coded sets.
 func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageAPI, format *formatErasureV3) (*erasureSets, error) {
 	setCount := len(format.Erasure.Sets)
-	drivesPerSet := len(format.Erasure.Sets[0])
+	setDriveCount := len(format.Erasure.Sets[0])

 	endpointStrings := make([]string, len(endpoints))

 	// Initialize the erasure sets instance.
 	s := &erasureSets{
 		sets:                make([]*erasureObjects, setCount),
 		erasureDisks:        make([][]StorageAPI, setCount),
 		erasureLockers:      make([][]dsync.NetLocker, setCount),
 		endpoints:           endpoints,
 		endpointStrings:     endpointStrings,
 		setCount:            setCount,
-		drivesPerSet:        drivesPerSet,
+		setDriveCount:       setDriveCount,
+		listTolerancePerSet: setDriveCount / 2,
 		format:              format,
 		disksConnectEvent:   make(chan diskConnectInfo),
 		disksConnectDoneCh:  make(chan struct{}),
 		distributionAlgo:    format.Erasure.DistributionAlgo,
 		deploymentID:        uuid.MustParse(format.ID),
 		pool:                NewMergeWalkPool(globalMergeLookupTimeout),
 		poolSplunk:          NewMergeWalkPool(globalMergeLookupTimeout),
 		poolVersions:        NewMergeWalkVersionsPool(globalMergeLookupTimeout),
 		mrfOperations:       make(map[healSource]int),
 	}

 	mutex := newNSLock(globalIsDistErasure)

 	// Initialize byte pool once for all sets, bpool size is set to
-	// setCount * drivesPerSet with each memory upto blockSizeV1.
-	bp := bpool.NewBytePoolCap(setCount*drivesPerSet, blockSizeV1, blockSizeV1*2)
+	// setCount * setDriveCount with each memory upto blockSizeV1.
+	bp := bpool.NewBytePoolCap(setCount*setDriveCount, blockSizeV1, blockSizeV1*2)

 	for i := 0; i < setCount; i++ {
-		s.erasureDisks[i] = make([]StorageAPI, drivesPerSet)
-		s.erasureLockers[i] = make([]dsync.NetLocker, drivesPerSet)
+		s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
+		s.erasureLockers[i] = make([]dsync.NetLocker, setDriveCount)
 	}

 	for i := 0; i < setCount; i++ {
-		for j := 0; j < drivesPerSet; j++ {
-			endpoint := endpoints[i*drivesPerSet+j]
+		for j := 0; j < setDriveCount; j++ {
+			endpoint := endpoints[i*setDriveCount+j]
 			// Rely on endpoints list to initialize, init lockers and available disks.
 			s.erasureLockers[i][j] = newLockAPI(endpoint)
-			disk := storageDisks[i*drivesPerSet+j]
+			disk := storageDisks[i*setDriveCount+j]
 			if disk == nil {
 				continue
 			}
@@ -348,7 +350,7 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
 				disk.Close()
 				continue
 			}
-			s.endpointStrings[m*drivesPerSet+n] = disk.String()
+			s.endpointStrings[m*setDriveCount+n] = disk.String()
 			s.erasureDisks[m][n] = disk
 		}
@@ -384,7 +386,7 @@ func (s *erasureSets) NewNSLock(ctx context.Context, bucket string, objects ...s
 // SetDriveCount returns the current drives per set.
 func (s *erasureSets) SetDriveCount() int {
-	return s.drivesPerSet
+	return s.setDriveCount
 }

 // StorageUsageInfo - combines output of StorageInfo across all erasure coded object sets.
@@ -458,13 +460,13 @@ func (s *erasureSets) StorageInfo(ctx context.Context, local bool) (StorageInfo,
 	scParity := globalStorageClass.GetParityForSC(storageclass.STANDARD)
 	if scParity == 0 {
-		scParity = s.drivesPerSet / 2
+		scParity = s.setDriveCount / 2
 	}
-	storageInfo.Backend.StandardSCData = s.drivesPerSet - scParity
+	storageInfo.Backend.StandardSCData = s.setDriveCount - scParity
 	storageInfo.Backend.StandardSCParity = scParity

 	rrSCParity := globalStorageClass.GetParityForSC(storageclass.RRS)
-	storageInfo.Backend.RRSCData = s.drivesPerSet - rrSCParity
+	storageInfo.Backend.RRSCData = s.setDriveCount - rrSCParity
 	storageInfo.Backend.RRSCParity = rrSCParity

 	if local {
@@ -838,17 +840,9 @@ func (f *FileInfoCh) Push(fi FileInfo) {
 // if the caller wishes to list N entries to call lexicallySortedEntry
 // N times until this boolean is 'false'.
 func lexicallySortedEntry(entryChs []FileInfoCh, entries []FileInfo, entriesValid []bool) (FileInfo, int, bool) {
-	var wg sync.WaitGroup
 	for j := range entryChs {
-		j := j
-		wg.Add(1)
-		// Pop() entries in parallel for large drive setups.
-		go func() {
-			defer wg.Done()
-			entries[j], entriesValid[j] = entryChs[j].Pop()
-		}()
+		entries[j], entriesValid[j] = entryChs[j].Pop()
 	}
-	wg.Wait()

 	var isTruncated = false
 	for _, valid := range entriesValid {
@@ -910,17 +904,9 @@ func lexicallySortedEntry(entryChs []FileInfoCh, entries []FileInfo, entriesVali
 // if the caller wishes to list N entries to call lexicallySortedEntry
 // N times until this boolean is 'false'.
 func lexicallySortedEntryVersions(entryChs []FileInfoVersionsCh, entries []FileInfoVersions, entriesValid []bool) (FileInfoVersions, int, bool) {
-	var wg sync.WaitGroup
 	for j := range entryChs {
-		j := j
-		wg.Add(1)
-		// Pop() entries in parallel for large drive setups.
-		go func() {
-			defer wg.Done()
-			entries[j], entriesValid[j] = entryChs[j].Pop()
-		}()
+		entries[j], entriesValid[j] = entryChs[j].Pop()
 	}
-	wg.Wait()

 	var isTruncated = false
 	for _, valid := range entriesValid {
@@ -1232,7 +1218,7 @@ func (s *erasureSets) ReloadFormat(ctx context.Context, dryRun bool) (err error)
 	}(storageDisks)

 	formats, _ := loadFormatErasureAll(storageDisks, false)
-	if err = checkFormatErasureValues(formats, s.drivesPerSet); err != nil {
+	if err = checkFormatErasureValues(formats, s.setDriveCount); err != nil {
 		return err
 	}
@@ -1272,7 +1258,7 @@ func (s *erasureSets) ReloadFormat(ctx context.Context, dryRun bool) (err error)
 			s.erasureDisks[m][n].Close()
 		}

-		s.endpointStrings[m*s.drivesPerSet+n] = disk.String()
+		s.endpointStrings[m*s.setDriveCount+n] = disk.String()
 		s.erasureDisks[m][n] = disk
 	}
 	s.erasureDisksMu.Unlock()
@@ -1354,7 +1340,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
 	}(storageDisks)

 	formats, sErrs := loadFormatErasureAll(storageDisks, true)
-	if err = checkFormatErasureValues(formats, s.drivesPerSet); err != nil {
+	if err = checkFormatErasureValues(formats, s.setDriveCount); err != nil {
 		return madmin.HealResultItem{}, err
 	}
@@ -1365,7 +1351,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
 	res = madmin.HealResultItem{
 		Type:      madmin.HealItemMetadata,
 		Detail:    "disk-format",
-		DiskCount: s.setCount * s.drivesPerSet,
+		DiskCount: s.setCount * s.setDriveCount,
 		SetCount:  s.setCount,
 	}
@@ -1396,7 +1382,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
 	markUUIDsOffline(refFormat, formats)

 	// Initialize a new set of set formats which will be written to disk.
-	newFormatSets := newHealFormatSets(refFormat, s.setCount, s.drivesPerSet, formats, sErrs)
+	newFormatSets := newHealFormatSets(refFormat, s.setCount, s.setDriveCount, formats, sErrs)

 	// Look for all offline/unformatted disks in our reference format,
 	// such that we can fill them up with new UUIDs, this looping also
@@ -1413,7 +1399,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
 			newFormatSets[i][l].Erasure.This = mustGetUUID()
 			refFormat.Erasure.Sets[i][j] = newFormatSets[i][l].Erasure.This
 			for m, v := range res.After.Drives {
-				if v.Endpoint == s.endpoints.GetString(i*s.drivesPerSet+l) {
+				if v.Endpoint == s.endpoints.GetString(i*s.setDriveCount+l) {
 					res.After.Drives[m].UUID = newFormatSets[i][l].Erasure.This
 					res.After.Drives[m].State = madmin.DriveStateOk
 				}
@@ -1426,14 +1412,14 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
 	}

 	if !dryRun {
-		var tmpNewFormats = make([]*formatErasureV3, s.setCount*s.drivesPerSet)
+		var tmpNewFormats = make([]*formatErasureV3, s.setCount*s.setDriveCount)
 		for i := range newFormatSets {
 			for j := range newFormatSets[i] {
 				if newFormatSets[i][j] == nil {
 					continue
 				}
-				tmpNewFormats[i*s.drivesPerSet+j] = newFormatSets[i][j]
-				tmpNewFormats[i*s.drivesPerSet+j].Erasure.Sets = refFormat.Erasure.Sets
+				tmpNewFormats[i*s.setDriveCount+j] = newFormatSets[i][j]
+				tmpNewFormats[i*s.setDriveCount+j].Erasure.Sets = refFormat.Erasure.Sets
 			}
 		}
@@ -1478,7 +1464,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
 				s.erasureDisks[m][n].Close()
 			}

-			s.endpointStrings[m*s.drivesPerSet+n] = disk.String()
+			s.endpointStrings[m*s.setDriveCount+n] = disk.String()
 			s.erasureDisks[m][n] = disk
 		}
 		s.erasureDisksMu.Unlock()
@@ -1496,7 +1482,7 @@ func (s *erasureSets) HealBucket(ctx context.Context, bucket string, dryRun, rem
 	result = madmin.HealResultItem{
 		Type:      madmin.HealItemBucket,
 		Bucket:    bucket,
-		DiskCount: s.setCount * s.drivesPerSet,
+		DiskCount: s.setCount * s.setDriveCount,
 		SetCount:  s.setCount,
 	}
@@ -1512,7 +1498,7 @@ func (s *erasureSets) HealBucket(ctx context.Context, bucket string, dryRun, rem
 	// Check if we had quorum to write, if not return an appropriate error.
 	_, afterDriveOnline := result.GetOnlineCounts()
-	if afterDriveOnline < ((s.setCount*s.drivesPerSet)/2)+1 {
+	if afterDriveOnline < ((s.setCount*s.setDriveCount)/2)+1 {
 		return result, toObjectErr(errErasureWriteQuorum, bucket)
 	}
@@ -1568,7 +1554,7 @@ func (s *erasureSets) Walk(ctx context.Context, bucket, prefix string, results c
 				return
 			}

-			if quorumCount >= s.drivesPerSet/2 {
+			if quorumCount >= s.setDriveCount/2 {
 				// Read quorum exists proceed
 				for _, version := range entry.Versions {
 					results <- version.ToObjectInfo(bucket, version.Name)
@@ -1595,7 +1581,7 @@ func (s *erasureSets) Walk(ctx context.Context, bucket, prefix string, results c
 				return
 			}

-			if quorumCount >= s.drivesPerSet/2 {
+			if quorumCount >= s.setDriveCount/2 {
 				// Read quorum exists proceed
 				results <- entry.ToObjectInfo(bucket, entry.Name)
 			}
@@ -1622,14 +1608,14 @@ func (s *erasureSets) HealObjects(ctx context.Context, bucket, prefix string, op
 				break
 			}

-			if quorumCount == s.drivesPerSet && opts.ScanMode == madmin.HealNormalScan {
+			if quorumCount == s.setDriveCount && opts.ScanMode == madmin.HealNormalScan {
 				// Skip good entries.
 				continue
 			}

 			for _, version := range entry.Versions {
 				// Wait and proceed if there are active requests
-				waitForLowHTTPReq(int32(s.drivesPerSet), time.Second)
+				waitForLowHTTPReq(int32(s.setDriveCount), time.Second)

 				if err := healObject(bucket, version.Name, version.VersionID); err != nil {
 					return toObjectErr(err, bucket, version.Name)
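
The serialized Pop() loops above lean on the push-back behavior of the entry channels: a popped entry that is not consumed in the current merge round is pushed back so the next round sees it again. A simplified, hedged sketch of that behavior (not the actual FileInfoCh implementation) is:

package main

import "fmt"

// entryCh is a simplified stand-in for FileInfoCh: a channel of entries
// plus a one-slot push-back buffer, so a merge round can peek at the
// head of a stream and return it unchanged if it was not consumed.
type entryCh struct {
	ch    chan string
	prev  string
	valid bool
}

// Pop returns the pushed-back entry if present, else reads the channel.
func (e *entryCh) Pop() (string, bool) {
	if e.valid {
		e.valid = false
		return e.prev, true
	}
	entry, ok := <-e.ch
	return entry, ok
}

// Push stores an entry so the next Pop returns it again.
func (e *entryCh) Push(entry string) {
	e.prev = entry
	e.valid = true
}

func main() {
	c := &entryCh{ch: make(chan string, 2)}
	c.ch <- "a"
	c.ch <- "b"
	close(c.ch)

	entry, _ := c.Pop() // "a"
	c.Push(entry)       // not consumed this round, put it back
	entry, _ = c.Pop()
	fmt.Println(entry) // "a" again
	entry, _ = c.Pop()
	fmt.Println(entry) // "b"
}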


@@ -690,15 +690,15 @@ func (z *erasureZones) ListObjectsV2(ctx context.Context, bucket, prefix, contin
 func (z *erasureZones) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
 	var zonesEntryChs [][]FileInfoCh
-	var zonesDrivesPerSet []int
+	var zonesListTolerancePerSet []int

 	endWalkCh := make(chan struct{})
 	defer close(endWalkCh)

 	for _, zone := range z.zones {
 		zonesEntryChs = append(zonesEntryChs,
-			zone.startMergeWalksN(ctx, bucket, prefix, "", true, endWalkCh, zone.drivesPerSet))
-		zonesDrivesPerSet = append(zonesDrivesPerSet, zone.drivesPerSet)
+			zone.startMergeWalksN(ctx, bucket, prefix, "", true, endWalkCh, zone.listTolerancePerSet))
+		zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.listTolerancePerSet)
 	}

 	var objInfos []ObjectInfo
@@ -723,7 +723,7 @@ func (z *erasureZones) listObjectsNonSlash(ctx context.Context, bucket, prefix,
 			break
 		}

-		if quorumCount < zonesDrivesPerSet[zoneIndex]/2 {
+		if quorumCount < zonesListTolerancePerSet[zoneIndex] {
 			// Skip entries which are not found on upto ndisks/2.
 			continue
 		}
@@ -810,20 +810,20 @@ func (z *erasureZones) listObjectsSplunk(ctx context.Context, bucket, prefix, ma
 	var zonesEntryChs [][]FileInfoCh
 	var zonesEndWalkCh []chan struct{}
-	var drivesPerSets []int
+	var zonesListTolerancePerSet []int

 	for _, zone := range z.zones {
 		entryChs, endWalkCh := zone.poolSplunk.Release(listParams{bucket, recursive, marker, prefix})
 		if entryChs == nil {
 			endWalkCh = make(chan struct{})
-			entryChs = zone.startSplunkMergeWalksN(ctx, bucket, prefix, marker, endWalkCh, zone.drivesPerSet)
+			entryChs = zone.startSplunkMergeWalksN(ctx, bucket, prefix, marker, endWalkCh, zone.listTolerancePerSet)
 		}
 		zonesEntryChs = append(zonesEntryChs, entryChs)
 		zonesEndWalkCh = append(zonesEndWalkCh, endWalkCh)
-		drivesPerSets = append(drivesPerSets, zone.drivesPerSet)
+		zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.listTolerancePerSet)
 	}

-	entries := mergeZonesEntriesCh(zonesEntryChs, maxKeys, drivesPerSets)
+	entries := mergeZonesEntriesCh(zonesEntryChs, maxKeys, zonesListTolerancePerSet)
 	if len(entries.Files) == 0 {
 		return loi, nil
 	}
@@ -902,20 +902,20 @@ func (z *erasureZones) listObjects(ctx context.Context, bucket, prefix, marker,
 	var zonesEntryChs [][]FileInfoCh
 	var zonesEndWalkCh []chan struct{}
-	var drivesPerSets []int
+	var zonesListTolerancePerSet []int

 	for _, zone := range z.zones {
 		entryChs, endWalkCh := zone.pool.Release(listParams{bucket, recursive, marker, prefix})
 		if entryChs == nil {
 			endWalkCh = make(chan struct{})
-			entryChs = zone.startMergeWalksN(ctx, bucket, prefix, marker, recursive, endWalkCh, zone.drivesPerSet)
+			entryChs = zone.startMergeWalksN(ctx, bucket, prefix, marker, recursive, endWalkCh, zone.listTolerancePerSet)
 		}
 		zonesEntryChs = append(zonesEntryChs, entryChs)
 		zonesEndWalkCh = append(zonesEndWalkCh, endWalkCh)
-		drivesPerSets = append(drivesPerSets, zone.drivesPerSet)
+		zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.listTolerancePerSet)
 	}

-	entries := mergeZonesEntriesCh(zonesEntryChs, maxKeys, drivesPerSets)
+	entries := mergeZonesEntriesCh(zonesEntryChs, maxKeys, zonesListTolerancePerSet)
 	if len(entries.Files) == 0 {
 		return loi, nil
 	}
@@ -951,18 +951,9 @@ func (z *erasureZones) listObjects(ctx context.Context, bucket, prefix, marker,
 // N times until this boolean is 'false'.
 func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileInfo, zoneEntriesValid [][]bool) (FileInfo, int, int, bool) {
 	for i, entryChs := range zoneEntryChs {
-		i := i
-		var wg sync.WaitGroup
 		for j := range entryChs {
-			j := j
-			wg.Add(1)
-			// Pop() entries in parallel for large drive setups.
-			go func() {
-				defer wg.Done()
-				zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
-			}()
+			zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
 		}
-		wg.Wait()
 	}

 	var isTruncated = false
@@ -1040,18 +1031,9 @@ func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileI
 // N times until this boolean is 'false'.
 func lexicallySortedEntryZoneVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneEntries [][]FileInfoVersions, zoneEntriesValid [][]bool) (FileInfoVersions, int, int, bool) {
 	for i, entryChs := range zoneEntryChs {
-		i := i
-		var wg sync.WaitGroup
 		for j := range entryChs {
-			j := j
-			wg.Add(1)
-			// Pop() entries in parallel for large drive setups.
-			go func() {
-				defer wg.Done()
-				zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
-			}()
+			zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
 		}
-		wg.Wait()
 	}

 	var isTruncated = false
@@ -1119,7 +1101,7 @@ func lexicallySortedEntryZoneVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneE
 }

 // mergeZonesEntriesVersionsCh - merges FileInfoVersions channel to entries upto maxKeys.
-func mergeZonesEntriesVersionsCh(zonesEntryChs [][]FileInfoVersionsCh, maxKeys int, drivesPerSets []int) (entries FilesInfoVersions) {
+func mergeZonesEntriesVersionsCh(zonesEntryChs [][]FileInfoVersionsCh, maxKeys int, zonesListTolerancePerSet []int) (entries FilesInfoVersions) {
 	var i = 0
 	var zonesEntriesInfos [][]FileInfoVersions
 	var zonesEntriesValid [][]bool
@@ -1134,8 +1116,8 @@ func mergeZonesEntriesVersionsCh(zonesEntryChs [][]FileInfoVersionsCh, maxKeys i
 			break
 		}

-		if quorumCount < drivesPerSets[zoneIndex]/2 {
-			// Skip entries which are not found on upto ndisks/2.
+		if quorumCount < zonesListTolerancePerSet[zoneIndex] {
+			// Skip entries which are not found upto the expected tolerance
 			continue
 		}
@@ -1150,7 +1132,7 @@ func mergeZonesEntriesVersionsCh(zonesEntryChs [][]FileInfoVersionsCh, maxKeys i
 }

 // mergeZonesEntriesCh - merges FileInfo channel to entries upto maxKeys.
-func mergeZonesEntriesCh(zonesEntryChs [][]FileInfoCh, maxKeys int, drivesPerSets []int) (entries FilesInfo) {
+func mergeZonesEntriesCh(zonesEntryChs [][]FileInfoCh, maxKeys int, zonesListTolerancePerSet []int) (entries FilesInfo) {
 	var i = 0
 	var zonesEntriesInfos [][]FileInfo
 	var zonesEntriesValid [][]bool
@@ -1165,8 +1147,8 @@ func mergeZonesEntriesCh(zonesEntryChs [][]FileInfoCh, maxKeys int, drivesPerSet
 			break
 		}

-		if quorumCount < drivesPerSets[zoneIndex]/2 {
-			// Skip entries which are not found on upto ndisks/2.
+		if quorumCount < zonesListTolerancePerSet[zoneIndex] {
+			// Skip entries which are not found upto configured tolerance.
 			continue
 		}
@@ -1182,18 +1164,9 @@ func mergeZonesEntriesCh(zonesEntryChs [][]FileInfoCh, maxKeys int, drivesPerSet
 func isTruncatedZones(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileInfo, zoneEntriesValid [][]bool) bool {
 	for i, entryChs := range zoneEntryChs {
-		i := i
-		var wg sync.WaitGroup
 		for j := range entryChs {
-			j := j
-			wg.Add(1)
-			// Pop() entries in parallel for large drive setups.
-			go func() {
-				defer wg.Done()
-				zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
-			}()
+			zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
 		}
-		wg.Wait()
 	}

 	var isTruncated = false
@@ -1214,24 +1187,16 @@ func isTruncatedZones(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileInfo, zon
 				zoneEntryChs[i][j].Push(zoneEntries[i][j])
 			}
 		}
 	}
 	return isTruncated
 }

 func isTruncatedZonesVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneEntries [][]FileInfoVersions, zoneEntriesValid [][]bool) bool {
 	for i, entryChs := range zoneEntryChs {
-		i := i
-		var wg sync.WaitGroup
 		for j := range entryChs {
-			j := j
-			wg.Add(1)
-			// Pop() entries in parallel for large drive setups.
-			go func() {
-				defer wg.Done()
-				zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
-			}()
+			zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
 		}
-		wg.Wait()
 	}

 	var isTruncated = false
@@ -1307,19 +1272,19 @@ func (z *erasureZones) listObjectVersions(ctx context.Context, bucket, prefix, m
 	var zonesEntryChs [][]FileInfoVersionsCh
 	var zonesEndWalkCh []chan struct{}
-	var drivesPerSets []int
+	var zonesListTolerancePerSet []int

 	for _, zone := range z.zones {
 		entryChs, endWalkCh := zone.poolVersions.Release(listParams{bucket, recursive, marker, prefix})
 		if entryChs == nil {
 			endWalkCh = make(chan struct{})
-			entryChs = zone.startMergeWalksVersionsN(ctx, bucket, prefix, marker, recursive, endWalkCh, zone.drivesPerSet)
+			entryChs = zone.startMergeWalksVersionsN(ctx, bucket, prefix, marker, recursive, endWalkCh, zone.listTolerancePerSet)
 		}
 		zonesEntryChs = append(zonesEntryChs, entryChs)
 		zonesEndWalkCh = append(zonesEndWalkCh, endWalkCh)
-		drivesPerSets = append(drivesPerSets, zone.drivesPerSet)
+		zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.listTolerancePerSet)
 	}

-	entries := mergeZonesEntriesVersionsCh(zonesEntryChs, maxKeys, drivesPerSets)
+	entries := mergeZonesEntriesVersionsCh(zonesEntryChs, maxKeys, zonesListTolerancePerSet)
 	if len(entries.FilesVersions) == 0 {
 		return loi, nil
 	}
@@ -1830,7 +1795,7 @@ func (z *erasureZones) Walk(ctx context.Context, bucket, prefix string, results
 	var zoneDrivesPerSet []int
 	for _, zone := range z.zones {
-		zoneDrivesPerSet = append(zoneDrivesPerSet, zone.drivesPerSet)
+		zoneDrivesPerSet = append(zoneDrivesPerSet, zone.setDriveCount)
 	}

 	var zonesEntriesInfos [][]FileInfoVersions
@@ -1871,7 +1836,7 @@ func (z *erasureZones) Walk(ctx context.Context, bucket, prefix string, results
 	var zoneDrivesPerSet []int
 	for _, zone := range z.zones {
-		zoneDrivesPerSet = append(zoneDrivesPerSet, zone.drivesPerSet)
+		zoneDrivesPerSet = append(zoneDrivesPerSet, zone.setDriveCount)
 	}

 	var zonesEntriesInfos [][]FileInfo
@@ -1918,7 +1883,7 @@ func (z *erasureZones) HealObjects(ctx context.Context, bucket, prefix string, o
 	var zoneDrivesPerSet []int
 	for _, zone := range z.zones {
-		zoneDrivesPerSet = append(zoneDrivesPerSet, zone.drivesPerSet)
+		zoneDrivesPerSet = append(zoneDrivesPerSet, zone.setDriveCount)
 	}

 	var zonesEntriesInfos [][]FileInfoVersions
@@ -2082,7 +2047,7 @@ func (z *erasureZones) Health(ctx context.Context, opts HealthOptions) HealthRes
 	for zoneIdx := range erasureSetUpCount {
 		parityDrives := globalStorageClass.GetParityForSC(storageclass.STANDARD)
-		diskCount := z.zones[zoneIdx].drivesPerSet
+		diskCount := z.zones[zoneIdx].setDriveCount
 		if parityDrives == 0 {
 			parityDrives = getDefaultParityBlocks(diskCount)
 		}
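
For intuition on the quorumCount that the tolerance checks above compare against, the hedged sketch below picks the lexically smallest head across per-drive listing streams and counts how many streams agree on it; smallestWithQuorum is an invented helper, not the merge code itself:

package main

import (
	"fmt"
	"sort"
)

// smallestWithQuorum picks the lexically smallest name across the heads
// of several per-drive listing streams and reports on how many of those
// streams it appears, which plays the role of quorumCount above.
func smallestWithQuorum(heads []string) (name string, quorumCount int) {
	sorted := append([]string(nil), heads...)
	sort.Strings(sorted)
	name = sorted[0]
	for _, h := range heads {
		if h == name {
			quorumCount++
		}
	}
	return name, quorumCount
}

func main() {
	heads := []string{"a.txt", "a.txt", "b.txt", "a.txt"}
	name, quorumCount := smallestWithQuorum(heads)

	listTolerancePerSet := 2 // setDriveCount/2 for a 4-drive set
	if quorumCount >= listTolerancePerSet {
		fmt.Println("list", name, "seen on", quorumCount, "drives")
	} else {
		fmt.Println("skip", name)
	}
}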


@@ -439,7 +439,7 @@ func checkFormatErasureValue(formatErasure *formatErasureV3) error {
 }

 // Check all format values.
-func checkFormatErasureValues(formats []*formatErasureV3, drivesPerSet int) error {
+func checkFormatErasureValues(formats []*formatErasureV3, setDriveCount int) error {
 	for i, formatErasure := range formats {
 		if formatErasure == nil {
 			continue
@@ -454,8 +454,8 @@ func checkFormatErasureValues(formats []*formatErasureV3, drivesPerSet int) erro
 		// Only if custom erasure drive count is set,
 		// we should fail here other proceed to honor what
 		// is present on the disk.
-		if globalCustomErasureDriveCount && len(formatErasure.Erasure.Sets[0]) != drivesPerSet {
-			return fmt.Errorf("%s disk is already formatted with %d drives per erasure set. This cannot be changed to %d, please revert your MINIO_ERASURE_SET_DRIVE_COUNT setting", humanize.Ordinal(i+1), len(formatErasure.Erasure.Sets[0]), drivesPerSet)
+		if globalCustomErasureDriveCount && len(formatErasure.Erasure.Sets[0]) != setDriveCount {
+			return fmt.Errorf("%s disk is already formatted with %d drives per erasure set. This cannot be changed to %d, please revert your MINIO_ERASURE_SET_DRIVE_COUNT setting", humanize.Ordinal(i+1), len(formatErasure.Erasure.Sets[0]), setDriveCount)
 		}
 	}
 	return nil
@@ -788,22 +788,22 @@ func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats
 }

 // initFormatErasure - save Erasure format configuration on all disks.
-func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, drivesPerSet int, deploymentID string, sErrs []error) (*formatErasureV3, error) {
-	format := newFormatErasureV3(setCount, drivesPerSet)
+func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, setDriveCount int, deploymentID string, sErrs []error) (*formatErasureV3, error) {
+	format := newFormatErasureV3(setCount, setDriveCount)
 	formats := make([]*formatErasureV3, len(storageDisks))
-	wantAtMost := ecDrivesNoConfig(drivesPerSet)
+	wantAtMost := ecDrivesNoConfig(setDriveCount)

 	for i := 0; i < setCount; i++ {
-		hostCount := make(map[string]int, drivesPerSet)
-		for j := 0; j < drivesPerSet; j++ {
-			disk := storageDisks[i*drivesPerSet+j]
+		hostCount := make(map[string]int, setDriveCount)
+		for j := 0; j < setDriveCount; j++ {
+			disk := storageDisks[i*setDriveCount+j]
 			newFormat := format.Clone()
 			newFormat.Erasure.This = format.Erasure.Sets[i][j]
 			if deploymentID != "" {
 				newFormat.ID = deploymentID
 			}
 			hostCount[disk.Hostname()]++
-			formats[i*drivesPerSet+j] = newFormat
+			formats[i*setDriveCount+j] = newFormat
 		}
 		if len(hostCount) > 0 {
 			var once sync.Once
@@ -817,8 +817,8 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
 					return
 				}
 				logger.Info(" * Set %v:", i+1)
-				for j := 0; j < drivesPerSet; j++ {
-					disk := storageDisks[i*drivesPerSet+j]
+				for j := 0; j < setDriveCount; j++ {
+					disk := storageDisks[i*setDriveCount+j]
 					logger.Info(" - Drive: %s", disk.String())
 				}
 			})
@@ -842,15 +842,15 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
 // ecDrivesNoConfig returns the erasure coded drives in a set if no config has been set.
 // It will attempt to read it from env variable and fall back to drives/2.
-func ecDrivesNoConfig(drivesPerSet int) int {
+func ecDrivesNoConfig(setDriveCount int) int {
 	ecDrives := globalStorageClass.GetParityForSC(storageclass.STANDARD)
 	if ecDrives == 0 {
-		cfg, err := storageclass.LookupConfig(nil, drivesPerSet)
+		cfg, err := storageclass.LookupConfig(nil, setDriveCount)
 		if err == nil {
 			ecDrives = cfg.Standard.Parity
 		}
 		if ecDrives == 0 {
-			ecDrives = drivesPerSet / 2
+			ecDrives = setDriveCount / 2
 		}
 	}
 	return ecDrives
@@ -920,14 +920,14 @@ func markUUIDsOffline(refFormat *formatErasureV3, formats []*formatErasureV3) {
 }

 // Initialize a new set of set formats which will be written to all disks.
-func newHealFormatSets(refFormat *formatErasureV3, setCount, drivesPerSet int, formats []*formatErasureV3, errs []error) [][]*formatErasureV3 {
+func newHealFormatSets(refFormat *formatErasureV3, setCount, setDriveCount int, formats []*formatErasureV3, errs []error) [][]*formatErasureV3 {
 	newFormats := make([][]*formatErasureV3, setCount)
 	for i := range refFormat.Erasure.Sets {
-		newFormats[i] = make([]*formatErasureV3, drivesPerSet)
+		newFormats[i] = make([]*formatErasureV3, setDriveCount)
 	}
 	for i := range refFormat.Erasure.Sets {
 		for j := range refFormat.Erasure.Sets[i] {
-			if errs[i*drivesPerSet+j] == errUnformattedDisk || errs[i*drivesPerSet+j] == nil {
+			if errs[i*setDriveCount+j] == errUnformattedDisk || errs[i*setDriveCount+j] == nil {
 				newFormats[i][j] = &formatErasureV3{}
 				newFormats[i][j].Version = refFormat.Version
 				newFormats[i][j].ID = refFormat.ID
@@ -935,13 +935,13 @@ func newHealFormatSets(refFormat *formatErasureV3, setCount, drivesPerSet int, f
 				newFormats[i][j].Erasure.Version = refFormat.Erasure.Version
 				newFormats[i][j].Erasure.DistributionAlgo = refFormat.Erasure.DistributionAlgo
 			}
-			if errs[i*drivesPerSet+j] == errUnformattedDisk {
+			if errs[i*setDriveCount+j] == errUnformattedDisk {
 				newFormats[i][j].Erasure.This = ""
 				newFormats[i][j].Erasure.Sets = nil
 				continue
 			}
-			if errs[i*drivesPerSet+j] == nil {
-				newFormats[i][j].Erasure.This = formats[i*drivesPerSet+j].Erasure.This
+			if errs[i*setDriveCount+j] == nil {
+				newFormats[i][j].Erasure.This = formats[i*setDriveCount+j].Erasure.This
 				newFormats[i][j].Erasure.Sets = nil
 			}
 		}
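
The i*setDriveCount+j arithmetic repeated in these loops maps a (set, drive) pair onto the flat slices of endpoints, disks and formats; a tiny illustrative sketch (flatIndex is an invented helper):

package main

import "fmt"

// flatIndex maps a (setIndex, driveIndex) pair onto the flat slices
// (endpoints, storageDisks, formats) used above: all drives of set 0
// come first, then all drives of set 1, and so on.
func flatIndex(setIndex, driveIndex, setDriveCount int) int {
	return setIndex*setDriveCount + driveIndex
}

func main() {
	setDriveCount := 16
	// Drive 3 of set 1 sits at position 19 of the flat slice.
	fmt.Println(flatIndex(1, 3, setDriveCount)) // 19
}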


@@ -324,16 +324,16 @@ func TestCheckFormatErasureValue(t *testing.T) {
 // Tests getFormatErasureInQuorum()
 func TestGetFormatErasureInQuorumCheck(t *testing.T) {
 	setCount := 2
-	drivesPerSet := 16
-	format := newFormatErasureV3(setCount, drivesPerSet)
+	setDriveCount := 16
+	format := newFormatErasureV3(setCount, setDriveCount)
 	formats := make([]*formatErasureV3, 32)
 	for i := 0; i < setCount; i++ {
-		for j := 0; j < drivesPerSet; j++ {
+		for j := 0; j < setDriveCount; j++ {
 			newFormat := format.Clone()
 			newFormat.Erasure.This = format.Erasure.Sets[i][j]
-			formats[i*drivesPerSet+j] = newFormat
+			formats[i*setDriveCount+j] = newFormat
 		}
 	}
@@ -390,16 +390,16 @@ func TestGetFormatErasureInQuorumCheck(t *testing.T) {
 // Tests formatErasureGetDeploymentID()
 func TestGetErasureID(t *testing.T) {
 	setCount := 2
-	drivesPerSet := 8
-	format := newFormatErasureV3(setCount, drivesPerSet)
+	setDriveCount := 8
+	format := newFormatErasureV3(setCount, setDriveCount)
 	formats := make([]*formatErasureV3, 16)
 	for i := 0; i < setCount; i++ {
-		for j := 0; j < drivesPerSet; j++ {
+		for j := 0; j < setDriveCount; j++ {
 			newFormat := format.Clone()
 			newFormat.Erasure.This = format.Erasure.Sets[i][j]
-			formats[i*drivesPerSet+j] = newFormat
+			formats[i*setDriveCount+j] = newFormat
 		}
 	}
@@ -445,17 +445,17 @@ func TestGetErasureID(t *testing.T) {
 // Initialize new format sets.
 func TestNewFormatSets(t *testing.T) {
 	setCount := 2
-	drivesPerSet := 16
-	format := newFormatErasureV3(setCount, drivesPerSet)
+	setDriveCount := 16
+	format := newFormatErasureV3(setCount, setDriveCount)
 	formats := make([]*formatErasureV3, 32)
 	errs := make([]error, 32)
 	for i := 0; i < setCount; i++ {
-		for j := 0; j < drivesPerSet; j++ {
+		for j := 0; j < setDriveCount; j++ {
 			newFormat := format.Clone()
 			newFormat.Erasure.This = format.Erasure.Sets[i][j]
-			formats[i*drivesPerSet+j] = newFormat
+			formats[i*setDriveCount+j] = newFormat
 		}
 	}
@@ -467,7 +467,7 @@ func TestNewFormatSets(t *testing.T) {
 	// 16th disk is unformatted.
 	errs[15] = errUnformattedDisk
-	newFormats := newHealFormatSets(quorumFormat, setCount, drivesPerSet, formats, errs)
+	newFormats := newHealFormatSets(quorumFormat, setCount, setDriveCount, formats, errs)
 	if newFormats == nil {
 		t.Fatal("Unexpected failure")
 	}


@@ -89,7 +89,7 @@ func getLocalBackgroundHealStatus() (madmin.BgHealState, bool) {
 }

 // healErasureSet lists and heals all objects in a specific erasure set
-func healErasureSet(ctx context.Context, setIndex int, xlObj *erasureObjects, drivesPerSet int) error {
+func healErasureSet(ctx context.Context, setIndex int, xlObj *erasureObjects, setDriveCount int) error {
 	buckets, err := xlObj.ListBuckets(ctx)
 	if err != nil {
 		return err
@@ -151,7 +151,7 @@ func healErasureSet(ctx context.Context, setIndex int, xlObj *erasureObjects, dr
 			break
 		}

-		if quorumCount == drivesPerSet {
+		if quorumCount == setDriveCount {
 			// Skip good entries.
 			continue
 		}


@@ -230,7 +230,7 @@ func IsServerResolvable(endpoint Endpoint) error {
 // connect to list of endpoints and load all Erasure disk formats, validate the formats are correct
 // and are in quorum, if no formats are found attempt to initialize all of them for the first
 // time. additionally make sure to close all the disks used in this attempt.
-func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, zoneCount, setCount, drivesPerSet int, deploymentID string) (storageDisks []StorageAPI, format *formatErasureV3, err error) {
+func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, zoneCount, setCount, setDriveCount int, deploymentID string) (storageDisks []StorageAPI, format *formatErasureV3, err error) {
 	// Initialize all storage disks
 	storageDisks, errs := initStorageDisksWithErrors(endpoints)
@@ -268,17 +268,17 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints,
 	// most part unless one of the formats is not consistent
 	// with expected Erasure format. For example if a user is
 	// trying to pool FS backend into an Erasure set.
-	if err = checkFormatErasureValues(formatConfigs, drivesPerSet); err != nil {
+	if err = checkFormatErasureValues(formatConfigs, setDriveCount); err != nil {
 		return nil, nil, err
 	}

 	// All disks report unformatted we should initialized everyone.
 	if shouldInitErasureDisks(sErrs) && firstDisk {
 		logger.Info("Formatting %s zone, %v set(s), %v drives per set.",
-			humanize.Ordinal(zoneCount), setCount, drivesPerSet)
+			humanize.Ordinal(zoneCount), setCount, setDriveCount)

 		// Initialize erasure code format on disks
-		format, err = initFormatErasure(GlobalContext, storageDisks, setCount, drivesPerSet, deploymentID, sErrs)
+		format, err = initFormatErasure(GlobalContext, storageDisks, setCount, setDriveCount, deploymentID, sErrs)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -347,8 +347,8 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints,
 }

 // Format disks before initialization of object layer.
-func waitForFormatErasure(firstDisk bool, endpoints Endpoints, zoneCount, setCount, drivesPerSet int, deploymentID string) ([]StorageAPI, *formatErasureV3, error) {
-	if len(endpoints) == 0 || setCount == 0 || drivesPerSet == 0 {
+func waitForFormatErasure(firstDisk bool, endpoints Endpoints, zoneCount, setCount, setDriveCount int, deploymentID string) ([]StorageAPI, *formatErasureV3, error) {
+	if len(endpoints) == 0 || setCount == 0 || setDriveCount == 0 {
 		return nil, nil, errInvalidArgument
 	}
@@ -374,7 +374,7 @@ func waitForFormatErasure(firstDisk bool, endpoints Endpoints, zoneCount, setCou
 	for {
 		select {
 		case <-ticker.C:
-			storageDisks, format, err := connectLoadInitFormats(tries, firstDisk, endpoints, zoneCount, setCount, drivesPerSet, deploymentID)
+			storageDisks, format, err := connectLoadInitFormats(tries, firstDisk, endpoints, zoneCount, setCount, setDriveCount, deploymentID)
 			if err != nil {
 				tries++
 				switch err {


@@ -842,11 +842,7 @@ func (s *xlStorage) WalkSplunk(volume, dirPath, marker string, endWalkCh <-chan
 		}

 		walkResultCh := startTreeWalk(GlobalContext, volume, dirPath, marker, true, listDir, s.isLeafSplunk, s.isLeafDir, endWalkCh)
-		for {
-			walkResult, ok := <-walkResultCh
-			if !ok {
-				return
-			}
+		for walkResult := range walkResultCh {
 			var fi FileInfo
 			if HasSuffix(walkResult.entry, SlashSeparator) {
 				fi = FileInfo{
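
The rewrite above replaces the manual receive-and-check loop with Go's range-over-channel form, which terminates once the sender closes the channel; a minimal standalone illustration:

package main

import "fmt"

func main() {
	results := make(chan string, 3)
	results <- "one"
	results <- "two"
	results <- "three"
	close(results)

	// Same effect as receiving with the two-value form and stopping
	// when ok is false, but the loop ends on its own once the sender
	// closes the channel.
	for r := range results {
		fmt.Println(r)
	}
}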