bug-fix: fix for tests failure when cache is disabled (#2439)

Author: karthic rao, 2016-08-15 13:55:41 +05:30 (committed by Harshavardhana)
Parent: 5526ac13d2
Commit: a3592228f5
2 changed files with 48 additions and 32 deletions


@@ -43,8 +43,8 @@ func isSuccessDecodeBlocks(enBlocks [][]byte, dataBlocks int) bool {
 		} // else { // update parity block count.
 		successParityBlocksCount++
 	}
-	// Returns true if we have atleast dataBlocks + 1 parity.
-	return successDataBlocksCount == dataBlocks || successDataBlocksCount+successParityBlocksCount >= dataBlocks+1
+	// Returns true if we have atleast dataBlocks parity.
+	return successDataBlocksCount == dataBlocks || successDataBlocksCount+successParityBlocksCount >= dataBlocks
 }
 
 // isSuccessDataBlocks - do we have all the data blocks?
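The core of the fix: the decode-success rule is relaxed from dataBlocks+1 to dataBlocks blocks. Reed-Solomon erasure coding can reconstruct the data from any dataBlocks of the dataBlocks+parityBlocks shards, so requiring one extra block made reads fail that were actually recoverable. A minimal standalone sketch of the relaxed check (hypothetical names, not the MinIO code itself):

// canDecode mirrors the relaxed isSuccessDecodeBlocks rule, assuming the
// usual XL layout of dataBlocks data shards followed by parity shards,
// with a nil entry marking a shard that could not be read.
func canDecode(enBlocks [][]byte, dataBlocks int) bool {
	successData, successParity := 0, 0
	for index, block := range enBlocks {
		if block == nil {
			continue // this shard was not read successfully
		}
		if index < dataBlocks {
			successData++
		} else {
			successParity++
		}
	}
	// Old rule: successData+successParity >= dataBlocks+1.
	// New rule: any dataBlocks shards suffice for reconstruction.
	return successData == dataBlocks || successData+successParity >= dataBlocks
}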
@@ -86,7 +86,7 @@ func getReadDisks(orderedDisks []StorageAPI, index int, dataBlocks int) (readDis
 	if dataDisks == dataBlocks {
 		return nil, 0, errUnexpected
 	}
-	if dataDisks+parityDisks >= dataBlocks+1 {
+	if dataDisks+parityDisks >= dataBlocks {
 		return nil, 0, errUnexpected
 	}
 
@@ -103,8 +103,7 @@ func getReadDisks(orderedDisks []StorageAPI, index int, dataBlocks int) (readDis
 		readDisks[i] = orderedDisks[i]
 		if dataDisks == dataBlocks {
 			return readDisks, i + 1, nil
-		}
-		if dataDisks+parityDisks == dataBlocks+1 {
+		} else if dataDisks+parityDisks == dataBlocks {
 			return readDisks, i + 1, nil
 		}
 	}
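getReadDisks gets the same relaxation: both the sanity check and the selection loop now stop at dataBlocks instead of dataBlocks+1, so each round of parallel reads asks for exactly as many disks as decoding needs. A hedged, simplified sketch of that flow (boolean availability instead of StorageAPI, sanity checks omitted, all names hypothetical):

import "errors"

// errReadQuorum stands in for the package's errXLReadQuorum in this sketch.
var errReadQuorum = errors.New("cannot meet read quorum")

// pickReadDisks is a simplified stand-in for getReadDisks: online[i]
// reports whether disk i is usable, disks before index were already read
// by an earlier call, and the first dataBlocks indices hold data shards.
func pickReadDisks(online []bool, index, dataBlocks int) (picked []int, nextIndex int, err error) {
	dataDisks, parityDisks := 0, 0
	// Count the chunks that earlier parallel reads already fetched.
	for i := 0; i < index; i++ {
		if !online[i] {
			continue
		}
		if i < dataBlocks {
			dataDisks++
		} else {
			parityDisks++
		}
	}
	// Pick additional disks until decoding becomes possible.
	for i := index; i < len(online); i++ {
		if !online[i] {
			continue
		}
		if i < dataBlocks {
			dataDisks++
		} else {
			parityDisks++
		}
		picked = append(picked, i)
		if dataDisks == dataBlocks {
			// All data chunks are covered; no reconstruction needed.
			return picked, i + 1, nil
		} else if dataDisks+parityDisks == dataBlocks {
			// Exactly dataBlocks chunks: the minimum Reed-Solomon needs.
			return picked, i + 1, nil
		}
	}
	return nil, 0, errReadQuorum
}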


@@ -37,57 +37,64 @@ func testGetReadDisks(t *testing.T, xl xlObjects) {
 		nextIndex int // return value from getReadDisks
 		err       error // error return value from getReadDisks
 	}{
+		// Test case - 1.
+		// When all disks are available, should return data disks.
 		{
 			0,
-			// When all disks are available, should return data disks.
 			[]StorageAPI{d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]},
 			[]StorageAPI{d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], nil, nil, nil, nil, nil, nil, nil, nil},
 			8,
 			nil,
 		},
+		// Test case - 2.
+		// If a parity disk is down, should return all data disks.
 		{
 			0,
-			// If a parity disk is down, should return all data disks.
 			[]StorageAPI{d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], nil, d[10], d[11], d[12], d[13], d[14], d[15]},
 			[]StorageAPI{d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], nil, nil, nil, nil, nil, nil, nil, nil},
 			8,
 			nil,
 		},
+		// Test case - 3.
+		// If a data disk is down, should return 7 data and 1 parity.
 		{
 			0,
-			// If a data disk is down, should return 7 data and 2 parity.
 			[]StorageAPI{nil, d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]},
-			[]StorageAPI{nil, d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], nil, nil, nil, nil, nil, nil},
+			[]StorageAPI{nil, d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], nil, nil, nil, nil, nil, nil, nil},
+			9,
+			nil,
+		},
+		// Test case - 4.
+		// If 7 data disks are down, should return 1 data and 7 parity.
+		{
+			0,
+			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]},
+			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], nil},
+			15,
+			nil,
+		},
+		// Test case - 5.
+		// When 2 disks fail during parallelRead, next call to getReadDisks should return 3 disks
+		{
+			8,
+			[]StorageAPI{nil, nil, d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]},
+			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, nil, d[8], d[9], nil, nil, nil, nil, nil, nil},
 			10,
 			nil,
 		},
+		// Test case - 6.
+		// If 2 disks again fail from the 3 disks returned previously, return next 2 disks
 		{
-			0,
-			// If 7 data disks are down, should return 1 data and 8 parity.
-			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]},
-			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]},
-			16,
-			nil,
-		},
-		{
-			8,
-			// When all disks are available, should return data disks.
-			[]StorageAPI{nil, nil, d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]},
-			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, nil, d[8], d[9], d[10], nil, nil, nil, nil, nil},
-			11,
-			nil,
-		},
-		{
 			11,
-			// When all disks are available, should return data disks.
 			[]StorageAPI{nil, nil, d[2], d[3], d[4], d[5], d[6], d[7], nil, nil, d[10], d[11], d[12], d[13], d[14], d[15]},
-			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, d[11], d[12], nil, nil, nil},
-			13,
+			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, d[11], nil, nil, nil, nil},
+			12,
 			nil,
 		},
+		// Test case - 7.
+		// No more disks are available for read, return error
 		{
 			13,
-			// When all disks are available, should return data disks.
 			[]StorageAPI{nil, nil, d[2], d[3], d[4], d[5], d[6], d[7], nil, nil, d[10], nil, nil, nil, nil, nil},
 			nil,
 			0,
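The new expectations can be sanity-checked by hand. Tracing test case 4 with the hypothetical pickReadDisks sketch above (a snippet, not a complete program):

// Only d[7] (the last data disk) and the 8 parity disks are up.
online := make([]bool, 16)
for i := 7; i < 16; i++ {
	online[i] = true
}
picked, next, _ := pickReadDisks(online, 0, 8)
fmt.Println(picked, next) // [7 8 9 10 11 12 13 14] 15
// 1 data + 7 parity = 8 = dataBlocks, so selection stops at index 14 and
// nextIndex is 15, matching the expected values in test case 4.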
@@ -106,7 +113,7 @@ func testGetReadDisks(t *testing.T, xl xlObjects) {
 			continue
 		}
 		if reflect.DeepEqual(test.retDisks, disks) == false {
-			t.Errorf("test-case %d : incorrect disks returned. %v", i+1, disks)
+			t.Errorf("test-case %d : incorrect disks returned. expected %+v, got %+v", i+1, test.retDisks, disks)
 			continue
 		}
 	}
@@ -177,12 +184,21 @@ func TestIsSuccessBlocks(t *testing.T) {
 			true,
 		},
 		{
-			// Not enough disks for reedsolomon.Reconstruct()
+			// When all data disks are not available, enough for reedsolomon.Reconstruct()
 			[][]byte{
 				nil, nil, nil, nil, nil, nil, nil, nil,
 				{'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'},
 			},
 			false,
+			true,
+		},
+		{
+			// Not enough disks for reedsolomon.Reconstruct()
+			[][]byte{
+				nil, nil, nil, nil, nil, nil, nil, nil,
+				nil, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'},
+			},
+			false,
+			false,
+		},
 			false,
 		},
 	}
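The split into two cases pins down the new boundary: losing all 8 data blocks still leaves 8 parity blocks, exactly dataBlocks, so reconstruction is possible (hence false for the data-blocks check but true for decode), while losing one parity block on top of that drops the total to 7 and decoding must fail. Checked against the hypothetical canDecode sketch above (snippet only):

blocks := make([][]byte, 16)
for i := 8; i < 16; i++ {
	blocks[i] = []byte{'a'} // all 8 parity blocks present, all data lost
}
fmt.Println(canDecode(blocks, 8)) // true: 0 data + 8 parity == dataBlocks

blocks[8] = nil // additionally lose one parity block
fmt.Println(canDecode(blocks, 8)) // false: 0 data + 7 parity < dataBlocks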
@@ -293,8 +309,9 @@ func TestErasureReadFileDiskFail(t *testing.T) {
 		t.Error("Contents of the erasure coded file differs")
 	}
 
-	// 1 more disk down. 7 disks down in total. Read should fail.
+	// 2 more disk down. 8 disks down in total. Read should fail.
 	disks[12] = ReadDiskDown{disks[12].(*posix)}
+	disks[13] = ReadDiskDown{disks[13].(*posix)}
 	buf.Reset()
 	_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
 	if err != errXLReadQuorum {
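This matches the relaxed quorum: the old dataBlocks+1 rule already tipped this read into failure with 7 disks down, while under the new dataBlocks rule one more disk (disks[13]) has to fail, 8 in total, before erasureReadFile drops below read quorum and returns errXLReadQuorum.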