performance: gjson parsing for readXLMeta, listParts, getObjectInfo. (#2631)

- Using gjson for constructing xlMetaV1{} in readXLMeta (a brief usage sketch follows below).
- Tests for constructing xlMetaV1{} using gjson.
- Changes made since benchmarks showed a 30-40% improvement in parsing speed.
- Follow up comments in issue https://github.com/minio/minio/issues/2208
  for more details.
- gjson parsing of parts from xl.json for listParts.
- gjson parsing of statInfo from xl.json for getObjectInfo.
- Vendoring the gjson dependency.
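
An illustrative sketch, not part of the commit: the gjson access pattern this change adopts, reading individual `xl.json` fields straight from the raw bytes instead of unmarshalling the whole document. The field paths ("stat.size", "stat.modTime", "meta") mirror the parsing helpers added in this diff; the sample buffer below is an assumed, simplified `xl.json` layout.

```go
package main

import (
	"fmt"
	"time"

	"github.com/tidwall/gjson"
)

func main() {
	// Simplified, assumed xl.json content used only for illustration.
	xlMetaBuf := []byte(`{"version":"1.0.0","format":"xl",
		"stat":{"size":20,"modTime":"2016-09-09T05:38:18Z"},
		"meta":{"md5Sum":"d41d8cd98f00b204e9800998ecf8427e","content-type":"text/plain"}}`)

	// Pick out only the fields of interest; no full json.Unmarshal needed.
	size := gjson.GetBytes(xlMetaBuf, "stat.size").Int()
	modTime, err := time.Parse(time.RFC3339, gjson.GetBytes(xlMetaBuf, "stat.modTime").String())
	if err != nil {
		panic(err)
	}
	meta := make(map[string]string)
	for key, val := range gjson.GetBytes(xlMetaBuf, "meta").Map() {
		meta[key] = val.String()
	}
	fmt.Println(size, modTime, meta["content-type"])
}
```
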
This commit is contained in:
Karthic Rao 2016-09-09 11:08:18 +05:30 committed by Harshavardhana
parent 66459a4ce0
commit 8bd78fbdfb
15 changed files with 2249 additions and 38 deletions


@@ -57,19 +57,19 @@ func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
{"Test", "", ObjectInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
{"---", "", ObjectInfo{}, BucketNameInvalid{Bucket: "---"}, false},
{"ad", "", ObjectInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
// Test cases with valid but non-existing bucket names (Test number 5-7).
// Test cases with valid but non-existing bucket names (Test number 5-6).
{"abcdefgh", "abc", ObjectInfo{}, BucketNotFound{Bucket: "abcdefgh"}, false},
{"ijklmnop", "efg", ObjectInfo{}, BucketNotFound{Bucket: "ijklmnop"}, false},
// Test cases with valid but non-existing bucket names and invalid object name (Test number 8-9).
// Test cases with valid but non-existing bucket names and invalid object name (Test number 7-8).
{"test-getobjectinfo", "", ObjectInfo{}, ObjectNameInvalid{Bucket: "test-getobjectinfo", Object: ""}, false},
{"test-getobjectinfo", "", ObjectInfo{}, ObjectNameInvalid{Bucket: "test-getobjectinfo", Object: ""}, false},
// Test cases with non-existing object name with existing bucket (Test number 10-12).
// Test cases with non-existing object name with existing bucket (Test number 9-11).
{"test-getobjectinfo", "Africa", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Africa"}, false},
{"test-getobjectinfo", "Antartica", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Antartica"}, false},
{"test-getobjectinfo", "Asia/myfile", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Asia/myfile"}, false},
// Test case with existing bucket but object name set to a directory (Test number 13).
// Test case with existing bucket but object name set to a directory (Test number 12).
{"test-getobjectinfo", "Asia", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Asia"}, false},
// Valid case with existing object (Test number 14).
// Valid case with existing object (Test number 13).
{"test-getobjectinfo", "Asia/asiapics.jpg", resultCases[0], nil, true},
}
for i, testCase := range testCases {


@@ -136,9 +136,9 @@ func (m xlMetaV1) IsValid() bool {
return m.Version == "1.0.0" && m.Format == "xl"
}
// ObjectPartIndex - returns the index of matching object part number.
func (m xlMetaV1) ObjectPartIndex(partNumber int) int {
for i, part := range m.Parts {
// objectPartIndex - returns the index of matching object part number.
func objectPartIndex(parts []objectPartInfo, partNumber int) int {
for i, part := range parts {
if partNumber == part.Number {
return i
}
@@ -214,16 +214,15 @@ var objMetadataOpIgnoredErrs = []error{
errFileNotFound,
}
// readXLMetadata - returns the object metadata `xl.json` content from
// one of the disks picked at random.
func (xl xlObjects) readXLMetadata(bucket, object string) (xlMeta xlMetaV1, err error) {
// readXLMetaParts - returns the XL Metadata Parts from xl.json of one of the disks picked at random.
func (xl xlObjects) readXLMetaParts(bucket, object string) (xlMetaParts []objectPartInfo, err error) {
for _, disk := range xl.getLoadBalancedDisks() {
if disk == nil {
continue
}
xlMeta, err = readXLMeta(disk, bucket, object)
xlMetaParts, err = readXLMetaParts(disk, bucket, object)
if err == nil {
return xlMeta, nil
return xlMetaParts, nil
}
// For any reason disk or bucket is not available continue
// and read from other disks.
@@ -233,7 +232,29 @@ func (xl xlObjects) readXLMetadata(bucket, object string) (xlMeta xlMetaV1, err
break
}
// Return error here.
return xlMetaV1{}, err
return nil, err
}
// readXLMetaStat - return xlMetaV1.Stat and xlMetaV1.Meta from one of the disks picked at random.
func (xl xlObjects) readXLMetaStat(bucket, object string) (xlStat statInfo, xlMeta map[string]string, err error) {
for _, disk := range xl.getLoadBalancedDisks() {
if disk == nil {
continue
}
// parses only xlMetaV1.Meta and xlMetaV1.Stat
xlStat, xlMeta, err = readXLMetaStat(disk, bucket, object)
if err == nil {
return xlStat, xlMeta, nil
}
// For any reason disk or bucket is not available continue
// and read from other disks.
if isErrIgnored(err, objMetadataOpIgnoredErrs) {
continue
}
break
}
// Return error here.
return statInfo{}, nil, err
}
// deleteXLMetadata - deletes `xl.json` on a single disk.


@@ -55,13 +55,14 @@ func TestAddObjectPart(t *testing.T) {
xlMeta.AddObjectPart(testCase.partNum, "part."+partNumString, "etag."+partNumString, int64(testCase.partNum+MiB))
}
if index := xlMeta.ObjectPartIndex(testCase.partNum); index != testCase.expectedIndex {
if index := objectPartIndex(xlMeta.Parts, testCase.partNum); index != testCase.expectedIndex {
t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
}
}
// Test xlMetaV1.ObjectPartIndex()
// Test objectPartIndex().
// generates a sample xlMeta data and asserts the output of objectPartIndex() with the expected value.
func TestObjectPartIndex(t *testing.T) {
testCases := []struct {
partNum int
@@ -94,7 +95,7 @@ func TestObjectPartIndex(t *testing.T) {
// Test them.
for _, testCase := range testCases {
if index := xlMeta.ObjectPartIndex(testCase.partNum); index != testCase.expectedIndex {
if index := objectPartIndex(xlMeta.Parts, testCase.partNum); index != testCase.expectedIndex {
t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
}


@@ -512,7 +512,7 @@ func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberM
uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
xlMeta, err := xl.readXLMetadata(minioMetaBucket, uploadIDPath)
xlParts, err := xl.readXLMetaParts(minioMetaBucket, uploadIDPath)
if err != nil {
return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, uploadIDPath)
}
@@ -524,7 +524,7 @@ func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberM
result.MaxParts = maxParts
// For empty number of parts or maxParts as zero, return right here.
if len(xlMeta.Parts) == 0 || maxParts == 0 {
if len(xlParts) == 0 || maxParts == 0 {
return result, nil
}
@@ -534,10 +534,10 @@ func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberM
}
// Only parts with higher part numbers will be listed.
partIdx := xlMeta.ObjectPartIndex(partNumberMarker)
parts := xlMeta.Parts
partIdx := objectPartIndex(xlParts, partNumberMarker)
parts := xlParts
if partIdx != -1 {
parts = xlMeta.Parts[partIdx+1:]
parts = xlParts[partIdx+1:]
}
count := maxParts
for _, part := range parts {
@@ -675,7 +675,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
// Validate each part and then commit to disk.
for i, part := range parts {
partIdx := currentXLMeta.ObjectPartIndex(part.PartNumber)
partIdx := objectPartIndex(currentXLMeta.Parts, part.PartNumber)
// All parts should have same part number.
if partIdx == -1 {
return "", traceError(InvalidPart{})
@@ -779,7 +779,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
// Remove parts that weren't present in CompleteMultipartUpload request.
for _, curpart := range currentXLMeta.Parts {
if xlMeta.ObjectPartIndex(curpart.Number) == -1 {
if objectPartIndex(xlMeta.Parts, curpart.Number) == -1 {
// Delete the missing part files. e.g,
// Request 1: NewMultipart
// Request 2: PutObjectPart 1


@@ -375,22 +375,23 @@ func (xl xlObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
func (xl xlObjects) getObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
var xlMeta xlMetaV1
xlMeta, err = xl.readXLMetadata(bucket, object)
// returns xl meta map and stat info.
xlStat, xlMetaMap, err := xl.readXLMetaStat(bucket, object)
if err != nil {
// Return error.
return ObjectInfo{}, err
}
objInfo = ObjectInfo{
IsDir: false,
Bucket: bucket,
Name: object,
Size: xlMeta.Stat.Size,
ModTime: xlMeta.Stat.ModTime,
MD5Sum: xlMeta.Meta["md5Sum"],
ContentType: xlMeta.Meta["content-type"],
ContentEncoding: xlMeta.Meta["content-encoding"],
UserDefined: xlMeta.Meta,
Size: xlStat.Size,
ModTime: xlStat.ModTime,
MD5Sum: xlMetaMap["md5Sum"],
ContentType: xlMetaMap["content-type"],
ContentEncoding: xlMetaMap["content-encoding"],
UserDefined: xlMetaMap,
}
return objInfo, nil
}


@@ -17,10 +17,12 @@
package cmd
import (
"encoding/json"
"hash/crc32"
"path"
"sync"
"time"
"github.com/tidwall/gjson"
)
// Returns number of errors that occurred the most (incl. nil) and the
@@ -99,19 +101,160 @@ func hashOrder(key string, cardinality int) []int {
return nums
}
func parseXLStat(xlMetaBuf []byte) (statInfo, error) {
// obtain stat info.
stat := statInfo{}
// fetching modTime.
modTime, err := time.Parse(time.RFC3339, gjson.GetBytes(xlMetaBuf, "stat.modTime").String())
if err != nil {
return statInfo{}, err
}
stat.ModTime = modTime
// obtain Stat.Size .
stat.Size = gjson.GetBytes(xlMetaBuf, "stat.size").Int()
return stat, nil
}
func parseXLVersion(xlMetaBuf []byte) string {
return gjson.GetBytes(xlMetaBuf, "version").String()
}
func parseXLFormat(xlMetaBuf []byte) string {
return gjson.GetBytes(xlMetaBuf, "format").String()
}
func parseXLRelease(xlMetaBuf []byte) string {
return gjson.GetBytes(xlMetaBuf, "minio.release").String()
}
func parseXLErasureInfo(xlMetaBuf []byte) erasureInfo {
erasure := erasureInfo{}
erasureResult := gjson.GetBytes(xlMetaBuf, "erasure")
// parse the xlMetaV1.Erasure.Distribution.
disResult := erasureResult.Get("distribution").Array()
distribution := make([]int, len(disResult))
for i, dis := range disResult {
distribution[i] = int(dis.Int())
}
erasure.Distribution = distribution
erasure.Algorithm = erasureResult.Get("algorithm").String()
erasure.DataBlocks = int(erasureResult.Get("data").Int())
erasure.ParityBlocks = int(erasureResult.Get("parity").Int())
erasure.BlockSize = erasureResult.Get("blockSize").Int()
erasure.Index = int(erasureResult.Get("index").Int())
// Parse the xlMetaV1.Erasure.Checksum array.
checkSumsResult := erasureResult.Get("checksum").Array()
checkSums := make([]checkSumInfo, len(checkSumsResult))
for i, checkSumResult := range checkSumsResult {
checkSum := checkSumInfo{}
checkSum.Name = checkSumResult.Get("name").String()
checkSum.Algorithm = checkSumResult.Get("algorithm").String()
checkSum.Hash = checkSumResult.Get("hash").String()
checkSums[i] = checkSum
}
erasure.Checksum = checkSums
return erasure
}
func parseXLParts(xlMetaBuf []byte) []objectPartInfo {
// Parse the XL Parts.
partsResult := gjson.GetBytes(xlMetaBuf, "parts").Array()
partInfo := make([]objectPartInfo, len(partsResult))
for i, p := range partsResult {
info := objectPartInfo{}
info.Number = int(p.Get("number").Int())
info.Name = p.Get("name").String()
info.ETag = p.Get("etag").String()
info.Size = p.Get("size").Int()
partInfo[i] = info
}
return partInfo
}
func parseXLMetaMap(xlMetaBuf []byte) map[string]string {
// Get xlMetaV1.Meta map.
metaMapResult := gjson.GetBytes(xlMetaBuf, "meta").Map()
metaMap := make(map[string]string)
for key, valResult := range metaMapResult {
metaMap[key] = valResult.String()
}
return metaMap
}
// Constructs XLMetaV1 using `gjson` lib to retrieve each field.
func xlMetaV1UnmarshalJSON(xlMetaBuf []byte) (xlMetaV1, error) {
xlMeta := xlMetaV1{}
// obtain version.
xlMeta.Version = parseXLVersion(xlMetaBuf)
// obtain format.
xlMeta.Format = parseXLFormat(xlMetaBuf)
// Parse xlMetaV1.Stat .
stat, err := parseXLStat(xlMetaBuf)
if err != nil {
return xlMetaV1{}, err
}
xlMeta.Stat = stat
// parse the xlMetaV1.Erasure fields.
xlMeta.Erasure = parseXLErasureInfo(xlMetaBuf)
// Parse the XL Parts.
xlMeta.Parts = parseXLParts(xlMetaBuf)
// Get the xlMetaV1.Minio.Release field.
xlMeta.Minio.Release = parseXLRelease(xlMetaBuf)
// Parse the xlMetaV1.Meta map.
xlMeta.Meta = parseXLMetaMap(xlMetaBuf)
return xlMeta, nil
}
// read xl.json from the given disk, parse and return xlMetaV1.Parts.
func readXLMetaParts(disk StorageAPI, bucket string, object string) ([]objectPartInfo, error) {
// Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil {
return nil, traceError(err)
}
// obtain xlMetaV1{}.Parts using `github.com/tidwall/gjson`.
xlMetaParts := parseXLParts(xlMetaBuf)
return xlMetaParts, nil
}
// read xl.json from the given disk and parse xlMetaV1.Stat and xlMetaV1.Meta using gjson.
func readXLMetaStat(disk StorageAPI, bucket string, object string) (statInfo, map[string]string, error) {
// Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil {
return statInfo{}, nil, traceError(err)
}
// obtain xlMetaV1{}.Meta using `github.com/tidwall/gjson`.
xlMetaMap := parseXLMetaMap(xlMetaBuf)
// obtain xlMetaV1{}.Stat using `github.com/tidwall/gjson`.
xlStat, err := parseXLStat(xlMetaBuf)
if err != nil {
return statInfo{}, nil, traceError(err)
}
// Return structured `xl.json`.
return xlStat, xlMetaMap, nil
}
// readXLMeta reads `xl.json` and returns back XL metadata structure.
func readXLMeta(disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1, err error) {
// Reads entire `xl.json`.
buf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil {
return xlMetaV1{}, traceError(err)
}
// Unmarshal xl metadata.
if err = json.Unmarshal(buf, &xlMeta); err != nil {
// obtain xlMetaV1{} using `github.com/tidwall/gjson`.
xlMeta, err = xlMetaV1UnmarshalJSON(xlMetaBuf)
if err != nil {
return xlMetaV1{}, traceError(err)
}
// Return structured `xl.json`.
return xlMeta, nil
}


@@ -17,8 +17,11 @@
package cmd
import (
"encoding/json"
"reflect"
"strconv"
"testing"
"time"
)
// Test for reduceErrs, reduceErr reduces collection
@@ -93,3 +96,201 @@ func TestHashOrder(t *testing.T) {
t.Errorf("Test: Expect \"nil\" but failed \"%#v\"", hashedOrder)
}
}
// newTestXLMetaV1 - initializes new xlMetaV1, adds version, allocates a fresh erasure info and metadata.
func newTestXLMetaV1() xlMetaV1 {
xlMeta := xlMetaV1{}
xlMeta.Version = "1.0.0"
xlMeta.Format = "xl"
xlMeta.Minio.Release = "1.0.0"
xlMeta.Erasure = erasureInfo{
Algorithm: "klauspost/reedsolomon/vandermonde",
DataBlocks: 5,
ParityBlocks: 5,
BlockSize: 10485760,
Index: 10,
Distribution: []int{9, 10, 1, 2, 3, 4, 5, 6, 7, 8},
}
xlMeta.Stat = statInfo{
Size: int64(20),
ModTime: time.Now().UTC(),
}
// Set meta data.
xlMeta.Meta = make(map[string]string)
xlMeta.Meta["testKey1"] = "val1"
xlMeta.Meta["testKey2"] = "val2"
return xlMeta
}
func (m *xlMetaV1) AddTestObjectCheckSum(checkSumNum int, name string, hash string, algo string) {
checkSum := checkSumInfo{
Name: name,
Algorithm: algo,
Hash: hash,
}
m.Erasure.Checksum[checkSumNum] = checkSum
}
// AddTestObjectPart - add a new object part in order.
func (m *xlMetaV1) AddTestObjectPart(partNumber int, partName string, partETag string, partSize int64) {
partInfo := objectPartInfo{
Number: partNumber,
Name: partName,
ETag: partETag,
Size: partSize,
}
// Proceed to include new part info.
m.Parts[partNumber] = partInfo
}
// Constructs xlMetaV1{} for given number of parts and converts it into bytes.
func getXLMetaBytes(totalParts int) []byte {
xlSampleMeta := getSampleXLMeta(totalParts)
xlMetaBytes, err := json.Marshal(xlSampleMeta)
if err != nil {
panic(err)
}
return xlMetaBytes
}
// Returns sample xlMetaV1{} for number of parts.
func getSampleXLMeta(totalParts int) xlMetaV1 {
xlMeta := newTestXLMetaV1()
// Number of checksum info == total parts.
xlMeta.Erasure.Checksum = make([]checkSumInfo, totalParts)
// total number of parts.
xlMeta.Parts = make([]objectPartInfo, totalParts)
for i := 0; i < totalParts; i++ {
partName := "part." + strconv.Itoa(i+1)
// Hard-coded hash and algo values for the checksum. Since we are benchmarking xl.json parsing,
// the actual values don't matter; only their size does.
xlMeta.AddTestObjectCheckSum(i, partName, "a23f5eff248c4372badd9f3b2455a285cd4ca86c3d9a570b091d3fc5cd7ca6d9484bbea3f8c5d8d4f84daae96874419eda578fd736455334afbac2c924b3915a", "blake2b")
xlMeta.AddTestObjectPart(i, partName, "d3fdd79cc3efd5fe5c068d7be397934b", 67108864)
}
return xlMeta
}
// Compare the unmarshaled XLMetaV1 with the one obtained from gjson parsing.
func compareXLMetaV1(t *testing.T, unMarshalXLMeta, gjsonXLMeta xlMetaV1) {
// Start comparing the fields of xlMetaV1 obtained from gjson parsing with one parsed using json unmarshaling.
if unMarshalXLMeta.Version != gjsonXLMeta.Version {
t.Errorf("Expected the Version to be \"%s\", but got \"%s\".", unMarshalXLMeta.Version, gjsonXLMeta.Version)
}
if unMarshalXLMeta.Format != gjsonXLMeta.Format {
t.Errorf("Expected the format to be \"%s\", but got \"%s\".", unMarshalXLMeta.Format, gjsonXLMeta.Format)
}
if unMarshalXLMeta.Stat.Size != gjsonXLMeta.Stat.Size {
t.Errorf("Expected the stat size to be %v, but got %v.", unMarshalXLMeta.Stat.Size, gjsonXLMeta.Stat.Size)
}
if unMarshalXLMeta.Stat.ModTime != gjsonXLMeta.Stat.ModTime {
t.Errorf("Expected the modTime to be \"%v\", but got \"%v\".", unMarshalXLMeta.Stat.ModTime, gjsonXLMeta.Stat.ModTime)
}
if unMarshalXLMeta.Erasure.Algorithm != gjsonXLMeta.Erasure.Algorithm {
t.Errorf("Expected the erasure algorithm to be \"%v\", but got \"%v\".", unMarshalXLMeta.Erasure.Algorithm, gjsonXLMeta.Erasure.Algorithm)
}
if unMarshalXLMeta.Erasure.DataBlocks != gjsonXLMeta.Erasure.DataBlocks {
t.Errorf("Expected the erasure data blocks to be %v, but got %v.", unMarshalXLMeta.Erasure.DataBlocks, gjsonXLMeta.Erasure.DataBlocks)
}
if unMarshalXLMeta.Erasure.ParityBlocks != gjsonXLMeta.Erasure.ParityBlocks {
t.Errorf("Expected the erasure parity blocks to be %v, but got %v.", unMarshalXLMeta.Erasure.ParityBlocks, gjsonXLMeta.Erasure.ParityBlocks)
}
if unMarshalXLMeta.Erasure.BlockSize != gjsonXLMeta.Erasure.BlockSize {
t.Errorf("Expected the erasure block size to be %v, but got %v.", unMarshalXLMeta.Erasure.BlockSize, gjsonXLMeta.Erasure.BlockSize)
}
if unMarshalXLMeta.Erasure.Index != gjsonXLMeta.Erasure.Index {
t.Errorf("Expected the erasure index to be %v, but got %v.", unMarshalXLMeta.Erasure.Index, gjsonXLMeta.Erasure.Index)
}
if len(unMarshalXLMeta.Erasure.Distribution) != len(gjsonXLMeta.Erasure.Distribution) {
t.Errorf("Expected the size of Erasure Distribution to be %d, but got %d.", len(unMarshalXLMeta.Erasure.Distribution), len(gjsonXLMeta.Erasure.Distribution))
} else {
for i := 0; i < len(unMarshalXLMeta.Erasure.Distribution); i++ {
if unMarshalXLMeta.Erasure.Distribution[i] != gjsonXLMeta.Erasure.Distribution[i] {
t.Errorf("Expected the Erasure Distribution to be %d, got %d.", unMarshalXLMeta.Erasure.Distribution[i], gjsonXLMeta.Erasure.Distribution[i])
}
}
}
if len(unMarshalXLMeta.Erasure.Checksum) != len(gjsonXLMeta.Erasure.Checksum) {
t.Errorf("Expected the size of Erasure Checksum to be %d, but got %d.", len(unMarshalXLMeta.Erasure.Checksum), len(gjsonXLMeta.Erasure.Checksum))
} else {
for i := 0; i < len(unMarshalXLMeta.Erasure.Checksum); i++ {
if unMarshalXLMeta.Erasure.Checksum[i].Name != gjsonXLMeta.Erasure.Checksum[i].Name {
t.Errorf("Expected the Erasure Checksum Name to be \"%s\", got \"%s\".", unMarshalXLMeta.Erasure.Checksum[i].Name, gjsonXLMeta.Erasure.Checksum[i].Name)
}
if unMarshalXLMeta.Erasure.Checksum[i].Algorithm != gjsonXLMeta.Erasure.Checksum[i].Algorithm {
t.Errorf("Expected the Erasure Checksum Algorithm to be \"%s\", got \"%s.\"", unMarshalXLMeta.Erasure.Checksum[i].Algorithm, gjsonXLMeta.Erasure.Checksum[i].Algorithm)
}
if unMarshalXLMeta.Erasure.Checksum[i].Hash != gjsonXLMeta.Erasure.Checksum[i].Hash {
t.Errorf("Expected the Erasure Checksum Hash to be \"%s\", got \"%s\".", unMarshalXLMeta.Erasure.Checksum[i].Hash, gjsonXLMeta.Erasure.Checksum[i].Hash)
}
}
}
if unMarshalXLMeta.Minio.Release != gjsonXLMeta.Minio.Release {
t.Errorf("Expected the Release string to be \"%s\", but got \"%s\".", unMarshalXLMeta.Minio.Release, gjsonXLMeta.Minio.Release)
}
if len(unMarshalXLMeta.Parts) != len(gjsonXLMeta.Parts) {
t.Errorf("Expected info of %d parts to be present, but got %d instead.", len(unMarshalXLMeta.Parts), len(gjsonXLMeta.Parts))
} else {
for i := 0; i < len(unMarshalXLMeta.Parts); i++ {
if unMarshalXLMeta.Parts[i].Name != gjsonXLMeta.Parts[i].Name {
t.Errorf("Expected the name of part %d to be \"%s\", got \"%s\".", i+1, unMarshalXLMeta.Parts[i].Name, gjsonXLMeta.Parts[i].Name)
}
if unMarshalXLMeta.Parts[i].ETag != gjsonXLMeta.Parts[i].ETag {
t.Errorf("Expected the ETag of part %d to be \"%s\", got \"%s\".", i+1, unMarshalXLMeta.Parts[i].ETag, gjsonXLMeta.Parts[i].ETag)
}
if unMarshalXLMeta.Parts[i].Number != gjsonXLMeta.Parts[i].Number {
t.Errorf("Expected the number of part %d to be \"%d\", got \"%d\".", i+1, unMarshalXLMeta.Parts[i].Number, gjsonXLMeta.Parts[i].Number)
}
if unMarshalXLMeta.Parts[i].Size != gjsonXLMeta.Parts[i].Size {
t.Errorf("Expected the size of part %d to be %v, got %v.", i+1, unMarshalXLMeta.Parts[i].Size, gjsonXLMeta.Parts[i].Size)
}
}
}
for key, val := range unMarshalXLMeta.Meta {
gjsonVal, exists := gjsonXLMeta.Meta[key]
if !exists {
t.Errorf("No meta data entry for Key \"%s\" exists.", key)
}
if val != gjsonVal {
t.Errorf("Expected the value for Meta data key \"%s\" to be \"%s\", but got \"%s\".", key, val, gjsonVal)
}
}
}
// Tests the correctness of constructing XLMetaV1 using gjson lib.
// The result will be compared with the result obtained from json.Unmarshal of the byte data.
func TestGetXLMetaV1GJson1(t *testing.T) {
xlMetaJSON := getXLMetaBytes(1)
var unMarshalXLMeta xlMetaV1
if err := json.Unmarshal(xlMetaJSON, &unMarshalXLMeta); err != nil {
t.Errorf("Unmarshalling failed")
}
gjsonXLMeta, err := xlMetaV1UnmarshalJSON(xlMetaJSON)
if err != nil {
t.Errorf("gjson parsing of XLMeta failed")
}
compareXLMetaV1(t, unMarshalXLMeta, gjsonXLMeta)
}
// Tests the correctness of constructing XLMetaV1 using gjson lib for an XLMetaV1 with 10 parts.
// The result will be compared with the result obtained from json.Unmarshal of the byte data.
func TestGetXLMetaV1GJson10(t *testing.T) {
xlMetaJSON := getXLMetaBytes(10)
var unMarshalXLMeta xlMetaV1
if err := json.Unmarshal(xlMetaJSON, &unMarshalXLMeta); err != nil {
t.Errorf("Unmarshalling failed")
}
gjsonXLMeta, err := xlMetaV1UnmarshalJSON(xlMetaJSON)
if err != nil {
t.Errorf("gjson parsing of XLMeta failed")
}
compareXLMetaV1(t, unMarshalXLMeta, gjsonXLMeta)
}
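
A hedged benchmark sketch, not part of this commit, showing one way the 30-40% speedup cited in the commit message could be measured. It reuses `getXLMetaBytes` and `xlMetaV1UnmarshalJSON` defined above and assumes it lives in the same `cmd` package test file, which already imports `encoding/json` and `testing`.

```go
// Benchmark the gjson-based constructor against stdlib json.Unmarshal
// for the same 10-part xl.json payload.
func BenchmarkXLMetaV1GJSONParsing(b *testing.B) {
	xlMetaJSON := getXLMetaBytes(10)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := xlMetaV1UnmarshalJSON(xlMetaJSON); err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkXLMetaV1JSONUnmarshal(b *testing.B) {
	xlMetaJSON := getXLMetaBytes(10)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var xlMeta xlMetaV1
		if err := json.Unmarshal(xlMetaJSON, &xlMeta); err != nil {
			b.Fatal(err)
		}
	}
}
```
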

20
vendor/github.com/tidwall/gjson/LICENSE generated vendored Normal file

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2016 Josh Baker
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

278
vendor/github.com/tidwall/gjson/README.md generated vendored Normal file

@@ -0,0 +1,278 @@
<p align="center">
<img
src="logo.png"
width="240" height="78" border="0" alt="GJSON">
<br>
<a href="https://travis-ci.org/tidwall/gjson"><img src="https://img.shields.io/travis/tidwall/gjson.svg?style=flat-square" alt="Build Status"></a><!--
<a href="http://gocover.io/github.com/tidwall/gjson"><img src="https://img.shields.io/badge/coverage-97%25-brightgreen.svg?style=flat-square" alt="Code Coverage"></a>
-->
<a href="https://godoc.org/github.com/tidwall/gjson"><img src="https://img.shields.io/badge/api-reference-blue.svg?style=flat-square" alt="GoDoc"></a>
</p>
<p align="center">get a json value quickly</a></p>
GJSON is a Go package that provides a [very fast](#performance) and simple way to get a value from a json document. The reason for this library is to give efficient json indexing for the [BuntDB](https://github.com/tidwall/buntdb) project.
Getting Started
===============
## Installing
To start using GJSON, install Go and run `go get`:
```sh
$ go get -u github.com/tidwall/gjson
```
This will retrieve the library.
## Get a value
Get searches json for the specified path. A path is in dot syntax, such as "name.last" or "age". This function expects that the json is well-formed, and does not validate. Invalid json will not panic, but it may return back unexpected results. When the value is found it's returned immediately.
```go
package main
import "github.com/tidwall/gjson"
const json = `{"name":{"first":"Janet","last":"Prichard"},"age":47}`
func main() {
value := gjson.Get(json, "name.last")
println(value.String())
}
```
This will print:
```
Prichard
```
## Path Syntax
A path is a series of keys separated by a dot.
A key may contain special wildcard characters '\*' and '?'.
To access an array value use the index as the key.
To get the number of elements in an array or to access a child path, use the '#' character.
The dot and wildcard characters can be escaped with '\'.
```json
{
"name": {"first": "Tom", "last": "Anderson"},
"age":37,
"children": ["Sara","Alex","Jack"],
"fav.movie": "Deer Hunter",
"friends": [
{"first": "James", "last": "Murphy"},
{"first": "Roger", "last": "Craig"}
]
}
```
```
"name.last" >> "Anderson"
"age" >> 37
"children.#" >> 3
"children.1" >> "Alex"
"child*.2" >> "Jack"
"c?ildren.0" >> "Sara"
"fav\.movie" >> "Deer Hunter"
"friends.#.first" >> [ "James", "Roger" ]
"friends.1.last" >> "Craig"
```
To query an array:
```
`friends.#[last="Murphy"].first` >> "James"
```
## Result Type
GJSON supports the json types `string`, `number`, `bool`, and `null`.
Arrays and Objects are returned as their raw json types.
The `Result` type holds one of these:
```
bool, for JSON booleans
float64, for JSON numbers
string, for JSON string literals
nil, for JSON null
```
To directly access the value:
```go
result.Type // can be String, Number, True, False, Null, or JSON
result.Str // holds the string
result.Num // holds the float64 number
result.Raw // holds the raw json
result.Multi // holds nested array values
```
There are a variety of handy functions that work on a result:
```go
result.Value() interface{}
result.Int() int64
result.Float() float64
result.String() string
result.Bool() bool
result.Array() []gjson.Result
result.Map() map[string]gjson.Result
result.Get(path string) Result
```
The `result.Value()` function returns an `interface{}` which requires type assertion and is one of the following Go types:
```go
boolean >> bool
number >> float64
string >> string
null >> nil
array >> []interface{}
object >> map[string]interface{}
```
## Get nested array values
Suppose you want all the last names from the following json:
```json
{
"programmers": [
{
"firstName": "Janet",
"lastName": "McLaughlin",
}, {
"firstName": "Elliotte",
"lastName": "Hunter",
}, {
"firstName": "Jason",
"lastName": "Harold",
}
]
}
```
You would use the path "programmers.#.lastName" like such:
```go
result := gjson.Get(json, "programmers.#.lastName")
for _,name := range result.Array() {
println(name.String())
}
```
You can also query an object inside an array:
```go
name := gjson.Get(json, `programmers.#[lastName="Hunter"].firstName`)
println(name.String()) // prints "Elliotte"
```
## Simple Parse and Get
There's a `Parse(json)` function that will do a simple parse, and `result.Get(path)` that will search a result.
For example, all of these will return the same result:
```go
gjson.Parse(json).Get("name").Get("last")
gjson.Get(json, "name").Get("last")
gjson.Get(json, "name.last")
```
## Check for the existence of a value
Sometimes you just want to know you if a value exists.
```go
value := gjson.Get(json, "name.last")
if !value.Exists() {
println("no last name")
} else {
println(value.String())
}
// Or as one step
if gjson.Get(json, "name.last").Exists(){
println("has a last name")
}
```
## Unmarshal to a map
To unmarshal to a `map[string]interface{}`:
```go
m, ok := gjson.Parse(json).Value().(map[string]interface{})
if !ok{
// not a map
}
```
## Performance
Benchmarks of GJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/),
[ffjson](https://github.com/pquerna/ffjson),
[EasyJSON](https://github.com/mailru/easyjson),
and [jsonparser](https://github.com/buger/jsonparser)
```
BenchmarkGJSONGet-8 15000000 333 ns/op 0 B/op 0 allocs/op
BenchmarkGJSONUnmarshalMap-8 900000 4188 ns/op 1920 B/op 26 allocs/op
BenchmarkJSONUnmarshalMap-8 600000 8908 ns/op 3048 B/op 69 allocs/op
BenchmarkJSONUnmarshalStruct-8 600000 9026 ns/op 1832 B/op 69 allocs/op
BenchmarkJSONDecoder-8 300000 14339 ns/op 4224 B/op 184 allocs/op
BenchmarkFFJSONLexer-8 1500000 3156 ns/op 896 B/op 8 allocs/op
BenchmarkEasyJSONLexer-8 3000000 938 ns/op 613 B/op 6 allocs/op
BenchmarkJSONParserGet-8 3000000 442 ns/op 21 B/op 0 allocs/op
```
JSON document used:
```json
{
"widget": {
"debug": "on",
"window": {
"title": "Sample Konfabulator Widget",
"name": "main_window",
"width": 500,
"height": 500
},
"image": {
"src": "Images/Sun.png",
"hOffset": 250,
"vOffset": 250,
"alignment": "center"
},
"text": {
"data": "Click Here",
"size": 36,
"style": "bold",
"vOffset": 100,
"alignment": "center",
"onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;"
}
}
}
```
Each operation was rotated through one of the following search paths:
```
widget.window.name
widget.image.hOffset
widget.text.onMouseUp
```
*These benchmarks were run on a MacBook Pro 15" 2.8 GHz Intel Core i7 using Go 1.7.*
## Contact
Josh Baker [@tidwall](http://twitter.com/tidwall)
## License
GJSON source code is available under the MIT [License](/LICENSE).

1291
vendor/github.com/tidwall/gjson/gjson.go generated vendored Normal file

File diff suppressed because it is too large.

BIN
vendor/github.com/tidwall/gjson/logo.png generated vendored Normal file

Binary file not shown (16 KiB PNG added).

20
vendor/github.com/tidwall/match/LICENSE generated vendored Normal file

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2016 Josh Baker
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

31
vendor/github.com/tidwall/match/README.md generated vendored Normal file

@@ -0,0 +1,31 @@
Match
=====
<a href="https://travis-ci.org/tidwall/match"><img src="https://img.shields.io/travis/tidwall/match.svg?style=flat-square" alt="Build Status"></a>
<a href="https://godoc.org/github.com/tidwall/match"><img src="https://img.shields.io/badge/api-reference-blue.svg?style=flat-square" alt="GoDoc"></a>
Match is a very simple pattern matcher where '*' matches on any
number of characters and '?' matches on any one character.
Installing
----------
```
go get -u github.com/tidwall/match
```
Example
-------
```go
match.Match("hello", "*llo")
match.Match("jello", "?ello")
match.Match("hello", "h*o")
```
Contact
-------
Josh Baker [@tidwall](http://twitter.com/tidwall)
License
-------
Match source code is available under the MIT [License](/LICENSE).

192
vendor/github.com/tidwall/match/match.go generated vendored Normal file

@@ -0,0 +1,192 @@
// Match provides a simple pattern matcher with unicode support.
package match
import "unicode/utf8"
// Match returns true if str matches pattern. This is a very
// simple wildcard match where '*' matches on any number of characters
// and '?' matches on any one character.
// pattern:
// { term }
// term:
// '*' matches any sequence of non-Separator characters
// '?' matches any single non-Separator character
// c matches character c (c != '*', '?', '\\')
// '\\' c matches character c
//
func Match(str, pattern string) bool {
if pattern == "*" {
return true
}
return deepMatch(str, pattern)
}
func deepMatch(str, pattern string) bool {
for len(pattern) > 0 {
if pattern[0] > 0x7f {
return deepMatchRune(str, pattern)
}
switch pattern[0] {
default:
if len(str) == 0 {
return false
}
if str[0] > 0x7f {
return deepMatchRune(str, pattern)
}
if str[0] != pattern[0] {
return false
}
case '?':
if len(str) == 0 {
return false
}
case '*':
return deepMatch(str, pattern[1:]) ||
(len(str) > 0 && deepMatch(str[1:], pattern))
}
str = str[1:]
pattern = pattern[1:]
}
return len(str) == 0 && len(pattern) == 0
}
func deepMatchRune(str, pattern string) bool {
var sr, pr rune
var srsz, prsz int
// read the first rune ahead of time
if len(str) > 0 {
if str[0] > 0x7f {
sr, srsz = utf8.DecodeRuneInString(str)
} else {
sr, srsz = rune(str[0]), 1
}
} else {
sr, srsz = utf8.RuneError, 0
}
if len(pattern) > 0 {
if pattern[0] > 0x7f {
pr, prsz = utf8.DecodeRuneInString(pattern)
} else {
pr, prsz = rune(pattern[0]), 1
}
} else {
pr, prsz = utf8.RuneError, 0
}
// done reading
for pr != utf8.RuneError {
switch pr {
default:
if srsz == utf8.RuneError {
return false
}
if sr != pr {
return false
}
case '?':
if srsz == utf8.RuneError {
return false
}
case '*':
return deepMatchRune(str, pattern[prsz:]) ||
(srsz > 0 && deepMatchRune(str[srsz:], pattern))
}
str = str[srsz:]
pattern = pattern[prsz:]
// read the next runes
if len(str) > 0 {
if str[0] > 0x7f {
sr, srsz = utf8.DecodeRuneInString(str)
} else {
sr, srsz = rune(str[0]), 1
}
} else {
sr, srsz = utf8.RuneError, 0
}
if len(pattern) > 0 {
if pattern[0] > 0x7f {
pr, prsz = utf8.DecodeRuneInString(pattern)
} else {
pr, prsz = rune(pattern[0]), 1
}
} else {
pr, prsz = utf8.RuneError, 0
}
// done reading
}
return srsz == 0 && prsz == 0
}
var maxRuneBytes = func() []byte {
b := make([]byte, 4)
if utf8.EncodeRune(b, '\U0010FFFF') != 4 {
panic("invalid rune encoding")
}
return b
}()
// Allowable parses the pattern and determines the minimum and maximum allowable
// values that the pattern can represent.
// When the max cannot be determined, 'true' will be returned
// for infinite.
func Allowable(pattern string) (min, max string) {
if pattern == "" || pattern[0] == '*' {
return "", ""
}
minb := make([]byte, 0, len(pattern))
maxb := make([]byte, 0, len(pattern))
var wild bool
for i := 0; i < len(pattern); i++ {
if pattern[i] == '*' {
wild = true
break
}
if pattern[i] == '?' {
minb = append(minb, 0)
maxb = append(maxb, maxRuneBytes...)
} else {
minb = append(minb, pattern[i])
maxb = append(maxb, pattern[i])
}
}
if wild {
r, n := utf8.DecodeLastRune(maxb)
if r != utf8.RuneError {
if r < utf8.MaxRune {
r++
if r > 0x7f {
b := make([]byte, 4)
nn := utf8.EncodeRune(b, r)
maxb = append(maxb[:len(maxb)-n], b[:nn]...)
} else {
maxb = append(maxb[:len(maxb)-n], byte(r))
}
}
}
}
return string(minb), string(maxb)
/*
return
if wild {
r, n := utf8.DecodeLastRune(maxb)
if r != utf8.RuneError {
if r < utf8.MaxRune {
infinite = true
} else {
r++
if r > 0x7f {
b := make([]byte, 4)
nn := utf8.EncodeRune(b, r)
maxb = append(maxb[:len(maxb)-n], b[:nn]...)
} else {
maxb = append(maxb[:len(maxb)-n], byte(r))
}
}
}
}
return string(minb), string(maxb), infinite
*/
}

12
vendor/vendor.json vendored

@@ -169,6 +169,18 @@
"revision": "2e25825abdbd7752ff08b270d313b93519a0a232",
"revisionTime": "2016-03-11T21:55:03Z"
},
{
"checksumSHA1": "+Pcohsuq0Mi/y8bgaDFjb/CGzkk=",
"path": "github.com/tidwall/gjson",
"revision": "7c631e98686a791e5fc60ff099512968122afb52",
"revisionTime": "2016-09-08T16:02:40Z"
},
{
"checksumSHA1": "qmePMXEDYGwkAfT9QvtMC58JN/E=",
"path": "github.com/tidwall/match",
"revision": "173748da739a410c5b0b813b956f89ff94730b4c",
"revisionTime": "2016-08-30T17:39:30Z"
},
{
"path": "golang.org/x/crypto/bcrypt",
"revision": "7b85b097bf7527677d54d3220065e966a0e3b613",