diff --git a/pkg/storage/donut/bucket.go b/pkg/storage/donut/bucket.go
index e59cffc4e..bb937435c 100644
--- a/pkg/storage/donut/bucket.go
+++ b/pkg/storage/donut/bucket.go
@@ -117,10 +117,6 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) ([]st
 				return nil, nil, false, iodine.New(err, nil)
 			}
 			for _, file := range files {
-				if len(objects) >= maxkeys {
-					isTruncated = true
-					goto truncated
-				}
 				objectName, err := b.getObjectName(file.Name(), disk.GetPath(), bucketPath)
 				if err != nil {
 					return nil, nil, false, iodine.New(err, nil)
@@ -134,8 +130,6 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) ([]st
 		}
 		nodeSlice = nodeSlice + 1
 	}
-
-truncated:
 	{
 		if strings.TrimSpace(prefix) != "" {
 			objects = removePrefix(objects, prefix)
@@ -150,10 +144,15 @@ truncated:
 		} else {
 			filteredObjects = objects
 		}
-
 		var results []string
 		var commonPrefixes []string
+
+		sort.Strings(filteredObjects)
 		for _, objectName := range filteredObjects {
+			if len(results) >= maxkeys {
+				isTruncated = true
+				break
+			}
 			results = appendUniq(results, prefix+objectName)
 		}
 		for _, commonPrefix := range prefixes {
@@ -418,6 +417,9 @@ func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, donutO
 		writer.CloseWithError(iodine.New(err, nil))
 		return
 	}
+	for _, reader := range readers {
+		defer reader.Close()
+	}
 	hasher := md5.New()
 	mwriter := io.MultiWriter(writer, hasher)
 	switch len(readers) == 1 {
diff --git a/pkg/storage/donut/disk/disk.go b/pkg/storage/donut/disk/disk.go
index 3cc355393..326777de8 100644
--- a/pkg/storage/donut/disk/disk.go
+++ b/pkg/storage/donut/disk/disk.go
@@ -93,6 +93,7 @@ func (disk Disk) ListDir(dirname string) ([]os.FileInfo, error) {
 	if err != nil {
 		return nil, iodine.New(err, nil)
 	}
+	defer dir.Close()
 	contents, err := dir.Readdir(-1)
 	if err != nil {
 		return nil, iodine.New(err, nil)
 	}
@@ -113,6 +114,7 @@ func (disk Disk) ListFiles(dirname string) ([]os.FileInfo, error) {
 	if err != nil {
 		return nil, iodine.New(err, nil)
 	}
+	defer dir.Close()
 	contents, err := dir.Readdir(-1)
 	if err != nil {
 		return nil, iodine.New(err, nil)
 	}
diff --git a/pkg/storage/donut/donut.go b/pkg/storage/donut/donut.go
index 971279c65..d8ef8ea3d 100644
--- a/pkg/storage/donut/donut.go
+++ b/pkg/storage/donut/donut.go
@@ -192,7 +192,7 @@ func (dt donut) PutObject(bucket, object, expectedMD5Sum string, reader io.ReadC
 	if _, ok := dt.buckets[bucket]; !ok {
 		return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
-	objectList, _, _, err := dt.buckets[bucket].ListObjects(object, "", "", 1)
+	objectList, _, _, err := dt.buckets[bucket].ListObjects("", "", "", 1000)
 	if err != nil {
 		return "", iodine.New(err, nil)
 	}
@@ -245,7 +245,12 @@ func (dt donut) GetObjectMetadata(bucket, object string) (map[string]string, err
 	if _, ok := dt.buckets[bucket]; !ok {
 		return nil, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
 	}
-	objectList, _, _, err := dt.buckets[bucket].ListObjects(object, "", "", 1)
+	//
+	// there is a potential issue here, if the object comes after the truncated list
+	// below GetObjectMetadata would fail as ObjectNotFound{}
+	//
+	// will fix it when we bring in persistent json into Donut - TODO
+	objectList, _, _, err := dt.buckets[bucket].ListObjects("", "", "", 1000)
 	if err != nil {
 		return nil, iodine.New(err, errParams)
 	}
diff --git a/pkg/storage/drivers/api_testsuite.go b/pkg/storage/drivers/api_testsuite.go
index 7b6762d6a..b20cae176 100644
--- a/pkg/storage/drivers/api_testsuite.go
+++ b/pkg/storage/drivers/api_testsuite.go
@@ -187,9 +187,9 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Maxkeys = 5
 		resources.Prefix = ""
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(len(objects), check.Equals, i+1)
 		c.Assert(resources.IsTruncated, check.Equals, false)
-		c.Assert(err, check.IsNil)
 	}
 	// check after paging occurs pages work
 	for i := 6; i <= 10; i++ {
@@ -198,9 +198,9 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Maxkeys = 5
 		resources.Prefix = ""
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(len(objects), check.Equals, 5)
 		c.Assert(resources.IsTruncated, check.Equals, true)
-		c.Assert(err, check.IsNil)
 	}
 	// check paging with prefix at end returns less objects
 	{
@@ -209,6 +209,7 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Prefix = "new"
 		resources.Maxkeys = 5
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(len(objects), check.Equals, 2)
 	}
 
@@ -217,6 +218,7 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Prefix = ""
 		resources.Maxkeys = 1000
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(objects[0].Key, check.Equals, "newPrefix")
 		c.Assert(objects[1].Key, check.Equals, "newPrefix2")
 		c.Assert(objects[2].Key, check.Equals, "obj0")
@@ -248,6 +250,7 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Prefix = ""
 		resources.Maxkeys = 1000
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(objects[0].Key, check.Equals, "newPrefix")
 		c.Assert(objects[1].Key, check.Equals, "newPrefix2")
 		c.Assert(objects[2].Key, check.Equals, "obj0")
@@ -265,6 +268,7 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Delimiter = ""
 		resources.Maxkeys = 3
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(objects[0].Key, check.Equals, "newPrefix2")
 		c.Assert(objects[1].Key, check.Equals, "obj0")
 		c.Assert(objects[2].Key, check.Equals, "obj1")
@@ -276,6 +280,7 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Marker = ""
 		resources.Maxkeys = 1000
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(objects[0].Key, check.Equals, "obj0")
 		c.Assert(objects[1].Key, check.Equals, "obj1")
 		c.Assert(objects[2].Key, check.Equals, "obj10")
@@ -288,6 +293,7 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Marker = ""
 		resources.Maxkeys = 5
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(objects[0].Key, check.Equals, "newPrefix")
 		c.Assert(objects[1].Key, check.Equals, "newPrefix2")
 	}
@@ -313,8 +319,8 @@ func testObjectOverwriteFails(c *check.C, create func() Driver) {
 
 	var bytesBuffer bytes.Buffer
 	length, err := drivers.GetObject(&bytesBuffer, "bucket", "object")
-	c.Assert(length, check.Equals, int64(len("one")))
 	c.Assert(err, check.IsNil)
+	c.Assert(length, check.Equals, int64(len("one")))
 	c.Assert(string(bytesBuffer.Bytes()), check.Equals, "one")
 }
 
@@ -358,9 +364,9 @@ func testPutObjectInSubdir(c *check.C, create func() Driver) {
 
 	var bytesBuffer bytes.Buffer
 	length, err := drivers.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object")
+	c.Assert(err, check.IsNil)
 	c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world"))
 	c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, length)
-	c.Assert(err, check.IsNil)
 }
 
 func testListBuckets(c *check.C, create func() Driver) {
@@ -405,8 +411,8 @@ func testListBucketsOrder(c *check.C, create func() Driver) {
 	drivers.CreateBucket("bucket2", "")
 
 	buckets, err := drivers.ListBuckets()
-	c.Assert(len(buckets), check.Equals, 2)
 	c.Assert(err, check.IsNil)
+	c.Assert(len(buckets), check.Equals, 2)
 	c.Assert(buckets[0].Name, check.Equals, "bucket1")
 	c.Assert(buckets[1].Name, check.Equals, "bucket2")
 }
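
The behavioural core of the bucket.go change is ordering: ListObjects used to truncate during the unsorted disk walk (the removed goto truncated path), so a page could omit keys that sort before the ones it returned; it now sorts the filtered names and only then enforces maxkeys. Below is a minimal, self-contained sketch of that sort-then-truncate pagination; the listPage helper and its sample keys are hypothetical and are not part of the donut package.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// listPage filters by prefix, sorts the full candidate set, and only then
// enforces maxkeys, so the first page always holds the lexicographically
// smallest keys and isTruncated reflects what was actually cut off.
func listPage(names []string, prefix string, maxkeys int) (results []string, isTruncated bool) {
	var filtered []string
	for _, name := range names {
		if strings.HasPrefix(name, prefix) {
			filtered = append(filtered, name)
		}
	}
	sort.Strings(filtered) // order first ...
	for _, name := range filtered {
		if len(results) >= maxkeys { // ... truncate second
			isTruncated = true
			break
		}
		results = append(results, name)
	}
	return results, isTruncated
}

func main() {
	// Keys arrive in disk-walk order, not sorted.
	names := []string{"obj2", "newPrefix", "obj0", "obj1", "newPrefix2"}
	page, truncated := listPage(names, "", 3)
	fmt.Println(page, truncated) // [newPrefix newPrefix2 obj0] true
}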
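The disk.go and readEncodedData hunks are descriptor hygiene: each opened directory handle (and each block reader) is now closed via defer right after the successful open, so it is released on every return path, including the early error returns. A small sketch of the same pattern, assuming a hypothetical listNames helper in which os.Open and Readdirnames stand in for the disk package's own wrappers:

package main

import (
	"fmt"
	"os"
)

// listNames opens a directory and defers the Close immediately after the
// error check, mirroring the defer dir.Close() added in disk.ListDir and
// disk.ListFiles; the handle is released even if Readdirnames fails.
func listNames(dirname string) ([]string, error) {
	dir, err := os.Open(dirname)
	if err != nil {
		return nil, err
	}
	defer dir.Close() // runs on every return path below

	names, err := dir.Readdirnames(-1)
	if err != nil {
		return nil, err
	}
	return names, nil
}

func main() {
	names, err := listNames(".")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(names)
}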