Return proper errors in writeObjectData(), rename a few functions

Harshavardhana 2015-06-29 11:42:58 -07:00
parent be816145a9
commit d07d0c670a


@@ -212,7 +212,7 @@ func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64,
         return nil, 0, iodine.New(err, nil)
     }
     // read and reply back to GetObject() request in a go-routine
-    go b.readEncodedData(normalizeObjectName(objectName), writer, objMetadata)
+    go b.readObjectData(normalizeObjectName(objectName), writer, objMetadata)
     return reader, objMetadata.Size, nil
 }
@@ -223,7 +223,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, expectedMD5
     if objectName == "" || objectData == nil {
         return ObjectMetadata{}, iodine.New(InvalidArgument{}, nil)
     }
-    writers, err := b.getDiskWriters(normalizeObjectName(objectName), "data")
+    writers, err := b.getWriters(normalizeObjectName(objectName), "data")
     if err != nil {
         return ObjectMetadata{}, iodine.New(err, nil)
     }
@@ -247,8 +247,8 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, expectedMD5
     if err != nil {
         return ObjectMetadata{}, iodine.New(err, nil)
     }
-    // encoded data with k, m and write
-    chunkCount, totalLength, err := b.writeEncodedData(k, m, writers, objectData, sumMD5, sum512)
+    // write encoded data with k, m and writers
+    chunkCount, totalLength, err := b.writeObjectData(k, m, writers, objectData, sumMD5, sum512)
     if err != nil {
         return ObjectMetadata{}, iodine.New(err, nil)
     }
@@ -310,7 +310,7 @@ func (b bucket) writeObjectMetadata(objectName string, objMetadata ObjectMetadat
     if objMetadata.Object == "" {
         return iodine.New(InvalidArgument{}, nil)
     }
-    objMetadataWriters, err := b.getDiskWriters(objectName, objectMetadataConfig)
+    objMetadataWriters, err := b.getWriters(objectName, objectMetadataConfig)
     if err != nil {
         return iodine.New(err, nil)
     }
@@ -332,7 +332,7 @@ func (b bucket) readObjectMetadata(objectName string) (ObjectMetadata, error) {
     if objectName == "" {
         return ObjectMetadata{}, iodine.New(InvalidArgument{}, nil)
     }
-    objMetadataReaders, err := b.getDiskReaders(objectName, objectMetadataConfig)
+    objMetadataReaders, err := b.getReaders(objectName, objectMetadataConfig)
     if err != nil {
         return ObjectMetadata{}, iodine.New(err, nil)
     }
@@ -378,8 +378,8 @@ func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err error)
     return k, m, nil
 }
 
-// writeEncodedData -
-func (b bucket) writeEncodedData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, sumMD5, sum512 hash.Hash) (int, int, error) {
+// writeObjectData -
+func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, sumMD5, sum512 hash.Hash) (int, int, error) {
     encoder, err := newEncoder(k, m, "Cauchy")
     if err != nil {
         return 0, 0, iodine.New(err, nil)
@@ -387,16 +387,17 @@ func (b bucket) writeEncodedData(k, m uint8, writers []io.WriteCloser, objectDat
     chunkCount := 0
     totalLength := 0
     for chunk := range split.Stream(objectData, 10*1024*1024) {
-        if chunk.Err == nil {
-            totalLength = totalLength + len(chunk.Data)
-            encodedBlocks, _ := encoder.Encode(chunk.Data)
-            sumMD5.Write(chunk.Data)
-            sum512.Write(chunk.Data)
-            for blockIndex, block := range encodedBlocks {
-                _, err := io.Copy(writers[blockIndex], bytes.NewBuffer(block))
-                if err != nil {
-                    return 0, 0, iodine.New(err, nil)
-                }
+        if chunk.Err != nil {
+            return 0, 0, iodine.New(err, nil)
+        }
+        totalLength = totalLength + len(chunk.Data)
+        encodedBlocks, _ := encoder.Encode(chunk.Data)
+        sumMD5.Write(chunk.Data)
+        sum512.Write(chunk.Data)
+        for blockIndex, block := range encodedBlocks {
+            _, err := io.Copy(writers[blockIndex], bytes.NewBuffer(block))
+            if err != nil {
+                return 0, 0, iodine.New(err, nil)
             }
         }
         chunkCount = chunkCount + 1
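
The behavioral fix in the hunk above is the fail-fast handling of a bad chunk: instead of silently skipping a chunk whose Err field is set, the loop now returns immediately. Below is a minimal, self-contained sketch of that pattern; Chunk, streamChunks, writeChunks, and the tiny 4-byte chunk size are illustrative stand-ins for the split.Stream channel and 10 MiB chunks used in the diff, not the minio API.

package main

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "strings"
)

// Chunk mimics the value yielded by a streaming splitter such as split.Stream:
// either a slice of data or the error that stopped the split.
type Chunk struct {
    Data []byte
    Err  error
}

// streamChunks splits r into fixed-size chunks and sends them on a channel,
// reporting any read error through the Chunk itself.
func streamChunks(r io.Reader, size int) <-chan Chunk {
    ch := make(chan Chunk)
    go func() {
        defer close(ch)
        for {
            buf := make([]byte, size)
            n, err := io.ReadFull(r, buf)
            if n > 0 {
                ch <- Chunk{Data: buf[:n]}
            }
            if err != nil {
                // EOF just means the stream ended; anything else is a real error.
                if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
                    ch <- Chunk{Err: err}
                }
                return
            }
        }
    }()
    return ch
}

// writeChunks copies every chunk to w, failing fast on the first chunk error
// in the same way the new writeObjectData loop does.
func writeChunks(r io.Reader, w io.Writer) (chunkCount int, totalLength int, err error) {
    for chunk := range streamChunks(r, 4) {
        if chunk.Err != nil {
            return 0, 0, chunk.Err // stop immediately instead of skipping the chunk
        }
        n, copyErr := io.Copy(w, bytes.NewReader(chunk.Data))
        if copyErr != nil {
            return 0, 0, copyErr
        }
        totalLength += int(n)
        chunkCount++
    }
    return chunkCount, totalLength, nil
}

func main() {
    var out bytes.Buffer
    count, length, err := writeChunks(strings.NewReader("hello world"), &out)
    fmt.Println(count, length, err, out.String())
}

Checking chunk.Err before the hashes and writers are touched means a failed read surfaces as an error to the caller instead of being dropped, which is what the commit title describes; in the real code the returned error is additionally wrapped with iodine.New, as the surrounding hunks show.
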
@@ -404,9 +405,9 @@ func (b bucket) writeEncodedData(k, m uint8, writers []io.WriteCloser, objectDat
     return chunkCount, totalLength, nil
 }
 
-// readEncodedData -
-func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, objMetadata ObjectMetadata) {
-    readers, err := b.getDiskReaders(objectName, "data")
+// readObjectData -
+func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMetadata ObjectMetadata) {
+    readers, err := b.getReaders(objectName, "data")
     if err != nil {
         writer.CloseWithError(iodine.New(err, nil))
         return
@@ -490,8 +491,8 @@ func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers []io.ReadC
     return decodedData, nil
 }
 
-// getDiskReaders -
-func (b bucket) getDiskReaders(objectName, objectMeta string) ([]io.ReadCloser, error) {
+// getReaders -
+func (b bucket) getReaders(objectName, objectMeta string) ([]io.ReadCloser, error) {
     var readers []io.ReadCloser
     nodeSlice := 0
     for _, node := range b.nodes {
@@ -514,8 +515,8 @@ func (b bucket) getDiskReaders(objectName, objectMeta string) ([]io.ReadCloser,
     return readers, nil
 }
 
-// getDiskWriters -
-func (b bucket) getDiskWriters(objectName, objectMeta string) ([]io.WriteCloser, error) {
+// getWriters -
+func (b bucket) getWriters(objectName, objectMeta string) ([]io.WriteCloser, error) {
     var writers []io.WriteCloser
     nodeSlice := 0
     for _, node := range b.nodes {