fs: cleanup - do not cache size of metafiles (#3630)

* Remove Size() method and size field from lock.LockedFile
* WriteTo method of fsMeta and uploadsV1 now takes concrete type *lock.LockedFile
This commit is contained in:
Krishna Srinivas 2017-01-25 12:29:06 -08:00 committed by Harshavardhana
parent 76b8abcd8e
commit 82373e3d50
10 changed files with 32 additions and 36 deletions

View file

@@ -125,18 +125,18 @@ func (m *fsMetaV1) AddObjectPart(partNumber int, partName string, partETag strin
sort.Sort(byObjectPartNumber(m.Parts))
}
func (m *fsMetaV1) WriteTo(writer io.Writer) (n int64, err error) {
func (m *fsMetaV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
var metadataBytes []byte
metadataBytes, err = json.Marshal(m)
if err != nil {
return 0, traceError(err)
}
if err = writer.(*lock.LockedFile).Truncate(0); err != nil {
if err = lk.Truncate(0); err != nil {
return 0, traceError(err)
}
if _, err = writer.Write(metadataBytes); err != nil {
if _, err = lk.Write(metadataBytes); err != nil {
return 0, traceError(err)
}
@@ -144,9 +144,14 @@ func (m *fsMetaV1) WriteTo(writer io.Writer) (n int64, err error) {
return int64(len(metadataBytes)), nil
}
func (m *fsMetaV1) ReadFrom(reader io.Reader) (n int64, err error) {
func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
var metadataBytes []byte
metadataBytes, err = ioutil.ReadAll(reader)
fi, err := lk.Stat()
if err != nil {
return 0, traceError(err)
}
metadataBytes, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
if err != nil {
return 0, traceError(err)
}

View file

@@ -18,7 +18,6 @@ package cmd
import (
"bytes"
"io"
"os"
"path/filepath"
"testing"
@@ -69,10 +68,9 @@ func TestReadFSMetadata(t *testing.T) {
}
defer rlk.Close()
sectionReader := io.NewSectionReader(rlk, 0, rlk.Size())
// Regular fs metadata reading, no errors expected
fsMeta := fsMetaV1{}
if _, err = fsMeta.ReadFrom(sectionReader); err != nil {
if _, err = fsMeta.ReadFrom(rlk.LockedFile); err != nil {
t.Fatal("Unexpected error ", err)
}
@@ -84,7 +82,7 @@ func TestReadFSMetadata(t *testing.T) {
file.Write([]byte{'a'})
file.Close()
fsMeta = fsMetaV1{}
if _, err := fsMeta.ReadFrom(sectionReader); err == nil {
if _, err := fsMeta.ReadFrom(rlk.LockedFile); err == nil {
t.Fatal("Should fail", err)
}
}
@@ -119,10 +117,9 @@ func TestWriteFSMetadata(t *testing.T) {
}
defer rlk.Close()
sectionReader := io.NewSectionReader(rlk, 0, rlk.Size())
// FS metadata reading, no errors expected (healthy disk)
fsMeta := fsMetaV1{}
_, err = fsMeta.ReadFrom(sectionReader)
_, err = fsMeta.ReadFrom(rlk.LockedFile)
if err != nil {
t.Fatal("Unexpected error ", err)
}

View file

@@ -97,7 +97,7 @@ func (fs fsObjects) deleteUploadsJSON(bucket, object, uploadID string) error {
// slice is empty then we remove/purge the file.
func (fs fsObjects) removeUploadID(bucket, object, uploadID string, rwlk *lock.LockedFile) error {
uploadIDs := uploadsV1{}
_, err := uploadIDs.ReadFrom(io.NewSectionReader(rwlk, 0, rwlk.Size()))
_, err := uploadIDs.ReadFrom(rwlk)
if err != nil {
return err
}
@@ -121,7 +121,7 @@ func (fs fsObjects) removeUploadID(bucket, object, uploadID string, rwlk *lock.L
func (fs fsObjects) addUploadID(bucket, object, uploadID string, initiated time.Time, rwlk *lock.LockedFile) error {
uploadIDs := uploadsV1{}
_, err := uploadIDs.ReadFrom(io.NewSectionReader(rwlk, 0, rwlk.Size()))
_, err := uploadIDs.ReadFrom(rwlk)
// For all unexpected errors, we return.
if err != nil && errorCause(err) != io.EOF {
return err

View file

@@ -52,7 +52,7 @@ func (fs fsObjects) listMultipartUploadIDs(bucketName, objectName, uploadIDMarke
// Read `uploads.json`.
uploadIDs := uploadsV1{}
if _, err = uploadIDs.ReadFrom(io.NewSectionReader(rlk, 0, rlk.Size())); err != nil {
if _, err = uploadIDs.ReadFrom(rlk.LockedFile); err != nil {
return nil, false, err
}
@@ -378,7 +378,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
defer rwlk.Close()
fsMeta := fsMetaV1{}
_, err = fsMeta.ReadFrom(io.NewSectionReader(rwlk, 0, rwlk.Size()))
_, err = fsMeta.ReadFrom(rwlk)
if err != nil {
return "", toObjectErr(err, minioMetaMultipartBucket, fsMetaPath)
}
@@ -499,7 +499,7 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
defer fs.rwPool.Close(fsMetaPath)
fsMeta := fsMetaV1{}
_, err = fsMeta.ReadFrom((io.NewSectionReader(metaFile, 0, metaFile.Size())))
_, err = fsMeta.ReadFrom(metaFile.LockedFile)
if err != nil {
return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, fsMetaPath)
}
@@ -630,7 +630,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
fsMeta := fsMetaV1{}
// Read saved fs metadata for ongoing multipart.
_, err = fsMeta.ReadFrom(io.NewSectionReader(rlk, 0, rlk.Size()))
_, err = fsMeta.ReadFrom(rlk.LockedFile)
if err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(err, minioMetaMultipartBucket, fsMetaPathMultipart)

View file

@@ -505,7 +505,7 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {
if err == nil {
// Read from fs metadata only if it exists.
defer fs.rwPool.Close(fsMetaPath)
if _, rerr := fsMeta.ReadFrom(io.NewSectionReader(rlk, 0, rlk.Size())); rerr != nil {
if _, rerr := fsMeta.ReadFrom(rlk.LockedFile); rerr != nil {
return ObjectInfo{}, toObjectErr(rerr, bucket, object)
}
}

View file

@@ -76,26 +76,30 @@ func (u *uploadsV1) IsEmpty() bool {
return len(u.Uploads) == 0
}
func (u *uploadsV1) WriteTo(writer io.Writer) (n int64, err error) {
func (u *uploadsV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
// Serialize to prepare to write to disk.
var uplBytes []byte
uplBytes, err = json.Marshal(u)
if err != nil {
return 0, traceError(err)
}
if err = writer.(*lock.LockedFile).Truncate(0); err != nil {
if err = lk.Truncate(0); err != nil {
return 0, traceError(err)
}
_, err = writer.Write(uplBytes)
_, err = lk.Write(uplBytes)
if err != nil {
return 0, traceError(err)
}
return int64(len(uplBytes)), nil
}
func (u *uploadsV1) ReadFrom(reader io.Reader) (n int64, err error) {
func (u *uploadsV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
var uploadIDBytes []byte
uploadIDBytes, err = ioutil.ReadAll(reader)
fi, err := lk.Stat()
if err != nil {
return 0, traceError(err)
}
uploadIDBytes, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
if err != nil {
return 0, traceError(err)
}

View file

@@ -89,14 +89,7 @@ func RLockedOpenFile(path string) (*RLockedFile, error) {
}
// LockedFile represents a locked file, implements a helper
// method Size(), represents the size of the underlying object.
// LockedFile represents a locked file
type LockedFile struct {
*os.File
size int64
}
// Size - size of the underlying locked file.
func (l *LockedFile) Size() int64 {
return l.size
}

View file

@@ -71,5 +71,5 @@ func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error
}
}
return &LockedFile{File: f, size: st.Size()}, nil
return &LockedFile{File: f}, nil
}

View file

@@ -80,9 +80,6 @@ func TestRWLockedFile(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if rlk.Size() != 0 {
t.Fatal("File size should be zero", rlk.Size())
}
isClosed := rlk.IsClosed()
if isClosed {
t.Fatal("File ref count shouldn't be zero")

View file

@@ -65,7 +65,7 @@ func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error
}
}
return &LockedFile{File: f, size: st.Size()}, nil
return &LockedFile{File: f}, nil
}
func makeInheritSa() *syscall.SecurityAttributes {