Compare commits

...

2 Commits

Author SHA1 Message Date
Klaus Post 49f6035b1e Fix incremental usage accounting (#12871)
Remote caches were not returned correctly, so they would not get updated on save.

Furthermore, make some tweaks for more reliable updates.

Invalidate the bloom filter to ensure a rescan.
2021-08-17 23:52:06 -07:00
Klaus Post d20746cf24 fix: two different scanner update races (#12615) 2021-08-17 23:51:34 -07:00
6 changed files with 29 additions and 11 deletions

View File

@ -526,12 +526,22 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
return
}
if !into.Compacted {
into.addChild(dataUsageHash(folder.name))
h := dataUsageHash(folder.name)
into.addChild(h)
// We scanned a folder, optionally send update.
f.updateCache.deleteRecursive(h)
f.updateCache.copyWithChildren(&f.newCache, h, folder.parent)
f.sendUpdate()
}
// We scanned a folder, optionally send update.
f.sendUpdate()
}
// Transfer existing
if !into.Compacted {
for _, folder := range existingFolders {
h := hashPath(folder.name)
f.updateCache.copyWithChildren(&f.oldCache, h, folder.parent)
}
}
// Scan new...
for _, folder := range newFolders {
h := hashPath(folder.name)

View File

@ -46,7 +46,7 @@ const (
dataUpdateTrackerQueueSize = 0
dataUpdateTrackerFilename = dataUsageBucket + SlashSeparator + ".tracker.bin"
dataUpdateTrackerVersion = 5
dataUpdateTrackerVersion = 6
dataUpdateTrackerSaveInterval = 5 * time.Minute
)
@ -400,7 +400,7 @@ func (d *dataUpdateTracker) deserialize(src io.Reader, newerThan time.Time) erro
return err
}
switch tmp[0] {
case 1, 2, 3, 4:
case 1, 2, 3, 4, 5:
if intDataUpdateTracker.debug {
console.Debugln(color.Green("dataUpdateTracker: ") + "deprecated data version, updating.")
}

View File

@ -513,7 +513,11 @@ func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compact
// StringAll returns a detailed string representation of all entries in the cache.
func (d *dataUsageCache) StringAll() string {
// Remove bloom filter from print.
bf := d.Info.BloomFilter
d.Info.BloomFilter = nil
s := fmt.Sprintf("info:%+v\n", d.Info)
d.Info.BloomFilter = bf
for k, v := range d.Cache {
s += fmt.Sprintf("\t%v: %+v\n", k, v)
}

View File

@ -484,20 +484,19 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
updates := make(chan dataUsageEntry, 1)
var wg sync.WaitGroup
wg.Add(1)
go func() {
go func(name string) {
defer wg.Done()
for update := range updates {
bucketResults <- dataUsageEntryInfo{
Name: cache.Info.Name,
Name: name,
Parent: dataUsageRoot,
Entry: update,
}
if intDataUpdateTracker.debug {
console.Debugln("bucket", bucket.Name, "got update", update)
console.Debugln("z:", er.poolIndex, "s:", er.setIndex, "bucket", name, "got update", update)
}
}
}()
}(cache.Info.Name)
// Calc usage
before := cache.Info.LastUpdate
var err error

View File

@ -250,7 +250,7 @@ func (client *storageRESTClient) NSScanner(ctx context.Context, cache dataUsageC
if err == io.EOF {
err = nil
}
return cache, err
return newCache, err
}
func (client *storageRESTClient) GetDiskID() (string, error) {

View File

@ -188,6 +188,7 @@ func (s *storageRESTServer) NSScannerHandler(w http.ResponseWriter, r *http.Requ
defer wg.Done()
for update := range updates {
// Write true bool to indicate update.
var err error
if err = respW.WriteBool(true); err == nil {
err = update.EncodeMsg(respW)
}
@ -211,6 +212,10 @@ func (s *storageRESTServer) NSScannerHandler(w http.ResponseWriter, r *http.Requ
if err = respW.WriteBool(false); err == nil {
err = usageInfo.EncodeMsg(respW)
}
if err != nil {
resp.CloseWithError(err)
return
}
resp.CloseWithError(respW.Flush())
}