Fix incremental usage accounting (#12871)

Remote caches were not returned correctly, so they would not get updated on save.

Furthermore, make some tweaks for more reliable updates.

Invalidate the bloom filter to ensure a rescan.
This commit is contained in:
Klaus Post 2021-08-04 09:14:14 -07:00 committed by Harshavardhana
parent d20746cf24
commit 49f6035b1e
5 changed files with 21 additions and 8 deletions

View file

@ -526,12 +526,22 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
return return
} }
if !into.Compacted { if !into.Compacted {
into.addChild(dataUsageHash(folder.name)) h := dataUsageHash(folder.name)
into.addChild(h)
// We scanned a folder, optionally send update.
f.updateCache.deleteRecursive(h)
f.updateCache.copyWithChildren(&f.newCache, h, folder.parent)
f.sendUpdate()
} }
// We scanned a folder, optionally send update.
f.sendUpdate()
} }
// Transfer existing
if !into.Compacted {
for _, folder := range existingFolders {
h := hashPath(folder.name)
f.updateCache.copyWithChildren(&f.oldCache, h, folder.parent)
}
}
// Scan new... // Scan new...
for _, folder := range newFolders { for _, folder := range newFolders {
h := hashPath(folder.name) h := hashPath(folder.name)

View file

@ -46,7 +46,7 @@ const (
dataUpdateTrackerQueueSize = 0 dataUpdateTrackerQueueSize = 0
dataUpdateTrackerFilename = dataUsageBucket + SlashSeparator + ".tracker.bin" dataUpdateTrackerFilename = dataUsageBucket + SlashSeparator + ".tracker.bin"
dataUpdateTrackerVersion = 5 dataUpdateTrackerVersion = 6
dataUpdateTrackerSaveInterval = 5 * time.Minute dataUpdateTrackerSaveInterval = 5 * time.Minute
) )
@ -400,7 +400,7 @@ func (d *dataUpdateTracker) deserialize(src io.Reader, newerThan time.Time) erro
return err return err
} }
switch tmp[0] { switch tmp[0] {
case 1, 2, 3, 4: case 1, 2, 3, 4, 5:
if intDataUpdateTracker.debug { if intDataUpdateTracker.debug {
console.Debugln(color.Green("dataUpdateTracker: ") + "deprecated data version, updating.") console.Debugln(color.Green("dataUpdateTracker: ") + "deprecated data version, updating.")
} }

View file

@ -513,7 +513,11 @@ func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compact
// StringAll returns a detailed string representation of all entries in the cache. // StringAll returns a detailed string representation of all entries in the cache.
func (d *dataUsageCache) StringAll() string { func (d *dataUsageCache) StringAll() string {
// Remove bloom filter from print.
bf := d.Info.BloomFilter
d.Info.BloomFilter = nil
s := fmt.Sprintf("info:%+v\n", d.Info) s := fmt.Sprintf("info:%+v\n", d.Info)
d.Info.BloomFilter = bf
for k, v := range d.Cache { for k, v := range d.Cache {
s += fmt.Sprintf("\t%v: %+v\n", k, v) s += fmt.Sprintf("\t%v: %+v\n", k, v)
} }

View file

@ -493,11 +493,10 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
Entry: update, Entry: update,
} }
if intDataUpdateTracker.debug { if intDataUpdateTracker.debug {
console.Debugln("bucket", bucket.Name, "got update", update) console.Debugln("z:", er.poolIndex, "s:", er.setIndex, "bucket", name, "got update", update)
} }
} }
}(cache.Info.Name) }(cache.Info.Name)
// Calc usage // Calc usage
before := cache.Info.LastUpdate before := cache.Info.LastUpdate
var err error var err error

View file

@ -250,7 +250,7 @@ func (client *storageRESTClient) NSScanner(ctx context.Context, cache dataUsageC
if err == io.EOF { if err == io.EOF {
err = nil err = nil
} }
return cache, err return newCache, err
} }
func (client *storageRESTClient) GetDiskID() (string, error) { func (client *storageRESTClient) GetDiskID() (string, error) {