Optionally track memory usage of each LruCache (#9881)

This will double count slightly in the presence of interned strings. It's off by default as it can consume a lot of resources.
This commit is contained in:
Erik Johnston 2021-05-05 16:54:36 +01:00 committed by GitHub
parent 1fb9a2d0bf
commit ef889c98a6
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
8 changed files with 97 additions and 1 deletions

1
changelog.d/9881.feature Normal file
View file

@ -0,0 +1 @@
Add experimental option to track memory usage of the caches.

View file

@ -171,3 +171,6 @@ ignore_missing_imports = True
[mypy-txacme.*]
ignore_missing_imports = True
[mypy-pympler.*]
ignore_missing_imports = True

View file

@ -454,6 +454,7 @@ def start(config_options):
config.server.update_user_directory = False config.server.update_user_directory = False
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
if config.server.gc_seconds: if config.server.gc_seconds:
synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds

View file

@ -341,6 +341,7 @@ def setup(config_options):
sys.exit(0) sys.exit(0)
events.USE_FROZEN_DICTS = config.use_frozen_dicts events.USE_FROZEN_DICTS = config.use_frozen_dicts
synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
if config.server.gc_seconds: if config.server.gc_seconds:
synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds

View file

@ -17,6 +17,8 @@ import re
import threading import threading
from typing import Callable, Dict from typing import Callable, Dict
from synapse.python_dependencies import DependencyException, check_requirements
from ._base import Config, ConfigError from ._base import Config, ConfigError
# The prefix for all cache factor-related environment variables # The prefix for all cache factor-related environment variables
@ -189,6 +191,15 @@ class CacheConfig(Config):
) )
self.cache_factors[cache] = factor self.cache_factors[cache] = factor
self.track_memory_usage = cache_config.get("track_memory_usage", False)
if self.track_memory_usage:
try:
check_requirements("cache_memory")
except DependencyException as e:
raise ConfigError(
e.message # noqa: B306, DependencyException.message is a property
)
# Resize all caches (if necessary) with the new factors we've loaded # Resize all caches (if necessary) with the new factors we've loaded
self.resize_all_caches() self.resize_all_caches()

View file

@ -116,6 +116,8 @@ CONDITIONAL_REQUIREMENTS = {
# hiredis is not a *strict* dependency, but it makes things much faster. # hiredis is not a *strict* dependency, but it makes things much faster.
# (if it is not installed, we fall back to slow code.) # (if it is not installed, we fall back to slow code.)
"redis": ["txredisapi>=1.4.7", "hiredis"], "redis": ["txredisapi>=1.4.7", "hiredis"],
# Required to use experimental `caches.track_memory_usage` config option.
"cache_memory": ["pympler"],
} }
ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str] ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str]

View file

@ -24,6 +24,11 @@ from synapse.config.cache import add_resizable_cache
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# Whether to track estimated memory usage of the LruCaches.
TRACK_MEMORY_USAGE = False
caches_by_name = {}  # type: Dict[str, Sized]
collectors_by_name = {}  # type: Dict[str, CacheMetric]
@ -32,6 +37,11 @@ cache_hits = Gauge("synapse_util_caches_cache:hits", "", ["name"])
cache_evicted = Gauge("synapse_util_caches_cache:evicted_size", "", ["name"]) cache_evicted = Gauge("synapse_util_caches_cache:evicted_size", "", ["name"])
cache_total = Gauge("synapse_util_caches_cache:total", "", ["name"]) cache_total = Gauge("synapse_util_caches_cache:total", "", ["name"])
cache_max_size = Gauge("synapse_util_caches_cache_max_size", "", ["name"]) cache_max_size = Gauge("synapse_util_caches_cache_max_size", "", ["name"])
# Estimated bytes held by each named cache. Only written from
# CacheMetric.collect, and only when TRACK_MEMORY_USAGE is enabled —
# otherwise the gauge exists but is never set.
cache_memory_usage = Gauge(
"synapse_util_caches_cache_size_bytes",
"Estimated memory usage of the caches",
["name"],
)
response_cache_size = Gauge("synapse_util_caches_response_cache:size", "", ["name"]) response_cache_size = Gauge("synapse_util_caches_response_cache:size", "", ["name"])
response_cache_hits = Gauge("synapse_util_caches_response_cache:hits", "", ["name"]) response_cache_hits = Gauge("synapse_util_caches_response_cache:hits", "", ["name"])
@ -52,6 +62,7 @@ class CacheMetric:
hits = attr.ib(default=0) hits = attr.ib(default=0)
misses = attr.ib(default=0) misses = attr.ib(default=0)
evicted_size = attr.ib(default=0) evicted_size = attr.ib(default=0)
memory_usage = attr.ib(default=None)
def inc_hits(self): def inc_hits(self):
self.hits += 1 self.hits += 1
@ -62,6 +73,19 @@ class CacheMetric:
def inc_evictions(self, size=1): def inc_evictions(self, size=1):
self.evicted_size += size self.evicted_size += size
def inc_memory_usage(self, memory: int):
    """Add *memory* bytes to the running estimate.

    The counter starts as None (nothing inserted yet) and is lazily
    initialised on the first increment.
    """
    if self.memory_usage is None:
        self.memory_usage = memory
    else:
        self.memory_usage += memory
def dec_memory_usage(self, memory: int):
    """Subtract *memory* bytes from the running estimate.

    NOTE(review): assumes inc_memory_usage has run at least once, so
    self.memory_usage is an int rather than None.
    """
    self.memory_usage = self.memory_usage - memory
def clear_memory_usage(self):
    """Reset the estimate to zero; stays None if tracking never started."""
    if self.memory_usage is None:
        return
    self.memory_usage = 0
def describe(self): def describe(self):
return [] return []
@ -81,6 +105,13 @@ class CacheMetric:
cache_total.labels(self._cache_name).set(self.hits + self.misses) cache_total.labels(self._cache_name).set(self.hits + self.misses)
if getattr(self._cache, "max_size", None): if getattr(self._cache, "max_size", None):
cache_max_size.labels(self._cache_name).set(self._cache.max_size) cache_max_size.labels(self._cache_name).set(self._cache.max_size)
if TRACK_MEMORY_USAGE:
# self.memory_usage can be None if nothing has been inserted
# into the cache yet.
cache_memory_usage.labels(self._cache_name).set(
self.memory_usage or 0
)
if self._collect_callback: if self._collect_callback:
self._collect_callback() self._collect_callback()
except Exception as e: except Exception as e:

View file

@ -32,9 +32,36 @@ from typing import (
from typing_extensions import Literal from typing_extensions import Literal
from synapse.config import cache as cache_config from synapse.config import cache as cache_config
from synapse.util import caches
from synapse.util.caches import CacheMetric, register_cache from synapse.util.caches import CacheMetric, register_cache
from synapse.util.caches.treecache import TreeCache from synapse.util.caches.treecache import TreeCache
try:
from pympler.asizeof import Asizer
def _get_size_of(val: Any, *, recurse=True) -> int:
"""Get an estimate of the size in bytes of the object.
Args:
val: The object to size.
recurse: If true will include referenced values in the size,
otherwise only sizes the given object.
"""
# Ignore singleton values when calculating memory usage.
if val in ((), None, ""):
return 0
sizer = Asizer()
sizer.exclude_refs((), None, "")
return sizer.asizeof(val, limit=100 if recurse else 0)
except ImportError:
def _get_size_of(val: Any, *, recurse=True) -> int:
return 0
# Function type: the type used for invalidation callbacks # Function type: the type used for invalidation callbacks
FT = TypeVar("FT", bound=Callable[..., Any]) FT = TypeVar("FT", bound=Callable[..., Any])
@ -56,7 +83,7 @@ def enumerate_leaves(node, depth):
class _Node: class _Node:
__slots__ = ["prev_node", "next_node", "key", "value", "callbacks"] __slots__ = ["prev_node", "next_node", "key", "value", "callbacks", "memory"]
def __init__( def __init__(
self, self,
@ -84,6 +111,16 @@ class _Node:
self.add_callbacks(callbacks) self.add_callbacks(callbacks)
self.memory = 0
if caches.TRACK_MEMORY_USAGE:
self.memory = (
_get_size_of(key)
+ _get_size_of(value)
+ _get_size_of(self.callbacks, recurse=False)
+ _get_size_of(self, recurse=False)
)
self.memory += _get_size_of(self.memory, recurse=False)
def add_callbacks(self, callbacks: Collection[Callable[[], None]]) -> None: def add_callbacks(self, callbacks: Collection[Callable[[], None]]) -> None:
"""Add to stored list of callbacks, removing duplicates.""" """Add to stored list of callbacks, removing duplicates."""
@ -233,6 +270,9 @@ class LruCache(Generic[KT, VT]):
if size_callback: if size_callback:
cached_cache_len[0] += size_callback(node.value) cached_cache_len[0] += size_callback(node.value)
if caches.TRACK_MEMORY_USAGE and metrics:
metrics.inc_memory_usage(node.memory)
def move_node_to_front(node): def move_node_to_front(node):
prev_node = node.prev_node prev_node = node.prev_node
next_node = node.next_node next_node = node.next_node
@ -258,6 +298,9 @@ class LruCache(Generic[KT, VT]):
node.run_and_clear_callbacks() node.run_and_clear_callbacks()
if caches.TRACK_MEMORY_USAGE and metrics:
metrics.dec_memory_usage(node.memory)
return deleted_len return deleted_len
@overload @overload
@ -373,6 +416,9 @@ class LruCache(Generic[KT, VT]):
if size_callback: if size_callback:
cached_cache_len[0] = 0 cached_cache_len[0] = 0
if caches.TRACK_MEMORY_USAGE and metrics:
metrics.clear_memory_usage()
@synchronized @synchronized
def cache_contains(key: KT) -> bool: def cache_contains(key: KT) -> bool:
return key in cache return key in cache