0
0
Fork 1
mirror of https://mau.dev/maunium/synapse.git synced 2024-12-13 18:33:23 +01:00

Rename TimerMetric to DistributionMetric; as it could count more than just time

This commit is contained in:
Paul "LeoNerd" Evans 2015-03-10 15:21:03 +00:00
parent 642f725fd7
commit f1fbe3e09f
4 changed files with 37 additions and 33 deletions

View file

@@ -19,7 +19,9 @@ from __future__ import absolute_import
import logging
from resource import getrusage, getpagesize, RUSAGE_SELF
from .metric import CounterMetric, CallbackMetric, TimerMetric, CacheMetric
from .metric import (
CounterMetric, CallbackMetric, DistributionMetric, CacheMetric
)
logger = logging.getLogger(__name__)
@@ -59,8 +61,8 @@ class Metrics(object):
def register_callback(self, *args, **kwargs):
return self._register(CallbackMetric, *args, **kwargs)
def register_timer(self, *args, **kwargs):
return self._register(TimerMetric, *args, **kwargs)
def register_distribution(self, *args, **kwargs):
return self._register(DistributionMetric, *args, **kwargs)
def register_cache(self, *args, **kwargs):
return self._register(CacheMetric, *args, **kwargs)

View file

@@ -101,35 +101,37 @@ class CallbackMetric(BaseMetric):
for k in sorted(value.keys())]
class TimerMetric(CounterMetric):
"""A combination of an event counter and a time accumulator, which counts
both the number of events and how long each one takes.
class DistributionMetric(CounterMetric):
"""A combination of an event counter and an accumulator, which counts
both the number of events and accumulates the total value. Typically this
could be used to keep track of method-running times, or other distributions
of values that occur in discrete occurrences.
TODO(paul): Try to export some heatmap-style stats?
"""
def __init__(self, *args, **kwargs):
super(TimerMetric, self).__init__(*args, **kwargs)
super(DistributionMetric, self).__init__(*args, **kwargs)
self.times = {}
self.totals = {}
# Scalar metrics are never empty
if self.is_scalar():
self.times[()] = 0
self.totals[()] = 0
def inc_time(self, msec, *values):
def inc_by(self, inc, *values):
self.inc(*values)
if values not in self.times:
self.times[values] = msec
if values not in self.totals:
self.totals[values] = inc
else:
self.times[values] += msec
self.totals[values] += inc
def render_item(self, k):
keystr = self._render_key(k)
return ["%s:count%s %d" % (self.name, keystr, self.counts[k]),
"%s:msec%s %d" % (self.name, keystr, self.times[k])]
"%s:total%s %d" % (self.name, keystr, self.totals[k])]
class CacheMetric(object):

View file

@@ -38,9 +38,9 @@ transaction_logger = logging.getLogger("synapse.storage.txn")
metrics = synapse.metrics.get_metrics_for("synapse.storage")
sql_query_timer = metrics.register_timer("queries", labels=["verb"])
sql_txn_timer = metrics.register_timer("transactions", labels=["desc"])
sql_getevents_timer = metrics.register_timer("getEvents", labels=["desc"])
sql_query_timer = metrics.register_distribution("queries", labels=["verb"])
sql_txn_timer = metrics.register_distribution("transactions", labels=["desc"])
sql_getevents_timer = metrics.register_distribution("getEvents", labels=["desc"])
caches_by_name = {}
cache_counter = metrics.register_cache(
@@ -143,7 +143,7 @@ class LoggingTransaction(object):
finally:
msecs = (time.time() * 1000) - start
sql_logger.debug("[SQL time] {%s} %f", self.name, msecs)
sql_query_timer.inc_time(msecs, sql.split()[0])
sql_query_timer.inc_by(msecs, sql.split()[0])
class PerformanceCounters(object):
@@ -268,7 +268,7 @@ class SQLBaseStore(object):
self._current_txn_total_time += end - start
self._txn_perf_counters.update(desc, start, end)
sql_txn_timer.inc_time(self._current_txn_total_time, desc)
sql_txn_timer.inc_by(self._current_txn_total_time, desc)
with PreserveLoggingContext():
result = yield self._db_pool.runInteraction(
@@ -672,7 +672,7 @@ class SQLBaseStore(object):
def update_counter(desc, last_time):
curr_time = self._get_event_counters.update(desc, last_time)
sql_getevents_timer.inc_time(curr_time - last_time, desc)
sql_getevents_timer.inc_by(curr_time - last_time, desc)
return curr_time
cache = self._get_event_cache.setdefault(event_id, {})
@@ -727,7 +727,7 @@ class SQLBaseStore(object):
def update_counter(desc, last_time):
curr_time = self._get_event_counters.update(desc, last_time)
sql_getevents_timer.inc_time(curr_time - last_time, desc)
sql_getevents_timer.inc_by(curr_time - last_time, desc)
return curr_time
d = json.loads(js)

View file

@@ -16,7 +16,7 @@
from tests import unittest
from synapse.metrics.metric import (
CounterMetric, CallbackMetric, TimerMetric, CacheMetric
CounterMetric, CallbackMetric, DistributionMetric, CacheMetric
)
@@ -97,37 +97,37 @@ class CallbackMetricTestCase(unittest.TestCase):
])
class TimerMetricTestCase(unittest.TestCase):
class DistributionMetricTestCase(unittest.TestCase):
def test_scalar(self):
metric = TimerMetric("thing")
metric = DistributionMetric("thing")
self.assertEquals(metric.render(), [
'thing:count 0',
'thing:msec 0',
'thing:total 0',
])
metric.inc_time(500)
metric.inc_by(500)
self.assertEquals(metric.render(), [
'thing:count 1',
'thing:msec 500',
'thing:total 500',
])
def test_vector(self):
metric = TimerMetric("queries", labels=["verb"])
metric = DistributionMetric("queries", labels=["verb"])
self.assertEquals(metric.render(), [])
metric.inc_time(300, "SELECT")
metric.inc_time(200, "SELECT")
metric.inc_time(800, "INSERT")
metric.inc_by(300, "SELECT")
metric.inc_by(200, "SELECT")
metric.inc_by(800, "INSERT")
self.assertEquals(metric.render(), [
'queries:count{verb="INSERT"} 1',
'queries:msec{verb="INSERT"} 800',
'queries:total{verb="INSERT"} 800',
'queries:count{verb="SELECT"} 2',
'queries:msec{verb="SELECT"} 500',
'queries:total{verb="SELECT"} 500',
])