diff --git a/locust/static/locust.js b/locust/static/locust.js
index f6a804becc..fa425d5438 100644
--- a/locust/static/locust.js
+++ b/locust/static/locust.js
@@ -117,7 +117,7 @@ $(".stats_label").click(function(event) {
 
 // init charts
 var rpsChart = new LocustLineChart($(".charts-container"), "Total Requests per Second", ["RPS"], "reqs/s");
-var responseTimeChart = new LocustLineChart($(".charts-container"), "Average Response Time", ["Average Response Time"], "ms");
+var responseTimeChart = new LocustLineChart($(".charts-container"), "Response Times", ["Median Response Time", "95% percentile"], "ms");
 var usersChart = new LocustLineChart($(".charts-container"), "Number of Users", ["Users"], "users");
 
 function updateStats() {
@@ -149,7 +149,7 @@ function updateStats() {
         var total = report.stats[report.stats.length-1];
         // update charts
         rpsChart.addValue([total.current_rps]);
-        responseTimeChart.addValue([total.avg_response_time]);
+        responseTimeChart.addValue([report.current_response_time_percentile_50, report.current_response_time_percentile_95]);
         usersChart.addValue([report.user_count]);
     }
 
diff --git a/locust/static/vintage.js b/locust/static/vintage.js
index 781f469fee..94303ce92c 100644
--- a/locust/static/vintage.js
+++ b/locust/static/vintage.js
@@ -19,7 +19,7 @@
         log('ECharts is not Loaded');
         return;
     }
-    var colorPalette = ['#00ca5a','#919e8b', '#d7ab82', '#6e7074','#61a0a8','#efa18d', '#787464', '#cc7e63', '#724e58', '#4b565b'];
+    var colorPalette = ['#00ca5a','#ffca5a', '#d7ab82', '#6e7074','#61a0a8','#efa18d', '#787464', '#cc7e63', '#724e58', '#4b565b'];
     echarts.registerTheme('vintage', {
         color: colorPalette,
         backgroundColor: '#132b21',
diff --git a/locust/stats.py b/locust/stats.py
index 195ac99d33..a061eae834 100644
--- a/locust/stats.py
+++ b/locust/stats.py
@@ -1,10 +1,11 @@
 import hashlib
 import time
+from collections import namedtuple, OrderedDict
+from copy import copy
 from itertools import chain
 
 import gevent
 import six
-
 from six.moves import xrange
 
 from . import events
@@ -20,19 +21,90 @@
 """Default interval for how frequently results are written to console."""
 CONSOLE_STATS_INTERVAL_SEC = 2
 
+"""
+Default window size/resolution - in seconds - when calculating the current
+response time percentile
+"""
+CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW = 10
+
+
+CachedResponseTimes = namedtuple("CachedResponseTimes", ["response_times", "num_requests"])
+
+
 class RequestStatsAdditionError(Exception):
     pass
 
+
+def calculate_response_time_percentile(response_times, num_requests, percent):
+    """
+    Get the response time that a certain percentage of the requests finished
+    within. Arguments:
+
+    response_times: A StatsEntry.response_times dict
+    num_requests: Number of requests made (could be derived from response_times,
+                  but we save some CPU cycles by using the value which we already store)
+    percent: The percentile we want to calculate. Specified in range: 0.0 - 1.0
+    """
+    num_of_request = int((num_requests * percent))
+
+    processed_count = 0
+    for response_time in sorted(six.iterkeys(response_times), reverse=True):
+        processed_count += response_times[response_time]
+        if(num_requests - processed_count <= num_of_request):
+            return response_time
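As a quick sanity check of the new helper (an illustrative sketch, not part of the patch): with three requests at 100 ms and one at 800 ms, the 50th percentile bucket is 100 and the 100th percentile is 800.

    response_times = {100: 3, 800: 1}
    assert calculate_response_time_percentile(response_times, num_requests=4, percent=0.5) == 100
    assert calculate_response_time_percentile(response_times, num_requests=4, percent=1.0) == 800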
+ """ + new = {} + for time in latest: + diff = latest[time] - old.get(time, 0) + if diff: + new[time] = diff + return new + + class RequestStats(object): def __init__(self): self.entries = {} self.errors = {} - self.num_requests = 0 - self.num_failures = 0 - self.last_request_timestamp = None + self.total = StatsEntry(self, "Total", None, use_response_times_cache=True) self.start_time = None + @property + def num_requests(self): + return self.total.num_requests + + @property + def num_failures(self): + return self.total.num_failures + + @property + def last_request_timestamp(self): + return self.total.last_request_timestamp + + def log_request(self, method, name, response_time, content_length): + self.total.log(response_time, content_length) + self.get(name, method).log(response_time, content_length) + + def log_error(self, method, name, error): + self.total.log_error(error) + self.get(name, method).log_error(error) + + # store error in errors dict + key = StatsError.create_key(method, name, error) + entry = self.errors.get(key) + if not entry: + entry = StatsError(method, name, error) + self.errors[key] = entry + entry.occured() + def get(self, name, method): """ Retrieve a StatsEntry instance by name and method @@ -43,23 +115,12 @@ def get(self, name, method): self.entries[(name, method)] = entry return entry - def aggregated_stats(self, name="Total", full_request_history=False): - """ - Returns a StatsEntry which is an aggregate of all stats entries - within entries. - """ - total = StatsEntry(self, name, method=None) - for r in six.itervalues(self.entries): - total.extend(r, full_request_history=full_request_history) - return total - def reset_all(self): """ Go through all stats entries and reset them to zero """ self.start_time = time.time() - self.num_requests = 0 - self.num_failures = 0 + self.total.reset() for r in six.itervalues(self.entries): r.reset() @@ -67,12 +128,16 @@ def clear_all(self): """ Remove all stats entries and errors """ - self.num_requests = 0 - self.num_failures = 0 + self.total = StatsEntry(self, "Total", None, use_response_times_cache=True) self.entries = {} self.errors = {} - self.last_request_timestamp = None self.start_time = None + + def serialize_stats(self): + return [self.entries[key].get_stripped_report() for key in six.iterkeys(self.entries) if not (self.entries[key].num_requests == 0 and self.entries[key].num_failures == 0)] + + def serialize_errors(self): + return dict([(k, e.to_dict()) for k, e in six.iteritems(self.errors)]) class StatsEntry(object): @@ -115,6 +180,20 @@ class StatsEntry(object): This dict is used to calculate the median and percentile response times. """ + use_response_times_cache = False + """ + If set to True, the copy of the response_time dict will be stored in response_times_cache + every second, and kept for 20 seconds (by default, will be CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10). + We can use this dict to calculate the *current* median response time, as well as other response + time percentiles. + """ + + response_times_cache = None + """ + If use_response_times_cache is set to True, this will be a {timestamp => CachedResponseTimes()} + OrderedDict that holds a copy of the response_times dict for each of the last 20 seconds. 
+ """ + total_content_length = None """ The sum of the content length of all the requests for this entry """ @@ -124,10 +203,11 @@ class StatsEntry(object): last_request_timestamp = None """ Time of the last request for this entry """ - def __init__(self, stats, name, method): + def __init__(self, stats, name, method, use_response_times_cache=False): self.stats = stats self.name = name self.method = method + self.use_response_times_cache = use_response_times_cache self.reset() def reset(self): @@ -141,22 +221,28 @@ def reset(self): self.last_request_timestamp = int(time.time()) self.num_reqs_per_sec = {} self.total_content_length = 0 + if self.use_response_times_cache: + self.response_times_cache = OrderedDict() + self._cache_response_times(int(time.time())) def log(self, response_time, content_length): - self.stats.num_requests += 1 + # get the time + t = int(time.time()) + + if self.use_response_times_cache and self.last_request_timestamp and t > self.last_request_timestamp: + # see if we shall make a copy of the respone_times dict and store in the cache + self._cache_response_times(t-1) + self.num_requests += 1 - - self._log_time_of_request() + self._log_time_of_request(t) self._log_response_time(response_time) # increase total content-length self.total_content_length += content_length - def _log_time_of_request(self): - t = int(time.time()) + def _log_time_of_request(self, t): self.num_reqs_per_sec[t] = self.num_reqs_per_sec.setdefault(t, 0) + 1 self.last_request_timestamp = t - self.stats.last_request_timestamp = t def _log_response_time(self, response_time): self.total_response_time += response_time @@ -185,14 +271,6 @@ def _log_response_time(self, response_time): def log_error(self, error): self.num_failures += 1 - self.stats.num_failures += 1 - key = StatsError.create_key(self.method, self.name, error) - entry = self.stats.errors.get(key) - if not entry: - entry = StatsError(self.method, self.name, error) - self.stats.errors[key] = entry - - entry.occured() @property def fail_ratio(self): @@ -241,16 +319,10 @@ def avg_content_length(self): except ZeroDivisionError: return 0 - def extend(self, other, full_request_history=False): + def extend(self, other): """ - Extend the data fro the current StatsEntry with the stats from another + Extend the data from the current StatsEntry with the stats from another StatsEntry instance. - - If full_request_history is False, we'll only care to add the data from - the last 20 seconds of other's stats. The reason for this argument is that - extend can be used to generate an aggregate of multiple different StatsEntry - instances on the fly, in order to get the *total* current RPS, average - response time, etc. 
""" self.last_request_timestamp = max(self.last_request_timestamp, other.last_request_timestamp) self.start_time = min(self.start_time, other.start_time) @@ -262,16 +334,10 @@ def extend(self, other, full_request_history=False): self.min_response_time = min(self.min_response_time or 0, other.min_response_time or 0) or other.min_response_time self.total_content_length = self.total_content_length + other.total_content_length - if full_request_history: - for key in other.response_times: - self.response_times[key] = self.response_times.get(key, 0) + other.response_times[key] - for key in other.num_reqs_per_sec: - self.num_reqs_per_sec[key] = self.num_reqs_per_sec.get(key, 0) + other.num_reqs_per_sec[key] - else: - # still add the number of reqs per seconds the last 20 seconds - for i in xrange(other.last_request_timestamp-20, other.last_request_timestamp+1): - if i in other.num_reqs_per_sec: - self.num_reqs_per_sec[i] = self.num_reqs_per_sec.get(i, 0) + other.num_reqs_per_sec[i] + for key in other.response_times: + self.response_times[key] = self.response_times.get(key, 0) + other.response_times[key] + for key in other.num_reqs_per_sec: + self.num_reqs_per_sec[key] = self.num_reqs_per_sec.get(key, 0) + other.num_reqs_per_sec[key] def serialize(self): return { @@ -322,7 +388,7 @@ def __str__(self): fail_percent = 0 return (" %-" + str(STATS_NAME_WIDTH) + "s %7d %12s %7d %7d %7d | %7d %7.2f") % ( - self.method + " " + self.name, + (self.method and self.method + " " or "") + self.name, self.num_requests, "%d(%.2f%%)" % (self.num_failures, fail_percent), self.avg_response_time, @@ -339,20 +405,52 @@ def get_response_time_percentile(self, percent): Percent specified in range: 0.0 - 1.0 """ - num_of_request = int((self.num_requests * percent)) - - processed_count = 0 - for response_time in sorted(six.iterkeys(self.response_times), reverse=True): - processed_count += self.response_times[response_time] - if((self.num_requests - processed_count) <= num_of_request): - return response_time - + return calculate_response_time_percentile(self.response_times, self.num_requests, percent) + + def get_current_response_time_percentile(self, percent): + """ + Calculate the *current* response time for a certain percentile. We use a sliding + window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW) + when calculating this. + """ + if not self.use_response_times_cache: + raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile") + # First, we want to determine which of the cached response_times dicts we should + # use to get response_times for approximately 10 seconds ago. + t = int(time.time()) + # Since we can't be sure that the cache contains an entry for every second. + # We'll construct a list of timestamps which we consider acceptable keys to be used + # when trying to fetch the cached response_times. 
+
+    def get_current_response_time_percentile(self, percent):
+        """
+        Calculate the *current* response time for a certain percentile. We use a sliding
+        window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
+        when calculating this.
+        """
+        if not self.use_response_times_cache:
+            raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
+        # First, we want to determine which of the cached response_times dicts we should
+        # use to get response_times for approximately 10 seconds ago.
+        t = int(time.time())
+        # Since we can't be sure that the cache contains an entry for every second,
+        # we'll construct a list of timestamps which we consider acceptable keys to be used
+        # when trying to fetch the cached response_times. We construct this list in such a way
+        # that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8,
+        # and so on
+        acceptable_timestamps = []
+        for i in xrange(9):
+            acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-i)
+            acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW+i)
+
+        cached = None
+        for ts in acceptable_timestamps:
+            if ts in self.response_times_cache:
+                cached = self.response_times_cache[ts]
+                break
+
+        if cached:
+            # If we found an acceptable cached response_times dict, we'll calculate a new response
+            # times dict of the last 10 seconds (approximately) by diffing it with the current
+            # total response times. Then we'll use that to calculate a response time percentile
+            # for that timeframe
+            return calculate_response_time_percentile(
+                diff_response_time_dicts(self.response_times, cached.response_times),
+                self.num_requests - cached.num_requests,
+                percent,
+            )
+
     def percentile(self, tpl=" %-" + str(STATS_NAME_WIDTH) + "s %8d %6d %6d %6d %6d %6d %6d %6d %6d %6d"):
         if not self.num_requests:
             raise ValueError("Can't calculate percentile on url with no successful requests")
 
         return tpl % (
-            str(self.method) + " " + self.name,
+            (self.method and self.method + " " or "") + self.name,
             self.num_requests,
             self.get_response_time_percentile(0.5),
             self.get_response_time_percentile(0.66),
@@ -364,6 +462,25 @@ def percentile(self, tpl=" %-" + str(STATS_NAME_WIDTH) + "s %8d %6d %6d %6d %6d
             self.get_response_time_percentile(0.99),
             self.max_response_time
         )
+
+    def _cache_response_times(self, t):
+        self.response_times_cache[t] = CachedResponseTimes(
+            response_times=copy(self.response_times),
+            num_requests=self.num_requests,
+        )
+
+        # We'll use a cache size of CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10 since - in the extreme case -
+        # we might still use response times (from the cache) for t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-10
+        # to calculate the current response time percentile, if we're missing cached values for the subsequent
+        # 20 seconds
+        cache_size = CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10
+
+        if len(self.response_times_cache) > cache_size:
+            # only keep the latest 20 response_times dicts
+            for i in xrange(len(self.response_times_cache) - cache_size):
+                self.response_times_cache.popitem(last=False)
+
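Putting the pieces together: the current percentile is simply the plain percentile of the last ~10 seconds' worth of response times, obtained by diffing a cached snapshot against the running totals. A sketch (snapshot and the current_* names are hypothetical stand-ins):

    recent = diff_response_time_dicts(current_response_times, snapshot.response_times)
    recent_num_requests = current_num_requests - snapshot.num_requests
    p95 = calculate_response_time_percentile(recent, recent_num_requests, 0.95)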
 
 class StatsError(object):
     def __init__(self, method, name, error, occurences=0):
@@ -437,14 +554,15 @@ def median_from_dict(total, count):
     """
 
 def on_request_success(request_type, name, response_time, response_length):
-    global_stats.get(name, request_type).log(response_time, response_length)
+    global_stats.log_request(request_type, name, response_time, response_length)
 
 def on_request_failure(request_type, name, response_time, exception):
-    global_stats.get(name, request_type).log_error(exception)
+    global_stats.log_error(request_type, name, exception)
 
 def on_report_to_master(client_id, data):
-    data["stats"] = [global_stats.entries[key].get_stripped_report() for key in six.iterkeys(global_stats.entries) if not (global_stats.entries[key].num_requests == 0 and global_stats.entries[key].num_failures == 0)]
-    data["errors"] = dict([(k, e.to_dict()) for k, e in six.iteritems(global_stats.errors)])
+    data["stats"] = global_stats.serialize_stats()
+    data["stats_total"] = global_stats.total.get_stripped_report()
+    data["errors"] = global_stats.serialize_errors()
    global_stats.errors = {}
 
 def on_slave_report(client_id, data):
@@ -453,14 +571,28 @@
     for entry in data["stats"]:
         entry = StatsEntry.unserialize(entry)
         request_key = (entry.name, entry.method)
         if not request_key in global_stats.entries:
             global_stats.entries[request_key] = StatsEntry(global_stats, entry.name, entry.method)
-        global_stats.entries[request_key].extend(entry, full_request_history=True)
-        global_stats.last_request_timestamp = max(global_stats.last_request_timestamp or 0, entry.last_request_timestamp)
+        global_stats.entries[request_key].extend(entry)
 
     for error_key, error in six.iteritems(data["errors"]):
         if error_key not in global_stats.errors:
             global_stats.errors[error_key] = StatsError.from_dict(error)
         else:
             global_stats.errors[error_key].occurences += error["occurences"]
+
+    # save the old last_request_timestamp, to see if we should store a new copy
+    # of the response times in the response times cache
+    old_last_request_timestamp = global_stats.total.last_request_timestamp
+    # update the total StatsEntry
+    global_stats.total.extend(StatsEntry.unserialize(data["stats_total"]))
+    if global_stats.total.last_request_timestamp > old_last_request_timestamp:
+        # If we've entered a new second, we'll cache the response times. Note that there
+        # might still be reports from other slave nodes - that contain requests for the same
+        # time periods - that haven't been received/accounted for yet. This will cause the cache to
+        # lag behind a second or two, but since StatsEntry.current_response_time_percentile()
+        # (which is what the response times cache is used for) uses an approximation of the
+        # last 10 seconds anyway, it should be fine to ignore this.
+        global_stats.total._cache_response_times(global_stats.total.last_request_timestamp)
+
 
 events.request_success += on_request_success
 events.request_failure += on_request_failure
@@ -500,7 +632,7 @@ def print_percentile_stats(stats):
         console_logger.info(r.percentile())
     console_logger.info("-" * (80 + STATS_NAME_WIDTH))
 
-    total_stats = global_stats.aggregated_stats()
+    total_stats = global_stats.total
     if total_stats.response_times:
         console_logger.info(total_stats.percentile())
     console_logger.info("")
@@ -561,7 +693,7 @@ def requests_csv():
         ])
     ]
 
-    for s in chain(sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.aggregated_stats("Total", full_request_history=True)]):
+    for s in chain(sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.total]):
         rows.append('"%s","%s",%i,%i,%i,%i,%i,%i,%i,%.2f' % (
             s.method,
             s.name,
@@ -593,7 +725,7 @@ def distribution_csv():
         '"99%"',
         '"100%"',
     ))]
-    for s in chain(sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.aggregated_stats("Total", full_request_history=True)]):
+    for s in chain(sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.total]):
         if s.num_requests:
             rows.append(s.percentile(tpl='"%s",%i,%i,%i,%i,%i,%i,%i,%i,%i,%i'))
         else:
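Each slave report now ships a serialized Total entry alongside the per-URL entries, and the master merges it with StatsEntry.extend. A sketch of the payload shape (assuming a local RequestStats instance named stats):

    data = {
        "stats": stats.serialize_stats(),
        "stats_total": stats.total.get_stripped_report(),
        "errors": stats.serialize_errors(),
    }
    # master side:
    global_stats.total.extend(StatsEntry.unserialize(data["stats_total"]))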
diff --git a/locust/test/test_runners.py b/locust/test/test_runners.py
index b785410222..73d2c65dc6 100644
--- a/locust/test/test_runners.py
+++ b/locust/test/test_runners.py
@@ -11,7 +11,7 @@
 from locust.main import parse_options
 from locust.rpc import Message
 from locust.runners import LocalLocustRunner, MasterLocustRunner
-from locust.stats import global_stats
+from locust.stats import global_stats, RequestStats
 from locust.test.testcases import LocustTestCase
 
@@ -26,6 +26,7 @@ def __init__(self, host, port):
     @classmethod
     def mocked_send(cls, message):
         cls.queue.put(message.serialize())
+        sleep(0)
 
     def recv(self):
         results = self.queue.get()
@@ -62,17 +63,14 @@ class MyTestLocust(Locust):
         with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
             master = MasterLocustRunner(MyTestLocust, self.options)
             server.mocked_send(Message("client_ready", None, "zeh_fake_client1"))
-            sleep(0)
             self.assertEqual(1, len(master.clients))
             self.assertTrue("zeh_fake_client1" in master.clients, "Could not find fake client in master instance's clients dict")
 
             server.mocked_send(Message("client_ready", None, "zeh_fake_client2"))
             server.mocked_send(Message("client_ready", None, "zeh_fake_client3"))
             server.mocked_send(Message("client_ready", None, "zeh_fake_client4"))
-            sleep(0)
             self.assertEqual(4, len(master.clients))
 
             server.mocked_send(Message("quit", None, "zeh_fake_client3"))
-            sleep(0)
             self.assertEqual(3, len(master.clients))
 
     def test_slave_stats_report_median(self):
@@ -84,7 +82,6 @@ class MyTestLocust(Locust):
         with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
             master = MasterLocustRunner(MyTestLocust, self.options)
             server.mocked_send(Message("client_ready", None, "fake_client"))
-            sleep(0)
 
             master.stats.get("/", "GET").log(100, 23455)
             master.stats.get("/", "GET").log(800, 23455)
 
@@ -95,10 +92,88 @@ class MyTestLocust(Locust):
             master.stats.clear_all()
 
             server.mocked_send(Message("stats", data, "fake_client"))
-            sleep(0)
             s = master.stats.get("/", "GET")
             self.assertEqual(700, s.median_response_time)
 
+    def test_master_total_stats(self):
+        import mock
+
+        class MyTestLocust(Locust):
+            pass
+
+        with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
+            master = MasterLocustRunner(MyTestLocust, self.options)
+            server.mocked_send(Message("client_ready", None, "fake_client"))
+            stats = RequestStats()
+            stats.log_request("GET", "/1", 100, 3546)
+            stats.log_request("GET", "/1", 800, 56743)
+            stats2 = RequestStats()
+            stats2.log_request("GET", "/2", 700, 2201)
+            server.mocked_send(Message("stats", {
+                "stats":stats.serialize_stats(),
+                "stats_total": stats.total.serialize(),
+                "errors":stats.serialize_errors(),
+                "user_count": 1,
+            }, "fake_client"))
+            server.mocked_send(Message("stats", {
+                "stats":stats2.serialize_stats(),
+                "stats_total": stats2.total.serialize(),
+                "errors":stats2.serialize_errors(),
+                "user_count": 2,
+            }, "fake_client"))
+            self.assertEqual(700, master.stats.total.median_response_time)
+
+    def test_master_current_response_times(self):
+        import mock
+
+        class MyTestLocust(Locust):
+            pass
+
+        start_time = 1
+        with mock.patch("time.time") as mocked_time:
+            mocked_time.return_value = start_time
+            global_stats.reset_all()
+            with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
+                master = MasterLocustRunner(MyTestLocust, self.options)
+                mocked_time.return_value += 1
+                server.mocked_send(Message("client_ready", None, "fake_client"))
+                stats = RequestStats()
+                stats.log_request("GET", "/1", 100, 3546)
+                stats.log_request("GET", "/1", 800, 56743)
+                server.mocked_send(Message("stats", {
+                    "stats":stats.serialize_stats(),
+                    "stats_total": stats.total.get_stripped_report(),
+                    "errors":stats.serialize_errors(),
+                    "user_count": 1,
+                }, "fake_client"))
+                mocked_time.return_value += 1
+                stats2 = RequestStats()
+                stats2.log_request("GET", "/2", 400, 2201)
+                server.mocked_send(Message("stats", {
+                    "stats":stats2.serialize_stats(),
+                    "stats_total": stats2.total.get_stripped_report(),
+                    "errors":stats2.serialize_errors(),
+                    "user_count": 2,
+                }, "fake_client"))
+                mocked_time.return_value += 4
+                self.assertEqual(400, master.stats.total.get_current_response_time_percentile(0.5))
+                self.assertEqual(800, master.stats.total.get_current_response_time_percentile(0.95))
+
+                # let 10 seconds pass, do some more requests, send it to the master and make
+                # sure the current response time percentiles only account for these new requests
+                mocked_time.return_value += 10
+                stats.log_request("GET", "/1", 20, 1)
+                stats.log_request("GET", "/1", 30, 1)
+                stats.log_request("GET", "/1", 3000, 1)
+                server.mocked_send(Message("stats", {
+                    "stats":stats.serialize_stats(),
+                    "stats_total": stats.total.get_stripped_report(),
+                    "errors":stats.serialize_errors(),
+                    "user_count": 2,
+                }, "fake_client"))
+                self.assertEqual(30, master.stats.total.get_current_response_time_percentile(0.5))
+                self.assertEqual(3000, master.stats.total.get_current_response_time_percentile(0.95))
+
     def test_spawn_zero_locusts(self):
         class MyTaskSet(TaskSet):
             @task
@@ -137,7 +212,6 @@ class MyTestLocust(Locust):
             master = MasterLocustRunner(MyTestLocust, self.options)
             for i in range(5):
                 server.mocked_send(Message("client_ready", None, "fake_client%i" % i))
-            sleep(0)
 
             master.start_hatching(7, 7)
             self.assertEqual(5, len(server.outbox))
@@ -158,7 +232,6 @@ class MyTestLocust(Locust):
             master = MasterLocustRunner(MyTestLocust, self.options)
             for i in range(5):
                 server.mocked_send(Message("client_ready", None, "fake_client%i" % i))
-            sleep(0)
 
             master.start_hatching(2, 2)
             self.assertEqual(5, len(server.outbox))
diff --git a/locust/test/test_stats.py b/locust/test/test_stats.py
index 25d2d4fa63..b7b8bc5735 100644
--- a/locust/test/test_stats.py
+++ b/locust/test/test_stats.py
@@ -4,7 +4,7 @@
 from locust.core import HttpLocust, TaskSet, task
 from locust.inspectlocust import get_task_ratio_dict
 from locust.rpc.protocol import Message
-from locust.stats import RequestStats, StatsEntry, global_stats
+from locust.stats import CachedResponseTimes, RequestStats, StatsEntry, diff_response_time_dicts, global_stats
 from six.moves import xrange
 
 from .testcases import WebserverTestCase
@@ -42,10 +42,10 @@ def test_total_rps(self):
         self.assertEqual(self.s.total_rps, 7)
 
     def test_current_rps(self):
-        self.stats.last_request_timestamp = int(time.time()) + 4
+        self.stats.total.last_request_timestamp = int(time.time()) + 4
         self.assertEqual(self.s.current_rps, 3.5)
 
-        self.stats.last_request_timestamp = int(time.time()) + 25
+        self.stats.total.last_request_timestamp = int(time.time()) + 25
         self.assertEqual(self.s.current_rps, 0)
 
     def test_num_reqs_fails(self):
@@ -91,8 +91,8 @@ def test_aggregation(self):
         s2.log(97, 0)
 
         s = StatsEntry(self.stats, "GET", "")
-        s.extend(s1, full_request_history=True)
-        s.extend(s2, full_request_history=True)
+        s.extend(s1)
+        s.extend(s2)
 
         self.assertEqual(s.num_requests, 10)
         self.assertEqual(s.num_failures, 3)
@@ -103,16 +103,15 @@ def test_error_grouping(self):
         # reset stats
         self.stats = RequestStats()
 
-        s = StatsEntry(self.stats, "/some-path", "GET")
-        s.log_error(Exception("Exception!"))
-        s.log_error(Exception("Exception!"))
+        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
+        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
 
         self.assertEqual(1, len(self.stats.errors))
         self.assertEqual(2, list(self.stats.errors.values())[0].occurences)
 
-        s.log_error(Exception("Another exception!"))
-        s.log_error(Exception("Another exception!"))
-        s.log_error(Exception("Third exception!"))
+        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
+        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
+        self.stats.log_error("GET", "/some-path", Exception("Third exception!"))
         self.assertEqual(3, len(self.stats.errors))
 
     def test_error_grouping_errors_with_memory_addresses(self):
@@ -121,10 +120,7 @@ def test_error_grouping_errors_with_memory_addresses(self):
         class Dummy(object):
             pass
 
-        s = StatsEntry(self.stats, "/", "GET")
-        s.log_error(Exception("Error caused by %r" % Dummy()))
-        s.log_error(Exception("Error caused by %r" % Dummy()))
-
+        self.stats.log_error("GET", "/", Exception("Error caused by %r" % Dummy()))
         self.assertEqual(1, len(self.stats.errors))
 
     def test_serialize_through_message(self):
@@ -143,6 +139,88 @@ def test_serialize_through_message(self):
         u1 = StatsEntry.unserialize(data)
         self.assertEqual(20, u1.median_response_time)
+
+
+class TestStatsEntryResponseTimesCache(unittest.TestCase):
+    def setUp(self, *args, **kwargs):
+        super(TestStatsEntryResponseTimesCache, self).setUp(*args, **kwargs)
+        self.stats = RequestStats()
+
+    def test_response_times_cached(self):
+        s = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
+        self.assertEqual(1, len(s.response_times_cache))
+        s.log(11, 1337)
+        self.assertEqual(1, len(s.response_times_cache))
+        s.last_request_timestamp -= 1
+        s.log(666, 1337)
+        self.assertEqual(2, len(s.response_times_cache))
+        self.assertEqual(CachedResponseTimes(
+            response_times={11:1},
+            num_requests=1,
+        ), s.response_times_cache[s.last_request_timestamp-1])
+
+    def test_response_times_not_cached_if_not_enabled(self):
+        s = StatsEntry(self.stats, "/", "GET")
+        s.log(11, 1337)
+        self.assertEqual(None, s.response_times_cache)
+        s.last_request_timestamp -= 1
+        s.log(666, 1337)
+        self.assertEqual(None, s.response_times_cache)
+
+    def test_latest_total_response_times_pruned(self):
+        """
+        Check that StatsEntry.response_times_cache is pruned when exceeding 20 entries
+        """
+        s = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
+        t = int(time.time())
+        for i in reversed(range(2, 30)):
+            s.response_times_cache[t-i] = CachedResponseTimes(response_times={}, num_requests=0)
+        self.assertEqual(29, len(s.response_times_cache))
+        s.log(17, 1337)
+        s.last_request_timestamp -= 1
+        s.log(1, 1)
+        self.assertEqual(20, len(s.response_times_cache))
+        self.assertEqual(
+            CachedResponseTimes(response_times={17:1}, num_requests=1),
+            s.response_times_cache.popitem(last=True)[1],
+        )
+
+    def test_get_current_response_time_percentile(self):
+        s = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
+        t = int(time.time())
+        s.response_times_cache[t-10] = CachedResponseTimes(
+            response_times={i:1 for i in xrange(100)},
+            num_requests=200
+        )
+        s.response_times_cache[t-10].response_times[1] = 201
+
+        s.response_times = {i:2 for i in xrange(100)}
+        s.response_times[1] = 202
+        s.num_requests = 300
+
+        self.assertEqual(95, s.get_current_response_time_percentile(0.95))
+
+    def test_diff_response_times_dicts(self):
+        self.assertEqual({1:5, 6:8}, diff_response_time_dicts(
+            {1:6, 6:16, 2:2},
+            {1:1, 6:8, 2:2},
+        ))
+        self.assertEqual({}, diff_response_time_dicts(
+            {},
+            {},
+        ))
+        self.assertEqual({10:15}, diff_response_time_dicts(
+            {10:15},
+            {},
+        ))
+        self.assertEqual({10:10}, diff_response_time_dicts(
+            {10:10},
+            {},
+        ))
+        self.assertEqual({}, diff_response_time_dicts(
+            {1:1},
+            {1:1},
+        ))
 
 
 class TestRequestStatsWithWebserver(WebserverTestCase):
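The diff helper's contract, pinned down by test_diff_response_times_dicts above, is that only buckets whose counts actually changed survive the diff:

    assert diff_response_time_dicts({1: 6, 6: 16, 2: 2}, {1: 1, 6: 8, 2: 2}) == {1: 5, 6: 8}
    assert diff_response_time_dicts({1: 1}, {1: 1}) == {}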
diff --git a/locust/test/test_web.py b/locust/test/test_web.py
index a070fc4e35..078ad1d909 100644
--- a/locust/test/test_web.py
+++ b/locust/test/test_web.py
@@ -8,7 +8,7 @@
 import requests
 from gevent import wsgi
 
-from locust import runners, stats, web
+from locust import events, runners, stats, web
 from locust.main import parse_options
 from locust.runners import LocustRunner
 from six.moves import StringIO
@@ -43,7 +43,7 @@ def test_stats_no_data(self):
         self.assertEqual(200, requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port).status_code)
 
     def test_stats(self):
-        stats.global_stats.get("/test", "GET").log(120, 5612)
+        stats.global_stats.log_request("GET", "/test", 120, 5612)
 
         response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port)
         self.assertEqual(200, response.status_code)
@@ -53,15 +53,19 @@ def test_stats(self):
         self.assertEqual("GET", data["stats"][0]["method"])
         self.assertEqual(120, data["stats"][0]["avg_response_time"])
 
+        self.assertEqual("Total", data["stats"][1]["name"])
+        self.assertEqual(1, data["stats"][1]["num_requests"])
+        self.assertEqual(120, data["stats"][1]["avg_response_time"])
+
     def test_stats_cache(self):
-        stats.global_stats.get("/test", "GET").log(120, 5612)
+        stats.global_stats.log_request("GET", "/test", 120, 5612)
         response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port)
         self.assertEqual(200, response.status_code)
         data = json.loads(response.text)
         self.assertEqual(2, len(data["stats"])) # one entry plus Total
 
         # add another entry
-        stats.global_stats.get("/test2", "GET").log(120, 5612)
+        stats.global_stats.log_request("GET", "/test2", 120, 5612)
         data = json.loads(requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port).text)
         self.assertEqual(2, len(data["stats"])) # old value should be cached now
 
@@ -71,17 +75,29 @@ def test_stats_cache(self):
         self.assertEqual(3, len(data["stats"])) # this should no longer be cached
 
     def test_request_stats_csv(self):
-        stats.global_stats.get("/test", "GET").log(120, 5612)
+        stats.global_stats.log_request("GET", "/test2", 120, 5612)
         response = requests.get("http://127.0.0.1:%i/stats/requests/csv" % self.web_port)
         self.assertEqual(200, response.status_code)
 
     def test_distribution_stats_csv(self):
-        stats.global_stats.get("/test", "GET").log(120, 5612)
+        for i in range(19):
+            stats.global_stats.log_request("GET", "/test2", 400, 5612)
+        stats.global_stats.log_request("GET", "/test2", 1200, 5612)
         response = requests.get("http://127.0.0.1:%i/stats/distribution/csv" % self.web_port)
         self.assertEqual(200, response.status_code)
+        rows = str(response.content.decode("utf-8")).split("\n")
+        # check that /test2 is present in stats
+        row = rows[len(rows)-2].split(",")
+        self.assertEqual('"GET /test2"', row[0])
+        # check total row
+        total_cols = rows[len(rows)-1].split(",")
+        self.assertEqual('"Total"', total_cols[0])
+        # verify that the 95%, 98%, 99% and 100% percentiles are 1200
+        for value in total_cols[-4:]:
+            self.assertEqual('1200', value)
 
     def test_request_stats_with_errors(self):
-        stats.global_stats.get("/", "GET").log_error(Exception("Error1337"))
+        stats.global_stats.log_error("GET", "/", Exception("Error1337"))
         response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port)
         self.assertEqual(200, response.status_code)
         self.assertIn("Error1337", str(response.content))
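On the wire, /stats/requests now exposes the two current percentiles that locust.js plots. A sketch of reading them (assuming the default web port, 8089):

    report = requests.get("http://127.0.0.1:8089/stats/requests").json()
    median = report["current_response_time_percentile_50"]
    p95 = report["current_response_time_percentile_95"]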
diff --git a/locust/web.py b/locust/web.py
index d39701638c..030b0b0cbe 100644
--- a/locust/web.py
+++ b/locust/web.py
@@ -79,7 +79,6 @@ def reset_stats():
 @app.route("/stats/requests/csv")
 def request_stats_csv():
     response = make_response(requests_csv())
-
     file_name = "requests_{0}.csv".format(time())
     disposition = "attachment;filename={0}".format(file_name)
     response.headers["Content-type"] = "text/csv"
@@ -88,7 +87,6 @@ def distribution_stats_csv():
 @app.route("/stats/distribution/csv")
 def distribution_stats_csv():
-
     response = make_response(distribution_csv())
     file_name = "distribution_{0}.csv".format(time())
     disposition = "attachment;filename={0}".format(file_name)
@@ -100,7 +98,8 @@ def distribution_stats_csv():
 @memoize(timeout=DEFAULT_CACHE_TIME, dynamic_timeout=True)
 def request_stats():
     stats = []
-    for s in chain(sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.aggregated_stats("Total")]):
+
+    for s in chain(sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.total]):
         stats.append({
             "method": s.method,
             "name": s.name,
@@ -122,18 +121,9 @@ def request_stats():
     if stats:
         report["total_rps"] = stats[len(stats)-1]["current_rps"]
-        report["fail_ratio"] = runners.locust_runner.stats.aggregated_stats("Total").fail_ratio
-
-        # since generating a total response times dict with all response times from all
-        # urls is slow, we make a new total response time dict which will consist of one
-        # entry per url with the median response time as key and the number of requests as
-        # value
-        response_times = defaultdict(int) # used for calculating total median
-        for i in xrange(len(stats)-1):
-            response_times[stats[i]["median_response_time"]] += stats[i]["num_requests"]
-
-        # calculate total median
-        stats[len(stats)-1]["median_response_time"] = median_from_dict(stats[len(stats)-1]["num_requests"], response_times)
+        report["fail_ratio"] = runners.locust_runner.stats.total.fail_ratio
+        report["current_response_time_percentile_95"] = runners.locust_runner.stats.total.get_current_response_time_percentile(0.95)
+        report["current_response_time_percentile_50"] = runners.locust_runner.stats.total.get_current_response_time_percentile(0.5)
 
     is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
     if is_distributed: