
Commit

Merge "All scenario runs time measurement"
Jenkins authored and openstack-gerrit committed Aug 8, 2014
2 parents c4269ec + 7a3ed21 commit 49e0bc9
Showing 6 changed files with 43 additions and 10 deletions.
10 changes: 6 additions & 4 deletions rally/benchmark/engine.py
@@ -197,7 +197,8 @@ def run(self):
target=self.consume_results,
args=(key, self.task, runner.result_queue, is_done))
consumer.start()
runner.run(name, kw.get("context", {}), kw.get("args", {}))
self.duration = runner.run(
name, kw.get("context", {}), kw.get("args", {}))
is_done.set()
consumer.join()
self.task.update_status(consts.TaskStatus.FINISHED)
@@ -216,8 +217,7 @@ def bind(self, endpoints):
clients.verified_keystone()
return self

@staticmethod
def consume_results(key, task, result_queue, is_done):
def consume_results(self, key, task, result_queue, is_done):
"""Consume scenario runner results from queue and send them to db.
Has to be run from different thread simultaneously with the runner.run
@@ -238,4 +238,6 @@ def consume_results(key, task, result_queue, is_done):
break
else:
time.sleep(0.1)
task.append_results(key, {"raw": results})

task.append_results(key, {"raw": results,
"scenario_duration": self.duration})
13 changes: 11 additions & 2 deletions rally/benchmark/runners/base.py
@@ -197,6 +197,13 @@ def _run_scenario(self, cls, method_name, context, args):
where each result is a dictionary
"""

def _wrap_run_scenario(self, cls, method_name, context, args):
"""Whole scenario time measurement without context preparation."""

with rutils.Timer() as timer:
self._run_scenario(cls, method_name, context, args)
return timer.duration()

def run(self, name, context, args):
cls_name, method_name = name.split(".", 1)
cls = scenario_base.Scenario.get_by_name(cls_name)
@@ -217,8 +224,10 @@ def run(self, name, context, args):
}

args = cls.preprocess(method_name, context_obj, args)
base_ctx.ContextManager.run(context_obj, self._run_scenario,
cls, method_name, args)

return base_ctx.ContextManager.run(context_obj,
self._wrap_run_scenario,
cls, method_name, args)

def _send_result(self, result):
"""Send partial result to consumer.
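The new _wrap_run_scenario helper times the scenario body with rutils.Timer and returns timer.duration(). A minimal sketch of such a context-manager timer follows; the internals are an assumption made for illustration, only the duration() usage comes from the diff itself.

import time


class Timer(object):
    """Context manager that measures the wall-clock duration of its block."""

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.finish = time.time()

    def duration(self):
        return self.finish - self.start


# Usage mirrors _wrap_run_scenario: run the work, then report elapsed seconds.
with Timer() as timer:
    time.sleep(0.2)  # stand-in for self._run_scenario(cls, method_name, context, args)
print(timer.duration())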
9 changes: 8 additions & 1 deletion rally/cmd/commands/task.py
@@ -198,6 +198,7 @@ def _get_atomic_action_durations(raw):
print("args values:")
pprint.pprint(key["kw"])

scenario_time = result["data"]["scenario_duration"]
raw = result["data"]["raw"]
table_cols = ["action", "min (sec)", "avg (sec)", "max (sec)",
"90 percentile", "95 percentile", "success",
@@ -235,6 +236,9 @@ def _get_atomic_action_durations(raw):
if iterations_data:
_print_iterations_data(raw)

print(_("Whole scenario time without context preparation: "),
scenario_time)

# NOTE(hughsaunders): ssrs=scenario specific results
ssrs = []
for result in raw:
@@ -330,7 +334,10 @@ def list(self, task_list=None):
help='Open it in browser.')
@envutils.with_default_task_id
def plot2html(self, task_id=None, out=None, open_it=False):
results = map(lambda x: {"key": x["key"], 'result': x['data']['raw']},
results = map(lambda x: {
"key": x["key"],
'result': x['data']['raw']
},
db.task_result_get_all_by_uuid(task_id))

output_file = out or ("%s.html" % task_id)
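The plot2html change above only reshapes the stored task rows before rendering. The small sketch below reproduces that mapping with hypothetical fake_rows standing in for db.task_result_get_all_by_uuid(); a list comprehension is used here purely for readability.

# Hypothetical rows mimicking what db.task_result_get_all_by_uuid() returns.
fake_rows = [
    {"key": {"name": "Dummy.dummy", "pos": 0, "kw": {}},
     "data": {"raw": [{"duration": 1.0}], "scenario_duration": 1.0}},
]

# Same shape the command builds: keep the key, expose only the raw results.
results = [{"key": row["key"], "result": row["data"]["raw"]} for row in fake_rows]
print(results)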
2 changes: 1 addition & 1 deletion tests/benchmark/runners/test_base.py
@@ -246,7 +246,7 @@ def test_run(self, mock_ctx_manager, mock_osclients):
}
}

expected = [context_obj, runner._run_scenario, cls,
expected = [context_obj, runner._wrap_run_scenario, cls,
method_name, config_kwargs]
mock_ctx_manager.run.assert_called_once_with(*expected)

14 changes: 13 additions & 1 deletion tests/cmd/commands/test_task.py
@@ -102,7 +102,19 @@ def test_detailed(self, mock_db):
"id": "task",
"uuid": test_uuid,
"status": "status",
"results": [],
"results": [
{
"key": {
"name": "fake_name",
"pos": "fake_pos",
"kw": "fake_kw"
},
"data": {
"scenario_duration": 1.0,
"raw": []
}
}
],
"failed": False
}
mock_db.task_get_detailed = mock.MagicMock(return_value=value)
5 changes: 4 additions & 1 deletion tests/orchestrator/test_api.py
@@ -128,6 +128,8 @@ def test_start_task(self, mock_task_create, mock_task_update,
mock_utils_runner.return_value = mock_runner = mock.Mock()
mock_runner.result_queue = collections.deque(['fake_result'])

mock_runner.run.return_value = 42

mock_osclients.Clients.return_value = fakes.FakeClients()

api.start_task(self.deploy_uuid, self.task_config)
@@ -164,7 +166,8 @@ def test_start_task(self, mock_task_create, mock_task_update,
'pos': 0,
},
{
'raw': ['fake_result']
'raw': ['fake_result'],
'scenario_duration': 42
}
)

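The orchestrator test above stubs runner.run to return 42 and expects that value under "scenario_duration" when the results are appended. The stripped-down sketch below shows the same interplay; FakeEngine is a hypothetical stand-in for the engine's run/consume flow, not the real class.

try:
    from unittest import mock  # Python 3
except ImportError:
    import mock  # external mock library used by Rally's tests on Python 2


class FakeEngine(object):
    """Hypothetical stand-in for the engine's run/consume flow."""

    def __init__(self, runner, task):
        self.runner = runner
        self.task = task

    def run(self, key):
        self.duration = self.runner.run()      # engine keeps the measured time
        raw = list(self.runner.result_queue)   # consumer thread would drain this
        self.task.append_results(key, {"raw": raw,
                                       "scenario_duration": self.duration})


runner = mock.Mock()
runner.run.return_value = 42
runner.result_queue = ["fake_result"]
task = mock.Mock()

FakeEngine(runner, task).run({"name": "Dummy.dummy", "pos": 0})
task.append_results.assert_called_once_with(
    {"name": "Dummy.dummy", "pos": 0},
    {"raw": ["fake_result"], "scenario_duration": 42})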
